Columns: text (string, lengths 78 to 104k) and score (float64, range 0 to 0.18)
def split_pem(pem_bytes):
    """
    Split a given PEM file with multiple certificates

    :param pem_bytes: The pem data in bytes with multiple certs
    :return: yields each certificate contained in the pem file
    """
    started, pem_data = False, b''
    for line in pem_bytes.splitlines(False):
        if line == b'' and not started:
            continue
        if line[0:5] in (b'-----', b'---- '):
            if not started:
                started = True
            else:
                pem_data = pem_data + line + b'\r\n'
                yield pem_data
                started = False
                pem_data = b''
        if started:
            pem_data = pem_data + line + b'\r\n'
0.001399
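A minimal usage sketch for split_pem above; the two-block bundle is illustrative, not a real certificate, and assumes the function is in scope:

bundle = (b"-----BEGIN CERTIFICATE-----\n"
          b"AAAA\n"
          b"-----END CERTIFICATE-----\n"
          b"-----BEGIN CERTIFICATE-----\n"
          b"BBBB\n"
          b"-----END CERTIFICATE-----\n")

for cert in split_pem(bundle):
    print(cert.decode())  # each chunk is one complete PEM block, CRLF-terminated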
def cross(self, vec):
    """Cross product with another Vector3Array"""
    if not isinstance(vec, Vector3Array):
        raise TypeError('Cross product operand must be a Vector3Array')
    if self.nV != 1 and vec.nV != 1 and self.nV != vec.nV:
        raise ValueError('Cross product operands must have the same '
                         'number of elements.')
    return Vector3Array(np.cross(self, vec))
0.004598
def cli(ctx, packages, all, list, platform):
    """Uninstall packages."""
    if packages:
        _uninstall(packages, platform)
    elif all:  # pragma: no cover
        packages = Resources(platform).packages
        _uninstall(packages, platform)
    elif list:
        Resources(platform).list_packages(installed=True, notinstalled=False)
    else:
        click.secho(ctx.get_help())
0.002558
def save_user_config(username, conf_dict, path=settings.LOGIN_FILE):
    """
    Save user's configuration to otherwise unused field ``full_name`` in
    passwd file.
    """
    users = load_users(path=path)
    users[username]["full_name"] = _encode_config(conf_dict)
    save_users(users, path=path)
0.003311
def read_folder(folder, ext='*', uppercase=False, replace_dot='.', parent=''):
    """
    This will read all of the files in the folder with the extension equal
    to ext

    :param folder: str of the folder name
    :param ext: str of the extension
    :param uppercase: bool if True will uppercase all the file names
    :param replace_dot: str will replace "." in the filename
    :param parent: str of the parent folder
    :return: dict of basename with the value of the text in the file
    """
    ret = {}
    if os.path.exists(folder):
        for file in os.listdir(folder):
            if os.path.isdir(os.path.join(folder, file)):
                child = read_folder(os.path.join(folder, file),
                                    ext, uppercase, replace_dot,
                                    parent=parent + file + '/')
                ret.update(child)
            else:
                if ext == '*' or file.endswith(ext):
                    key = file.replace('.', replace_dot)
                    key = uppercase and key.upper() or key
                    ret[parent + key] = read_file(os.path.join(folder, file))
    return ret
0.000863
def _extract_from_subworkflow(vs, step):
    """Remove internal variable names when moving from sub-workflow to main.
    """
    substep_ids = set([x.name for x in step.workflow])
    out = []
    for var in vs:
        internal = False
        parts = var["id"].split("/")
        if len(parts) > 1:
            if parts[0] in substep_ids:
                internal = True
        if not internal:
            var.pop("source", None)
            out.append(var)
    return out
0.002096
def removeItems(self, items):
    """
    Removes all the inputted items from the scene at once.  The
    list of items will be stored in an internal cache.  When
    updating a node or connection's prepareToRemove method,
    any additional items that need to be removed as a result
    of that object being removed, should use the
    scene.forceRemove method which will keep track of all the
    items queued up to remove, so it won't be removed twice.

    :sa forceRemove

    :param items <list> [ <QGraphicsItem>, .. ]

    :return <int> number removed
    """
    count = 0
    self._removalQueue = items
    blocked = self.signalsBlocked()
    self.blockSignals(True)
    update = set()
    for item in items:
        if isinstance(item, XNodeConnection):
            update.add(item.inputNode())
            update.add(item.outputNode())
        if self.removeItem(item):
            count += 1
    self.blockSignals(blocked)
    self._removalQueue = []

    # update any existing nodes once the connections have been removed
    for node in update.difference(items):
        node.setDirty(True)

    if not self.signalsBlocked():
        self.itemsRemoved.emit()

    return count
0.007047
def _parse_pubkey(stream, packet_type='pubkey'):
    """See https://tools.ietf.org/html/rfc4880#section-5.5 for details."""
    p = {'type': packet_type}
    packet = io.BytesIO()
    with stream.capture(packet):
        p['version'] = stream.readfmt('B')
        p['created'] = stream.readfmt('>L')
        p['algo'] = stream.readfmt('B')
        if p['algo'] in ECDSA_ALGO_IDS:
            log.debug('parsing elliptic curve key')
            # https://tools.ietf.org/html/rfc6637#section-11
            oid_size = stream.readfmt('B')
            oid = stream.read(oid_size)
            assert oid in SUPPORTED_CURVES, util.hexlify(oid)
            p['curve_oid'] = oid
            mpi = parse_mpi(stream)
            log.debug('mpi: %x (%d bits)', mpi, mpi.bit_length())
            leftover = stream.read()
            if leftover:
                leftover = io.BytesIO(leftover)
                # https://tools.ietf.org/html/rfc6637#section-8
                # should be b'\x03\x01\x08\x07': SHA256 + AES128
                size, = util.readfmt(leftover, 'B')
                p['kdf'] = leftover.read(size)
                p['secret'] = leftover.read()
            parse_func, keygrip_func = SUPPORTED_CURVES[oid]
            keygrip = keygrip_func(parse_func(mpi))
            log.debug('keygrip: %s', util.hexlify(keygrip))
            p['keygrip'] = keygrip
        elif p['algo'] == DSA_ALGO_ID:
            parse_mpis(stream, n=4)  # DSA keys are not supported
        elif p['algo'] == ELGAMAL_ALGO_ID:
            parse_mpis(stream, n=3)  # ElGamal keys are not supported
        else:  # assume RSA
            parse_mpis(stream, n=2)  # RSA keys are not supported
        assert not stream.read()

    # https://tools.ietf.org/html/rfc4880#section-12.2
    packet_data = packet.getvalue()
    data_to_hash = (b'\x99' + struct.pack('>H', len(packet_data)) +
                    packet_data)
    p['key_id'] = hashlib.sha1(data_to_hash).digest()[-8:]
    p['_to_hash'] = data_to_hash
    log.debug('key ID: %s', util.hexlify(p['key_id']))
    return p
0.000484
def flattened(self, pred=flattened_pred_default):
    """Flattens nodes by hoisting children up to ancestor nodes.

    A node is hoisted if pred(node) returns True.
    """
    if self.is_value:
        return self
    new_children = []
    for child in self.children:
        if child.is_empty:
            continue
        new_child = child.flattened(pred)
        if pred(new_child, self):
            new_children.extend(new_child.children)
        else:
            new_children.append(new_child)
    return ParseNode(self.node_type, children=new_children,
                     consumed=self.consumed, position=self.position,
                     ignored=self.ignored)
0.008584
def functions(self):
    """
    Returns all documented module level functions in the module
    sorted alphabetically as a list of `pydoc.Function`.
    """
    p = lambda o: isinstance(o, Function) and self._docfilter(o)
    return sorted(filter(p, self.doc.values()))
0.010204
def proxy_alias(alias_name, node_type):
    """Get a Proxy from the given name to the given node type."""
    proxy = type(
        alias_name,
        (lazy_object_proxy.Proxy,),
        {
            "__class__": object.__dict__["__class__"],
            "__instancecheck__": _instancecheck,
        },
    )
    return proxy(lambda: node_type)
0.00289
def make_regex(separator):
    """Utility function to create regexp for matching escaped separators
    in strings.
    """
    return re.compile(r'(?:' + re.escape(separator) + r')?((?:[^' +
                      re.escape(separator) + r'\\]|\\.)+)')
0.003968
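A usage sketch for make_regex above, assuming the function is in scope: the capture group keeps backslash-escaped separators inside a field instead of splitting on them.

splitter = make_regex(':')
print(splitter.findall(r'a:b\:c:d'))  # ['a', 'b\\:c', 'd'] - the escaped ':' stays inside the middle field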
def _process_state(self):
    """Process the application state configuration.

    Google Alerts manages the account information and alert data through
    some custom state configuration. Not all values have been completely
    enumerated.
    """
    self._log.debug("Capturing state from the request")
    response = self._session.get(url=self.ALERTS_URL, headers=self.HEADERS)
    soup = BeautifulSoup(response.content, "html.parser")
    for i in soup.findAll('script'):
        if i.text.find('window.STATE') == -1:
            continue
        state = json.loads(i.text[15:-1])
        if state != "":
            self._state = state
    self._log.debug("State value set: %s" % self._state)
    return self._state
0.002525
def _getScalesRand(self):
    """
    Internal function for parameter initialization
    Return a vector of random scales
    """
    if self.P > 1:
        scales = []
        for term_i in range(self.n_randEffs):
            _scales = sp.randn(self.diag[term_i].shape[0])
            if self.jitter[term_i] > 0:
                _scales = sp.concatenate(
                    (_scales, sp.array([sp.sqrt(self.jitter[term_i])])))
            scales.append(_scales)
        scales = sp.concatenate(scales)
    else:
        scales = sp.randn(self.vd.getNumberScales())
    return scales
0.011364
def close(self):
    """
    Closes this VPCS VM.
    """
    if not (yield from super().close()):
        return False

    nio = self._ethernet_adapter.get_nio(0)
    if isinstance(nio, NIOUDP):
        self.manager.port_manager.release_udp_port(nio.lport, self._project)

    if self._local_udp_tunnel:
        self.manager.port_manager.release_udp_port(self._local_udp_tunnel[0].lport, self._project)
        self.manager.port_manager.release_udp_port(self._local_udp_tunnel[1].lport, self._project)
        self._local_udp_tunnel = None

    yield from self._stop_ubridge()

    if self.is_running():
        self._terminate_process()

    return True
0.006935
def init_read_line(self):
    """init_read_line() initializes fields relevant to input matching"""
    format_list = self._format_list
    self._re_cvt = self.match_input_fmt(format_list)
    regexp0_str = "".join([subs[0] for subs in self._re_cvt])
    self._regexp_str = regexp0_str
    self._re = re.compile(regexp0_str)
    self._match_exps = [subs[1] for subs in self._re_cvt
                        if subs[1] is not None]
    self._divisors = [subs[2] for subs in self._re_cvt
                      if subs[2] is not None]
    self._in_cvt_fns = [subs[3] for subs in self._re_cvt
                        if subs[3] is not None]
    self._read_line_init = True
0.004412
def _writer(func):
    """
    Decorator for a custom writer, but a default reader
    """
    name = func.__name__
    return property(fget=lambda self: getattr(self, '_%s' % name),
                    fset=func)
0.005155
def create_bucket(self, bucket_name, headers=None,
                  location=Location.DEFAULT, policy=None):
    """
    Creates a new located bucket. By default it's in the USA. You can pass
    Location.EU to create a European bucket.

    :type bucket_name: string
    :param bucket_name: The name of the new bucket

    :type headers: dict
    :param headers: Additional headers to pass along with the request to AWS.

    :type location: :class:`boto.s3.connection.Location`
    :param location: The location of the new bucket

    :type policy: :class:`boto.s3.acl.CannedACLStrings`
    :param policy: A canned ACL policy that will be applied to the
        new key in S3.
    """
    check_lowercase_bucketname(bucket_name)

    if policy:
        if headers:
            headers[self.provider.acl_header] = policy
        else:
            headers = {self.provider.acl_header: policy}
    if location == Location.DEFAULT:
        data = ''
    else:
        data = '<CreateBucketConstraint><LocationConstraint>' + \
               location + '</LocationConstraint></CreateBucketConstraint>'
    response = self.make_request('PUT', bucket_name, headers=headers,
                                 data=data)
    body = response.read()
    if response.status == 409:
        raise self.provider.storage_create_error(
            response.status, response.reason, body)
    if response.status == 200:
        return self.bucket_class(self, bucket_name)
    else:
        raise self.provider.storage_response_error(
            response.status, response.reason, body)
0.005214
def saveXml(self, xml):
    """
    Saves this view's content to XML.

    :param xml | <str>
    """
    xscript = ElementTree.SubElement(xml, 'script')
    xscript.text = escape(self._edit.toPlainText())
0.012048
def _watchdog_switch(self):
    """Thread that switches between pages"""
    # init
    self.current_controller = self.view_control_map['main']
    self.current_controller.run(self.switch_queue)

    while not self.quit_quit:
        key = self.switch_queue.get()
        if key == 'quit_quit':
            self.quit_quit = True
        else:
            self.current_controller = self.view_control_map[key]
            self.current_controller.run(self.switch_queue)

    # save state before quitting
    self.quit()
    os._exit(0)
0.003584
def _credentials_from_request(request):
    """Gets the authorized credentials for this flow, if they exist."""
    # ORM storage requires a logged in user
    if (oauth2_settings.storage_model is None or
            request.user.is_authenticated()):
        return get_storage(request).get()
    else:
        return None
0.003106
def get(self, status_code):
    """
    Returns the requested status.

    :param int status_code: the status code to return
    :queryparam str reason: optional reason phrase
    """
    status_code = int(status_code)
    if status_code >= 400:
        kwargs = {'status_code': status_code}
        if self.get_query_argument('reason', None):
            kwargs['reason'] = self.get_query_argument('reason')
        if self.get_query_argument('log_message', None):
            kwargs['log_message'] = self.get_query_argument('log_message')
        self.send_error(**kwargs)
    else:
        self.set_status(status_code)
0.002933
def _load(self, load_dict):
    """Loads the data and exploration range from the `load_dict`.

    The `load_dict` needs to be in the same format as the result of the
    :func:`~pypet.parameter.Parameter._store` method.
    """
    if self.v_locked:
        raise pex.ParameterLockedException('Parameter `%s` is locked!'
                                           % self.v_full_name)
    if 'data' in load_dict:
        self._data = load_dict['data']['data'][0]
        self._default = self._data
    else:
        self._logger.warning('Your parameter `%s` is empty, '
                             'I did not find any data on disk.'
                             % self.v_full_name)
    if 'explored_data' in load_dict:
        self._explored_range = [x for x in
                                load_dict['explored_data']['data'].tolist()]
        self._explored = True
    self._locked = True
0.005834
def cancel(self, event):
    """Remove an event from the queue.

    This must be presented the ID as returned by enter().
    If the event is not in the queue, this raises ValueError.
    """
    with self._lock:
        self._queue.remove(event)
        heapq.heapify(self._queue)
0.006431
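A standalone sketch of the remove-then-heapify pattern used by cancel() above; the names here are illustrative stand-ins, not the class's real attributes:

import heapq
import threading

queue = [(3, 'c'), (1, 'a'), (2, 'b')]
heapq.heapify(queue)
lock = threading.Lock()

def cancel(event):
    with lock:
        queue.remove(event)   # O(n) removal breaks the heap invariant...
        heapq.heapify(queue)  # ...so restore it before the next pop

cancel((2, 'b'))
print(heapq.heappop(queue))  # (1, 'a')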
def from_outcars(cls, outcars, structures, **kwargs):
    """
    Initializes an NEBAnalysis from Outcar and Structure objects. Use
    the static constructors, e.g., :class:`from_dir` instead if you
    prefer to have these automatically generated from a directory of NEB
    calculations.

    Args:
        outcars ([Outcar]): List of Outcar objects. Note that these have
            to be ordered from start to end along reaction coordinates.
        structures ([Structure]): List of Structures along reaction
            coordinate. Must be same length as outcar.
        interpolation_order (int): Order of polynomial to use to
            interpolate between images. Same format as order parameter in
            scipy.interpolate.PiecewisePolynomial.
    """
    if len(outcars) != len(structures):
        raise ValueError("# of Outcars must be same as # of Structures")

    # Calculate cumulative root mean square distance between structures,
    # which serves as the reaction coordinate. Note that these are
    # calculated from the final relaxed structures as the coordinates may
    # have changed from the initial interpolation.
    r = [0]
    prev = structures[0]
    for st in structures[1:]:
        dists = np.array([s2.distance(s1) for s1, s2 in zip(prev, st)])
        r.append(np.sqrt(np.sum(dists ** 2)))
        prev = st
    r = np.cumsum(r)

    energies = []
    forces = []
    for i, o in enumerate(outcars):
        o.read_neb()
        energies.append(o.data["energy"])
        if i in [0, len(outcars) - 1]:
            forces.append(0)
        else:
            forces.append(o.data["tangent_force"])
    forces = np.array(forces)
    r = np.array(r)
    return cls(r=r, energies=energies, forces=forces,
               structures=structures, **kwargs)
0.001538
def fix_config(self, options):
    """
    Fixes the options, if necessary. I.e., it adds all required elements
    to the dictionary.

    :param options: the options to fix
    :type options: dict
    :return: the (potentially) fixed options
    :rtype: dict
    """
    options = super(ActorHandler, self).fix_config(options)

    opt = "actors"
    if opt not in options:
        options[opt] = self.default_actors()
    if opt not in self.help:
        self.help[opt] = "The list of sub-actors that this actor manages."

    return options
0.005034
def UpdateClientsFromFleetspeak(clients):
    """Updates ApiClient records to include info from Fleetspeak."""
    if not fleetspeak_connector.CONN or not fleetspeak_connector.CONN.outgoing:
        # FS not configured, or an outgoing connection is otherwise
        # unavailable.
        return
    id_map = {}
    for client in clients:
        if client.fleetspeak_enabled:
            id_map[fleetspeak_utils.GRRIDToFleetspeakID(client.client_id)] = client
    if not id_map:
        return
    res = fleetspeak_connector.CONN.outgoing.ListClients(
        admin_pb2.ListClientsRequest(client_ids=list(iterkeys(id_map))))
    for read in res.clients:
        api_client = id_map[read.client_id]
        api_client.last_seen_at = fleetspeak_utils.TSToRDFDatetime(
            read.last_contact_time)
        api_client.last_clock = fleetspeak_utils.TSToRDFDatetime(read.last_clock)
0.010962
def _apply_search_backrefs(pattern, flags=0):
    """Apply the search backrefs to the search pattern."""
    if isinstance(pattern, (str, bytes)):
        re_verbose = bool(VERBOSE & flags)
        re_unicode = None
        if bool((ASCII | LOCALE) & flags):
            re_unicode = False
        elif bool(UNICODE & flags):
            re_unicode = True
        if not (flags & DEBUG):
            pattern = _cached_search_compile(
                pattern, re_verbose, re_unicode, type(pattern))
        else:  # pragma: no cover
            pattern = _bre_parse._SearchParser(
                pattern, re_verbose, re_unicode).parse()
    elif isinstance(pattern, Bre):
        if flags:
            raise ValueError("Cannot process flags argument with a compiled pattern")
        pattern = pattern._pattern
    elif isinstance(pattern, (_RE_TYPE, Bre)):
        if flags:
            raise ValueError("Cannot process flags argument with a compiled pattern!")
    else:
        raise TypeError("Not a string or compiled pattern!")
    return pattern
0.004907
def inasafe_analysis_summary_field_value(field, feature, parent):
    """Retrieve a value from a field in the analysis summary layer.

    e.g. inasafe_analysis_summary_field_value('total_not_exposed') -> 3
    """
    _ = feature, parent  # NOQA
    project_context_scope = QgsExpressionContextUtils.projectScope(
        QgsProject.instance())
    registry = QgsProject.instance()

    key = provenance_layer_analysis_impacted_id['provenance_key']
    if not project_context_scope.hasVariable(key):
        return None

    layer = registry.mapLayer(project_context_scope.variable(key))
    if not layer:
        return None

    index = layer.fields().lookupField(field)
    if index < 0:
        return None

    feature = next(layer.getFeatures())
    return feature[index]
0.001284
def OnToggle(self, event):
    """Toggle button event handler"""
    if self.selection_toggle_button.GetValue():
        self.entry_line.last_selection = self.entry_line.GetSelection()
        self.entry_line.last_selection_string = \
            self.entry_line.GetStringSelection()
        self.entry_line.last_table = self.main_window.grid.current_table
        self.entry_line.Disable()
        post_command_event(self, self.EnterSelectionModeMsg)
    else:
        self.entry_line.Enable()
        post_command_event(self, self.GridActionTableSwitchMsg,
                           newtable=self.entry_line.last_table)
        post_command_event(self, self.ExitSelectionModeMsg)
0.002717
def _extract_rows(self, rows):
    """
    Extract an array of rows from an input scalar or sequence
    """
    if rows is not None:
        rows = numpy.array(rows, ndmin=1, copy=False, dtype='i8')
        # returns unique, sorted
        rows = numpy.unique(rows)

        maxrow = self._info['nrows'] - 1
        if rows[0] < 0 or rows[-1] > maxrow:
            raise ValueError("rows must be in [%d,%d]" % (0, maxrow))
    return rows
0.004158
def _notebook_dir_changed(self, name, old, new):
    """do a bit of validation of the notebook dir"""
    if os.path.exists(new) and not os.path.isdir(new):
        raise TraitError("notebook dir %r is not a directory" % new)
    if not os.path.exists(new):
        self.log.info("Creating notebook dir %s", new)
        try:
            os.mkdir(new)
        except:
            raise TraitError("Couldn't create notebook dir %r" % new)
0.006342
def set_dataset(self):
    '''
    Retrieves the designated dataset, creates NCSS object, and
    creates a NCSS query object.
    '''
    keys = list(self.model.datasets.keys())
    labels = [item.split()[0].lower() for item in keys]
    if self.set_type == 'best':
        self.dataset = self.model.datasets[keys[labels.index('best')]]
    elif self.set_type == 'latest':
        self.dataset = self.model.datasets[keys[labels.index('latest')]]
    elif self.set_type == 'full':
        self.dataset = self.model.datasets[keys[labels.index('full')]]

    self.access_url = self.dataset.access_urls[self.access_url_key]
    self.ncss = NCSS(self.access_url)
    self.query = self.ncss.query()
0.002653
def add_namespace(self, module):
    """Add item uri:prefix for `module` to `self.namespaces`.

    The prefix to be actually used for `uri` is returned. If the
    namespace is already present, the old prefix is used. Prefix
    clashes are resolved by disambiguating `prefix`.
    """
    uri = module.search_one("namespace").arg
    prefix = module.search_one("prefix").arg
    if uri in self.namespaces:
        return self.namespaces[uri]
    end = 1
    new = prefix
    while new in list(self.namespaces.values()):
        new = "%s%x" % (prefix, end)
        end += 1
    self.namespaces[uri] = new
    self.module_prefixes[module.arg] = new
    for inc in module.search("include"):
        self.module_prefixes[inc.arg] = new
    return new
0.004914
def bind(nodemask):
    """
    Binds the current thread and its children to the nodes specified in
    nodemask.  They will only run on the CPUs of the specified nodes and
    only be able to allocate memory from them.

    @param nodemask: node mask
    @type nodemask: C{set}
    """
    mask = set_to_numa_nodemask(nodemask)
    bitmask = libnuma.numa_allocate_nodemask()
    libnuma.copy_nodemask_to_bitmask(byref(mask), bitmask)
    libnuma.numa_bind(bitmask)
    libnuma.numa_bitmask_free(bitmask)
0.006012
def nvrtcDestroyProgram(self, prog):
    """
    Destroys the given NVRTC program object.
    """
    code = self._lib.nvrtcDestroyProgram(byref(prog))
    self._throw_on_error(code)
    return
0.009217
def get_stats_daily(start=None, end=None, last=None, **kwargs):
    """
    MOVED to iexfinance.iexdata.get_stats_daily
    """
    import warnings
    warnings.warn(WNG_MSG % ("get_stats_daily", "iexdata.get_stats_daily"))
    start, end = _sanitize_dates(start, end)
    return DailySummaryReader(start=start, end=end, last=last,
                              **kwargs).fetch()
0.002584
def _parse_sentencetree(self, tree, parent_node_id=None, ignore_traces=True):
    """parse a sentence Tree into this document graph"""
    def get_nodelabel(node):
        if isinstance(node, nltk.tree.Tree):
            return node.label()
        elif isinstance(node, unicode):
            return node.encode('utf-8')
        else:
            raise ValueError("Unexpected node type: {0}, {1}".format(
                type(node), node))

    root_node_id = self._node_id
    self.node[root_node_id]['label'] = get_nodelabel(tree)

    for subtree in tree:
        self._node_id += 1
        node_label = get_nodelabel(subtree)
        # unescape the node label, if necessary
        node_label = PTB_BRACKET_UNESCAPE.get(node_label, node_label)
        # TODO: refactor this, so we don't need to query this all the time
        if ignore_traces and node_label == '-NONE-':
            # ignore tokens annotated for traces
            continue

        if isinstance(subtree, nltk.tree.Tree):
            if len(subtree) > 1:
                # subtree is a syntactic category
                node_attrs = {'label': node_label,
                              self.ns+':cat': node_label}
                layers = {self.ns, self.ns+':syntax'}
            else:
                # subtree represents a token and its POS tag
                node_attrs = {'label': node_label}
                layers = {self.ns}

            edge_type = dg.EdgeTypes.dominance_relation
            self.add_node(self._node_id, layers=layers,
                          attr_dict=node_attrs)
            self.add_edge(root_node_id, self._node_id,
                          edge_type=edge_type)
        else:  # isinstance(subtree, unicode); subtree is a token
            # we'll have to modify the parent node of a token, since
            # in NLTK Trees, even a leaf node (with its POS tag) is
            # represented as a Tree (an iterator over a single unicode
            # string), e.g. ``Tree('NNS', ['prices'])``
            pos_tag = self.node[parent_node_id]['label']
            token_attrs = {
                'label': node_label, self.ns+':token': node_label,
                self.ns+':pos': pos_tag}
            self.node[parent_node_id].update(token_attrs)
            self.tokens.append(parent_node_id)

        if isinstance(subtree, nltk.tree.Tree):
            self._parse_sentencetree(subtree, parent_node_id=self._node_id)
0.002793
def firmware_download_input_protocol_type_ftp_protocol_ftp_directory(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    firmware_download = ET.Element("firmware_download")
    config = firmware_download
    input = ET.SubElement(firmware_download, "input")
    protocol_type = ET.SubElement(input, "protocol-type")
    ftp_protocol = ET.SubElement(protocol_type, "ftp-protocol")
    ftp = ET.SubElement(ftp_protocol, "ftp")
    directory = ET.SubElement(ftp, "directory")
    directory.text = kwargs.pop('directory')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
0.004348
def server_poweron(host=None, admin_username=None,
                   admin_password=None, module=None):
    '''
    Powers up the managed server.

    host
        The chassis host.
    admin_username
        The username used to access the chassis.
    admin_password
        The password used to access the chassis.
    module
        The element to power on located on the chassis such as a blade.
        If not provided, the chassis will be powered on.

    CLI Example:

    .. code-block:: bash

        salt dell dracr.server_poweron
        salt dell dracr.server_poweron module=server-1
    '''
    return __execute_cmd('serveraction powerup',
                         host=host, admin_username=admin_username,
                         admin_password=admin_password, module=module)
0.0012
def extract_user(user_value):
    """
    Extract the user for running a container from the following possible input
    formats:

    * Integer (UID)
    * User name string
    * Tuple of ``user, group``
    * String in the format ``user:group``

    :param user_value: User name, uid, user-group tuple, or user:group string.
    :type user_value: int | tuple | unicode | str
    :return: User name or id.
    :rtype: unicode | str
    """
    user = resolve_value(user_value)
    if not user and user != 0 and user != '0':
        return None
    if isinstance(user, tuple):
        return user[0]
    if isinstance(user, six.integer_types):
        return six.text_type(user)
    return user.partition(':')[0]
0.002825
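A pure-Python stand-in showing the expected behaviour of extract_user above for each accepted format; it drops the resolve_value/six indirection and assumes resolve_value passes plain values through unchanged:

def extract_user_demo(user_value):
    # same logic as extract_user, without the library helpers
    if not user_value and user_value != 0 and user_value != '0':
        return None
    if isinstance(user_value, tuple):
        return user_value[0]
    if isinstance(user_value, int):
        return str(user_value)
    return user_value.partition(':')[0]

for v in (1000, 'web', ('web', 'www-data'), 'web:www-data', None):
    print(repr(v), '->', repr(extract_user_demo(v)))
# 1000 -> '1000'; 'web' -> 'web'; ('web', 'www-data') -> 'web';
# 'web:www-data' -> 'web'; None -> None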
def export(self):
    """Returns requirements XML."""
    top = self._top_element()
    properties = self._properties_element(top)
    self._fill_requirements(top)
    self._fill_lookup_prop(properties)
    return utils.prettify_xml(top)
0.007663
def groups_filter_from_query(query, field_map={}):
    """Creates an F object for the groups of a search query."""
    f = None

    # filter groups
    for group in query.get("groups", []):
        group_f = MatchAll()
        for condition in group.get("conditions", []):
            field_name = condition["field"]
            field_name = field_map.get(field_name, field_name)
            operation = condition["type"]
            values = condition["values"]
            if values:
                values = [v["value"] for v in values]
                if operation == "all":
                    # NOTE: is there a better way to express this?
                    for value in values:
                        if "." in field_name:
                            path = field_name.split(".")[0]
                            group_f &= Nested(path=path, filter=Term(**{field_name: value}))
                        else:
                            group_f &= Term(**{field_name: value})
                elif operation == "any":
                    if "." in field_name:
                        path = field_name.split(".")[0]
                        group_f &= Nested(path=path, filter=Terms(**{field_name: values}))
                    else:
                        group_f &= Terms(**{field_name: values})
                elif operation == "none":
                    if "." in field_name:
                        path = field_name.split(".")[0]
                        group_f &= ~Nested(path=path, filter=Terms(**{field_name: values}))
                    else:
                        group_f &= ~Terms(**{field_name: values})

        date_range = group.get("time")
        if date_range:
            group_f &= date_range_filter(date_range)

        if f:
            f |= group_f
        else:
            f = group_f

    return f
0.002182
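The shape of the `query` dict consumed by groups_filter_from_query above, as inferred from the keys the code reads; the field names and values here are illustrative:

query = {
    "groups": [
        {
            "conditions": [
                {"field": "tags.name", "type": "any",
                 "values": [{"value": "urgent"}, {"value": "review"}]},
                {"field": "status", "type": "none",
                 "values": [{"value": "closed"}]},
            ],
            "time": None,  # or a date-range value handled by date_range_filter
        },
    ],
}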
def devname_from_index(self, if_index):
    """Return interface name from interface index"""
    for devname, iface in self.items():
        if iface.win_index == if_index:
            return iface.name
    raise ValueError("Unknown network interface index %r" % if_index)
0.006849
def metadata(self):
    """
    Retrieves the remote database metadata dictionary.

    :returns: Dictionary containing database metadata details
    """
    resp = self.r_session.get(self.database_url)
    resp.raise_for_status()
    return response_to_json_dict(resp)
0.006734
def rewrite_links(self, func):
    """
    Add a callback for rewriting links.

    The callback should take a single argument, the url, and should
    return a replacement url.  The callback function is called every time
    a ``[]()`` or ``<link>`` is processed.

    You can use this method as a decorator on the function you want to
    set as the callback.
    """
    @libmarkdown.e_url_callback
    def _rewrite_links_func(string, size, context):
        ret = func(string[:size])
        if ret is not None:
            buf = ctypes.create_string_buffer(ret)
            self._alloc.append(buf)
            return ctypes.addressof(buf)

    self._rewrite_links_func = _rewrite_links_func
    return func
0.002571
def bindex(start, stop=None, dim=1, sort="G", cross_truncation=1.):
    """
    Generator for creating multi-indices.

    Args:
        start (int):
            The lower order of the indices
        stop (:py:data:typing.Optional[int]):
            the maximum shape included. If omitted:
            stop <- start; start <- 0
            If int is provided, set as largest total order.
            If array of int, set as largest order along each axis.
        dim (int):
            The number of dimensions in the expansion
        cross_truncation (float):
            Use hyperbolic cross truncation scheme to reduce the number of
            terms in expansion.

    Returns:
        list:
            Order list of indices.

    Examples:
        >>> print(chaospy.bertran.bindex(2, 3, 2))
        [[2, 0], [1, 1], [0, 2], [3, 0], [2, 1], [1, 2], [0, 3]]
        >>> print(chaospy.bertran.bindex(0, 1, 3))
        [[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]]
    """
    if stop is None:
        start, stop = 0, start
    start = numpy.array(start, dtype=int).flatten()
    stop = numpy.array(stop, dtype=int).flatten()
    sort = sort.upper()

    total = numpy.mgrid[(slice(numpy.max(stop), -1, -1),)*dim]
    total = numpy.array(total).reshape(dim, -1)

    if start.size > 1:
        for idx, start_ in enumerate(start):
            total = total[:, total[idx] >= start_]
    else:
        total = total[:, total.sum(0) >= start]

    if stop.size > 1:
        for idx, stop_ in enumerate(stop):
            total = total[:, total[idx] <= stop_]

    total = total.T.tolist()

    if "G" in sort:
        total = sorted(total, key=sum)
    else:
        def cmp_(idxi, idxj):
            """Old style compare method."""
            if not numpy.any(idxi):
                return 0
            if idxi[0] == idxj[0]:
                return cmp(idxi[:-1], idxj[:-1])
            return (idxi[-1] > idxj[-1]) - (idxi[-1] < idxj[-1])
        key = functools.cmp_to_key(cmp_)
        total = sorted(total, key=key)

    if "I" in sort:
        total = total[::-1]

    if "R" in sort:
        total = [idx[::-1] for idx in total]

    for pos, idx in reversed(list(enumerate(total))):
        idx = numpy.array(idx)
        cross_truncation = numpy.asfarray(cross_truncation)
        try:
            if numpy.any(numpy.sum(idx**(1./cross_truncation)) >
                         numpy.max(stop)**(1./cross_truncation)):
                del total[pos]
        except (OverflowError, ZeroDivisionError):
            pass

    return total
0.000797
def _set_optional_tlv(self, v, load=False):
    """
    Setter method for optional_tlv, mapped from YANG variable
    /protocol/lldp/advertise/optional_tlv (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_optional_tlv is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_optional_tlv() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=optional_tlv.optional_tlv, is_container='container', presence=False, yang_name="optional-tlv", rest_name="optional-tlv", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The Optional TLVs.', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-lldp', defining_module='brocade-lldp', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """optional_tlv must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=optional_tlv.optional_tlv, is_container='container', presence=False, yang_name="optional-tlv", rest_name="optional-tlv", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The Optional TLVs.', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-lldp', defining_module='brocade-lldp', yang_type='container', is_config=True)""",
        })

    self.__optional_tlv = t
    if hasattr(self, '_set'):
        self._set()
0.005907
def dictFlat(l):
    """Given a list of list of dicts, return just the dicts."""
    if type(l) is dict:
        return [l]
    if "numpy" in str(type(l)):
        return l
    dicts = []
    for item in l:
        if type(item) == dict:
            dicts.append(item)
        elif type(item) == list:
            for item2 in item:
                dicts.append(item2)
    return dicts
0.013193
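A usage sketch for dictFlat above, assuming the function is in scope: one level of list nesting is flattened, and a bare dict is wrapped in a list.

mixed = [{'a': 1}, [{'b': 2}, {'c': 3}]]
print(dictFlat(mixed))     # [{'a': 1}, {'b': 2}, {'c': 3}]
print(dictFlat({'d': 4}))  # [{'d': 4}]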
def detectIphone(self):
    """Return detection of an iPhone

    Detects if the current device is an iPhone.
    """
    # The iPad and iPod touch say they're an iPhone! So let's disambiguate.
    return UAgentInfo.deviceIphone in self.__userAgent \
        and not self.detectIpad() \
        and not self.detectIpod()
0.005747
def extract_from_stack(stack, template, length, pre_pick, pre_pad,
                       Z_include=False, pre_processed=True, samp_rate=None,
                       lowcut=None, highcut=None, filt_order=3):
    """
    Extract a multiplexed template from a stack of detections.

    Function to extract a new template from a stack of previous detections.
    Requires the stack, the template used to make the detections for the \
    stack, and we need to know if the stack has been pre-processed.

    :type stack: obspy.core.stream.Stream
    :param stack: Waveform stack from detections. Can be of any length and \
        can have delays already included, or not.
    :type template: obspy.core.stream.Stream
    :param template: Template used to make the detections in the stack. Will \
        use the delays of this for the new template.
    :type length: float
    :param length: Length of new template in seconds
    :type pre_pick: float
    :param pre_pick: Extract additional data before the detection, seconds
    :type pre_pad: float
    :param pre_pad: Pad used in seconds when extracting the data, e.g. the \
        time before the detection extracted. If using \
        clustering.extract_detections this is half the length of the \
        extracted waveform.
    :type Z_include: bool
    :param Z_include: If True will include any Z-channels even if there is \
        no template for this channel, as long as there is a template for this \
        station at a different channel. If this is False and Z channels are \
        included in the template Z channels will be included in the \
        new_template anyway.
    :type pre_processed: bool
    :param pre_processed: Have the data been pre-processed, if True (default) \
        then we will only cut the data here.
    :type samp_rate: float
    :param samp_rate: If pre_processed=False then this is required, desired \
        sampling rate in Hz, defaults to False.
    :type lowcut: float
    :param lowcut: If pre_processed=False then this is required, lowcut in \
        Hz, defaults to False.
    :type highcut: float
    :param highcut: If pre_processed=False then this is required, highcut in \
        Hz, defaults to False
    :type filt_order: int
    :param filt_order: If pre_processed=False then this is required, filter \
        order, defaults to False

    :returns: Newly cut template.
    :rtype: :class:`obspy.core.stream.Stream`
    """
    new_template = stack.copy()
    # Copy the data before we trim it to keep the stack safe
    # Get the earliest time in the template as this is when the detection is
    # taken.
    mintime = min([tr.stats.starttime for tr in template])
    # Generate a list of tuples of (station, channel, delay) with delay in
    # seconds
    delays = [(tr.stats.station, tr.stats.channel[-1],
               tr.stats.starttime - mintime) for tr in template]

    # Process the data if necessary
    if not pre_processed:
        new_template = pre_processing.shortproc(
            st=new_template, lowcut=lowcut, highcut=highcut,
            filt_order=filt_order, samp_rate=samp_rate, debug=0)
    # Loop through the stack and trim!
    out = Stream()
    for tr in new_template:
        # Find the matching delay
        delay = [d[2] for d in delays if d[0] == tr.stats.station and
                 d[1] == tr.stats.channel[-1]]
        if Z_include and len(delay) == 0:
            delay = [d[2] for d in delays if d[0] == tr.stats.station]
        if len(delay) == 0:
            debug_print("No matching template channel found for stack channel"
                        " {0}.{1}".format(tr.stats.station, tr.stats.channel),
                        2, 3)
            new_template.remove(tr)
        else:
            for d in delay:
                out += tr.copy().trim(
                    starttime=tr.stats.starttime + d + pre_pad - pre_pick,
                    endtime=tr.stats.starttime + d + pre_pad + length -
                    pre_pick)
    return out
0.00025
def get_rules_from_disk():
    ''' Recursively traverse the yara/rules directory for rules '''
    # Try to find the yara rules directory relative to the worker
    my_dir = os.path.dirname(os.path.realpath(__file__))
    yara_rule_path = os.path.join(my_dir, 'yara/rules')
    if not os.path.exists(yara_rule_path):
        raise RuntimeError('yara could not find yara rules directory under: %s' % my_dir)

    # Okay load in all the rules under the yara rule path
    rules = yara.load_rules(rules_rootpath=yara_rule_path, fast_match=True)
    return rules
0.003571
def polytropic_exponent(k, n=None, eta_p=None):
    r'''Calculates one of:

    * Polytropic exponent from polytropic efficiency
    * Polytropic efficiency from the polytropic exponent

    .. math::
        n = \frac{k\eta_p}{1 - k(1-\eta_p)}

    .. math::
        \eta_p = \frac{\left(\frac{n}{n-1}\right)}{\left(\frac{k}{k-1}
        \right)} = \frac{n(k-1)}{k(n-1)}

    Parameters
    ----------
    k : float
        Isentropic exponent of the gas (Cp/Cv) [-]
    n : float, optional
        Polytropic exponent of the process [-]
    eta_p : float, optional
        Polytropic efficiency of the process, [-]

    Returns
    -------
    n or eta_p : float
        Polytropic exponent or polytropic efficiency, depending on input, [-]

    Notes
    -----

    Examples
    --------
    >>> polytropic_exponent(1.4, eta_p=0.78)
    1.5780346820809246

    References
    ----------
    .. [1] Couper, James R., W. Roy Penney, and James R. Fair. Chemical
       Process Equipment: Selection and Design. 2nd ed. Amsterdam ; Boston:
       Gulf Professional Publishing, 2009.
    '''
    if n is None and eta_p:
        return k*eta_p/(1.0 - k*(1.0 - eta_p))
    elif eta_p is None and n:
        return n*(k - 1.0)/(k*(n - 1.0))
    else:
        raise Exception('Either n or eta_p is required')
0.001523
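A standalone round-trip check of the two formulas in polytropic_exponent above: n computed from (k, eta_p) inverts back to the same eta_p.

k, eta_p = 1.4, 0.78
n = k*eta_p/(1.0 - k*(1.0 - eta_p))  # 1.5780346820809246, matching the doctest
print(n)
print(n*(k - 1.0)/(k*(n - 1.0)))     # 0.78 (up to float rounding)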
def python_executable(check=True, short=False):
    r"""
    Args:
        short (bool): (default = False)

    Returns:
        str:

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_cplat import *  # NOQA
        >>> short = False
        >>> result = python_executable(short)
        >>> print(result)
    """
    if not check:
        python_exe = 'python'
    else:
        from os.path import isdir
        python_exe_long = unixpath(sys.executable)
        python_exe = python_exe_long
        if short:
            python_exe_short = basename(python_exe_long)
            found = search_env_paths(python_exe_short, key_list=['PATH'],
                                     verbose=False)
            found = [f for f in found if not isdir(f)]
            if len(found) > 0:
                if found[0] == python_exe_long:
                    # Safe to use the short name in this env
                    python_exe = python_exe_short
    return python_exe
0.001019
def set_regex(self, regex=None, reset=False):
    """Update the regex text for the shortcut finder."""
    if reset:
        text = ''
    else:
        text = self.finder.text().replace(' ', '').lower()

    self.proxy_model.set_filter(text)
    self.source_model.update_search_letters(text)
    self.sortByColumn(SEARCH_SCORE, Qt.AscendingOrder)

    if self.last_regex != regex:
        self.selectRow(0)
    self.last_regex = regex
0.004065
def check_dynamic_route_exists(pattern, routes_to_check, parameters):
    """
    Check if a URL pattern exists in a list of routes provided based on
    the comparison of URL pattern and the parameters.

    :param pattern: URL parameter pattern
    :param routes_to_check: list of dynamic routes either hashable or
        unhashable routes.
    :param parameters: List of :class:`Parameter` items
    :return: Tuple of index and route if matching route exists else
        -1 for index and None for route
    """
    for ndx, route in enumerate(routes_to_check):
        if route.pattern == pattern and route.parameters == parameters:
            return ndx, route
    else:
        return -1, None
0.002628
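A usage sketch for check_dynamic_route_exists above, with a namedtuple standing in for the framework's route objects and plain tuples standing in for Parameter lists:

from collections import namedtuple

Route = namedtuple('Route', ['pattern', 'parameters'])
routes = [Route('/user/<id>', ('id',)), Route('/post/<slug>', ('slug',))]

print(check_dynamic_route_exists('/post/<slug>', routes, ('slug',)))
# -> (1, Route(pattern='/post/<slug>', parameters=('slug',)))
print(check_dynamic_route_exists('/missing', routes, ()))
# -> (-1, None)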
def _handle_successor(self, job, successor, successors):
    """
    Returns a new CFGJob instance for further analysis, or None if there is
    no immediate state to perform the analysis on.

    :param CFGJob job: The current job.
    """

    state = successor
    all_successor_states = successors
    addr = job.addr

    # The PathWrapper instance to return
    pw = None

    job.successor_status[state] = ""

    new_state = state.copy()
    suc_jumpkind = state.history.jumpkind
    suc_exit_stmt_idx = state.scratch.exit_stmt_idx
    suc_exit_ins_addr = state.scratch.exit_ins_addr

    if suc_jumpkind in {'Ijk_EmWarn', 'Ijk_NoDecode', 'Ijk_MapFail',
                        'Ijk_NoRedir', 'Ijk_SigTRAP', 'Ijk_SigSEGV',
                        'Ijk_ClientReq'}:
        # Ignore SimExits that are of these jumpkinds
        job.successor_status[state] = "Skipped"
        return [ ]

    call_target = job.extra_info['call_target']
    if suc_jumpkind == "Ijk_FakeRet" and call_target is not None:
        # if the call points to a SimProcedure that doesn't return, we don't
        # follow the fakeret anymore
        if self.project.is_hooked(call_target):
            sim_proc = self.project._sim_procedures[call_target]
            if sim_proc.NO_RET:
                return [ ]

    # Get target address
    try:
        target_addr = state.solver.eval_one(state.ip)
    except (SimValueError, SimSolverModeError):
        # It cannot be concretized currently. Maybe we can handle it later,
        # maybe it just cannot be concretized
        target_addr = None
        if suc_jumpkind == "Ijk_Ret":
            target_addr = job.call_stack.current_return_target
            if target_addr is not None:
                new_state.ip = new_state.solver.BVV(target_addr,
                                                    new_state.arch.bits)

    if target_addr is None:
        # Unlucky...
        return [ ]

    if state.thumb:
        # Make sure addresses are always odd. It is important to encode this
        # information in the address for the time being.
        target_addr |= 1

    # see if the target successor is in our whitelist
    if self._address_whitelist is not None:
        if target_addr not in self._address_whitelist:
            l.debug("Successor %#x is not in the address whitelist. Skip.",
                    target_addr)
            return [ ]

    # see if this edge is in the base graph
    if self._base_graph is not None:
        # TODO: make it more efficient. the current implementation is
        # half-assed and extremely slow
        for src_, dst_ in self._base_graph.edges():
            if src_.addr == addr and dst_.addr == target_addr:
                break
        else:
            # not found
            l.debug("Edge (%#x -> %#x) is not found in the base graph. Skip.",
                    addr, target_addr)
            return [ ]

    # Fix target_addr for syscalls
    if suc_jumpkind.startswith("Ijk_Sys"):
        syscall_proc = self.project.simos.syscall(new_state)
        if syscall_proc is not None:
            target_addr = syscall_proc.addr

    self._pre_handle_successor_state(job.extra_info, suc_jumpkind, target_addr)

    if suc_jumpkind == "Ijk_FakeRet":
        if target_addr == job.extra_info['last_call_exit_target']:
            l.debug("... skipping a fake return exit that has the same "
                    "target with its call exit.")
            job.successor_status[state] = "Skipped"
            return [ ]

        if job.extra_info['skip_fakeret']:
            l.debug('... skipping a fake return exit since the function '
                    'it\'s calling doesn\'t return')
            job.successor_status[state] = \
                "Skipped - non-returning function 0x%x" % job.extra_info['call_target']
            return [ ]

    # TODO: Make it optional
    if (suc_jumpkind == 'Ijk_Ret' and
            self._call_depth is not None and
            len(job.call_stack) <= 1
            ):
        # We cannot continue anymore since this is the end of the function
        # where we started tracing
        l.debug('... reaching the end of the starting function, skip.')
        job.successor_status[state] = \
            "Skipped - reaching the end of the starting function"
        return [ ]

    # Create the new call stack of target block
    new_call_stack = self._create_new_call_stack(addr, all_successor_states,
                                                 job, target_addr, suc_jumpkind)
    # Create the callstack suffix
    new_call_stack_suffix = new_call_stack.stack_suffix(self._context_sensitivity_level)
    # Tuple that will be used to index this exit
    new_tpl = self._generate_block_id(new_call_stack_suffix, target_addr,
                                      suc_jumpkind.startswith('Ijk_Sys'))

    # We might have changed the mode for this basic block
    # before. Make sure it is still running in 'fastpath' mode
    self._reset_state_mode(new_state, 'fastpath')

    pw = CFGJob(target_addr,
                new_state,
                self._context_sensitivity_level,
                src_block_id=job.block_id,
                src_exit_stmt_idx=suc_exit_stmt_idx,
                src_ins_addr=suc_exit_ins_addr,
                call_stack=new_call_stack,
                jumpkind=suc_jumpkind,
                )
    # Special case: If the binary has symbols and the target address is a
    # function, but for some reason (e.g., a tail-call optimization) the
    # CallStack's function address is still the old function address, we will
    # have to overwrite it here.
    if not self._is_call_jumpkind(pw.jumpkind):
        target_symbol = self.project.loader.find_symbol(target_addr)
        if target_symbol and target_symbol.is_function:
            # Force update the function address
            pw.func_addr = target_addr

    # Generate new exits
    if suc_jumpkind == "Ijk_Ret":
        # This is the real return exit
        job.successor_status[state] = "Appended"

    elif suc_jumpkind == "Ijk_FakeRet":
        # This is the default "fake" retn that generated at each
        # call. Save them first, but don't process them right
        # away
        # st = self.project._simos.prepare_call_state(new_state, initial_state=saved_state)
        st = new_state
        self._reset_state_mode(st, 'fastpath')

        pw = None  # clear the job
        pe = PendingJob(job.func_addr,
                        job.extra_info['call_target'],
                        st,
                        job.block_id,
                        suc_exit_stmt_idx,
                        suc_exit_ins_addr,
                        new_call_stack
                        )
        self._pending_jobs[new_tpl] = pe
        self._register_analysis_job(pe.caller_func_addr, pe)
        job.successor_status[state] = "Pended"

    elif self._traced_addrs[new_call_stack_suffix][target_addr] >= 1 \
            and suc_jumpkind == "Ijk_Ret":
        # This is a corner case for the f****** ARM instruction
        # like
        # BLEQ <address>
        # If we have analyzed the boring exit before returning from that
        # called address, we will lose the link between the last block of the
        # function being called and the basic block it returns to. We cannot
        # reanalyze the basic block as we are not flow-sensitive, but we can
        # still record the connection and make for it afterwards.
        pass

    else:
        job.successor_status[state] = "Appended"

    if job.extra_info['is_call_jump'] and \
            job.extra_info['call_target'] in self._non_returning_functions:
        job.extra_info['skip_fakeret'] = True

    if not pw:
        return [ ]

    if self._base_graph is not None:
        # remove all existing jobs that has the same block ID
        if next((en for en in self.jobs if en.block_id == pw.block_id), None):
            # TODO: this is very hackish. Reimplement this logic later
            self._job_info_queue = [entry for entry in self._job_info_queue
                                    if entry.job.block_id != pw.block_id]

    # register the job
    self._register_analysis_job(pw.func_addr, pw)

    return [ pw ]
0.005297
def general_conv(x, num_filters=64, filter_size=7, stride=1, stddev=0.02,
                 padding="VALID", name="conv", do_norm="instance",
                 do_relu=True, relufactor=0):
    """Generalized convolution layer."""
    with tf.variable_scope(name):
        x = layers().Conv2D(
            num_filters, filter_size, stride, padding, activation=None,
            kernel_initializer=tf.truncated_normal_initializer(stddev=stddev),
            bias_initializer=tf.constant_initializer(0.0))(x)
        if do_norm == "layer":
            x = layer_norm(x)
        elif do_norm == "instance":
            x = instance_norm(x)

        if do_relu:
            if relufactor == 0:
                x = tf.nn.relu(x, "relu")
            else:
                x = lrelu(x, leak=relufactor)

        return x
0.00791
def status(self, job_id):
    """
    Check the status of a job.

    ## Arguments

    * `job_id` (int): The job to check.

    ## Returns

    * `code` (int): The status.
    * `status` (str): The human-readable name of the current status.
    """
    params = {"jobid": job_id}
    r = self._send_request("GetJobStatus", params=params)
    status = int(self._parse_single(r.text, "int"))
    return status, self.status_codes[status]
0.004132
def read_plink(file_prefix, verbose=True):
    r"""Read PLINK files into Pandas data frames.

    Parameters
    ----------
    file_prefix : str
        Path prefix to the set of PLINK files. It supports loading many BED
        files at once using globstrings wildcard.
    verbose : bool
        ``True`` for progress information; ``False`` otherwise.

    Returns
    -------
    :class:`pandas.DataFrame`
        Alleles.
    :class:`pandas.DataFrame`
        Samples.
    :class:`numpy.ndarray`
        Genotype.

    Examples
    --------
    We have shipped this package with an example so can load and inspect by
    doing

    .. doctest::

        >>> from pandas_plink import read_plink
        >>> from pandas_plink import example_file_prefix
        >>> (bim, fam, bed) = read_plink(example_file_prefix(), verbose=False)
        >>> print(bim.head())  #doctest: +NORMALIZE_WHITESPACE
          chrom         snp   cm    pos a0 a1  i
        0     1  rs10399749  0.0  45162  G  C  0
        1     1   rs2949420  0.0  45257  C  T  1
        2     1   rs2949421  0.0  45413  0  0  2
        3     1   rs2691310  0.0  46844  A  T  3
        4     1   rs4030303  0.0  72434  0  G  4
        >>> print(fam.head())  #doctest: +NORMALIZE_WHITESPACE
                fid       iid    father    mother gender trait  i
        0  Sample_1  Sample_1         0         0      1    -9  0
        1  Sample_2  Sample_2         0         0      2    -9  1
        2  Sample_3  Sample_3  Sample_1  Sample_2      2    -9  2
        >>> print(bed.compute())  #doctest: +NORMALIZE_WHITESPACE
        [[ 2.  2.  1.]
         [ 2.  1.  2.]
         [nan nan nan]
         [nan nan  1.]
         [ 2.  2.  2.]
         [ 2.  2.  2.]
         [ 2.  1.  0.]
         [ 2.  2.  2.]
         [ 1.  2.  2.]
         [ 2.  1.  2.]]

    The values of the ``bed`` matrix denote how many alleles ``a1`` (see
    output of data frame ``bim``) are in the corresponding position and
    individual. Notice the column ``i`` in ``bim`` and ``fam`` data frames.
    It maps to the corresponding position of the bed matrix:

    .. doctest::

        >>> chrom1 = bim.query("chrom=='1'")
        >>> X = bed[chrom1.i.values, :].compute()
        >>> print(X)  #doctest: +NORMALIZE_WHITESPACE
        [[ 2.  2.  1.]
         [ 2.  1.  2.]
         [nan nan nan]
         [nan nan  1.]
         [ 2.  2.  2.]
         [ 2.  2.  2.]
         [ 2.  1.  0.]
         [ 2.  2.  2.]
         [ 1.  2.  2.]
         [ 2.  1.  2.]]

    It also allows the use of the wildcard character ``*`` for mapping
    multiple BED files at once: ``(bim, fam, bed) = read_plink("chrom*")``.
    In this case, only one of the FAM files will be used to define sample
    information. Data from BIM and BED files are concatenated to provide a
    single view of the files.
    """
    from dask.array import concatenate

    file_prefixes = sorted(glob(file_prefix))
    if len(file_prefixes) == 0:
        file_prefixes = [file_prefix.replace("*", "")]

    file_prefixes = sorted(_clean_prefixes(file_prefixes))

    fn = []
    for fp in file_prefixes:
        fn.append({s: "%s.%s" % (fp, s) for s in ["bed", "bim", "fam"]})

    pbar = tqdm(desc="Mapping files", total=3 * len(fn), disable=not verbose)

    msg = "Reading bim file(s)..."
    bim = _read_file(fn, msg, lambda fn: _read_bim(fn["bim"]), pbar)
    if len(file_prefixes) > 1:
        if verbose:
            msg = "Multiple files read in this order: {}"
            print(msg.format([basename(f) for f in file_prefixes]))

    nmarkers = dict()
    index_offset = 0
    for i, bi in enumerate(bim):
        nmarkers[fn[i]["bed"]] = bi.shape[0]
        bi["i"] += index_offset
        index_offset += bi.shape[0]
    bim = pd.concat(bim, axis=0, ignore_index=True)

    msg = "Reading fam file(s)..."
    fam = _read_file([fn[0]], msg, lambda fn: _read_fam(fn["fam"]), pbar)[0]
    nsamples = fam.shape[0]

    bed = _read_file(
        fn,
        "Reading bed file(s)...",
        lambda fn: _read_bed(fn["bed"], nsamples, nmarkers[fn["bed"]]),
        pbar,
    )

    bed = concatenate(bed, axis=0)

    pbar.close()

    return (bim, fam, bed)
0.00024
def create_custom_field(connection, cf_type, cf_name, auto_attached,
                        value_names=None, bundle_policy="0"):
    """
    Creates custom field prototype (if it does not exist) and sets default
    values bundle if needed

    Args:
        connection: An opened Connection instance.
        cf_type: Type of custom field to be created
        cf_name: Name of custom field that should be created (if not exists)
        auto_attached: If this field should be auto attached or not.
        value_names: Values, that should be attached with this cf by default.
            If None, no bundle is created to this field, if empty, empty
            bundle is created.
        bundle_policy: ???

    Raises:
        LogicException: If custom field already exists, but has wrong type.
        YouTrackException: If something is wrong with queries.
    """
    if (value_names is None) and (not auto_attached or "[" not in cf_type):
        _create_custom_field_prototype(connection, cf_type, cf_name,
                                       auto_attached)
        return
    if value_names is None:
        value_names = set([])
    else:
        value_names = set(value_names)

    field = _get_custom_field(connection, cf_name)
    if field is not None:
        if hasattr(field, "defaultBundle"):
            bundle = connection.get_bundle(field.type, field.defaultBundle)
        elif field.autoAttached:
            return
        else:
            bundle = create_bundle_safe(connection, cf_name + "_bundle",
                                        cf_type)
    else:
        bundle = create_bundle_safe(connection, cf_name + "_bundle", cf_type)
    _create_custom_field_prototype(
        connection, cf_type, cf_name, auto_attached,
        {"defaultBundle": bundle.name,
         "attachBundlePolicy": bundle_policy})

    for value_name in value_names:
        try:
            connection.add_value_to_bundle(bundle, value_name)
        except YouTrackException:
            pass
0.00361
def from_flowcell(run_folder, lane_details, out_dir=None):
    """Convert a flowcell into a samplesheet for demultiplexing.
    """
    fcid = os.path.basename(run_folder)
    if out_dir is None:
        out_dir = run_folder
    out_file = os.path.join(out_dir, "%s.csv" % fcid)
    with open(out_file, "w") as out_handle:
        writer = csv.writer(out_handle)
        writer.writerow(["FCID", "Lane", "Sample_ID", "SampleRef", "Index",
                         "Description", "Control", "Recipe", "Operator",
                         "SampleProject"])
        for ldetail in lane_details:
            writer.writerow(_lane_detail_to_ss(fcid, ldetail))
    return out_file
0.003082
def render_thread(self):
    """A render loop that pulls observations off the queue to render."""
    obs = True
    while obs:
        # Send something falsy through the queue to shut down.
        obs = self._obs_queue.get()
        if obs:
            for alert in obs.observation.alerts:
                self._alerts[sc_pb.Alert.Name(alert)] = time.time()
            for err in obs.action_errors:
                if err.result != sc_err.Success:
                    self._alerts[sc_err.ActionResult.Name(err.result)] = time.time()
            self.prepare_actions(obs)
            if self._obs_queue.empty():
                # Only render the latest observation so we keep up with the
                # game.
                self.render_obs(obs)
            if self._video_writer:
                self._video_writer.add(np.transpose(
                    pygame.surfarray.pixels3d(self._window),
                    axes=(1, 0, 2)))
        self._obs_queue.task_done()
0.010502
def _element(self):
    """
    Return XML form of this content types item, suitable for storage as
    ``[Content_Types].xml`` in an OPC package. Although the sequence of
    elements is not strictly significant, as an aid to testing and
    readability Default elements are sorted by extension and Override
    elements are sorted by partname.
    """
    _types_elm = CT_Types.new()
    for ext in sorted(self._defaults.keys()):
        _types_elm.add_default(ext, self._defaults[ext])
    for partname in sorted(self._overrides.keys()):
        _types_elm.add_override(partname, self._overrides[partname])
    return _types_elm
0.002928
def make_logging_undefined(logger=None, base=None):
    """Given a logger object this returns a new undefined class that will
    log certain failures.  It will log iterations and printing.  If no
    logger is given a default logger is created.

    Example::

        logger = logging.getLogger(__name__)
        LoggingUndefined = make_logging_undefined(
            logger=logger,
            base=Undefined
        )

    .. versionadded:: 2.8

    :param logger: the logger to use.  If not provided, a default logger
                   is created.
    :param base: the base class to add logging functionality to.  This
                 defaults to :class:`Undefined`.
    """
    if logger is None:
        import logging
        logger = logging.getLogger(__name__)
        logger.addHandler(logging.StreamHandler(sys.stderr))
    if base is None:
        base = Undefined

    def _log_message(undef):
        if undef._undefined_hint is None:
            if undef._undefined_obj is missing:
                hint = '%s is undefined' % undef._undefined_name
            elif not isinstance(undef._undefined_name, string_types):
                hint = '%s has no element %s' % (
                    object_type_repr(undef._undefined_obj),
                    undef._undefined_name)
            else:
                hint = '%s has no attribute %s' % (
                    object_type_repr(undef._undefined_obj),
                    undef._undefined_name)
        else:
            hint = undef._undefined_hint
        logger.warning('Template variable warning: %s', hint)

    class LoggingUndefined(base):

        def _fail_with_undefined_error(self, *args, **kwargs):
            try:
                return base._fail_with_undefined_error(self, *args, **kwargs)
            except self._undefined_exception as e:
                logger.error('Template variable error: %s', str(e))
                raise e

        def __str__(self):
            rv = base.__str__(self)
            _log_message(self)
            return rv

        def __iter__(self):
            rv = base.__iter__(self)
            _log_message(self)
            return rv

        if PY2:
            def __nonzero__(self):
                rv = base.__nonzero__(self)
                _log_message(self)
                return rv

            def __unicode__(self):
                rv = base.__unicode__(self)
                _log_message(self)
                return rv
        else:
            def __bool__(self):
                rv = base.__bool__(self)
                _log_message(self)
                return rv

    return LoggingUndefined
0.000381
def __stream_format_allowed(self, stream):
    """ Check whether a stream allows formatting such as coloring.
        Inspired from Python cookbook, #475186
    """
    # curses isn't available on all platforms
    try:
        import curses as CURSES
    except:
        return False

    try:
        CURSES.setupterm()
        return CURSES.tigetnum("colors") >= 2
    except:
        return False
0.008791
def delete_local_operator(self, onnx_name):
    '''
    Remove the operator whose onnx_name is the input onnx_name
    '''
    if onnx_name not in self.onnx_operator_names or onnx_name not in self.operators:
        raise RuntimeError('The operator to be removed not found')
    self.onnx_operator_names.discard(onnx_name)
    del self.operators[onnx_name]
0.007813
def add_spaces(self, spaces, ret=False):
    """
    Add ``pyny.Spaces`` to the current space. In other words, it merges
    multiple ``pyny.Spaces`` in this instance.

    :param spaces: ``pyny.Spaces`` to add.
    :type spaces: list of pyny.Spaces
    :param ret: If True, returns the whole updated Space.
    :type ret: bool
    :returns: None, ``pyny.Space``

    .. warning:: This method acts inplace.
    """
    if type(spaces) != list:
        spaces = [spaces]
    Space.add_places(self, sum([space.places for space in spaces], []))
    if ret:
        return self
0.017296
def from_neighbor_throats(target, throat_prop='throat.seed', mode='min'):
    r"""
    Adopt a value from the values found in neighboring throats

    Parameters
    ----------
    target : OpenPNM Object
        The object which this model is associated with. This controls the
        length of the calculated array, and also provides access to other
        necessary properties.

    throat_prop : string
        The dictionary key of the array containing the throat property to be
        used in the calculation.  The default is 'throat.seed'.

    mode : string
        Controls how the pore property is calculated.  Options are 'min',
        'max' and 'mean'.
    """
    prj = target.project
    network = prj.network
    lookup = prj.find_full_domain(target)
    Ps = lookup.map_pores(target.pores(), target)
    data = lookup[throat_prop]
    neighborTs = network.find_neighbor_throats(pores=Ps,
                                               flatten=False,
                                               mode='or')
    values = np.ones((np.shape(Ps)[0],))*np.nan
    if mode == 'min':
        for pore in range(len(Ps)):
            values[pore] = np.amin(data[neighborTs[pore]])
    if mode == 'max':
        for pore in range(len(Ps)):
            values[pore] = np.amax(data[neighborTs[pore]])
    if mode == 'mean':
        for pore in range(len(Ps)):
            values[pore] = np.mean(data[neighborTs[pore]])
    return values
0.000692
def tags(self):
    """
    Returns a dict containing tags and their localized labels as values
    """
    return dict([(t, self._catalog.tags.get(t, t))
                 for t in self._asset.get("tags", [])])
0.020725
def to_dict(self):
    """
    Return a transmission-safe dictionary representation of the API message
    properties.
    """
    d = {field: getattr(self, field) for field in self._top_fields
         if hasattr(self, field)}

    # set values of data fields
    d['data'] = {}
    for field in self._fields:
        if hasattr(self, field):
            # base64 encode the field for transmission if necessary
            if field in self._encode_fields:
                val = getattr(self, field)
                if val:
                    val = cast_s(base64.b64encode(cast_b(val)))
                d['data'][field] = val
            else:
                d['data'][field] = getattr(self, field)
    return d
0.005202
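The base64 round-trip that the ``_encode_fields`` handling above relies on, in isolation (``cast_b``/``cast_s`` are assumed to be the class's bytes/str coercion helpers):

import base64

payload = b"\x00binary blob\xff"
encoded = base64.b64encode(payload).decode("ascii")  # JSON-safe text
assert base64.b64decode(encoded) == payload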
def load_template(template_name=None,
                  template_source=None,
                  context=None,
                  defaults=None,
                  template_engine='jinja',
                  saltenv='base',
                  template_hash=None,
                  template_hash_name=None,
                  skip_verify=False,
                  test=False,
                  commit=True,
                  debug=False,
                  replace=False,
                  commit_in=None,
                  commit_at=None,
                  revert_in=None,
                  revert_at=None,
                  inherit_napalm_device=None,  # pylint: disable=unused-argument
                  **template_vars):
    '''
    Renders a configuration template (default: Jinja) and loads the result
    on the device.

    By default this function will commit the changes. If there are no
    changes, it does not commit, discards the config and sets the
    ``already_configured`` flag to ``True`` to point this out.

    To avoid committing the configuration, set the argument ``test`` to
    ``True`` and it will discard (dry run).

    To preserve the changes, set ``commit`` to ``False``. However, this is
    recommended only in exceptional cases, e.g. when a few consecutive
    states and/or configuration changes are applied. Otherwise the user
    might forget that the config DB is locked and that the candidate config
    buffer is not cleared/merged in the running config.

    To replace the config, set ``replace`` to ``True``.

    template_name
        Identifies the path to the template source. The template can be
        stored either on the local machine or remotely. The recommended
        location is under the ``file_roots`` as specified in the master
        config file. For example, let's suppose the ``file_roots`` is
        configured as:

        .. code-block:: yaml

            file_roots:
              base:
                - /etc/salt/states

        Placing the template under ``/etc/salt/states/templates/example.jinja``,
        it can be used as ``salt://templates/example.jinja``. Alternatively,
        for local files, the user can specify the absolute path. If remote,
        the source can be retrieved via ``http``, ``https`` or ``ftp``.

        Examples:

        - ``salt://my_template.jinja``
        - ``/absolute/path/to/my_template.jinja``
        - ``http://example.com/template.cheetah``
        - ``https://example.com/template.mako``
        - ``ftp://example.com/template.py``

        .. versionchanged:: 2019.2.0
            This argument can now support a list of templates to be rendered.
            The resulting configuration text is loaded at once, as a single
            configuration chunk.

    template_source: None
        Inline config template to be rendered and loaded on the device.

    template_hash: None
        Hash of the template file. Format: ``{hash_type: 'md5', 'hsum': <md5sum>}``

        .. versionadded:: 2016.11.2

    context: None
        Overrides default context variables passed to the template.

        .. versionadded:: 2019.2.0

    template_hash_name: None
        When ``template_hash`` refers to a remote file, this specifies the
        filename to look for in that file.

        .. versionadded:: 2016.11.2

    saltenv: ``base``
        Specifies the template environment. This will influence the
        relative imports inside the templates.

        .. versionadded:: 2016.11.2

    template_engine: jinja
        The following template engines are supported:

        - :mod:`cheetah<salt.renderers.cheetah>`
        - :mod:`genshi<salt.renderers.genshi>`
        - :mod:`jinja<salt.renderers.jinja>`
        - :mod:`mako<salt.renderers.mako>`
        - :mod:`py<salt.renderers.py>`
        - :mod:`wempy<salt.renderers.wempy>`

        .. versionadded:: 2016.11.2

    skip_verify: False
        If ``True``, hash verification of remote file sources (``http://``,
        ``https://``, ``ftp://``) will be skipped, and the ``source_hash``
        argument will be ignored.

        .. versionadded:: 2016.11.2

    test: False
        Dry run? If set to ``True``, will apply the config, discard and
        return the changes. Default: ``False`` and will commit the changes
        on the device.

    commit: True
        Commit? (default: ``True``)

    debug: False
        Debug mode.
        Will insert a new key under the output dictionary, as
        ``loaded_config``, containing the raw result after the template was
        rendered.

        .. versionadded:: 2016.11.2

    replace: False
        Load and replace the configuration.

        .. versionadded:: 2016.11.2

    commit_in: ``None``
        Commit the changes in a specific number of minutes / hours. Example
        of accepted formats: ``5`` (commit in 5 minutes), ``2m`` (commit in
        2 minutes), ``1h`` (commit the changes in 1 hour), ``5h30m`` (commit
        the changes in 5 hours and 30 minutes).

        .. note::
            This feature works on any platform, as it does not rely on the
            native features of the network operating system.

        .. note::
            After the command is executed and the ``diff`` is not
            satisfactory, or for any other reason you have to discard the
            commit, you are able to do so using the
            :py:func:`net.cancel_commit <salt.modules.napalm_network.cancel_commit>`
            execution function, using the commit ID returned by this function.

        .. warning::
            Using this feature, Salt will load the exact configuration you
            expect, however the diff may change in time (i.e., if a user
            applies a manual configuration change, or a different process or
            command changes the configuration in the meanwhile).

        .. versionadded:: 2019.2.0

    commit_at: ``None``
        Commit the changes at a specific time. Example of accepted formats:
        ``1am`` (will commit the changes at the next 1AM), ``13:20`` (will
        commit at 13:20), ``1:20am``, etc.

        .. note::
            This feature works on any platform, as it does not rely on the
            native features of the network operating system.

        .. note::
            After the command is executed and the ``diff`` is not
            satisfactory, or for any other reason you have to discard the
            commit, you are able to do so using the
            :py:func:`net.cancel_commit <salt.modules.napalm_network.cancel_commit>`
            execution function, using the commit ID returned by this function.

        .. warning::
            Using this feature, Salt will load the exact configuration you
            expect, however the diff may change in time (i.e., if a user
            applies a manual configuration change, or a different process or
            command changes the configuration in the meanwhile).

        .. versionadded:: 2019.2.0

    revert_in: ``None``
        Commit and revert the changes in a specific number of minutes /
        hours. Example of accepted formats: ``5`` (revert in 5 minutes),
        ``2m`` (revert in 2 minutes), ``1h`` (revert the changes in 1 hour),
        ``5h30m`` (revert the changes in 5 hours and 30 minutes).

        .. note::
            To confirm the commit, and prevent reverting the changes, you
            will have to execute the
            :mod:`net.confirm_commit <salt.modules.napalm_network.confirm_commit>`
            function, using the commit ID returned by this function.

        .. warning::
            This works on any platform, regardless of whether it has native
            capabilities for confirming a commit. However, please be *very*
            cautious when using this feature: on Junos (as it is the only
            NAPALM core platform supporting this natively) it executes a
            commit confirmed as you would do from the command line. All the
            other platforms don't have this capability natively, therefore
            the revert is done via Salt. That means your device needs to be
            reachable at the moment when Salt will attempt to revert your
            changes. Be cautious when pushing configuration changes that
            would prevent you from reaching the device. Similarly, if a user
            or a different process applies other configuration changes in
            the meanwhile (between the moment you commit and the moment the
            changes are reverted), those changes would be equally reverted,
            as Salt cannot be aware of them.

        .. versionadded:: 2019.2.0
    revert_at: ``None``
        Commit and revert the changes at a specific time. Example of
        accepted formats: ``1am`` (will commit and revert the changes at the
        next 1AM), ``13:20`` (will commit and revert at 13:20), ``1:20am``,
        etc.

        .. note::
            To confirm the commit, and prevent reverting the changes, you
            will have to execute the
            :mod:`net.confirm_commit <salt.modules.napalm_network.confirm_commit>`
            function, using the commit ID returned by this function.

        .. warning::
            This works on any platform, regardless of whether it has native
            capabilities for confirming a commit. However, please be *very*
            cautious when using this feature: on Junos (as it is the only
            NAPALM core platform supporting this natively) it executes a
            commit confirmed as you would do from the command line. All the
            other platforms don't have this capability natively, therefore
            the revert is done via Salt. That means your device needs to be
            reachable at the moment when Salt will attempt to revert your
            changes. Be cautious when pushing configuration changes that
            would prevent you from reaching the device. Similarly, if a user
            or a different process applies other configuration changes in
            the meanwhile (between the moment you commit and the moment the
            changes are reverted), those changes would be equally reverted,
            as Salt cannot be aware of them.

        .. versionadded:: 2019.2.0

    defaults: None
        Default variables/context passed to the template.

        .. versionadded:: 2016.11.2

    template_vars
        Dictionary with the arguments/context to be used when the template
        is rendered.

        .. note::
            Do not explicitly specify this argument. This represents any
            other variable that will be sent to the template rendering
            system. Please see the examples below!

        .. note::
            It is recommended to use the ``context`` argument instead, to
            avoid conflicts between CLI arguments and template variables.

    :return: a dictionary having the following keys:

    - result (bool): if the config was applied successfully. It is ``False``
      only in case of failure. If there are no changes to be applied and all
      operations complete successfully, it is still ``True``, and so will be
      the ``already_configured`` flag (example below)
    - comment (str): a message for the user
    - already_configured (bool): flag to check if there were no changes applied
    - loaded_config (str): the configuration loaded on the device, after
      rendering the template. Requires ``debug`` to be set as ``True``
    - diff (str): returns the config changes applied

    The template can use variables from the ``grains``, ``pillar`` or
    ``opts``, for example:

    .. code-block:: jinja

        {% set router_model = grains.get('model') -%}
        {% set router_vendor = grains.get('vendor') -%}
        {% set os_version = grains.get('version') -%}
        {% set hostname = pillar.get('proxy', {}).get('host') -%}
        {% if router_vendor|lower == 'juniper' %}
        system {
            host-name {{hostname}};
        }
        {% elif router_vendor|lower == 'cisco' %}
        hostname {{hostname}}
        {% endif %}

    CLI Examples:
    .. code-block:: bash

        salt '*' net.load_template set_ntp_peers peers=[192.168.0.1]  # uses NAPALM default templates

        # inline template:
        salt -G 'os:junos' net.load_template template_source='system { host-name {{host_name}}; }' \
            host_name='MX480.lab'

        # inline template using grains info:
        salt -G 'os:junos' net.load_template \
            template_source='system { host-name {{grains.model}}.lab; }'
        # if the device is a MX480, the command above will set the hostname as: MX480.lab

        # inline template using pillar data:
        salt -G 'os:junos' net.load_template template_source='system { host-name {{pillar.proxy.host}}; }'

        salt '*' net.load_template https://bit.ly/2OhSgqP hostname=example  # will commit
        salt '*' net.load_template https://bit.ly/2OhSgqP hostname=example test=True  # dry run

        salt '*' net.load_template salt://templates/example.jinja debug=True  # Using the salt:// URI

        # render a mako template:
        salt '*' net.load_template salt://templates/example.mako template_engine=mako debug=True

        # render remote template
        salt -G 'os:junos' net.load_template http://bit.ly/2fReJg7 test=True debug=True peers=['192.168.0.1']
        salt -G 'os:ios' net.load_template http://bit.ly/2gKOj20 test=True debug=True peers=['192.168.0.1']

        # render multiple templates at once
        salt '*' net.load_template "['https://bit.ly/2OhSgqP', 'salt://templates/example.jinja']" context="{'hostname': 'example'}"

    Example output:

    .. code-block:: python

        {
            'comment': '',
            'already_configured': False,
            'result': True,
            'diff': '[edit system]+ host-name edge01.bjm01',
            'loaded_config': 'system { host-name edge01.bjm01; }'
        }
    '''
    _rendered = ''
    _loaded = {
        'result': True,
        'comment': '',
        'out': None
    }
    loaded_config = None
    # prechecks
    deprecated_args = ('template_user', 'template_attrs', 'template_group', 'template_mode')
    for deprecated_arg in deprecated_args:
        if template_vars.get(deprecated_arg):
            del template_vars[deprecated_arg]
            salt.utils.versions.warn_until(
                'Sodium',
                ('The \'{arg}\' argument to \'net.load_template\' is deprecated '
                 'and has been ignored').format(arg=deprecated_arg)
            )
    if template_engine not in salt.utils.templates.TEMPLATE_REGISTRY:
        _loaded.update({
            'result': False,
            'comment': 'Invalid templating engine! 
Choose between: {tpl_eng_opts}'.format( tpl_eng_opts=', '.join(list(salt.utils.templates.TEMPLATE_REGISTRY.keys())) ) }) return _loaded # exit # to check if will be rendered by salt or NAPALM salt_render_prefixes = ('salt://', 'http://', 'https://', 'ftp://') salt_render = False file_exists = False if not isinstance(template_name, (tuple, list)): for salt_render_prefix in salt_render_prefixes: if not salt_render: salt_render = salt_render or template_name.startswith(salt_render_prefix) file_exists = __salt__['file.file_exists'](template_name) if template_source or file_exists or salt_render or isinstance(template_name, (tuple, list)): # either inline template # either template in a custom path # either abs path send # either starts with salt:// and # then use Salt render system if context is None: context = {} context.update(template_vars) # if needed to render the template send as inline arg if template_source: # render the content _rendered = __salt__['file.apply_template_on_contents']( contents=template_source, template=template_engine, context=context, defaults=defaults, saltenv=saltenv ) if not isinstance(_rendered, six.string_types): if 'result' in _rendered: _loaded['result'] = _rendered['result'] else: _loaded['result'] = False if 'comment' in _rendered: _loaded['comment'] = _rendered['comment'] else: _loaded['comment'] = 'Error while rendering the template.' return _loaded else: # render the file - either local, either remote if not isinstance(template_name, (list, tuple)): template_name = [template_name] if template_hash_name and not isinstance(template_hash_name, (list, tuple)): template_hash_name = [template_hash_name] elif not template_hash_name: template_hash_name = [None] * len(template_name) if template_hash and isinstance(template_hash, six.string_types) and not\ (template_hash.startswith('salt://') or template_hash.startswith('file://')): # If the template hash is passed as string, and it's not a file # (starts with the salt:// or file:// URI), then make it a list # of 1 element (for the iteration below) template_hash = [template_hash] elif template_hash and isinstance(template_hash, six.string_types) and\ (template_hash.startswith('salt://') or template_hash.startswith('file://')): # If the template hash is a file URI, then provide the same value # for each of the templates in the list, as probably they all # share the same hash file, otherwise the user should provide # this as a list template_hash = [template_hash] * len(template_name) elif not template_hash: template_hash = [None] * len(template_name) for tpl_index, tpl_name in enumerate(template_name): tpl_hash = template_hash[tpl_index] tpl_hash_name = template_hash_name[tpl_index] _rand_filename = __salt__['random.hash'](tpl_name, 'md5') _temp_file = __salt__['file.join']('/tmp', _rand_filename) _managed = __salt__['file.get_managed'](name=_temp_file, source=tpl_name, source_hash=tpl_hash, source_hash_name=tpl_hash_name, user=None, group=None, mode=None, attrs=None, template=template_engine, context=context, defaults=defaults, saltenv=saltenv, skip_verify=skip_verify) if not isinstance(_managed, (list, tuple)) and isinstance(_managed, six.string_types): _loaded['comment'] += _managed _loaded['result'] = False elif isinstance(_managed, (list, tuple)) and not _managed: _loaded['result'] = False _loaded['comment'] += 'Error while rendering the template.' 
elif isinstance(_managed, (list, tuple)) and not _managed[0]: _loaded['result'] = False _loaded['comment'] += _managed[-1] # contains the error message if _loaded['result']: # all good _temp_tpl_file = _managed[0] _temp_tpl_file_exists = __salt__['file.file_exists'](_temp_tpl_file) if not _temp_tpl_file_exists: _loaded['result'] = False _loaded['comment'] += 'Error while rendering the template.' return _loaded _rendered += __salt__['file.read'](_temp_tpl_file) __salt__['file.remove'](_temp_tpl_file) else: return _loaded # exit loaded_config = _rendered if _loaded['result']: # all good fun = 'load_merge_candidate' if replace: # replace requested fun = 'load_replace_candidate' if salt.utils.napalm.not_always_alive(__opts__): # if a not-always-alive proxy # or regular minion # do not close the connection after loading the config # this will be handled in _config_logic # after running the other features: # compare_config, discard / commit # which have to be over the same session napalm_device['CLOSE'] = False # pylint: disable=undefined-variable _loaded = salt.utils.napalm.call( napalm_device, # pylint: disable=undefined-variable fun, **{ 'config': _rendered } ) else: salt.utils.versions.warn_until( 'Sodium', 'Native NAPALM templates support will be removed in the Sodium ' 'release. Please consider using the Salt rendering pipeline instead.' 'If you are using the \'netntp\', \'netsnmp\', or \'netusers\' Salt ' 'State modules, you can ignore this message' ) # otherwise, use NAPALM render system, injecting pillar/grains/opts vars load_templates_params = defaults if defaults else {} load_templates_params.update(template_vars) load_templates_params.update( { 'template_name': template_name, 'template_source': template_source, # inline template 'pillar': __pillar__, # inject pillar content 'grains': __grains__, # inject grains content 'opts': __opts__ # inject opts content } ) if salt.utils.napalm.not_always_alive(__opts__): # if a not-always-alive proxy # or regular minion # do not close the connection after loading the config # this will be handled in _config_logic # after running the other features: # compare_config, discard / commit # which have to be over the same session # so we'll set the CLOSE global explicitly as False napalm_device['CLOSE'] = False # pylint: disable=undefined-variable _loaded = salt.utils.napalm.call( napalm_device, # pylint: disable=undefined-variable 'load_template', **load_templates_params ) return _config_logic(napalm_device, # pylint: disable=undefined-variable _loaded, test=test, debug=debug, replace=replace, commit_config=commit, loaded_config=loaded_config, commit_at=commit_at, commit_in=commit_in, revert_in=revert_in, revert_at=revert_at, **template_vars)
0.002393
def wait(self, timeout):
    """Calling thread waits for a given number of milliseconds or until notified."""
    # Condition.wait() takes seconds as a float; integer division would
    # truncate sub-second timeouts (e.g. 500 ms) to a zero-length wait.
    with self.condition:
        self.condition.wait(timeout / 1000)
0.008621
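A standalone sketch of the seconds-based contract of threading.Condition.wait(), which is why the millisecond argument above must be divided by 1000 as a float:

import threading

cond = threading.Condition()
with cond:
    notified = cond.wait(1500 / 1000)  # waits up to 1.5 seconds
print("notified:", notified)  # False unless another thread called notify()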
def range_interleave(ranges, sizes={}, empty=False): """ Returns the ranges in between the given ranges. >>> ranges = [("1", 30, 40), ("1", 45, 50), ("1", 10, 30)] >>> range_interleave(ranges) [('1', 41, 44)] >>> ranges = [("1", 30, 40), ("1", 42, 50)] >>> range_interleave(ranges) [('1', 41, 41)] >>> range_interleave(ranges, sizes={"1": 70}) [('1', 1, 29), ('1', 41, 41), ('1', 51, 70)] """ from jcvi.utils.iter import pairwise ranges = range_merge(ranges) interleaved_ranges = [] for ch, cranges in groupby(ranges, key=lambda x: x[0]): cranges = list(cranges) size = sizes.get(ch, None) if size: ch, astart, aend = cranges[0] if astart > 1: interleaved_ranges.append((ch, 1, astart - 1)) elif empty: interleaved_ranges.append(None) for a, b in pairwise(cranges): ch, astart, aend = a ch, bstart, bend = b istart, iend = aend + 1, bstart - 1 if istart <= iend: interleaved_ranges.append((ch, istart, iend)) elif empty: interleaved_ranges.append(None) if size: ch, astart, aend = cranges[-1] if aend < size: interleaved_ranges.append((ch, aend + 1, size)) elif empty: interleaved_ranges.append(None) return interleaved_ranges
0.000684
def convert_p(element, text):
    """ Indents the text with one leading space per level of element nesting """
    depth = -1
    while element:
        if (not element.name == '[document]' and
                not element.parent.get('id') == '__RESTRUCTIFY_WRAPPER__'):
            depth += 1
        element = element.parent
    if text:
        text = ' ' * depth + text
    return text
0.002725
def register_event(self): """ 注册事件 """ event_bus = Environment.get_instance().event_bus event_bus.prepend_listener(EVENT.PRE_BEFORE_TRADING, self._pre_before_trading) event_bus.prepend_listener(EVENT.POST_SETTLEMENT, self._post_settlement)
0.013937
def _call_pyfftw(self, x, out, **kwargs): """Implement ``self(x[, out, **kwargs])`` for pyfftw back-end. Parameters ---------- x : `numpy.ndarray` Array representing the function to be transformed out : `numpy.ndarray` Array to which the output is written planning_effort : {'estimate', 'measure', 'patient', 'exhaustive'} Flag for the amount of effort put into finding an optimal FFTW plan. See the `FFTW doc on planner flags <http://www.fftw.org/fftw3_doc/Planner-Flags.html>`_. planning_timelimit : float or ``None``, optional Limit planning time to roughly this many seconds. Default: ``None`` (no limit) threads : int, optional Number of threads to use. Default: 1 Returns ------- out : `numpy.ndarray` Result of the transform. If ``out`` was given, the returned object is a reference to it. """ # We pop some kwargs options here so that we always use the ones # given during init or implicitly assumed. kwargs.pop('axes', None) kwargs.pop('halfcomplex', None) kwargs.pop('normalise_idft', None) # We use `True` # Pre-processing in IFT = post-processing in FT, but with division # instead of multiplication and switched grids. In-place for C2C only. if self.range.field == ComplexNumbers(): # preproc is out in this case preproc = self._preprocess(x, out=out) else: preproc = self._preprocess(x) # The actual call to the FFT library. We store the plan for re-use. direction = 'forward' if self.sign == '-' else 'backward' if self.range.field == RealNumbers() and not self.halfcomplex: # Need to use a complex array as out if we do C2R since the # FFT has to be C2C self._fftw_plan = pyfftw_call( preproc, preproc, direction=direction, halfcomplex=self.halfcomplex, axes=self.axes, normalise_idft=True, **kwargs) fft_arr = preproc else: # Only here we can use out directly self._fftw_plan = pyfftw_call( preproc, out, direction=direction, halfcomplex=self.halfcomplex, axes=self.axes, normalise_idft=True, **kwargs) fft_arr = out # Normalization is only done for 'backward', we need it for 'forward', # too. if self.sign == '-': fft_arr /= np.prod(np.take(self.domain.shape, self.axes)) # Post-processing in IFT = pre-processing in FT. In-place for # C2C and HC2R. For C2R, this is out-of-place and discards the # imaginary part. self._postprocess(fft_arr, out=out) return out
0.00069
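The '-'-sign branch above divides by the product of the domain shape along the transformed axes only; a tiny NumPy sketch of that factor (the shape and axes are made up):

import numpy as np

shape, axes = (4, 6, 8), (0, 2)
print(np.prod(np.take(shape, axes)))  # 32, i.e. 4 * 8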
def prev(self): """Fetch a set of items with IDs greater than current set.""" if self.limit and self.limit == self.num_tweets: raise StopIteration self.index -= 1 if self.index < 0: # There's no way to fetch a set of tweets directly 'above' the # current set raise StopIteration data = self.results[self.index] self.max_id = self.model_results[self.index].max_id self.num_tweets += 1 return data
0.003929
def setup_ci(): # type: () -> None """ Setup AppEngine SDK on CircleCI """ gcloud_path = shell.run('which gcloud', capture=True).stdout.strip() sdk_path = normpath(join(gcloud_path, '../../platform/google_appengine')) gcloud_cmd = gcloud_path + ' --quiet' if not exists(sdk_path): log.info("Installing AppEngine SDK") shell.run('sudo {} components install app-engine-python'.format( gcloud_cmd )) else: # Only initialise once. To reinitialise, just build without cache. log.info("AppEngine SDK already initialised") log.info("Using service account authentication") shell.run('{} auth activate-service-account --key-file {}'.format( gcloud_cmd, conf.proj_path('ops/client_secret.json') ))
0.001255
def _jsonld(graph, format, *args, **kwargs): """Return formatted graph in JSON-LD ``format`` function.""" import json from pyld import jsonld from renku.models._jsonld import asjsonld output = getattr(jsonld, format)([ asjsonld(action) for action in graph.activities.values() ]) return json.dumps(output, indent=2)
0.002841
def get_notebook_app_versions(): """ Get the valid version numbers of the notebook app. """ notebook_apps = dxpy.find_apps(name=NOTEBOOK_APP, all_versions=True) versions = [str(dxpy.describe(app['id'])['version']) for app in notebook_apps] return versions
0.007168
def add_extra_model_fields(sender, **kwargs): """ Injects custom fields onto the given sender model as defined by the ``EXTRA_MODEL_FIELDS`` setting. This is a closure over the "fields" variable. """ model_key = sender._meta.app_label, sender._meta.model_name for field_name, field in fields.get(model_key, {}): field.contribute_to_class(sender, field_name)
0.002545
def add(self, *items): """ Add items to be sorted. @param items: One or more items to be added. @type items: I{item} @return: self @rtype: L{DepList} """ for item in items: self.unsorted.append(item) key = item[0] self.index[key] = item return self
0.005618
def builds(self, confs):
    """For backwards compatibility when directly assigning builds"""
    self._named_builds = {}
    self._builds = []
    for values in confs:
        if len(values) == 2:
            self._builds.append(BuildConf(values[0], values[1], {}, {}, self.reference))
        elif len(values) == 4:
            self._builds.append(BuildConf(values[0], values[1], values[2], values[3],
                                          self.reference))
        elif len(values) != 5:
            raise Exception("Invalid build configuration, has to be a tuple of "
                            "(settings, options, env_vars, build_requires, reference)")
        else:
            self._builds.append(BuildConf(*values))
0.007742
def get_issuers(self):
    """
    Gets the issuers (from message and from assertion)

    :returns: The issuers
    :rtype: list
    """
    issuers = set()

    message_issuer_nodes = OneLogin_Saml2_XML.query(self.document, '/samlp:Response/saml:Issuer')
    if len(message_issuer_nodes) > 0:
        if len(message_issuer_nodes) == 1:
            issuer_value = OneLogin_Saml2_XML.element_text(message_issuer_nodes[0])
            if issuer_value:
                issuers.add(issuer_value)
        else:
            raise OneLogin_Saml2_ValidationError(
                'Issuer of the Response is multiple.',
                OneLogin_Saml2_ValidationError.ISSUER_MULTIPLE_IN_RESPONSE
            )

    assertion_issuer_nodes = self.__query_assertion('/saml:Issuer')
    if len(assertion_issuer_nodes) == 1:
        issuer_value = OneLogin_Saml2_XML.element_text(assertion_issuer_nodes[0])
        if issuer_value:
            issuers.add(issuer_value)
    else:
        raise OneLogin_Saml2_ValidationError(
            'Issuer of the Assertion not found or multiple.',
            OneLogin_Saml2_ValidationError.ISSUER_NOT_FOUND_IN_ASSERTION
        )

    # issuers is already a set, so a plain list() suffices
    return list(issuers)
0.003849
def communicate(self): """Retrieve information.""" self._communicate_first = True self._process.waitForFinished() enco = self._get_encoding() if self._partial_stdout is None: raw_stdout = self._process.readAllStandardOutput() stdout = handle_qbytearray(raw_stdout, enco) else: stdout = self._partial_stdout raw_stderr = self._process.readAllStandardError() stderr = handle_qbytearray(raw_stderr, enco) result = [stdout.encode(enco), stderr.encode(enco)] if PY2: stderr = stderr.decode() result[-1] = '' self._result = result if not self._fired: self.sig_finished.emit(self, result[0], result[-1]) self._fired = True return result
0.002451
def insert(cls, jobs_data, queue=None, statuses_no_storage=None, return_jobs=True, w=None, j=None): """ Insert a job into MongoDB """ now = datetime.datetime.utcnow() for data in jobs_data: if data["status"] == "started": data["datestarted"] = now no_storage = (statuses_no_storage is not None) and ("started" in statuses_no_storage) if no_storage and return_jobs: for data in jobs_data: data["_id"] = ObjectId() # Give the job a temporary ID else: inserted = context.connections.mongodb_jobs.mrq_jobs.insert( jobs_data, manipulate=True, w=w, j=j ) if return_jobs: jobs = [] for data in jobs_data: job = cls(data["_id"], queue=queue) job.set_data(data) job.statuses_no_storage = statuses_no_storage job.stored = (not no_storage) if data["status"] == "started": job.datestarted = data["datestarted"] jobs.append(job) return jobs else: return inserted
0.003263
def get_multi_causal_downstream(graph, nbunch: Union[BaseEntity, Iterable[BaseEntity]]): """Get the union of all of the 2-level deep causal downstream subgraphs from the nbunch. :param pybel.BELGraph graph: A BEL graph :param nbunch: A BEL node or list of BEL nodes :return: A subgraph of the original BEL graph :rtype: pybel.BELGraph """ result = get_downstream_causal_subgraph(graph, nbunch) expand_downstream_causal(graph, result) return result
0.006198
def _warn_about_problematic_credentials(credentials): """Determines if the credentials are problematic. Credentials from the Cloud SDK that are associated with Cloud SDK's project are problematic because they may not have APIs enabled and have limited quota. If this is the case, warn about it. """ from google.auth import _cloud_sdk if credentials.client_id == _cloud_sdk.CLOUD_SDK_CLIENT_ID: warnings.warn(_CLOUD_SDK_CREDENTIALS_WARNING)
0.002101
def scramble_string(s, key): """ s is the puzzle's solution in column-major order, omitting black squares: i.e. if the puzzle is: C A T # # A # # R solution is CATAR Key is a 4-digit number in the range 1000 <= key <= 9999 """ key = key_digits(key) for k in key: # foreach digit in the key s = shift(s, key) # for each char by each digit in the key in sequence s = s[k:] + s[:k] # cut the sequence around the key digit s = shuffle(s) # do a 1:1 shuffle of the 'deck' return s
0.00173
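A hypothetical sketch of the key_digits helper the scrambler assumes (it is not shown in the snippet): splitting the 4-digit key into its decimal digits.

def key_digits(key):
    """Split an integer key into its decimal digits, most significant first."""
    return [int(c) for c in str(key)]

assert key_digits(1234) == [1, 2, 3, 4]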
def _format_usage_without_prefix(parser): """ Use private argparse APIs to get the usage string without the 'usage: ' prefix. """ fmt = parser._get_formatter() fmt.add_usage(parser.usage, parser._actions, parser._mutually_exclusive_groups, prefix='') return fmt.format_help().strip()
0.00304
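Usage sketch for the helper above (standard library only; the program name and option are illustrative):

import argparse

parser = argparse.ArgumentParser(prog="tool")
parser.add_argument("--count", type=int)
print(_format_usage_without_prefix(parser))  # -> "tool [-h] [--count COUNT]"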
def replace_cluster_custom_object_status(self, group, version, plural, name, body, **kwargs): # noqa: E501 """replace_cluster_custom_object_status # noqa: E501 replace status of the cluster scoped specified custom object # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_cluster_custom_object_status(group, version, plural, name, body, async_req=True) >>> result = thread.get() :param async_req bool :param str group: the custom resource's group (required) :param str version: the custom resource's version (required) :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required) :param str name: the custom object's name (required) :param UNKNOWN_BASE_TYPE body: (required) :return: object If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_cluster_custom_object_status_with_http_info(group, version, plural, name, body, **kwargs) # noqa: E501 else: (data) = self.replace_cluster_custom_object_status_with_http_info(group, version, plural, name, body, **kwargs) # noqa: E501 return data
0.001362
def listAcquisitionEras_CI(self, acq=''):
    """
    Returns all acquisition eras in dbs
    """
    try:
        acq = str(acq)
    except Exception:
        dbsExceptionHandler('dbsException-invalid-input',
                            'acquisition_era_name given is not valid : %s' % acq)
    conn = self.dbi.connection()
    try:
        result = self.acqlst_ci.execute(conn, acq)
        return result
    finally:
        if conn:
            conn.close()
0.015317
def closeEvent(self, event):
    """
    Saves dirty editors on close and cancels the event if the user
    chose to continue working.

    :param event: close event
    """
    dirty_widgets = []
    for w in self.widgets(include_clones=False):
        if w.dirty:
            dirty_widgets.append(w)
    filenames = []
    for w in dirty_widgets:
        if os.path.exists(w.file.path):
            filenames.append(w.file.path)
        else:
            filenames.append(w.documentTitle())
    if len(filenames) == 0:
        self.close_all()
        return
    dlg = DlgUnsavedFiles(self, files=filenames)
    if dlg.exec_() == dlg.Accepted:
        if not dlg.discarded:
            for item in dlg.listWidget.selectedItems():
                filename = item.text()
                widget = None
                for widget in dirty_widgets:
                    if widget.file.path == filename or \
                            widget.documentTitle() == filename:
                        break
                tw = widget.parent_tab_widget
                tw.save_widget(widget)
                tw.remove_tab(tw.indexOf(widget))
        self.close_all()
    else:
        event.ignore()
0.001508
def _versions_from_changelog(changelog): """ Return all released versions from given ``changelog``, sorted. :param dict changelog: A changelog dict as returned by ``releases.util.parse_changelog``. :returns: A sorted list of `semantic_version.Version` objects. """ versions = [Version(x) for x in changelog if BUGFIX_RELEASE_RE.match(x)] return sorted(versions)
0.002506
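A self-contained sketch of the filter-and-sort contract, with a made-up changelog dict and a stand-in for BUGFIX_RELEASE_RE (the real pattern lives elsewhere in the module; assumes the semantic_version package):

import re
from semantic_version import Version

BUGFIX_RELEASE_RE = re.compile(r"^\d+\.\d+\.\d+$")
changelog = {"1.0.1": [], "unreleased": [], "1.1.0": [], "1.0.0": []}
versions = sorted(Version(v) for v in changelog if BUGFIX_RELEASE_RE.match(v))
print([str(v) for v in versions])  # ['1.0.0', '1.0.1', '1.1.0']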
def check_dihedral(self, construction_table):
    """Checks if the dihedral-defining atoms are collinear.

    Checks for each index, starting from the third row of the
    ``construction_table``, if the reference atoms are collinear.

    Args:
        construction_table (pd.DataFrame):

    Returns:
        list: A list of problematic indices.
    """
    c_table = construction_table
    angles = self.get_angle_degrees(c_table.iloc[3:, :].values)
    problem_index = np.nonzero((175 < angles) | (angles < 5))[0]
    rename = dict(enumerate(c_table.index[3:]))
    problem_index = [rename[i] for i in problem_index]
    return problem_index
0.002869
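The collinearity test above, isolated with NumPy only (angle values are illustrative): flag anything within 5 degrees of 0 or 180.

import numpy as np

angles = np.array([12.0, 179.2, 91.0, 3.4])
problem = np.nonzero((175 < angles) | (angles < 5))[0]
print(problem)  # [1 3]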
def process_raw_data(cls, raw_data): """Create a new model using raw API response.""" properties = raw_data["properties"] ip_pools = [] for raw_content in properties.get("ipPools", []): raw_content["parentResourceID"] = raw_data["resourceId"] raw_content["grandParentResourceID"] = raw_data["parentResourceID"] ip_pools.append(IPPools.from_raw_data(raw_content)) properties["ipPools"] = ip_pools ip_configurations = [] for raw_content in properties.get("ipConfigurations", []): resource = Resource.from_raw_data(raw_content) ip_configurations.append(resource) properties["ipConfigurations"] = ip_configurations network_interfaces = [] for raw_content in properties.get("networkInterfaces", []): resource = Resource.from_raw_data(raw_content) network_interfaces.append(resource) properties["networkInterfaces"] = network_interfaces return super(LogicalSubnetworks, cls).process_raw_data(raw_data)
0.001854
def plot_prof_2(self, mod, species, xlim1, xlim2): """ Plot one species for cycle between xlim1 and xlim2 Parameters ---------- mod : string or integer Model to plot, same as cycle number. species : list Which species to plot. xlim1, xlim2 : float Mass coordinate range. """ mass=self.se.get(mod,'mass') Xspecies=self.se.get(mod,'yps',species) pyl.plot(mass,Xspecies,'-',label=str(mod)+', '+species) pyl.xlim(xlim1,xlim2) pyl.legend()
0.019031
def render(self, **kwargs):
    """ Plots the surface and the control points grid. """
    # Calling parent function
    super(VisSurface, self).render(**kwargs)

    # Initialize a list to store VTK actors
    vtk_actors = []

    # Start plotting
    for plot in self._plots:
        # Plot control points
        if plot['type'] == 'ctrlpts' and self.vconf.display_ctrlpts:
            vertices = [v.data for v in plot['ptsarr'][0]]
            faces = [q.data for q in plot['ptsarr'][1]]
            # Points as spheres
            # (np.float/np.int were removed from recent NumPy; use the builtins)
            pts = np.array(vertices, dtype=float)
            vtkpts = numpy_to_vtk(pts, deep=False, array_type=VTK_FLOAT)
            vtkpts.SetName(plot['name'])
            actor1 = vtkh.create_actor_pts(pts=vtkpts, color=vtkh.create_color(plot['color']),
                                           name=plot['name'], index=plot['idx'])
            vtk_actors.append(actor1)
            # Quad mesh
            lines = np.array(faces, dtype=int)
            actor2 = vtkh.create_actor_mesh(pts=vtkpts, lines=lines, color=vtkh.create_color(plot['color']),
                                            name=plot['name'], index=plot['idx'], size=self.vconf.line_width)
            vtk_actors.append(actor2)

        # Plot evaluated points
        if plot['type'] == 'evalpts' and self.vconf.display_evalpts:
            # Convert to a contiguous array before handing off to VTK
            vertices = np.array([v.data for v in plot['ptsarr'][0]], dtype=float)
            vtkpts = numpy_to_vtk(vertices, deep=False, array_type=VTK_FLOAT)
            vtkpts.SetName(plot['name'])
            faces = [t.data for t in plot['ptsarr'][1]]
            tris = np.array(faces, dtype=int)
            actor1 = vtkh.create_actor_tri(pts=vtkpts, tris=tris, color=vtkh.create_color(plot['color']),
                                           name=plot['name'], index=plot['idx'])
            vtk_actors.append(actor1)

        # Plot trim curves
        if self.vconf.display_trims:
            if plot['type'] == 'trimcurve':
                pts = np.array(plot['ptsarr'], dtype=float)
                vtkpts = numpy_to_vtk(pts, deep=False, array_type=VTK_FLOAT)
                vtkpts.SetName(plot['name'])
                actor1 = vtkh.create_actor_polygon(pts=vtkpts, color=vtkh.create_color(plot['color']),
                                                   name=plot['name'], index=plot['idx'],
                                                   size=self.vconf.trim_size)
                vtk_actors.append(actor1)

    # Render actors
    return vtkh.create_render_window(vtk_actors,
                                     dict(KeyPressEvent=(self.vconf.keypress_callback, 1.0)),
                                     figure_size=self.vconf.figure_size)
0.004676
def smartparse(cls, toparse, tzinfo=None): """Method which uses dateutil.parse and extras to try and parse the string. Valid dates are found at: http://labix.org/python-dateutil#head-1443e0f14ad5dff07efd465e080d1110920673d8-2 Other valid formats include: "now" or "today" "yesterday" "tomorrow" "5 minutes ago" "10 hours ago" "10h5m ago" "start of yesterday" "end of tomorrow" "end of 3rd of March" Args: toparse: The string to parse. tzinfo: Timezone for the resultant datetime_tz object should be in. (Defaults to your local timezone.) Returns: New datetime_tz object. Raises: ValueError: If unable to make sense of the input. """ # Default for empty fields are: # year/month/day == now # hour/minute/second/microsecond == 0 toparse = toparse.strip() if tzinfo is None: dt = cls.now() else: dt = cls.now(tzinfo) default = dt.replace(hour=0, minute=0, second=0, microsecond=0) # Remove "start of " and "end of " prefix in the string if toparse.lower().startswith("end of "): toparse = toparse[7:].strip() dt += datetime.timedelta(days=1) dt = dt.replace(hour=0, minute=0, second=0, microsecond=0) dt -= datetime.timedelta(microseconds=1) default = dt elif toparse.lower().startswith("start of "): toparse = toparse[9:].strip() dt = dt.replace(hour=0, minute=0, second=0, microsecond=0) default = dt # Handle strings with "now", "today", "yesterday", "tomorrow" and "ago". # Need to use lowercase toparselower = toparse.lower() if toparselower in ["now", "today"]: pass elif toparselower == "yesterday": dt -= datetime.timedelta(days=1) elif toparselower in ("tomorrow", "tommorrow"): # tommorrow is spelled wrong, but code out there might be depending on it # working dt += datetime.timedelta(days=1) elif "ago" in toparselower: # Remove the "ago" bit toparselower = toparselower[:-3] # Replace all "a day and an hour" with "1 day 1 hour" toparselower = toparselower.replace("a ", "1 ") toparselower = toparselower.replace("an ", "1 ") toparselower = toparselower.replace(" and ", " ") # Match the following # 1 hour ago # 1h ago # 1 h ago # 1 hour ago # 2 hours ago # Same with minutes, seconds, etc. tocheck = ("seconds", "minutes", "hours", "days", "weeks", "months", "years") result = {} for match in re.finditer("([0-9]+)([^0-9]*)", toparselower): amount = int(match.group(1)) unit = match.group(2).strip() for bit in tocheck: regex = "^([%s]|((%s)s?))$" % ( bit[0], bit[:-1]) bitmatch = re.search(regex, unit) if bitmatch: result[bit] = amount break else: raise ValueError("Was not able to parse date unit %r!" % unit) delta = dateutil.relativedelta.relativedelta(**result) dt -= delta else: # Handle strings with normal datetime format, use original case. dt = dateutil.parser.parse(toparse, default=default.asdatetime(), tzinfos=pytz_abbr.tzinfos) if dt is None: raise ValueError("Was not able to parse date!") if dt.tzinfo is pytz_abbr.unknown: dt = dt.replace(tzinfo=None) if dt.tzinfo is None: if tzinfo is None: tzinfo = localtz() dt = cls(dt, tzinfo) else: if isinstance(dt.tzinfo, pytz_abbr.tzabbr): abbr = dt.tzinfo dt = dt.replace(tzinfo=None) dt = cls(dt, abbr.zone, is_dst=abbr.dst) dt = cls(dt) return dt
0.012513
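A minimal sketch of the "ago" arithmetic above (assumes python-dateutil is installed; the timestamps are illustrative):

import datetime
import dateutil.relativedelta

now = datetime.datetime(2020, 1, 10, 12, 0)
delta = dateutil.relativedelta.relativedelta(days=1, hours=2)
print(now - delta)  # 2020-01-09 10:00:00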