Dataset columns: text (string, lengths 78 to 104k) and score (float64, range 0 to 0.18). Each row below pairs one code sample with its score.
def get_completed_task(self, task, timeout=-1):
    """
    Waits until the task is completed and returns the task resource.

    Args:
        task: TaskResource
        timeout: Timeout in seconds

    Returns:
        dict: TaskResource
    """
    self.__wait_task_completion(task, timeout)
    return self.get(task)
0.00554
def add_legend(self, labels=None, **kwargs):
    """Specify legend for a plot.

    Adds labels and basic legend specifications for specific plot.

    For the optional Args, refer to
    https://matplotlib.org/api/_as_gen/matplotlib.pyplot.legend.html
    for more information.

    # TODO: Add legend capabilities for Loss/Gain plots. This is possible
    using the return_fig_ax kwarg in the main plotting function.

    Args:
        labels (list of str): String representing each item in plot that
            will be added to the legend.

    Keyword Arguments:
        loc (str, int, len-2 list of floats, optional): Location of legend.
            See matplotlib documentation for more detail.
            Default is None.
        bbox_to_anchor (2-tuple or 4-tuple of floats, optional): Specify
            position and size of legend box. 2-tuple will specify (x,y)
            coordinate of part of box specified with `loc` kwarg.
            4-tuple will specify (x, y, width, height). See matplotlib
            documentation for more detail.
            Default is None.
        size (float, optional): Set size of legend using call to `prop`
            dict in legend call. See matplotlib documentation for more
            detail. Default is None.
        ncol (int, optional): Number of columns in the legend.

    Note: Other kwargs are available. See:
        https://matplotlib.org/api/_as_gen/matplotlib.pyplot.legend.html
    """
    if 'size' in kwargs:
        if 'prop' not in kwargs:
            kwargs['prop'] = {'size': kwargs['size']}
        else:
            kwargs['prop']['size'] = kwargs['size']
        del kwargs['size']

    self.legend.add_legend = True
    self.legend.legend_labels = labels
    self.legend.legend_kwargs = kwargs
0.001031
def threshold_absolute(W, thr, copy=True):
    '''
    This function thresholds the connectivity matrix by absolute weight
    magnitude. All weights below the given threshold, and all weights
    on the main diagonal (self-self connections) are set to 0.

    If copy is not set, this function will *modify W in place.*

    Parameters
    ----------
    W : np.ndarray
        weighted connectivity matrix
    thr : float
        absolute weight threshold
    copy : bool
        if True, returns a copy of the matrix. Otherwise, modifies the matrix
        in place. Default value=True.

    Returns
    -------
    W : np.ndarray
        thresholded connectivity matrix
    '''
    if copy:
        W = W.copy()
    np.fill_diagonal(W, 0)  # clear diagonal
    W[W < thr] = 0  # apply threshold
    return W
0.001233
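A minimal usage sketch for threshold_absolute, assuming only numpy is available (the matrix values below are made up for illustration):

import numpy as np

W = np.array([[0.9, 0.2, 0.5],
              [0.2, 1.0, 0.05],
              [0.5, 0.05, 0.8]])

W_thr = threshold_absolute(W, thr=0.3)
# Diagonal is zeroed and weights below 0.3 are removed:
# [[0.   0.   0.5 ]
#  [0.   0.   0.  ]
#  [0.5  0.   0.  ]]
print(W_thr)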
def search_image(self, search_term):
    """
    Search for a specific image by providing a search term (mainly used with
    ec2's community and public images).

    :param search_term: Search term to be used when searching for images'
        names containing this term.
    :returns: A list of all images whose names contain the given search_term.
    """
    payload = {
        'search_term': search_term
    }

    data = json.dumps(payload)
    req = self.request(self.mist_client.uri + '/clouds/' + self.id + '/images',
                       data=data)
    images = req.get().json()
    return images
0.009724
def handle_block( mediator_state: MediatorTransferState, state_change: Block, channelidentifiers_to_channels: ChannelMap, pseudo_random_generator: random.Random, ) -> TransitionResult[MediatorTransferState]: """ After Raiden learns about a new block this function must be called to handle expiration of the hash time locks. Args: state: The current state. Return: TransitionResult: The resulting iteration """ expired_locks_events = events_to_remove_expired_locks( mediator_state, channelidentifiers_to_channels, state_change.block_number, pseudo_random_generator, ) secret_reveal_events = events_for_onchain_secretreveal_if_dangerzone( channelmap=channelidentifiers_to_channels, secrethash=mediator_state.secrethash, transfers_pair=mediator_state.transfers_pair, block_number=state_change.block_number, block_hash=state_change.block_hash, ) unlock_fail_events = events_for_expired_pairs( channelidentifiers_to_channels=channelidentifiers_to_channels, transfers_pair=mediator_state.transfers_pair, waiting_transfer=mediator_state.waiting_transfer, block_number=state_change.block_number, ) iteration = TransitionResult( mediator_state, unlock_fail_events + secret_reveal_events + expired_locks_events, ) return iteration
0.000691
def load(cls, webfinger, pypump):
    """ Load JSON from disk into store object """
    filename = cls.get_filename()
    if os.path.isfile(filename):
        data = open(filename).read()
        data = json.loads(data)
        store = cls(data, filename=filename)
    else:
        store = cls(filename=filename)

    store.prefix = webfinger
    return store
0.004988
def screenshot(self, filename=None, transparent_background=None, return_img=None, window_size=None): """ Takes screenshot at current camera position Parameters ---------- filename : str, optional Location to write image to. If None, no image is written. transparent_background : bool, optional Makes the background transparent. Default False. return_img : bool, optional If a string filename is given and this is true, a NumPy array of the image will be returned. Returns ------- img : numpy.ndarray Array containing pixel RGB and alpha. Sized: [Window height x Window width x 3] for transparent_background=False [Window height x Window width x 4] for transparent_background=True Examples -------- >>> import vtki >>> sphere = vtki.Sphere() >>> plotter = vtki.Plotter() >>> actor = plotter.add_mesh(sphere) >>> plotter.screenshot('screenshot.png') # doctest:+SKIP """ if window_size is not None: self.window_size = window_size # configure image filter if transparent_background is None: transparent_background = rcParams['transparent_background'] self.image_transparent_background = transparent_background # This if statement allows you to save screenshots of closed plotters # This is needed for the sphinx-gallery work if not hasattr(self, 'ren_win'): # If plotter has been closed... # check if last_image exists if hasattr(self, 'last_image'): # Save last image return self._save_image(self.last_image, filename, return_img) # Plotter hasn't been rendered or was improperly closed raise AttributeError('This plotter is unable to save a screenshot.') if isinstance(self, Plotter): # TODO: we need a consistent rendering function self.render() else: self._render() # debug: this needs to be called twice for some reason, img = self.image img = self.image return self._save_image(img, filename, return_img)
0.001722
def get_assessments_metadata(self):
    """Gets the metadata for the assessments.

    return: (osid.Metadata) - metadata for the assessments
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for osid.learning.ActivityForm.get_assets_metadata_template
    metadata = dict(self._mdata['assessments'])
    metadata.update({'existing_assessments_values': self._my_map['assessmentIds']})
    return Metadata(**metadata)
0.007968
def convert2hdf5(platform_name): """Retrieve original RSR data and convert to internal hdf5 format""" import h5py ahi = AhiRSR(platform_name) filename = os.path.join(ahi.output_dir, "rsr_ahi_{platform}.h5".format(platform=platform_name)) with h5py.File(filename, "w") as h5f: h5f.attrs['description'] = 'Relative Spectral Responses for AHI' h5f.attrs['platform_name'] = platform_name h5f.attrs['sensor'] = 'ahi' h5f.attrs['band_names'] = AHI_BAND_NAMES.values() for chname in AHI_BAND_NAMES.values(): grp = h5f.create_group(chname) wvl = ahi.rsr[chname][ 'wavelength'][~np.isnan(ahi.rsr[chname]['wavelength'])] rsp = ahi.rsr[chname][ 'response'][~np.isnan(ahi.rsr[chname]['wavelength'])] grp.attrs['central_wavelength'] = get_central_wave(wvl, rsp) arr = ahi.rsr[chname]['wavelength'] dset = grp.create_dataset('wavelength', arr.shape, dtype='f') dset.attrs['unit'] = 'm' dset.attrs['scale'] = 1e-06 dset[...] = arr arr = ahi.rsr[chname]['response'] dset = grp.create_dataset('response', arr.shape, dtype='f') dset[...] = arr
0.001546
def event_loop(self):
    """ Run the event loop once. """
    # Note: the original checked hasattr(self.loop, '._run_once'); the
    # leading dot made the check always False, so it is dropped here.
    if hasattr(self.loop, '_run_once'):
        self.loop._thread_id = threading.get_ident()
        try:
            self.loop._run_once()
        finally:
            self.loop._thread_id = None
    else:
        self.loop.call_soon(self.loop.stop)
        self.loop.run_forever()
0.005236
def get_level(self, level=2):
    """Get all nodes that are exactly this far away."""
    if level == 1:
        for child in self.children.values():
            yield child
    else:
        for child in self.children.values():
            for node in child.get_level(level - 1):
                yield node
0.013289
def make_bound(self, for_instance): """ Create a new :ref:`bound field class <api-aioxmpp.forms-bound-fields>` or return an existing one for the given form object. :param for_instance: The form instance to which the bound field should be bound. If no bound field can be found on the given `for_instance` for this field, a new one is created using :meth:`create_bound`, stored at the instance and returned. Otherwise, the existing instance is returned. .. seealso:: :meth:`create_bound` creates a new bound field for the given form instance (without storing it anywhere). """ try: return for_instance._descriptor_data[self] except KeyError: bound = self.create_bound(for_instance) for_instance._descriptor_data[self] = bound return bound
0.002116
def avail_images(call=None): ''' Get list of available images CLI Example: .. code-block:: bash salt-cloud --list-images Can use a custom URL for images. Default is: .. code-block:: yaml image_url: images.joyent.com/images ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_images function must be called with ' '-f or --function, or with the --list-images option' ) user = config.get_cloud_config_value( 'user', get_configured_provider(), __opts__, search_global=False ) img_url = config.get_cloud_config_value( 'image_url', get_configured_provider(), __opts__, search_global=False, default='{0}{1}/{2}/images'.format(DEFAULT_LOCATION, JOYENT_API_HOST_SUFFIX, user) ) if not img_url.startswith('http://') and not img_url.startswith('https://'): img_url = '{0}://{1}'.format(_get_proto(), img_url) rcode, data = query(command='my/images', method='GET') log.debug(data) ret = {} for image in data: ret[image['name']] = image return ret
0.002611
def add_package(self, package):
    """ Add a package to this project """
    self._data.setdefault('packages', {})
    self._data['packages'][package.name] = package.source
    # renamed the loop variable so it no longer shadows the `package` argument
    for deploy_package in package.deploy_packages:
        self.add_package(deploy_package)
    self._save()
0.009404
def get_all_tags(image_name, branch=None):
    """
    GET /v1/repositories/<namespace>/<repository_name>/tags

    :param image_name: The docker image name
    :param branch: The branch to filter by
    :return: A list of Version instances, latest first
    """
    try:
        return get_all_tags_no_auth(image_name, branch)
    except AuthException:
        return get_all_tags_with_auth(image_name, branch)
0.002433
def from_unit_cube(self, x):
    """
    Used by multinest

    :param x: 0 < x < 1
    :param lower_bound:
    :param upper_bound:
    :return:
    """
    mu = self.mu.value
    sigma = self.sigma.value

    sqrt_two = 1.414213562

    if x < 1e-16 or (1 - x) < 1e-16:
        res = -1e32
    else:
        res = mu + sigma * sqrt_two * erfcinv(2 * (1 - x))

    return res
0.004556
def how_long(length=4, choices=len(words),
             speed=1000 * 1000 * 1000 * 1000, optimism=2):
    """
    How long might it take to guess a password?

    @param length: the number of words that we're going to choose.
    @type length: L{int}

    @param choices: the number of words we might choose between.
    @type choices: L{int}

    @param speed: the speed of our hypothetical password guesser, in guesses
        per second.
    @type speed: L{int}

    @param optimism: When we start guessing all the options, we probably
        won't have to guess I{all} of them to get a hit. This assumes that
        the guesser will have to guess only C{1/optimism} of the total number
        of possible options before it finds a hit.
    """
    return (choices ** length) / (speed * optimism)
0.001245
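A worked example of the formula above; the real size of `words` is not given in this snippet, so `choices` is passed explicitly (6000 is a hypothetical word-list size):

# 6000 ** 4 possible passphrases = 1.296e15 guesses; at the default
# 1e12 guesses per second with optimism=2, about 648 seconds.
seconds = how_long(length=4, choices=6000)
print(seconds)  # 648.0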
def members(self):
    """Get the information list of all members of the group."""
    all_members = []
    for page in range(1, self.max_page() + 1):
        all_members.extend(self.single_page_members(page))
    return all_members
0.009434
def rotate_grid_from_profile(self, grid_elliptical): """ Rotate a grid of elliptical (y,x) coordinates from the reference frame of the profile back to the \ unrotated coordinate grid reference frame (coordinates are not shifted back to their original centre). This routine is used after computing deflection angles in the reference frame of the profile, so that the \ deflection angles can be re-rotated to the frame of the original coordinates before performing ray-tracing. Parameters ---------- grid_elliptical : TransformedGrid(ndarray) The (y, x) coordinates in the reference frame of an elliptical profile. """ y = np.add(np.multiply(grid_elliptical[:, 1], self.sin_phi), np.multiply(grid_elliptical[:, 0], self.cos_phi)) x = np.add(np.multiply(grid_elliptical[:, 1], self.cos_phi), - np.multiply(grid_elliptical[:, 0], self.sin_phi)) return np.vstack((y, x)).T
0.009288
def set_components(self):
    """
    Sets the Components Model nodes.
    """
    node_flags = attributes_flags = int(Qt.ItemIsSelectable | Qt.ItemIsEnabled)

    root_node = umbra.ui.nodes.DefaultNode(name="InvisibleRootNode")

    paths = {}
    for path in self.__engine.components_manager.paths:
        basename = os.path.basename(path)
        if not paths.get(basename):
            paths[basename] = {}

        paths[basename].update(dict((name, component)
                                    for (name, component) in self.__engine.components_manager
                                    if basename == os.path.basename(os.path.dirname(component.directory))))

    # Python 2 `iteritems` / `itervalues` replaced with `items` / `values`
    # for Python 3 compatibility.
    for path, components in paths.items():
        path_node = PathNode(name=path.title(),
                             parent=root_node,
                             node_flags=node_flags,
                             attributes_flags=attributes_flags)

        for component in components.values():
            if not component.interface:
                continue

            component_node = ComponentNode(component,
                                           name=component.title,
                                           parent=path_node,
                                           node_flags=node_flags,
                                           attributes_flags=attributes_flags,
                                           activated=umbra.ui.nodes.GraphModelAttribute(
                                               name="activated",
                                               flags=attributes_flags,
                                               roles={
                                                   Qt.DisplayRole: foundations.strings.to_string(
                                                       component.interface.activated),
                                                   Qt.DecorationRole: os.path.join(
                                                       self.__ui_resources_directory,
                                                       component.interface.activated and
                                                       self.__ui_activated_image or
                                                       self.__ui_deactivated_image)}))
            component_node.roles[Qt.DecorationRole] = os.path.join(
                self.__ui_resources_directory,
                "{0}{1}".format(component.category, self.__ui_category_affixe))

    root_node.sort_children()

    self.__model.initialize_model(root_node)
    return True
0.006592
def soap_fault(message=None, actor=None, code=None, detail=None): """ Create a SOAP Fault message :param message: Human readable error message :param actor: Who discovered the error :param code: Error code :param detail: More specific error message :return: A SOAP Fault message as a string """ _string = _actor = _code = _detail = None if message: _string = soapenv.Fault_faultstring(text=message) if actor: _actor = soapenv.Fault_faultactor(text=actor) if code: _code = soapenv.Fault_faultcode(text=code) if detail: _detail = soapenv.Fault_detail(text=detail) fault = soapenv.Fault( faultcode=_code, faultstring=_string, faultactor=_actor, detail=_detail, ) return "%s" % fault
0.001239
def start_session(self, causal_consistency=True, default_transaction_options=None): """Start a logical session. This method takes the same parameters as :class:`~pymongo.client_session.SessionOptions`. See the :mod:`~pymongo.client_session` module for details and examples. Requires MongoDB 3.6. It is an error to call :meth:`start_session` if this client has been authenticated to multiple databases using the deprecated method :meth:`~pymongo.database.Database.authenticate`. A :class:`~pymongo.client_session.ClientSession` may only be used with the MongoClient that started it. :Returns: An instance of :class:`~pymongo.client_session.ClientSession`. .. versionadded:: 3.6 """ return self.__start_session( False, causal_consistency=causal_consistency, default_transaction_options=default_transaction_options)
0.003956
def mod(ctx, number, divisor):
    """
    Returns the remainder after number is divided by divisor
    """
    number = conversions.to_decimal(number, ctx)
    divisor = conversions.to_decimal(divisor, ctx)
    return number - divisor * _int(ctx, number / divisor)
0.003774
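A standalone sketch of the same remainder formula using the stdlib decimal module; it assumes `_int` truncates toward zero, which the snippet does not confirm:

from decimal import Decimal

def mod_sketch(number, divisor):
    # remainder = number - divisor * int(number / divisor)
    number, divisor = Decimal(number), Decimal(divisor)
    return number - divisor * int(number / divisor)

print(mod_sketch("7", "3"))   # 1
print(mod_sketch("-7", "3"))  # -1 (sign follows the dividend under truncation)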
def OnGoToCell(self, event):
    """Shift a given cell into view"""
    row, col, tab = event.key

    try:
        self.grid.actions.cursor = row, col, tab
    except ValueError:
        msg = _("Cell {key} outside grid shape {shape}").format(
            key=event.key, shape=self.grid.code_array.shape)
        post_command_event(self.grid.main_window, self.grid.StatusBarMsg,
                           text=msg)
        event.Skip()
        return

    self.grid.MakeCellVisible(row, col)

    event.Skip()
0.003534
def api_key(self, api_key):
    """
    Sets the api_key of this GlobalSignCredentials.
    Unique ID for API client (provided by GlobalSign).

    :param api_key: The api_key of this GlobalSignCredentials.
    :type: str
    """
    if api_key is None:
        raise ValueError("Invalid value for `api_key`, must not be `None`")
    # `api_key` is known to be non-None past this point, so the redundant
    # `is not None` guard from the generated code is dropped.
    if len(api_key) > 1000:
        raise ValueError("Invalid value for `api_key`, length must be less than or equal to `1000`")

    self._api_key = api_key
0.007194
def threadpool(num_workers=None):
    """Apply stutils.mapreduce.map to the given function"""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(data):
            return mapreduce.map(func, data, num_workers)
        return wrapper
    return decorator
0.003597
def get_detailed_update(self, uid, uuid):
    """Returns the update object for the ID"""
    r = requests.get(api_url + 'users/' + str(uid) + '/update/' + str(uuid) + '/',
                     headers=self.headers)
    print(request_status(r))
    r.raise_for_status()
    return Update(r.json())
0.03876
def get_datacenter(conn):
    '''
    Return the datacenter from the config provider datacenter ID
    '''
    datacenter_id = get_datacenter_id()

    for item in conn.list_datacenters()['items']:
        if item['id'] == datacenter_id:
            return item

    raise SaltCloudNotFound(
        'The specified datacenter \'{0}\' could not be found.'.format(
            datacenter_id
        )
    )
0.002475
def css(self):
    """Returns
    -------
    str
        The CSS.
    """
    css_list = [DEFAULT_MARK_CSS]
    for aes in self.aesthetics:
        css_list.extend(get_mark_css(aes, self.values[aes]))
    # print('\n'.join(css_list))
    return '\n'.join(css_list)
0.009868
def display_callback(self, cpu_cycles, op_address, address, value):
    """ called via memory write_byte_middleware """
    self.display.write_byte(cpu_cycles, op_address, address, value)
    return value
0.009259
def install_required(f):
    """ Raise an exception if the namespace is not already installed """
    @wraps(f)
    def wrapped(self, *args, **kwargs):
        if self.directory.new:
            raise SprinterException("Namespace %s is not yet installed!" % self.namespace)
        return f(self, *args, **kwargs)
    return wrapped
0.005988
def on_aborted(self):
    """Device authentication aborted.

    Triggered when device authentication was aborted (either with
    `DeviceOAuthPoller.stop()` or via the "poll" event)
    """
    print('Authentication aborted')

    # Authentication aborted
    self.is_authenticating.acquire()
    self.is_authenticating.notify_all()
    self.is_authenticating.release()
0.007371
def stop(config, container, timeout=10, *args, **kwargs):
    '''
    Stop a running container

    :type container: string
    :param container: The container id to stop

    :type timeout: int
    :param timeout: Wait for a timeout to let the container exit gracefully
        before killing it

    :rtype: dict
    :returns: boolean
    '''
    err = "Unknown"
    client = _get_client(config)
    try:
        dcontainer = _get_container_infos(config, container)['Id']
        if is_running(config, dcontainer):
            client.stop(dcontainer, timeout=timeout)
            if not is_running(config, dcontainer):
                # Python 2 print statement converted to print() call
                print("Container stopped.")
                return True
            else:
                i = 0
                while is_running(config, dcontainer):
                    time.sleep(0.1)
                    if i > 100:
                        return kill(config, container)
                    i += 1
                return True
        else:
            return True
    except Exception as e:
        err = e
        utils.warning("Container not existing")
    return True
0.001813
def write(self, f):
    """ Write namespace as INI file.

    :param f: File object or path to file.
    """
    if isinstance(f, str):
        f = io.open(f, 'w', encoding='utf-8')

    if not hasattr(f, 'read'):
        raise AttributeError("Wrong type of file: {0}".format(type(f)))

    NS_LOGGER.info('Write to `{0}`'.format(f.name))
    for section in self.sections.keys():
        f.write('[{0}]\n'.format(section))
        for k, v in self[section].items():
            f.write('{0:15}= {1}\n'.format(k, v))
        f.write('\n')
    f.close()
0.003289
def get_observations(params: Dict) -> Dict[str, Any]:
    """Search observations, see:
    http://api.inaturalist.org/v1/docs/#!/Observations/get_observations.

    Returns the parsed JSON returned by iNaturalist (observations in
    r['results'], a list of dicts)
    """
    r = make_inaturalist_api_get_call('observations', params=params)
    return r.json()
0.008475
def parse_output(self, s):
    '''
    Example output:

    AVR Memory Usage
    ----------------
    Device: atmega2561

    Program:    4168 bytes (1.6% Full)
    (.text + .data + .bootloader)

    Data:         72 bytes (0.9% Full)
    (.data + .bss + .noinit)
    '''
    for x in s.splitlines():
        if '%' in x:
            name = x.split(':')[0].strip().lower()
            nbytes = x.split(':')[1].split('b')[0].strip()
            nbytes = int(nbytes)
            perc = x.split('(')[1].split('%')[0].strip()
            perc = float(perc)
            if name == 'program':
                self.program_bytes = nbytes
                self.program_percentage = perc
            else:
                self.data_bytes = nbytes
                self.data_percentage = perc
0.002291
def build_date(self):
    """ get build date.

    :return: build date. None if not found
    """
    # pylint: disable=len-as-condition
    if len(self.dutinformation) > 0 and (self.dutinformation.get(0).build is not None):
        return self.dutinformation.get(0).build.date
    return None
0.009119
def _send_api_message(self, message):
    """Send a Slack message via the chat.postMessage api.

    :param message: a dict of kwargs to be passed to slacker.
    """
    self.slack.chat.post_message(**message)
    self.log.debug("sent api message %r", message)
0.007092
def append(self, lines):
    """
    Args:
        lines (list): List of line strings to append to the end of the
            editor
    """
    if isinstance(lines, list):
        self._lines = self._lines + lines
    elif isinstance(lines, str):
        lines = lines.split('\n')
        self._lines = self._lines + lines
    else:
        raise TypeError('Unsupported type {0} for lines.'.format(type(lines)))
0.009009
def start_agent(self, cfgin=True): """ CLI interface to start 12-factor service """ default_conf = { "threads": { "result": { "number": 0, "function": None }, "worker": { "number": 0, "function": None }, }, "interval": { "refresh": 900, "heartbeat": 300, "reporting": 300, "test": 60 }, "heartbeat-hook": False } indata = {} if cfgin: indata = json.load(sys.stdin) elif os.environ.get("REFLEX_MONITOR_CONFIG"): indata = os.environ.get("REFLEX_MONITOR_CONFIG") if indata[0] != "{": indata = base64.b64decode(indata) else: self.NOTIFY("Using default configuration") conf = dictlib.union(default_conf, indata) conf['threads']['result']['function'] = self.handler_thread conf['threads']['worker']['function'] = self.worker_thread self.NOTIFY("Starting monitor Agent") try: self.configure(conf).start() except KeyboardInterrupt: self.thread_stopper.set() if self.refresh_stopper: self.refresh_stopper.set() if self.heartbeat_stopper: self.heartbeat_stopper.set() if self.reporting_stopper: self.reporting_stopper.set()
0.001268
async def is_pull_request(context, task): """Determine if a task is a pull-request-like task (restricted privs). This goes further than checking ``tasks_for``. We may or may not want to keep this. This checks for the following things:: * ``task.extra.env.tasks_for`` == "github-pull-request" * ``task.payload.env.MOBILE_HEAD_REPOSITORY`` doesn't come from an official repo * ``task.metadata.source`` doesn't come from an official repo, either * The last 2 items are landed on the official repo Args: context (scriptworker.context.Context): the scriptworker context. task (dict): the task definition to check. Returns: bool: True if it's a pull-request. False if it either comes from the official repos or if the origin can't be determined. In fact, valid scriptworker tasks don't expose ``task.extra.env.tasks_for`` or ``task.payload.env.MOBILE_HEAD_REPOSITORY``, for instance. """ tasks_for = task.get('extra', {}).get('tasks_for') repo_url_from_payload = get_repo(task, context.config['source_env_prefix']) revision_from_payload = get_revision(task, context.config['source_env_prefix']) metadata_source_url = task['metadata'].get('source', '') repo_from_source_url, revision_from_source_url = \ extract_github_repo_and_revision_from_source_url(metadata_source_url) conditions = [tasks_for == 'github-pull-request'] urls_revisions_and_can_skip = ( (repo_url_from_payload, revision_from_payload, True), (repo_from_source_url, revision_from_source_url, False), ) for repo_url, revision, can_skip in urls_revisions_and_can_skip: # XXX In the case of scriptworker tasks, neither the repo nor the revision is defined if not repo_url and can_skip: continue repo_owner, repo_name = extract_github_repo_owner_and_name(repo_url) conditions.append(not is_github_repo_owner_the_official_one(context, repo_owner)) if not revision and can_skip: continue github_repository = GitHubRepository(repo_owner, repo_name, context.config['github_oauth_token']) conditions.append(not await github_repository.has_commit_landed_on_repository(context, revision)) return any(conditions)
0.004318
def get(self, vlr_type): """ Returns the list of vlrs of the requested type Always returns a list even if there is only one VLR of type vlr_type. >>> import pylas >>> las = pylas.read("pylastests/extrabytes.las") >>> las.vlrs [<ExtraBytesVlr(extra bytes structs: 5)>] >>> las.vlrs.get("WktCoordinateSystemVlr") [] >>> las.vlrs.get("WktCoordinateSystemVlr")[0] Traceback (most recent call last): IndexError: list index out of range >>> las.vlrs.get('ExtraBytesVlr') [<ExtraBytesVlr(extra bytes structs: 5)>] >>> las.vlrs.get('ExtraBytesVlr')[0] <ExtraBytesVlr(extra bytes structs: 5)> Parameters ---------- vlr_type: str the class name of the vlr Returns ------- :py:class:`list` a List of vlrs matching the user_id and records_ids """ return [v for v in self.vlrs if v.__class__.__name__ == vlr_type]
0.001963
def encode_content(self, robj, rpb_content): """ Fills an RpbContent message with the appropriate data and metadata from a RiakObject. :param robj: a RiakObject :type robj: RiakObject :param rpb_content: the protobuf message to fill :type rpb_content: riak.pb.riak_pb2.RpbContent """ if robj.content_type: rpb_content.content_type = str_to_bytes(robj.content_type) if robj.charset: rpb_content.charset = str_to_bytes(robj.charset) if robj.content_encoding: rpb_content.content_encoding = str_to_bytes(robj.content_encoding) for uk in robj.usermeta: pair = rpb_content.usermeta.add() pair.key = str_to_bytes(uk) pair.value = str_to_bytes(robj.usermeta[uk]) for link in robj.links: pb_link = rpb_content.links.add() try: bucket, key, tag = link except ValueError: raise RiakError("Invalid link tuple %s" % link) pb_link.bucket = str_to_bytes(bucket) pb_link.key = str_to_bytes(key) if tag: pb_link.tag = str_to_bytes(tag) else: pb_link.tag = str_to_bytes('') for field, value in robj.indexes: pair = rpb_content.indexes.add() pair.key = str_to_bytes(field) pair.value = str_to_bytes(str(value)) # Python 2.x data is stored in a string if six.PY2: rpb_content.value = str(robj.encoded_data) else: rpb_content.value = robj.encoded_data
0.001211
def search_records(self, record_type, name=None, data=None):
    """
    Returns a list of all records configured for this domain that match
    the supplied search criteria.
    """
    return self.manager.search_records(self, record_type=record_type,
                                       name=name, data=data)
0.009677
def is_instance_of(obj, class_or_intf_name): """ Checks whether the Java object implements the specified interface or is a subclass of the superclass. :param obj: the Java object to check :type obj: JB_Object :param class_or_intf_name: the superclass or interface to check, dot notation or with forward slashes :type class_or_intf_name: str :return: true if either implements interface or subclass of superclass :rtype: bool """ class_or_intf_name = class_or_intf_name.replace("/", ".") classname = get_classname(obj) # array? retrieve component type and check that if is_array(obj): jarray = JavaArray(jobject=obj) classname = jarray.component_type() result = javabridge.static_call( "Lweka/core/InheritanceUtils;", "isSubclass", "(Ljava/lang/String;Ljava/lang/String;)Z", class_or_intf_name, classname) if result: return True return javabridge.static_call( "Lweka/core/InheritanceUtils;", "hasInterface", "(Ljava/lang/String;Ljava/lang/String;)Z", class_or_intf_name, classname)
0.002686
def _run_program(name, *args, **kwargs): """Runs program name with the arguments of *args :param shell: if true, runs the command in the shell :type shell: bool :param return_object: if true, returns a CommandOutput object :type return_object: bool :param ro: same as return_object :type ro: bool :param no_return: doesn't return results, allowing for non-blocking calls :type no_return: bool :param nr: same as no_return :type nr: bool :param input: specifies a string to send to the process :type input: str :returns: if return_object the output as a CommandOutput object, if no_return nothing, else the stdout of the program :rtype: CommandOutput or str or None """ shell = kwargs.get("shell", False) return_object = kwargs.get("ro", False) return_object = kwargs.get("return_object", return_object) no_return = kwargs.get("nr", False) no_return = kwargs.get("no_return", no_return) inp = kwargs.get("input", None) args = [name] + list(args) p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell) if not no_return: if inp: stdout, stderr = tuple(x.decode(sys.getdefaultencoding()).strip() for x in p.communicate(inp)) else: stdout, stderr = tuple(x.decode(sys.getdefaultencoding()).strip() for x in p.communicate()) if return_object: return CommandOutput(stdout, stderr, p.returncode) else: return stdout
0.003911
def find_recipes(folders, pattern=None, base=None): '''find recipes will use a list of base folders, files, or patterns over a subset of content to find recipe files (indicated by Starting with Singularity Parameters ========== base: if defined, consider folders recursively below this level. ''' # If the user doesn't provide a list of folders, use $PWD if folders is None: folders = os.getcwd() if not isinstance(folders,list): folders = [folders] manifest = dict() for base_folder in folders: # If we find a file, return the one file custom_pattern = None if os.path.isfile(base_folder): # updates manifest manifest = find_single_recipe(filename=base_folder, pattern=pattern, manifest=manifest) continue # The user likely provided a custom pattern elif not os.path.isdir(base_folder): custom_pattern = base_folder.split('/')[-1:][0] base_folder = "/".join(base_folder.split('/')[0:-1]) # If we don't trigger loop, we have directory manifest = find_folder_recipes(base_folder=base_folder, pattern=custom_pattern or pattern, manifest=manifest, base=base) return manifest
0.004019
def explore_show_summary(self, list, index=False, expected=None, context=None): """Show summary of one capability document. Given a capability document or index (in list, index True if it is an index), write out a simply textual summary of the document with all related documents shown as numbered options (of the form [#] ...description...) which will then form a menu for the next exploration. If expected is not None then it should be a list of expected document types. If this is set then a warning will be printed if list is not one of these document types. Look for certain top-level link types including rel="up". """ num_entries = len(list.resources) capability = '(unknown capability)' if ('capability' in list.md): capability = list.md['capability'] if (index): capability += 'index' print( "Parsed %s document with %d entries:" % (capability, num_entries)) if (expected is not None and capability not in expected): print("WARNING - expected a %s document" % (','.join(expected))) if (capability not in ['description', 'descriptionoindex', 'capabilitylist', 'resourcelist', 'resourcelistindex', 'changelist', 'changelistindex', 'resourcedump', 'resourcedumpindex', 'changedump', 'changedumpindex', 'resourcelist-archive', 'resourcedump-archive', 'changelist-archive', 'changedump-archive']): print("WARNING - unknown %s document type" % (capability)) to_show = num_entries if (num_entries > 21): to_show = 20 # What capability entries are allowed/expected? entry_caps = self.allowed_entries(capability) options = {} n = 0 # Look for <rs:ln> elements in this document ln_describedby = list.link('describedby') if (ln_describedby): if ('href' in ln_describedby): uri = ln_describedby['href'] print("[%s] rel='describedby' link to %s" % ('d', uri)) uri = self.expand_relative_uri(context, uri) options['d'] = Resource(uri, capability='resource') else: print("WARNING - describedby link with no href, ignoring") ln_up = list.link('up') if (ln_up): if ('href' in ln_up): uri = ln_up['href'] print("[%s] rel='up' link to %s" % ('u', uri)) uri = self.expand_relative_uri(context, uri) options['u'] = Resource(uri) else: print("WARNING - up link with no href, ignoring") ln_index = list.link('index') if (ln_index): if ('href' in ln_index): uri = ln_index['href'] print("[%s] rel='index' link to %s" % ('i', uri)) uri = self.expand_relative_uri(context, uri) options['i'] = Resource(uri) else: print("WARNING - index link with no href, ignoring") # Show listed resources as numbered options for r in list.resources: if (n >= to_show): print("(not showing remaining %d entries)" % (num_entries - n)) break n += 1 options[str(n)] = r print("[%d] %s" % (n, r.uri)) if (self.verbose): print(" " + str(r)) r.uri = self.expand_relative_uri(context, r.uri) if (r.capability is not None): warning = '' if (r.capability not in entry_caps): warning = " (EXPECTED %s)" % (' or '.join(entry_caps)) print(" %s%s" % (r.capability, warning)) elif (len(entry_caps) == 1): r.capability = entry_caps[0] print( " capability not specified, should be %s" % (r.capability)) return(options, capability)
0.001431
def repl_command(fxn):
    """
    Decorator for cmd methods

    Parses arguments from the arg string and passes them to the method as
    *args and **kwargs.
    """
    @functools.wraps(fxn)
    def wrapper(self, arglist):
        """Wraps the command method"""
        args = []
        kwargs = {}
        if arglist:
            for arg in shlex.split(arglist):
                if "=" in arg:
                    split = arg.split("=", 1)
                    kwargs[split[0]] = split[1]
                else:
                    args.append(arg)
        return fxn(self, *args, **kwargs)
    return wrapper
0.001626
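A hypothetical cmd.Cmd subclass showing how the decorator splits an argument line into *args and **kwargs (the Shell class and do_greet are illustrative, not from the source):

import cmd

class Shell(cmd.Cmd):
    @repl_command
    def do_greet(self, name, punctuation="!"):
        print("Hello, " + name + punctuation)

# Typing `greet World punctuation=?` at the prompt calls
# do_greet(self, "World", punctuation="?") and prints "Hello, World?"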
def otp(ctx, access_code): """ Manage OTP Application. The YubiKey provides two keyboard-based slots which can each be configured with a credential. Several credential types are supported. A slot configuration may be write-protected with an access code. This prevents the configuration to be overwritten without the access code provided. Mode switching the YubiKey is not possible when a slot is configured with an access code. Examples: \b Swap the configurations between the two slots: $ ykman otp swap \b Program a random challenge-response credential to slot 2: $ ykman otp chalresp --generate 2 \b Program a Yubico OTP credential to slot 2, using the serial as public id: $ ykman otp yubiotp 1 --serial-public-id \b Program a random 38 characters long static password to slot 2: $ ykman otp static --generate 2 --length 38 """ ctx.obj['controller'] = OtpController(ctx.obj['dev'].driver) if access_code is not None: if access_code == '': access_code = click.prompt( 'Enter access code', show_default=False, err=True) try: access_code = parse_access_code_hex(access_code) except Exception as e: ctx.fail('Failed to parse access code: ' + str(e)) ctx.obj['controller'].access_code = access_code
0.000715
async def connect(self, conn_id, connection_string): """Asynchronously connect to a device Args: conn_id (int): A unique identifer that will refer to this connection connection_string (string): A DeviceAdapter specific string that can be used to connect to a device using this DeviceAdapter. callback (callable): A function that will be called when the connection attempt finishes as callback(conection_id, adapter_id, success: bool, failure_reason: string or None) """ id_number = int(connection_string) if id_number not in self.devices: raise DeviceAdapterError(conn_id, 'connect', 'device not found') if self._get_conn_id(connection_string) is not None: raise DeviceAdapterError(conn_id, 'connect', 'device already connected') dev = self.devices[id_number] if dev.connected: raise DeviceAdapterError(conn_id, 'connect', 'device already connected') dev.connected = True self._setup_connection(conn_id, connection_string) self._track_property(conn_id, 'device', dev)
0.006861
def add(self, triple, context, quoted=False):
    """
    Adds a triple to the store.

    >>> from rdflib.term import URIRef
    >>> from rdflib.namespace import RDF

    >>> subject = URIRef('http://zoowizard.org/resource/Artis')
    >>> object = URIRef('http://schema.org/Zoo')
    >>> g = rdflib.Graph('Django')
    >>> g.add((subject, RDF.type, object))
    >>> len(g)
    1
    """
    # Tuple unpacking in the signature (Python 2 only syntax) is replaced
    # with an explicit unpack for Python 3 compatibility.
    s, p, o = triple
    assert isinstance(s, Identifier)
    assert isinstance(p, Identifier)
    assert isinstance(o, Identifier)
    assert not quoted

    named_graph = _get_named_graph(context)

    query_set = _get_query_sets_for_object(o)[0]
    query_set.get_or_create(
        subject=s,
        predicate=p,
        object=o,
        context=named_graph,
    )
0.002389
def NumExpr(ex, signature=(), **kwargs): """ Compile an expression built using E.<variable> variables to a function. ex can also be specified as a string "2*a+3*b". The order of the input variables and their types can be specified using the signature parameter, which is a list of (name, type) pairs. Returns a `NumExpr` object containing the compiled function. """ # NumExpr can be called either directly by the end-user, in which case # kwargs need to be sanitized by getContext, or by evaluate, # in which case kwargs are in already sanitized. # In that case frame_depth is wrong (it should be 2) but it doesn't matter # since it will not be used (because truediv='auto' has already been # translated to either True or False). context = getContext(kwargs, frame_depth=1) threeAddrProgram, inputsig, tempsig, constants, input_names = precompile(ex, signature, context) program = compileThreeAddrForm(threeAddrProgram) return interpreter.NumExpr(inputsig.encode('ascii'), tempsig.encode('ascii'), program, constants, input_names)
0.001718
def getSwapStats(self, dev):
    """Returns I/O stats for swap partition.

    @param dev: Device name for swap partition.
    @return:    Dict of stats.
    """
    if self._swapList is None:
        self._initSwapInfo()
    if dev in self._swapList:
        return self.getDevStats(dev)
    else:
        return None
0.010753
def dentategyrus(adjusted=True): """Dentate Gyrus dataset from Hochgerner et al. (2018). Dentate gyrus is part of the hippocampus involved in learning, episodic memory formation and spatial coding. It is measured using 10X Genomics Chromium and described in Hochgerner et al. (2018). The data consists of 25,919 genes across 3,396 cells and provides several interesting characteristics. Returns ------- Returns `adata` object """ if adjusted: filename = 'data/DentateGyrus/10X43_1.h5ad' url = 'https://github.com/theislab/scvelo_notebooks/raw/master/data/DentateGyrus/10X43_1.h5ad' adata = read(filename, backup_url=url, sparse=True, cache=True) else: filename = 'data/DentateGyrus/10X43_1.loom' url = 'http://pklab.med.harvard.edu/velocyto/DG1/10X43_1.loom' adata = read(filename, backup_url=url, cleanup=True, sparse=True, cache=True) cleanup(adata, clean='all', keep={'spliced', 'unspliced', 'ambiguous'}) url_louvain = 'https://github.com/theislab/scvelo_notebooks/raw/master/data/DentateGyrus/DG_clusters.npy' url_umap = 'https://github.com/theislab/scvelo_notebooks/raw/master/data/DentateGyrus/DG_umap.npy' adata.obs['clusters'] = load('./data/DentateGyrus/DG_clusters.npy', url_louvain) adata.obsm['X_umap'] = load('./data/DentateGyrus/DG_umap.npy', url_umap) adata.obs['clusters'] = pd.Categorical(adata.obs['clusters']) return adata
0.006711
def _get_objects(self, o_type): """Get an object list from the scheduler Returns None if the required object type (`o_type`) is not known or an exception is raised. Else returns the objects list :param o_type: searched object type :type o_type: str :return: objects list :rtype: alignak.objects.item.Items """ if o_type not in [t for t in self.app.sched.pushed_conf.types_creations]: return None try: _, _, strclss, _, _ = self.app.sched.pushed_conf.types_creations[o_type] o_list = getattr(self.app.sched, strclss) except Exception: # pylint: disable=broad-except return None return o_list
0.006766
def startfile(fpath, verbose=True):  # nocover
    """
    Uses default program defined by the system to open a file.
    This is done via `os.startfile` on windows, `open` on mac, and `xdg-open`
    on linux.

    Args:
        fpath (PathLike): a file to open using the program associated with the
            files extension type.
        verbose (int): verbosity

    References:
        http://stackoverflow.com/questions/2692873/quote-posix

    DisableExample:
        >>> # This test interacts with a GUI frontend, not sure how to test.
        >>> import ubelt as ub
        >>> base = ub.ensure_app_cache_dir('ubelt')
        >>> fpath1 = join(base, 'test_open.txt')
        >>> ub.touch(fpath1)
        >>> proc = ub.startfile(fpath1)
    """
    from ubelt import util_cmd
    if verbose:
        print('[ubelt] startfile("{}")'.format(fpath))
    fpath = normpath(fpath)
    if not exists(fpath):
        raise Exception('Cannot start nonexistent file: %r' % fpath)
    if not WIN32:
        import pipes
        fpath = pipes.quote(fpath)
    if LINUX:
        info = util_cmd.cmd(('xdg-open', fpath), detach=True, verbose=verbose)
    elif DARWIN:
        info = util_cmd.cmd(('open', fpath), detach=True, verbose=verbose)
    elif WIN32:
        os.startfile(fpath)
        info = None
    else:
        raise RuntimeError('Unknown Platform')
    if info is not None:
        if not info['proc']:
            raise Exception('startfile failed')
0.000685
def Decrypt(self, data):
    """A convenience method which pads and decrypts at once."""
    decryptor = self.GetDecryptor()

    try:
        padded_data = decryptor.update(data) + decryptor.finalize()
        return self.UnPad(padded_data)
    except ValueError as e:
        raise CipherError(e)
0.013699
def to_naf(self):
    """
    Converts the coreference layer to NAF
    """
    if self.type == 'KAF':
        for node_coref in self.__get_corefs_nodes():
            node_coref.set('id', node_coref.get('coid'))
            del node_coref.attrib['coid']
0.010676
def get_appdir(self, portable_path=None, folder=None, create=False):
    '''
    path = uac_bypass(file)

    This function will only operate when your program is installed;
    check the is_installed function for details.

    Moves working data to another folder. The idea is to get around
    security and uac on windows vista +

    returns cwd on linux; on windows returns a path with write access:
    C:\\ProgramData\\appname

    here if a file is passed in it will be appended to the path

    set create to true to create the file in the programData folder

    setting overwrite to True will silently overwrite, otherwise a
    FileExistsError is raised

    Setting overwrite to get, will get the file path instead of throwing
    an error

    ---Background----
    If a user is using windows, read/write access is restricted in the
    Program Files directory without prompting for uac.
    We create a folder in C:\\ProgramData\\ of the program name and save
    logging data etc there. This way the program doesn't need admin rights.
    '''
    if self.is_installed():
        appdir = appdirs.user_data_dir(self.app_name)
    elif portable_path:
        appdir = portable_path
        if not folder:
            folder = 'data^-^'
    else:
        appdir = os.getcwd()

    if folder:
        path = os.path.join(appdir, folder)
    else:
        path = appdir

    path = os.path.realpath(path)

    if create:
        os.makedirs(path, exist_ok=1)

    return path
0.00304
def _default_to_pandas(self, op, *args, **kwargs): """Helper method to use default pandas function""" empty_self_str = "" if not self.empty else " for empty DataFrame" ErrorMessage.default_to_pandas( "`{}.{}`{}".format( self.__name__, op if isinstance(op, str) else op.__name__, empty_self_str, ) ) if callable(op): result = op(self._to_pandas(), *args, **kwargs) elif isinstance(op, str): # The inner `getattr` is ensuring that we are treating this object (whether # it is a DataFrame, Series, etc.) as a pandas object. The outer `getattr` # will get the operation (`op`) from the pandas version of the class and run # it on the object after we have converted it to pandas. result = getattr(getattr(pandas, self.__name__), op)( self._to_pandas(), *args, **kwargs ) # SparseDataFrames cannot be serialize by arrow and cause problems for Modin. # For now we will use pandas. if isinstance(result, type(self)) and not isinstance( result, (pandas.SparseDataFrame, pandas.SparseSeries) ): return self._create_or_update_from_compiler( result, inplace=kwargs.get("inplace", False) ) elif isinstance(result, pandas.DataFrame): from .dataframe import DataFrame return DataFrame(result) elif isinstance(result, pandas.Series): from .series import Series return Series(result) else: try: if ( isinstance(result, (list, tuple)) and len(result) == 2 and isinstance(result[0], pandas.DataFrame) ): # Some operations split the DataFrame into two (e.g. align). We need to wrap # both of the returned results if isinstance(result[1], pandas.DataFrame): second = self.__constructor__(result[1]) else: second = result[1] return self.__constructor__(result[0]), second else: return result except TypeError: return result
0.002859
def getArrays(self, attr=None, specfiles=None, sort=False, reverse=False, selector=None, defaultValue=None): """Return a condensed array of data selected from :class:`Si` instances from ``self.sic`` for fast and convenient data processing. :param attr: list of :class:`Si` item attributes that should be added to the returned array. The attributes "id" and "specfile" are always included, in combination they serve as a unique id. :param defaultValue: if an item is missing an attribute, the "defaultValue" is added to the array instead. :param specfiles: filenames of ms-run files, if specified return only items from those files :type specfiles: str or [str, str, ...] :param sort: if "sort" is specified the returned list of items is sorted according to the :class:`Si` attribute specified by "sort", if the attribute is not present the item is skipped. :param reverse: bool, set True to reverse sort order :param selector: a function which is called with each :class:`Si` item and has to return True (include item) or False (discard item). Default function is: ``lambda si: True`` :returns: {'attribute1': numpy.array(), 'attribute2': numpy.array(), ... } """ selector = (lambda si: True) if selector is None else selector attr = attr if attr is not None else [] attr = set(['id', 'specfile'] + aux.toList(attr)) items = self.getItems(specfiles, sort, reverse, selector) return _getArrays(items, attr, defaultValue)
0.002907
def new(cls, nsptagname, val):
    """
    Return a new ``CT_String`` element with tagname *nsptagname* and
    ``val`` attribute set to *val*.
    """
    elm = OxmlElement(nsptagname)
    elm.val = val
    return elm
0.00813
def normalize_layout(layout, min_percentile=1, max_percentile=99,
                     relative_margin=0.1):
    """Removes outliers and scales layout to between [0,1]."""

    # compute percentiles
    mins = np.percentile(layout, min_percentile, axis=(0))
    maxs = np.percentile(layout, max_percentile, axis=(0))

    # add margins
    mins -= relative_margin * (maxs - mins)
    maxs += relative_margin * (maxs - mins)

    # `clip` broadcasts, `[None]`s added only for readability
    clipped = np.clip(layout, mins, maxs)

    # embed within [0,1] along both axes
    clipped -= clipped.min(axis=0)
    clipped /= clipped.max(axis=0)

    return clipped
0.00313
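A quick check of normalize_layout on synthetic data, assuming numpy >= 1.17 for default_rng:

import numpy as np

rng = np.random.default_rng(0)
layout = rng.normal(size=(1000, 2)) * 50  # synthetic 2-D embedding
norm = normalize_layout(layout)
# after clipping outliers, every axis spans exactly [0, 1]
print(norm.min(axis=0), norm.max(axis=0))  # [0. 0.] [1. 1.]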
def _get_profile(self, profile_id):
    '''Return the profile with the received ID as a dict'''
    profile_metadata = self._registry.get(profile_id)
    if not profile_metadata:
        return

    path = self._get_absolute_path(profile_metadata.get('schema_path'))
    if path and os.path.isfile(path):
        return self._load_json_file_or_url(path)

    url = profile_metadata.get('schema')
    if url:
        return self._load_json_file_or_url(url)
0.00404
def kld(p1, p2):
    """Compute Kullback-Leibler divergence between p1 and p2.

    It assumes that p1 and p2 are already normalized, i.e. each of them
    sums to 1.
    """
    return np.sum(np.where(p1 != 0, p1 * np.log(p1 / p2), 0))
0.008547
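A short sanity check for kld on two normalized distributions:

import numpy as np

p = np.array([0.5, 0.5])
q = np.array([0.9, 0.1])
print(kld(p, q))  # ~0.511 (nats)
print(kld(p, p))  # 0.0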
def query(cls, index_name=None, filter_builder=None,
          scan_index_forward=None, limit=None, **key_conditions):
    """High level query API.

    :param key_filter: key conditions of the query.
    :type key_filter: :class:`collections.Mapping`

    :param filter_builder: filter expression builder.
    :type filter_builder: :class:`~bynamodb.filterexps.Operator`
    """
    query_kwargs = {
        'key_conditions': build_condition(key_conditions, KEY_CONDITIONS),
        'index_name': index_name,
        'scan_index_forward': scan_index_forward,
        'limit': limit
    }
    if filter_builder:
        cls._build_filter_expression(filter_builder, query_kwargs)
    return ResultSet(cls, 'query', query_kwargs)
0.003797
def rmrf(items, verbose=True):
    "Silently remove a list of directories or files"
    if isinstance(items, str):
        items = [items]

    for item in items:
        if verbose:
            print("Removing {}".format(item))
        shutil.rmtree(item, ignore_errors=True)
        # rmtree doesn't remove bare files
        try:
            os.remove(item)
        except FileNotFoundError:
            pass
0.002433
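A usage sketch for rmrf; the paths are hypothetical and anything missing is skipped silently:

# removes two directory trees and one bare file in a single call
rmrf(['build', 'dist', 'notes.txt'], verbose=False)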
def _check_delete(self):
    '''Check project delete'''
    now = time.time()
    for project in list(itervalues(self.projects)):
        if project.db_status != 'STOP':
            continue
        if now - project.updatetime < self.DELETE_TIME:
            continue
        if 'delete' not in self.projectdb.split_group(project.group):
            continue

        logger.warning("deleting project: %s!", project.name)
        del self.projects[project.name]
        self.taskdb.drop(project.name)
        self.projectdb.drop(project.name)
        if self.resultdb:
            self.resultdb.drop(project.name)
        for each in self._cnt.values():
            del each[project.name]
0.002646
def source_group_receiver(self, sender, source, signal, **kwargs): """ Relay source group signals to the appropriate spec strategy. """ from imagekit.cachefiles import ImageCacheFile source_group = sender # Ignore signals from unregistered groups. if source_group not in self._source_groups: return #OVERRIDE HERE -- pass specs into generator object specs = [generator_registry.get(id, source=source, specs=spec_data_field_hash[id]) for id in self._source_groups[source_group]] callback_name = self._signals[signal] #END OVERRIDE for spec in specs: file = ImageCacheFile(spec) call_strategy_method(file, callback_name)
0.009067
def Ge(self):
    """ Result of US from the SVD decomposition G = USVᵀ. """
    from scipy.linalg import svd
    from numpy_sugar.linalg import ddot

    U, S, _ = svd(self._G, full_matrices=False, check_finite=False)
    if U.shape[1] < self._G.shape[1]:
        return ddot(U, S)
    return self._G
0.005797
def do_describe(self, line):
    "describe [-c] {tablename}..."
    args = self.getargs(line)

    if '-c' in args:
        create_info = True
        args.remove('-c')
    else:
        create_info = False

    if not args:
        if self.table:
            args = [self.table.name]
        else:
            args = self.tables

    for table in args:
        desc = self.conn.describe_table(table)

        if create_info:
            info = desc['Table']
            schema = info['KeySchema']
            name = info['TableName']

            hkey = schema['HashKeyElement']
            hkey = "%s:%s" % (hkey['AttributeName'], hkey['AttributeType'])
            if 'RangeKeyElement' in schema:
                rkey = schema['RangeKeyElement']
                rkey = " %s:%s" % (rkey['AttributeName'], rkey['AttributeType'])
            else:
                rkey = ''

            prov = info['ProvisionedThroughput']
            prov = "-c %d,%d" % (prov['ReadCapacityUnits'],
                                 prov['WriteCapacityUnits'])
            # Python 2 print statement converted to print() call
            print("create %s %s %s%s" % (name, prov, hkey, rkey))
        else:
            self.pprint(desc, "%s: " % table)
0.003182
def as_dict(self): """ A JSON serializable dict representation of an object. """ d = {"@module": self.__class__.__module__, "@class": self.__class__.__name__} try: parent_module = self.__class__.__module__.split('.')[0] module_version = import_module(parent_module).__version__ d["@version"] = u"{}".format(module_version) except AttributeError: d["@version"] = None args = getargspec(self.__class__.__init__).args def recursive_as_dict(obj): if isinstance(obj, (list, tuple)): return [recursive_as_dict(it) for it in obj] elif isinstance(obj, dict): return {kk: recursive_as_dict(vv) for kk, vv in obj.items()} elif hasattr(obj, "as_dict"): return obj.as_dict() return obj for c in args: if c != "self": try: a = self.__getattribute__(c) except AttributeError: try: a = self.__getattribute__("_" + c) except AttributeError: raise NotImplementedError( "Unable to automatically determine as_dict " "format from class. MSONAble requires all " "args to be present as either self.argname or " "self._argname, and kwargs to be present under" "a self.kwargs variable to automatically " "determine the dict format. Alternatively, " "you can implement both as_dict and from_dict.") d[c] = recursive_as_dict(a) if hasattr(self, "kwargs"): d.update(**self.kwargs) if hasattr(self, "_kwargs"): d.update(**self._kwargs) return d
0.00102
def register_opener(suffix, opener=None): """ Register a callback that opens an archive with the specified *suffix*. The object returned by the *opener* must implement the #tarfile.Tarfile interface, more specifically the following methods: - `add(filename, arcname) -> None` - `getnames() -> list of str` - `getmember(filename) -> TarInfo` - `extractfile(filename) -> file obj` This function can be used as a decorator when *opener* is not provided. The opener must accept the following arguments: %%arglist file (file-like): A file-like object to read the archive data from. mode (str): The mode to open the file in. Valid values are `'w'`, `'r'` and `'a'`. options (dict): A dictionary with possibly additional arguments. """ if opener is None: def decorator(func): register_opener(suffix, func) return func return decorator if suffix in openers: raise ValueError('opener suffix {0!r} already registered'.format(suffix)) openers[suffix] = opener
0.006883
def new_session(self):
    """Establish a new session."""
    body = yield from self._fetch_json(URL_LOGIN, self._new_session_data)
    self.sma_sid = jmespath.search('result.sid', body)
    if self.sma_sid:
        return True

    msg = 'Could not start session, %s, got {}'.format(body)

    if body.get('err'):
        if body.get('err') == 503:
            _LOGGER.error("Max amount of sessions reached")
        else:
            _LOGGER.error(msg, body.get('err'))
    else:
        _LOGGER.error(msg, "Session ID expected [result.sid]")
    return False
0.003241
def build_parser(parser):
    """
    Generate a subparser
    """
    parser.add_argument(
        'sequence_file', type=FileType('r'),
        help="""Input fastq file. A fasta-format file may also be provided
        if --input-qual is also specified.""")
    parser.add_argument(
        '--input-qual', type=FileType('r'),
        help="""The quality scores associated with the input file. Only
        used if input file is fasta.""")
    parser.add_argument(
        'output_file', type=FileType('w'),
        help="""Output file. Format determined from extension.""")

    output_group = parser.add_argument_group("Output")
    output_group.add_argument(
        '--report-out', type=FileType('w'), default=sys.stdout,
        help="""Output file for report [default: stdout]""")
    output_group.add_argument(
        '--details-out', type=FileType('w'),
        help="""Output file to report fate of each sequence""")
    output_group.add_argument(
        '--no-details-comment', action='store_false', default=True,
        dest='details_comment',
        help="""Do not write comment lines with version and call to start
        --details-out""")

    parser.add_argument(
        '--min-mean-quality', metavar='QUALITY', type=float,
        default=DEFAULT_MEAN_SCORE,
        help="""Minimum mean quality score for each read
        [default: %(default)s]""")
    parser.add_argument(
        '--min-length', metavar='LENGTH', type=int, default=200,
        help="""Minimum length to keep sequence [default: %(default)s]""")
    parser.add_argument(
        '--max-length', metavar='LENGTH', type=int, default=1000,
        help="""Maximum length to keep before truncating
        [default: %(default)s]. This operation occurs before
        --max-ambiguous""")

    window_group = parser.add_argument_group('Quality window options')
    window_group.add_argument(
        '--quality-window-mean-qual', type=float,
        help="""Minimum quality score within the window defined by
        --quality-window. [default: same as --min-mean-quality]""")
    window_group.add_argument(
        '--quality-window-prop',
        help="""Proportion of reads within quality window that must pass
        filter [default: %(default).1f]""",
        default=1.0, type=typed_range(float, 0.0, 1.0))
    window_group.add_argument(
        '--quality-window', type=int, metavar='WINDOW_SIZE', default=0,
        help="""Window size for truncating sequences. When set to a
        non-zero value, sequences are truncated where the mean quality
        within the window drops below --min-mean-quality.
        [default: %(default)s]""")

    parser.add_argument(
        '--ambiguous-action', choices=('truncate', 'drop'),
        help="""Action to take on ambiguous base in sequence (N's).
        [default: no action]""")
    parser.add_argument(
        '--max-ambiguous', default=None,
        help="""Maximum number of ambiguous bases in a sequence. Sequences
        exceeding this count will be removed.""", type=int)
    parser.add_argument(
        '--pct-ambiguous',
        help="""Maximum percent of ambiguous bases in a sequence. Sequences
        exceeding this percent will be removed.""", type=float)

    barcode_group = parser.add_argument_group('Barcode/Primer')
    primer_group = barcode_group.add_mutually_exclusive_group()
    primer_group.add_argument(
        '--primer', help="""IUPAC ambiguous primer to require""")
    primer_group.add_argument(
        '--no-primer', help="""Do not use a primer.""",
        action='store_const', const='', dest='primer')
    barcode_group.add_argument(
        '--barcode-file',
        help="""CSV file containing sample_id,barcode[,primer] in the rows.
        A single primer for all sequences may be specified with `--primer`,
        or `--no-primer` may be used to indicate barcodes should be used
        without a primer check.""", type=FileType('r'))
    barcode_group.add_argument(
        '--barcode-header', action='store_true', default=False,
        help="""Barcodes have a header row [default: %(default)s]""")
    barcode_group.add_argument(
        '--map-out', help="""Path to write sequence_id,sample_id pairs""",
        type=FileType('w'), metavar='SAMPLE_MAP')
    barcode_group.add_argument(
        '--quoting', help="""A string naming an attribute of the csv module
        defining the quoting behavior for `SAMPLE_MAP`.
        [default: %(default)s]""",
        default='QUOTE_MINIMAL',
        choices=[s for s in dir(csv) if s.startswith('QUOTE_')])
0.000398
def add(self, data):
    """
    Adds a new data node to the front of the list. The provided data will
    be encapsulated into a new instance of the LinkedListNode class, and
    the linked list's pointers will be updated, as well as its size.

    :param data: the data to be inserted in the new list node
    :type data: object
    """
    node = LinkedListNode(data, None)
    if self._size == 0:
        self._first_node = node
        self._last_node = node
    else:
        second_node = self._first_node
        self._first_node = node
        self._first_node.update_next(second_node)
    self._size += 1
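# A self-contained illustration of the prepend behaviour above (the _Node
# class and standalone head variable are assumptions of this sketch; only
# the pointer updates mirror `add`). Adding 1, 2, 3 leaves 3 at the head.
class _Node:
    def __init__(self, data, next_node=None):
        self.data, self.next = data, next_node

head = None
for value in (1, 2, 3):
    node = _Node(value)
    node.next = head  # link the new node in front of the old head
    head = node

out = []
while head:
    out.append(head.data)
    head = head.next
assert out == [3, 2, 1]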
0.003012
def _get_accounts_client(accounts_url, email, password): """ Create an Accounts Service API client and log in using provided email and password :param accounts_url: Accounts Service URL :param email: Login Email :param password: Login Password :return: Accounts Service API Client """ client = API(accounts_url, async=False, validate_cert=False) try: response = client.accounts.login.post(email=email, password=password) client.default_headers['Authorization'] = response['data']['token'] return client except httpclient.HTTPError: msg = ('There was a problem logging into the accounts service.' ' Please check your email and password.') raise click.ClickException(click.style(msg, fg='red'))
0.003812
def contained_segments_matrix(segments):
    """
    Given n segments, returns an n*n boolean matrix m in which m[i, j]
    means segments[i] is contained inside segments[j].
    """
    x1, y1 = segments[:, 0], segments[:, 1]
    x2, y2 = x1 + segments[:, 2], y1 + segments[:, 3]
    n = len(segments)

    x1so, x2so, y1so, y2so = list(map(numpy.argsort, (x1, x2, y1, y2)))
    # inverse transformations
    x1soi, x2soi, y1soi, y2soi = list(map(numpy.argsort, (x1so, x2so, y1so, y2so)))
    # let rows be x1 and columns be x2. this array represents where x1 < x2
    o1 = numpy.triu(numpy.ones((n, n)), k=1).astype(bool)
    # let rows be x1 and columns be x2. this array represents where x1 > x2
    o2 = numpy.tril(numpy.ones((n, n)), k=0).astype(bool)
    a_inside_b_x = o2[x1soi][:, x1soi] * o1[x2soi][:, x2soi]  # (x1[a] > x1[b] and x2[a] < x2[b])
    a_inside_b_y = o2[y1soi][:, y1soi] * o1[y2soi][:, y2soi]  # (y1[a] > y1[b] and y2[a] < y2[b])
    a_inside_b = a_inside_b_x * a_inside_b_y
    return a_inside_b
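# Quick check of the containment matrix: segment 0 sits strictly inside
# segment 1, so m[0, 1] should be True and every other entry False
# (containment is strict, so the diagonal stays False). Rows are
# (x, y, width, height), matching the slicing above.
import numpy

segments = numpy.array([
    [2, 2, 2, 2],    # inner box spanning (2, 2)-(4, 4)
    [0, 0, 10, 10],  # outer box spanning (0, 0)-(10, 10)
])
m = contained_segments_matrix(segments)
assert m[0, 1] and not m[1, 0] and not m[0, 0]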
0.00402
def update_workspace(self,
                     workspace_id,
                     name=None,
                     description=None,
                     language=None,
                     metadata=None,
                     learning_opt_out=None,
                     system_settings=None,
                     intents=None,
                     entities=None,
                     dialog_nodes=None,
                     counterexamples=None,
                     append=None,
                     **kwargs):
    """
    Update workspace.

    Update an existing workspace with new or modified data. You must provide
    component objects defining the content of the updated workspace.
    This operation is limited to 30 requests per 30 minutes. For more
    information, see **Rate limiting**.

    :param str workspace_id: Unique identifier of the workspace.
    :param str name: The name of the workspace. This string cannot contain
    carriage return, newline, or tab characters, and it must be no longer than
    64 characters.
    :param str description: The description of the workspace. This string
    cannot contain carriage return, newline, or tab characters, and it must be
    no longer than 128 characters.
    :param str language: The language of the workspace.
    :param dict metadata: Any metadata related to the workspace.
    :param bool learning_opt_out: Whether training data from the workspace
    (including artifacts such as intents and entities) can be used by IBM for
    general service improvements. `true` indicates that workspace training
    data is not to be used.
    :param WorkspaceSystemSettings system_settings: Global settings for the
    workspace.
    :param list[CreateIntent] intents: An array of objects defining the
    intents for the workspace.
    :param list[CreateEntity] entities: An array of objects describing the
    entities for the workspace.
    :param list[DialogNode] dialog_nodes: An array of objects describing the
    dialog nodes in the workspace.
    :param list[Counterexample] counterexamples: An array of objects defining
    input examples that have been marked as irrelevant input.
    :param bool append: Whether the new data is to be appended to the existing
    data in the workspace. If **append**=`false`, elements included in the new
    data completely replace the corresponding existing elements, including all
    subelements. For example, if the new data includes **entities** and
    **append**=`false`, all existing entities in the workspace are discarded
    and replaced with the new entities. If **append**=`true`, existing
    elements are preserved, and the new elements are added. If any elements in
    the new data collide with existing elements, the update request fails.
    :param dict headers: A `dict` containing the request headers
    :return: A `DetailedResponse` containing the result, headers and HTTP
    status code.
:rtype: DetailedResponse """ if workspace_id is None: raise ValueError('workspace_id must be provided') if system_settings is not None: system_settings = self._convert_model(system_settings, WorkspaceSystemSettings) if intents is not None: intents = [self._convert_model(x, CreateIntent) for x in intents] if entities is not None: entities = [self._convert_model(x, CreateEntity) for x in entities] if dialog_nodes is not None: dialog_nodes = [ self._convert_model(x, DialogNode) for x in dialog_nodes ] if counterexamples is not None: counterexamples = [ self._convert_model(x, Counterexample) for x in counterexamples ] headers = {} if 'headers' in kwargs: headers.update(kwargs.get('headers')) sdk_headers = get_sdk_headers('conversation', 'V1', 'update_workspace') headers.update(sdk_headers) params = {'version': self.version, 'append': append} data = { 'name': name, 'description': description, 'language': language, 'metadata': metadata, 'learning_opt_out': learning_opt_out, 'system_settings': system_settings, 'intents': intents, 'entities': entities, 'dialog_nodes': dialog_nodes, 'counterexamples': counterexamples } url = '/v1/workspaces/{0}'.format(*self._encode_path_vars(workspace_id)) response = self.request( method='POST', url=url, headers=headers, params=params, json=data, accept_json=True) return response
0.00765
def focusIn(self, event=None): """Select all text (if applicable) on taking focus""" try: # doScroll returns false if the call was ignored because the # last call also came from this widget. That avoids unwanted # scrolls and text selection when the focus moves in and out # of the window. if self.doScroll(event): self.entry.selection_range(0, END) # select all text in widget else: # restore selection to what it was on the last FocusOut if self.lastSelection: self.entry.selection_range(*self.lastSelection) except AttributeError: pass
0.004196
def start_workflow(self, workflow_name, delayed=False, **kwargs): """Run the workflow specified on the object. :param workflow_name: name of workflow to run :type workflow_name: str :param delayed: should the workflow run asynchronously? :type delayed: bool :return: UUID of WorkflowEngine (or AsyncResult). """ from .tasks import start if delayed: self.save() db.session.commit() return start.delay(workflow_name, object_id=self.id, **kwargs) else: return start(workflow_name, data=[self], **kwargs)
0.003165
def do_filetype(self, line): """filetype FILE Prints the type of file (dir or file). This function is primarily for testing. """ if len(line) == 0: print_err("Must provide a filename") return filename = resolve_path(line) mode = auto(get_mode, filename) if mode_exists(mode): if mode_isdir(mode): self.print('dir') elif mode_isfile(mode): self.print('file') else: self.print('unknown') else: self.print('missing')
0.003268
def render_remarks_tag(self, ar): """Renders a remarks image icon """ if not ar.getRemarks(): return "" uid = api.get_uid(ar) url = ar.absolute_url() title = ar.Title() tooltip = _("Remarks of {}").format(title) # Note: The 'href' is picked up by the overlay handler, see # bika.lims.worksheet.coffee attrs = { "css_class": "slot-remarks", "style": "cursor: pointer;", "title": tooltip, "uid": uid, "href": "{}/base_view".format(url), } return get_image("remarks_ico.png", **attrs)
0.00304
def write(self, data): """Send raw bytes to the instrument. :param data: bytes to be sent to the instrument :type data: bytes """ begin, end, size = 0, 0, len(data) bytes_sent = 0 raw_write = super(USBRawDevice, self).write while not end > size: begin = end end = begin + self.RECV_CHUNK bytes_sent += raw_write(data[begin:end]) return bytes_sent
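# The chunked-send loop above, extracted for illustration. Slices of
# RECV_CHUNK bytes are written until `end` passes the data length; the stub
# raw_write (here just len) stands in for the USB layer and is an assumption
# of this sketch.
def chunked_write(data, chunk=1024, raw_write=len):
    begin, end, size = 0, 0, len(data)
    sent = 0
    while not end > size:
        begin = end
        end = begin + chunk
        sent += raw_write(data[begin:end])
    return sent

assert chunked_write(b'x' * 2500) == 2500  # 1024 + 1024 + 452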
0.004357
def calculate_local_order_parameter(self, oscillatory_network, start_iteration=None, stop_iteration=None):
    """!
    @brief Calculates local order parameter.
    @details Local order parameter or so-called level of local or partial synchronization is calculated by following expression:
    \f[
    r_{c}=\frac{1}{N}\left | \sum_{i=0}^{N-1} \frac{1}{N_{i}} \sum_{j\in\mathcal{N}_{i}} e^{ i(\theta_{j} - \theta_{i}) } \right |;
    \f]
    where N - total amount of oscillators in the network, \f$N_{i}\f$ - amount of neighbors of oscillator with index \f$i\f$ and \f$\mathcal{N}_{i}\f$ - set of neighbors of that oscillator.

    @param[in] oscillatory_network (sync): Sync oscillatory network whose structure of connections is required for calculation.
    @param[in] start_iteration (uint): The first iteration that is used for calculation, if 'None' then the last iteration is used.
    @param[in] stop_iteration (uint): The last iteration that is used for calculation, if 'None' then 'start_iteration' + 1 is used.

    @return (list) List of levels of local (partial) synchronization (local order parameter evolution).

    """
    (start_iteration, stop_iteration) = self.__get_start_stop_iterations(start_iteration, stop_iteration)

    if self._ccore_sync_dynamic_pointer is not None:
        network_pointer = oscillatory_network._ccore_network_pointer
        return wrapper.sync_dynamic_calculate_local_order(self._ccore_sync_dynamic_pointer, network_pointer, start_iteration, stop_iteration)

    sequence_local_order = []
    for index in range(start_iteration, stop_iteration):
        sequence_local_order.append(order_estimator.calculate_local_sync_order(self.output[index], oscillatory_network))

    return sequence_local_order
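# A plain-numpy sketch of the reconstructed expression above. This is an
# interpretation for illustration, not pyclustering's implementation: each
# oscillator's phase differences to its neighbours are averaged on the unit
# circle, then the modulus of the grand sum is normalised by N.
import numpy as np

def local_order(phases, adjacency):
    n = len(phases)
    acc = 0j
    for i in range(n):
        neigh = [j for j in range(n) if adjacency[i][j]]
        if neigh:
            acc += np.mean([np.exp(1j * (phases[j] - phases[i])) for j in neigh])
    return abs(acc) / n

# Identical phases on a 4-node ring give full local synchronization (1.0).
ring = [[int(abs(i - j) in (1, 3)) for j in range(4)] for i in range(4)]
print(local_order([0.1] * 4, ring))  # -> 1.0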
0.018458
def make_hone_cache_wrapper(inner_func, maxsize, maxage, finder,
                            store_partials):
    """ Keeps a cache of requests we've already made and uses that for
    generating results if possible.  If the user asked for a root prior
    to this call we can use it to skip a new lookup using `finder`.  A
    top-level lookup will effectively serve as a global cache. """
    hits = misses = partials = 0
    cache = TTLMapping(maxsize, maxage)

    def wrapper(*args):
        nonlocal hits, misses, partials
        radix = args[-1]
        # Attempt fast cache hit first.
        try:
            r = cache[radix]
        except KeyError:
            pass
        else:
            hits += 1
            return r
        for i in range(len(radix) - 1, -1, -1):
            partial_radix = radix[:i]
            try:
                partial = cache[partial_radix]
            except KeyError:
                continue
            try:
                r = finder(radix, partial_radix, partial)
            except Exception:
                break  # Treat any exception as a miss.
            partials += 1
            if store_partials:
                cache[radix] = r
            return r
        misses += 1
        cache[radix] = r = inner_func(*args)
        return r

    def cache_info():
        """ Emulate lru_cache so this is a low touch replacement. """
        return HoneCacheInfo(hits, misses, maxsize, len(cache), maxage,
                             partials, finder)

    def cache_clear():
        """ Clear cache and stats. """
        nonlocal hits, misses, partials
        hits = misses = partials = 0
        cache.clear()

    wrapper.cache_info = cache_info
    wrapper.cache_clear = cache_clear
    return functools.update_wrapper(wrapper, inner_func)
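# Hedged usage sketch. TTLMapping and HoneCacheInfo come from elsewhere in
# the source module; resolve() and finder() below are illustrations only.
# The point: a lookup for ('usr', 'share', 'doc') can be answered from the
# cached result of the shorter radix ('usr',) via `finder`, skipping another
# call to the expensive inner function.
def resolve(radix):
    return {'radix': radix}  # stand-in for an expensive lookup

def finder(radix, partial_radix, partial_result):
    # Derive the answer for `radix` from a cached shorter prefix.
    return {'radix': radix, 'via': partial_radix}

lookup = make_hone_cache_wrapper(resolve, maxsize=128, maxage=60,
                                 finder=finder, store_partials=True)
lookup(('usr',))                 # miss -> resolve()
lookup(('usr', 'share', 'doc'))  # partial hit -> finder()
print(lookup.cache_info())       # hits=0, misses=1, partials=1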
0.001117
def create(self): """Create tracking collection. Does nothing if tracking collection already exists. """ if self._track is None: self._track = self.db[self.tracking_collection_name]
0.008889
def from_bytes(cls, bitstream, decode_payload=True): ''' Parse the given packet and update properties accordingly ''' packet = cls() # Convert to ConstBitStream (if not already provided) if not isinstance(bitstream, ConstBitStream): if isinstance(bitstream, Bits): bitstream = ConstBitStream(auto=bitstream) else: bitstream = ConstBitStream(bytes=bitstream) # Read the version version = bitstream.read('uint:4') if version != packet.version: raise ValueError('Provided bytes do not contain an IPv4 packet') # Read the header length ihl = bitstream.read('uint:4') if ihl < 5: raise ValueError('Invalid IPv4 header length') # Now that we know the length of the header we store it to be able # to easily recalculate the header checksum later remaining_header_bits = (ihl * 32) - 8 header = (BitStream('uint:4=4, uint:4=%d' % ihl) + bitstream.peek(remaining_header_bits)) # Read the type of service packet.tos = bitstream.read('uint:8') # Read the total length total_length = bitstream.read('uint:16') if total_length < ihl * 4: raise ValueError('Total length is shorter than the header') # Read the identification packet.identification = bitstream.read('uint:16') # Read the flags (reserved, packet.dont_fragment, packet.more_fragments) = bitstream.readlist('3*bool') if reserved: raise ValueError('Reserved flag must be 0') # Read the fragment offset packet.fragment_offset = bitstream.read('uint:13') # Read the TTL packet.ttl = bitstream.read('uint:8') # Read the protocol number packet.protocol = bitstream.read('uint:8') # Read the header checksum header_checksum = bitstream.read('uint:16') # Set the checksum bits in the header to 0 and re-calculate header[80:96] = BitStream(16) my_checksum = checksum.ones_complement(header.bytes) if my_checksum != header_checksum: raise ValueError('Header checksum does not match') # Read the source and destination addresses packet.source = IPv4Address(bitstream.read('uint:32')) packet.destination = IPv4Address(bitstream.read('uint:32')) # Read the options option_len = (ihl - 5) * 4 packet.options = bitstream.read('bytes:%d' % option_len) # And the rest is payload payload_bytes = (total_length) - (ihl * 4) packet.payload = bitstream.read('bytes:%d' % payload_bytes) if decode_payload: payload_class = protocol_registry.get_type_class(packet.protocol) if payload_class: packet.payload = payload_class.from_bytes(packet.payload) # There should be no remaining bits if bitstream.pos != bitstream.len: raise ValueError('Bits remaining after processing packet') # Verify that the properties make sense packet.sanitize() return packet
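# The header-checksum verification above, in miniature: zero the checksum
# field, recompute the ones' complement sum, and compare. This standalone
# helper mirrors what `checksum.ones_complement` is assumed to compute.
import struct

def ones_complement(data):
    if len(data) % 2:
        data += b'\x00'
    total = sum(struct.unpack('!%dH' % (len(data) // 2), data))
    while total >> 16:
        total = (total & 0xFFFF) + (total >> 16)
    return ~total & 0xFFFF

# A 20-byte header with its checksum field (bytes 10-11) zeroed; the value
# printed is what a well-formed packet would carry there.
header = bytes.fromhex('4500001cabcd00004011' + '0000' + 'c0a80001c0a80002')
print(hex(ones_complement(header)))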
0.00062
def schema_create(dbname, name, owner=None, user=None, db_user=None, db_password=None, db_host=None, db_port=None): ''' Creates a Postgres schema. CLI Example: .. code-block:: bash salt '*' postgres.schema_create dbname name owner='owner' \\ user='user' \\ db_user='user' db_password='password' db_host='hostname' db_port='port' ''' # check if schema exists if schema_exists(dbname, name, user=user, db_user=db_user, db_password=db_password, db_host=db_host, db_port=db_port): log.info('\'%s\' already exists in \'%s\'', name, dbname) return False sub_cmd = 'CREATE SCHEMA "{0}"'.format(name) if owner is not None: sub_cmd = '{0} AUTHORIZATION "{1}"'.format(sub_cmd, owner) ret = _psql_prepare_and_run(['-c', sub_cmd], user=db_user, password=db_password, port=db_port, host=db_host, maintenance_db=dbname, runas=user) return ret['retcode'] == 0
0.000857
def save_outputs(outputs, output_fpath): """ Save model outputs in an Excel file. :param outputs: Model outputs. :type outputs: dict :param output_fpath: Output file path. :type output_fpath: str """ df = pd.DataFrame(outputs) with pd.ExcelWriter(output_fpath) as writer: df.to_excel(writer)
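# Minimal usage sketch (column names are illustrative): each dict key becomes
# a column in the written workbook. Writing .xlsx requires an Excel engine
# such as openpyxl to be installed.
save_outputs({'speed': [10.0, 12.5], 'torque': [80.1, 79.6]}, 'outputs.xlsx')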
0.002833
def ajax_editable_boolean(attr, short_description): """ Convenience function: Assign the return value of this method to a variable of your ModelAdmin class and put the variable name into list_display. Example:: class MyTreeEditor(TreeEditor): list_display = ('__unicode__', 'active_toggle') active_toggle = ajax_editable_boolean('active', _('is active')) """ def _fn(self, item): return ajax_editable_boolean_cell(item, attr) _fn.allow_tags = True _fn.short_description = short_description _fn.editable_boolean_field = attr return _fn
0.001616
def get_rich_events(self, item): """ Get the enriched events related to a module """ module = item['data'] if not item['data']['releases']: return [] for release in item['data']['releases']: event = self.get_rich_item(item) # Update specific fields for this release event["uuid"] += "_" + release['slug'] event["author_url"] = 'https://forge.puppet.com/' + release['module']['owner']['username'] event["gravatar_id"] = release['module']['owner']['gravatar_id'] event["downloads"] = release['downloads'] event["slug"] = release['slug'] event["version"] = release['version'] event["uri"] = release['uri'] event["validation_score"] = release['validation_score'] event["homepage_url"] = None if 'project_page' in release['metadata']: event["homepage_url"] = release['metadata']['project_page'] event["issues_url"] = None if "issues_url" in release['metadata']: event["issues_url"] = release['metadata']['issues_url'] event["tags"] = release['tags'] event["license"] = release['metadata']['license'] event["source_url"] = release['metadata']['source'] event["summary"] = release['metadata']['summary'] event["metadata__updated_on"] = parser.parse(release['updated_at']).isoformat() if self.sortinghat: release["metadata__updated_on"] = event["metadata__updated_on"] # Needed in get_item_sh logic event.update(self.get_item_sh(release)) if self.prjs_map: event.update(self.get_item_project(event)) event.update(self.get_grimoire_fields(release["created_at"], "release")) yield event
0.00316
def cut(self, cutter, target, sr=None):
    """
    The cut operation is performed on a geometry service resource. This
    operation splits the target polyline or polygon where it's crossed
    by the cutter polyline.

    Inputs:
      cutter - polyline that will be used to divide the target into
       pieces where it crosses the target (structured as JSON polyline
       objects returned by the ArcGIS REST API).
      target - array of polylines/polygons to be cut (structured as
       JSON geometry objects returned by the ArcGIS REST API).
      sr - spatial reference of the input geometries WKID.
    """
    url = self._url + "/cut"
    params = {
        "f" : "json"
    }
    if sr is not None:
        params['sr'] = sr
    if isinstance(cutter, Polyline):
        params['cutter'] = cutter.asDictionary
    else:
        raise AttributeError("Input must be type Polyline")
    if isinstance(target, list) and len(target) > 0:
        template = {"geometryType": "",
                    "geometries" : []}
        for g in target:
            if isinstance(g, Polygon):
                template['geometryType'] = "esriGeometryPolygon"
                template['geometries'].append(g.asDictionary)
            elif isinstance(g, Polyline):
                template['geometryType'] = "esriGeometryPolyline"
                template['geometries'].append(g.asDictionary)
            else:
                raise AttributeError("Invalid geometry in target, entries can only be Polygon or Polyline")
        params['target'] = template
    else:
        raise AttributeError("You must provide at least 1 Polygon/Polyline geometry in a list")
    return self._get(url=url,
                     param_dict=params,
                     securityHandler=self._securityHandler,
                     proxy_port=self._proxy_port,
                     proxy_url=self._proxy_url)
0.00838
def server_receives_binary_from(self, name=None, timeout=None, connection=None, label=None): """Receive raw binary message. Returns message, ip, and port. If server `name` is not given, uses the latest server. Optional message `label` is shown on logs. Examples: | ${binary} | ${ip} | ${port} = | Server receives binary from | | ${binary} | ${ip} | ${port} = | Server receives binary from | Server1 | connection=my_connection | timeout=5 | """ server, name = self._servers.get_with_name(name) msg, ip, port = server.receive_from(timeout=timeout, alias=connection) self._register_receive(server, label, name, connection=connection) return msg, ip, port
0.005398
def _visit_recur(self, item): """ Recursively visits children of item. :param item: object: project, folder or file we will add to upload_items if necessary. """ if item.kind == KindType.file_str: if item.need_to_send: self.add_upload_item(item.path) else: if item.kind == KindType.project_str: pass else: if not item.remote_id: self.add_upload_item(item.path) for child in item.children: self._visit_recur(child)
0.005059
def on_task_status(self, task):
    '''Called when a status pack arrives'''
    try:
        procesok = task['track']['process']['ok']
        if not self.projects[task['project']].task_queue.done(task['taskid']):
            logger.error('not processing pack: %(project)s:%(taskid)s %(url)s', task)
            return None
    except KeyError as e:
        logger.error("Bad status pack: %s", e)
        return None

    if procesok:
        ret = self.on_task_done(task)
    else:
        ret = self.on_task_failed(task)

    if task['track']['fetch'].get('time'):
        self._cnt['5m_time'].event((task['project'], 'fetch_time'),
                                   task['track']['fetch']['time'])
    if task['track']['process'].get('time'):
        self._cnt['5m_time'].event((task['project'], 'process_time'),
                                   task['track']['process'].get('time'))
    self.projects[task['project']].active_tasks.appendleft((time.time(), task))
    return ret
0.004664
def validate_metadata(self, metadata): """ Validate that the metadata of your ddo is valid. :param metadata: conforming to the Metadata accepted by Ocean Protocol, dict :return: bool """ response = self.requests_session.post( f'{self.url}/validate', data=json.dumps(metadata), headers=self._headers ) if response.content == b'true\n': return True else: logger.info(self._parse_search_response(response.content)) return False
0.005272
def envdict2listdict(envdict): """Dict --> Dict of lists""" sep = os.path.pathsep for key in envdict: if sep in envdict[key]: envdict[key] = [path.strip() for path in envdict[key].split(sep)] return envdict
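# Example on a POSIX-style environment, where os.path.pathsep is ':'. Only
# values that actually contain the separator are split; note the dict is
# modified in place and returned.
env = {'PATH': '/usr/bin:/bin', 'HOME': '/home/user'}
print(envdict2listdict(env))
# -> {'PATH': ['/usr/bin', '/bin'], 'HOME': '/home/user'}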
0.004032
def normalize(expr): """Pass through n-ary expressions, and eliminate empty branches. Variadic and binary expressions recursively visit all their children. If all children are eliminated then the parent expression is also eliminated: (& [removed] [removed]) => [removed] If only one child is left, it is promoted to replace the parent node: (& True) => True """ children = [] for child in expr.children: branch = normalize(child) if branch is None: continue if type(branch) is type(expr): children.extend(branch.children) else: children.append(branch) if len(children) == 0: return None if len(children) == 1: return children[0] return type(expr)(*children, start=children[0].start, end=children[-1].end)
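# Illustration of the rewrites performed above (the s-expression spellings
# here are informal; the source only requires .children, .start and .end on
# expression nodes):
#
#   (& (& a b) [removed] c)  ->  (& a b c)   # same-type child flattened
#   (& [removed] [removed])  ->  None        # all branches eliminated
#   (& a)                    ->  a           # single child promoted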
0.001148