Dataset columns: text (string, lengths 78 to 104k), score (float64, range 0 to 0.18)
def guarded(meth):
    """A decorator to add a sanity check to ConnectionResource methods."""
    @functools.wraps(meth)
    def _check(self, *args, **kwargs):
        self._check_conn_validity(meth.__name__)
        return meth(self, *args, **kwargs)
    return _check
0.00369
def run_cutadapt(job, r1_id, r2_id, fwd_3pr_adapter, rev_3pr_adapter): """ Adapter trimming for RNA-seq data :param JobFunctionWrappingJob job: passed automatically by Toil :param str r1_id: FileStoreID of fastq read 1 :param str r2_id: FileStoreID of fastq read 2 (if paired data) :param str fwd_3pr_adapter: Adapter sequence for the forward 3' adapter :param str rev_3pr_adapter: Adapter sequence for the reverse 3' adapter (second fastq pair) :return: R1 and R2 FileStoreIDs :rtype: tuple """ work_dir = job.fileStore.getLocalTempDir() if r2_id: require(rev_3pr_adapter, "Paired end data requires a reverse 3' adapter sequence.") # Retrieve files parameters = ['-a', fwd_3pr_adapter, '-m', '35'] if r1_id and r2_id: job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq')) job.fileStore.readGlobalFile(r2_id, os.path.join(work_dir, 'R2.fastq')) parameters.extend(['-A', rev_3pr_adapter, '-o', '/data/R1_cutadapt.fastq', '-p', '/data/R2_cutadapt.fastq', '/data/R1.fastq', '/data/R2.fastq']) else: job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq')) parameters.extend(['-o', '/data/R1_cutadapt.fastq', '/data/R1.fastq']) # Call: CutAdapt dockerCall(job=job, tool='quay.io/ucsc_cgl/cutadapt:1.9--6bd44edd2b8f8f17e25c5a268fedaab65fa851d2', workDir=work_dir, parameters=parameters) # Write to fileStore if r1_id and r2_id: r1_cut_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1_cutadapt.fastq')) r2_cut_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R2_cutadapt.fastq')) else: r1_cut_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1_cutadapt.fastq')) r2_cut_id = None return r1_cut_id, r2_cut_id
0.00359
def is_uid(s):
    """
    Input: string to check
    Output: True if UID, otherwise False
    """
    import re
    if len(s) != 16:
        return False
    pattern = r'[^\.a-f0-9]'
    if re.search(pattern, s.lower()):
        return False
    return True
0.011236
async def send_initial_metadata(self, *, metadata=None): """Coroutine to send headers with initial metadata to the client. In gRPC you can send initial metadata as soon as possible, because gRPC doesn't use `:status` pseudo header to indicate success or failure of the current request. gRPC uses trailers for this purpose, and trailers are sent during :py:meth:`send_trailing_metadata` call, which should be called in the end. .. note:: This coroutine will be called implicitly during first :py:meth:`send_message` coroutine call, if not called before explicitly. :param metadata: custom initial metadata, dict or list of pairs """ if self._send_initial_metadata_done: raise ProtocolError('Initial metadata was already sent') headers = [ (':status', '200'), ('content-type', self._content_type), ] metadata = MultiDict(metadata or ()) metadata, = await self._dispatch.send_initial_metadata(metadata) headers.extend(encode_metadata(metadata)) await self._stream.send_headers(headers) self._send_initial_metadata_done = True
0.001635
def postcode_in_state(self, state_abbr=None):
    """
    :example '4703'
    """
    if state_abbr is None:
        state_abbr = self.random_element(self.states_abbr)

    if state_abbr in self.states_abbr:
        postcode = "%d" % (self.generator.random.randint(
            self.states_postcode[state_abbr][0],
            self.states_postcode[state_abbr][1]))
        if len(postcode) == 3:
            postcode = "0%s" % postcode
        return postcode
    else:
        raise Exception('State Abbreviation not found in list')
0.003247
def plot_rebit_prior(prior, rebit_axes=REBIT_AXES, n_samples=2000, true_state=None, true_size=250, force_mean=None, legend=True, mean_color_index=2 ): """ Plots rebit states drawn from a given prior. :param qinfer.tomography.DensityOperatorDistribution prior: Distribution over rebit states to plot. :param list rebit_axes: List containing indices for the :math:`x` and :math:`z` axes. :param int n_samples: Number of samples to draw from the prior. :param np.ndarray true_state: State to be plotted as a "true" state for comparison. """ pallette = plt.rcParams['axes.color_cycle'] plot_rebit_modelparams(prior.sample(n_samples), c=pallette[0], label='Prior', rebit_axes=rebit_axes ) if true_state is not None: plot_rebit_modelparams(true_state, c=pallette[1], label='True', marker='*', s=true_size, rebit_axes=rebit_axes ) if hasattr(prior, '_mean') or force_mean is not None: mean = force_mean if force_mean is not None else prior._mean plot_rebit_modelparams( prior._basis.state_to_modelparams(mean)[None, :], edgecolors=pallette[mean_color_index], s=250, facecolors='none', linewidth=3, label='Mean', rebit_axes=rebit_axes ) plot_decorate_rebits(prior.basis, rebit_axes=rebit_axes ) if legend: plt.legend(loc='lower left', ncol=3, scatterpoints=1)
0.008398
def get_inputs( self, start=0, stop=None, threshold=None, security_level=None, ): # type: (int, Optional[int], Optional[int], Optional[int]) -> dict """ Gets all possible inputs of a seed and returns them, along with the total balance. This is either done deterministically (by generating all addresses until :py:meth:`find_transactions` returns an empty result), or by providing a key range to search. :param start: Starting key index. Defaults to 0. :param stop: Stop before this index. Note that this parameter behaves like the ``stop`` attribute in a :py:class:`slice` object; the stop index is *not* included in the result. If ``None`` (default), then this method will not stop until it finds an unused address. :param threshold: If set, determines the minimum threshold for a successful result: - As soon as this threshold is reached, iteration will stop. - If the command runs out of addresses before the threshold is reached, an exception is raised. .. note:: This method does not attempt to "optimize" the result (e.g., smallest number of inputs, get as close to ``threshold`` as possible, etc.); it simply accumulates inputs in order until the threshold is met. If ``threshold`` is 0, the first address in the key range with a non-zero balance will be returned (if it exists). If ``threshold`` is ``None`` (default), this method will return **all** inputs in the specified key range. :param security_level: Number of iterations to use when generating new addresses (see :py:meth:`get_new_addresses`). This value must be between 1 and 3, inclusive. If not set, defaults to :py:attr:`AddressGenerator.DEFAULT_SECURITY_LEVEL`. :return: Dict with the following structure:: { 'inputs': List[Address], Addresses with nonzero balances that can be used as inputs. 'totalBalance': int, Aggregate balance from all matching addresses. } Note that each Address in the result has its ``balance`` attribute set. Example: .. code-block:: python response = iota.get_inputs(...) input0 = response['inputs'][0] # type: Address input0.balance # 42 :raise: - :py:class:`iota.adapter.BadApiResponse` if ``threshold`` is not met. Not applicable if ``threshold`` is ``None``. References: - https://github.com/iotaledger/wiki/blob/master/api-proposal.md#getinputs """ return extended.GetInputsCommand(self.adapter)( seed=self.seed, start=start, stop=stop, threshold=threshold, securityLevel=security_level )
0.001512
def ndarray_to_instances(array, relation, att_template="Att-#", att_list=None):
    """
    Converts the numpy matrix into an Instances object and returns it.

    :param array: the numpy ndarray to convert
    :type array: numpy.ndarray
    :param relation: the name of the dataset
    :type relation: str
    :param att_template: the prefix to use for the attribute names, "#" is the 1-based index,
                         "!" is the 0-based index, "@" the relation name
    :type att_template: str
    :param att_list: the list of attribute names to use
    :type att_list: list
    :return: the generated instances object
    :rtype: Instances
    """
    if len(numpy.shape(array)) != 2:
        raise Exception("Number of array dimensions must be 2!")
    rows, cols = numpy.shape(array)

    # header
    atts = []
    if att_list is not None:
        if len(att_list) != cols:
            raise Exception(
                "Number of columns and provided attribute names differ: "
                + str(cols) + " != " + str(len(att_list)))
        for name in att_list:
            att = Attribute.create_numeric(name)
            atts.append(att)
    else:
        for i in range(cols):
            name = att_template.replace("#", str(i+1)).replace("!", str(i)).replace("@", relation)
            att = Attribute.create_numeric(name)
            atts.append(att)
    result = Instances.create_instances(relation, atts, rows)

    # data
    for i in range(rows):
        inst = Instance.create_instance(array[i])
        result.add_instance(inst)

    return result
0.002581
def _load_metadata(self):
    """Load metadata from the archive file"""
    logger.debug("Loading metadata information of archive %s", self.archive_path)

    cursor = self._db.cursor()
    select_stmt = "SELECT origin, backend_name, backend_version, " \
                  "category, backend_params, created_on " \
                  "FROM " + self.METADATA_TABLE + " " \
                  "LIMIT 1"
    cursor.execute(select_stmt)
    row = cursor.fetchone()
    cursor.close()

    if row:
        self.origin = row[0]
        self.backend_name = row[1]
        self.backend_version = row[2]
        self.category = row[3]
        self.backend_params = pickle.loads(row[4])
        self.created_on = str_to_datetime(row[5])
    else:
        logger.debug("Metadata of archive %s was empty", self.archive_path)

    logger.debug("Metadata of archive %s loaded", self.archive_path)
0.003125
def rehighlight(self):
    """
    Rehighlight the entire document, may be slow.
    """
    start = time.time()
    QtWidgets.QApplication.setOverrideCursor(
        QtGui.QCursor(QtCore.Qt.WaitCursor))
    try:
        super(SyntaxHighlighter, self).rehighlight()
    except RuntimeError:
        # cloned widget, no need to rehighlight the same document twice ;)
        pass
    QtWidgets.QApplication.restoreOverrideCursor()
    end = time.time()
    _logger().debug('rehighlight duration: %fs' % (end - start))
0.003497
def get_project_logs(self, request):
    """ Get logs from the log service.
    An unsuccessful operation will raise a LogException.

    :type request: GetProjectLogsRequest
    :param request: the GetProjectLogs request parameters class.

    :return: GetLogsResponse

    :raise: LogException
    """
    headers = {}
    params = {}
    if request.get_query() is not None:
        params['query'] = request.get_query()
    project = request.get_project()
    resource = "/logs"
    (resp, header) = self._send("GET", project, None, resource, params, headers)
    return GetLogsResponse(resp, header)
0.008584
def fetchGroupInfo(self, *group_ids):
    """
    Get groups' info from IDs, unordered

    :param group_ids: One or more group ID(s) to query
    :return: :class:`models.Group` objects, labeled by their ID
    :rtype: dict
    :raises: FBchatException if request failed
    """
    threads = self.fetchThreadInfo(*group_ids)
    groups = {}
    for id_, thread in threads.items():
        if thread.type == ThreadType.GROUP:
            groups[id_] = thread
        else:
            raise FBchatUserError("Thread {} was not a group".format(thread))
    return groups
0.004769
def outliers_grubbs(x, hypo = False, alpha = 0.05): """ Grubbs' Test for Outliers [1]_. This is the two-sided version of the test. The null hypothesis implies that there are no outliers in the data set. Parameters ---------- x : array_like or ndarray, 1d An array, any object exposing the array interface, containing data to test for an outlier in. hypo : bool, optional Specifies whether to return a bool value of a hypothesis test result. Returns True when we can reject the null hypothesis. Otherwise, False. Available options are: 1) True - return a hypothesis test result 2) False - return a filtered array without an outlier (default) alpha : float, optional Significance level for a hypothesis test. Default is 0.05. Returns ------- Numpy array if hypo is False or a bool value of a hypothesis test result. Notes ----- .. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda35h1.htm Examples -------- >>> x = np.array([199.31,199.53,200.19,200.82,201.92,201.95,202.18,245.57]) >>> ph.outliers_grubbs(x) array([ 199.31, 199.53, 200.19, 200.82, 201.92, 201.95, 202.18]) """ val = np.max(np.abs(x - np.mean(x))) ind = np.argmax(np.abs(x - np.mean(x))) G = val / np.std(x, ddof=1) N = len(x) result = G > (N - 1)/np.sqrt(N) * np.sqrt((t.ppf(1-alpha/(2*N), N-2) ** 2) / (N - 2 + t.ppf(1-alpha/(2*N), N-2) ** 2 )) if hypo: return result else: if result: return np.delete(x, ind) else: return x
0.004271
def create(self, public_key, friendly_name=values.unset, account_sid=values.unset): """ Create a new PublicKeyInstance :param unicode public_key: A URL encoded representation of the public key :param unicode friendly_name: A string to describe the resource :param unicode account_sid: The Subaccount this Credential should be associated with. :returns: Newly created PublicKeyInstance :rtype: twilio.rest.accounts.v1.credential.public_key.PublicKeyInstance """ data = values.of({ 'PublicKey': public_key, 'FriendlyName': friendly_name, 'AccountSid': account_sid, }) payload = self._version.create( 'POST', self._uri, data=data, ) return PublicKeyInstance(self._version, payload, )
0.005714
def find_srv_by_name_and_hostname(self, host_name, sdescr):
    """Get a specific service based on a host_name and service_description

    :param host_name: host name linked to needed service
    :type host_name: str
    :param sdescr: service name we need
    :type sdescr: str
    :return: the service found or None
    :rtype: alignak.objects.service.Service
    """
    key = (host_name, sdescr)
    return self.name_to_item.get(key, None)
0.004124
def get_store_env_tmp():
    '''Returns an unused random filepath.'''
    tempdir = tempfile.gettempdir()
    temp_name = 'envstore{0:0>3d}'
    temp_path = unipath(tempdir, temp_name.format(random.getrandbits(9)))
    if not os.path.exists(temp_path):
        return temp_path
    else:
        return get_store_env_tmp()
0.003096
def log_user_in(app_id, token, ticket, response, cookie_name='user',
                url_detail='https://pswdless.appspot.com/rest/detail'):
    '''
    Log the user in, setting the user data dictionary in a cookie.
    Returns a command that executes the logic.
    '''
    return LogUserIn(app_id, token, ticket, response, cookie_name, url_detail)
0.002915
def get_aws_secrets_from_file(credentials_file): # type: (str) -> Set[str] """Extract AWS secrets from configuration files. Read an ini-style configuration file and return a set with all found AWS secret access keys. """ aws_credentials_file_path = os.path.expanduser(credentials_file) if not os.path.exists(aws_credentials_file_path): return set() parser = configparser.ConfigParser() try: parser.read(aws_credentials_file_path) except configparser.MissingSectionHeaderError: return set() keys = set() for section in parser.sections(): for var in ( 'aws_secret_access_key', 'aws_security_token', 'aws_session_token', ): try: key = parser.get(section, var).strip() if key: keys.add(key) except configparser.NoOptionError: pass return keys
0.001057
def _load_config_files(self): # type: () -> None """Loads configuration from configuration files """ config_files = dict(self._iter_config_files()) if config_files[kinds.ENV][0:1] == [os.devnull]: logger.debug( "Skipping loading configuration files due to " "environment's PIP_CONFIG_FILE being os.devnull" ) return for variant, files in config_files.items(): for fname in files: # If there's specific variant set in `load_only`, load only # that variant, not the others. if self.load_only is not None and variant != self.load_only: logger.debug( "Skipping file '%s' (variant: %s)", fname, variant ) continue parser = self._load_file(variant, fname) # Keeping track of the parsers used self._parsers[variant].append((fname, parser))
0.002876
def export2hub(weight_file, hub_dir, options): """Exports a TF-Hub module """ spec = make_module_spec(options, str(weight_file)) try: with tf.Graph().as_default(): module = hub.Module(spec) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) if hub_dir.exists(): shutil.rmtree(hub_dir) module.export(str(hub_dir), sess) finally: pass
0.00207
def up_by_arrival(*filters, local_dir=".", remote_dir=DEFAULT_REMOTE_DIR): """Monitors a local directory and generates sets of new files to be uploaded to FlashAir. Sets to upload are generated in a tuple like (Direction.up, {...}). The generator yields before each upload actually takes place.""" local_monitor = watch_local_files(*filters, local_dir=local_dir) _, file_set = next(local_monitor) _notify_sync_ready(len(file_set), local_dir, remote_dir) for new_arrivals, file_set in local_monitor: yield Direction.up, new_arrivals # where new_arrivals is possibly empty if new_arrivals: _notify_sync(Direction.up, new_arrivals) up_by_files(new_arrivals, remote_dir) _notify_sync_ready(len(file_set), local_dir, remote_dir)
0.002475
def start(self, plugin_name=None, kwargs=None):
    """Start method: initial data structures and store some meta data."""
    self.output = []  # Start basically resets the output data
    super(WorkbenchRenderer, self).start(plugin_name=plugin_name)
    return self
0.010676
def handle_tabbed_response(self, tab_group, context):
    """Sends back an AJAX-appropriate response for the tab group if needed.

    Otherwise renders the response as normal.
    """
    if self.request.is_ajax():
        if tab_group.selected:
            return http.HttpResponse(tab_group.selected.render())
        else:
            return http.HttpResponse(tab_group.render())
    return self.render_to_response(context)
0.00432
def filter_status(self, sample=None, subset=None, stds=False): """ Prints the current status of filters for specified samples. Parameters ---------- sample : str Which sample to print. subset : str Specify a subset stds : bool Whether or not to include standards. """ s = '' if sample is None and subset is None: if not self._has_subsets: s += 'Subset: All Samples\n\n' s += self.data[self.subsets['All_Samples'][0]].filt.__repr__() else: for n in sorted(str(sn) for sn in self._subset_names): if n in self.subsets: pass elif int(n) in self.subsets: n = int(n) pass s += 'Subset: ' + str(n) + '\n' s += 'Samples: ' + ', '.join(self.subsets[n]) + '\n\n' s += self.data[self.subsets[n][0]].filt.__repr__() if len(self.subsets['not_in_set']) > 0: s += '\nNot in Subset:\n' s += 'Samples: ' + ', '.join(self.subsets['not_in_set']) + '\n\n' s += self.data[self.subsets['not_in_set'][0]].filt.__repr__() print(s) return elif sample is not None: s += 'Sample: ' + sample + '\n' s += self.data[sample].filt.__repr__() print(s) return elif subset is not None: if isinstance(subset, (str, int, float)): subset = [subset] for n in subset: s += 'Subset: ' + str(n) + '\n' s += 'Samples: ' + ', '.join(self.subsets[n]) + '\n\n' s += self.data[self.subsets[n][0]].filt.__repr__() print(s) return
0.002083
def toggle_custom_value(self):
    """Enable or disable the custom value line edit."""
    radio_button_checked_id = self.input_button_group.checkedId()
    if (radio_button_checked_id == len(self._parameter.options) - 1):
        self.custom_value.setDisabled(False)
    else:
        self.custom_value.setDisabled(True)
0.00554
def get_in_srvc_node_ip_addr(cls, tenant_id):
    """Retrieves the IN service node IP address. """
    if tenant_id not in cls.serv_obj_dict:
        LOG.error("Fabric not prepared for tenant %s", tenant_id)
        return
    tenant_obj = cls.serv_obj_dict.get(tenant_id)
    in_subnet_dict = tenant_obj.get_in_ip_addr()
    next_hop = str(netaddr.IPAddress(in_subnet_dict.get('subnet')) + 2)
    return next_hop
0.004494
def get_parentparser(parser, description=None, help=True):
    """
    :param parser: :class:`argparse.ArgumentParser` instance or None
    :param description: string used to build a new parser if parser is None
    :param help: flag used to build a new parser if parser is None
    :returns: if parser is None the new parser; otherwise the `.parentparser`
              attribute (if set) or the parser itself (if not set)
    """
    if parser is None:
        return argparse.ArgumentParser(
            description=description, add_help=help)
    elif hasattr(parser, 'parentparser'):
        return parser.parentparser
    else:
        return parser
0.001527
def load_module(self, fullname):
    """
    load_module is always called with the same argument as finder's
    find_module, see "How Import Works"
    """
    mod = super(JsonLoader, self).load_module(fullname)
    try:
        with codecs.open(self.cfg_file, 'r', 'utf-8') as f:
            mod.__dict__.update(json.load(f))
    except ValueError:
        # if raise here, traceback will contain ValueError
        self.e = "ValueError"
        self.err_msg = sys.exc_info()[1]

    if self.e == "ValueError":
        err_msg = "\nJson file not valid: "
        err_msg += self.cfg_file + '\n'
        err_msg += str(self.err_msg)
        raise InvalidJsonError(err_msg)

    return mod
0.002625
def wireHandlers(cfg): """ If the device is configured to run against a remote server, ping that device on a scheduled basis with our current state. :param cfg: the config object. :return: """ logger = logging.getLogger('recorder') httpPoster = cfg.handlers.get('remote') csvLogger = cfg.handlers.get('local') activeHandler = None if httpPoster is None: if csvLogger is None: logger.warning("App is running with discard handler only, ALL DATA WILL BE DISCARDED!!!") else: logger.info("App is running in standalone mode, logging data to local filesystem") activeHandler = csvLogger else: logger.info("App is running against remote server, logging data to " + httpPoster.target) activeHandler = httpPoster heartbeater.serverURL = httpPoster.target heartbeater.ping() if activeHandler is not None: for device in cfg.recordingDevices.values(): if activeHandler is httpPoster: httpPoster.deviceName = device.name copied = copy.copy(activeHandler) device.dataHandler = copied if not cfg.useAsyncHandlers else AsyncHandler('recorder', copied)
0.00487
def product(self):
    """ Return the USB device's product string descriptor.

    This property will cause some USB traffic the first time it is accessed
    and cache the resulting value for future use.
    """
    if self._product is None:
        self._product = util.get_string(self, self.iProduct)
    return self._product
0.005618
def parse_option(self, option, block_name, *values):
    """
    Parse duration option for timer.
    """
    try:
        if len(values) != 1:
            raise TypeError
        self.total_duration = int(values[0])
        if self.total_duration <= 0:
            raise ValueError
    except ValueError:
        pattern = u'"{0}" must be an integer > 0'
        raise ValueError(pattern.format(option))
0.004435
def _selftoken_expired():
    '''
    Validate the current token exists and is still valid
    '''
    try:
        verify = __opts__['vault'].get('verify', None)
        url = '{0}/v1/auth/token/lookup-self'.format(__opts__['vault']['url'])
        if 'token' not in __opts__['vault']['auth']:
            return True
        headers = {'X-Vault-Token': __opts__['vault']['auth']['token']}
        response = requests.get(url, headers=headers, verify=verify)
        if response.status_code != 200:
            return True
        return False
    except Exception as e:
        raise salt.exceptions.CommandExecutionError(
            'Error while looking up self token : {0}'.format(six.text_type(e))
        )
0.001395
def createLabels2D(self):
    """ 2D labeling at zmax """
    logger.debug("  Creating 2D labels...")
    self.zmax = np.argmax(self.values, axis=1)
    self.vmax = self.values[np.arange(len(self.pixels), dtype=int), self.zmax]

    kwargs = dict(pixels=self.pixels, values=self.vmax, nside=self.nside,
                  threshold=self.threshold, xsize=self.xsize)
    labels, nlabels = CandidateSearch.labelHealpix(**kwargs)

    self.nlabels = nlabels
    self.labels = np.repeat(labels, len(self.distances)).reshape(len(labels), len(self.distances))
    return self.labels, self.nlabels
0.022764
def url(self, name):
    """
    Since we assume all public storage with no authorization keys, we can
    just simply dump out a URL rather than having to query S3 for new keys.
    """
    name = urllib.quote_plus(self._clean_name(name), safe='/')

    if self.bucket_cname:
        return "http://%s/%s" % (self.bucket_cname, name)
    elif self.host:
        return "http://%s/%s/%s" % (self.host, self.bucket_name, name)
    # No host? Then it's the default region
    return "http://s3.amazonaws.com/%s/%s" % (self.bucket_name, name)
0.003425
def dumps(data, ac_parser=None, **options):
    """
    Return string representation of 'data' in forced type format.

    :param data: Config data object to dump
    :param ac_parser: Forced parser type or ID or parser object
    :param options: see :func:`dump`

    :return: Backend-specific string representation for the given data
    :raises: ValueError, UnknownProcessorTypeError
    """
    psr = find(None, forced_type=ac_parser)
    return psr.dumps(data, **options)
0.002101
def _getter(self):
    """
    Return a function object suitable for the "get" side of the attribute
    property descriptor.
    """
    def get_attr_value(obj):
        attr_str_value = obj.get(self._clark_name)
        if attr_str_value is None:
            raise InvalidXmlError(
                "required '%s' attribute not present on element %s" %
                (self._attr_name, obj.tag)
            )
        return self._simple_type.from_xml(attr_str_value)
    get_attr_value.__doc__ = self._docstring
    return get_attr_value
0.003361
def loggerLevel(self, logger):
    """
    Returns the level for the given logger.

    :param      logger | <str>

    :return     <int>
    """
    try:
        return self._loggerLevels[logger]
    except KeyError:
        items = sorted(self._loggerLevels.items())
        for key, lvl in items:
            if logger.startswith(key):
                return lvl
        return logging.NOTSET
0.008333
def open_in_browser(self, session, output_filename=None): """ Open the rendered HTML in a webbrowser. If output_filename=None (the default), a tempfile is used. The filename of the HTML file is returned. """ if output_filename is None: output_file = tempfile.NamedTemporaryFile(suffix='.html', delete=False) output_filename = output_file.name with codecs.getwriter('utf-8')(output_file) as f: f.write(self.render(session)) else: with codecs.open(output_filename, 'w', 'utf-8') as f: f.write(self.render(session)) from pyinstrument.vendor.six.moves import urllib url = urllib.parse.urlunparse(('file', '', output_filename, '', '', '')) webbrowser.open(url) return output_filename
0.004706
def delete_virtual_mfa_device(serial, region=None, key=None, keyid=None, profile=None): ''' Deletes the specified virtual MFA device. CLI Example: .. code-block:: bash salt myminion boto_iam.delete_virtual_mfa_device serial_num ''' conn = __utils__['boto3.get_connection_func']('iam')() try: conn.delete_virtual_mfa_device(SerialNumber=serial) log.info('Deleted virtual MFA device %s.', serial) return True except botocore.exceptions.ClientError as e: log.debug(e) if 'NoSuchEntity' in six.text_type(e): log.info('Virtual MFA device %s not found.', serial) return True log.error('Failed to delete virtual MFA device %s.', serial) return False
0.002614
def get_album_songs(self, album_id):
    """Get all songs of an album.

    warning: uses the old api.

    :param album_id: album id.
    :return: a list of Song objects.
    """
    url = 'http://music.163.com/api/album/{}/'.format(album_id)
    result = self.get_request(url)

    songs = result['album']['songs']
    songs = [Song(song['id'], song['name']) for song in songs]
    return songs
0.004662
def get(self, request, username, course_key, subsection_id): """ Returns completion for a (user, subsection, course). """ def get_completion(course_completions, all_blocks, block_id): """ Recursively get the aggregate completion for a subsection, given the subsection block and a list of all blocks. Parameters: course_completions: a dictionary of completion values by block IDs all_blocks: a dictionary of the block structure for a subsection block_id: an ID of a block for which to get completion """ block = all_blocks.get(block_id) child_ids = block.get('children', []) if not child_ids: return course_completions.get(block.serializer.instance, 0) completion = 0 total_children = 0 for child_id in child_ids: completion += get_completion(course_completions, all_blocks, child_id) total_children += 1 return int(completion == total_children) user_id = User.objects.get(username=username).id block_types_filter = [ 'course', 'chapter', 'sequential', 'vertical', 'html', 'problem', 'video', 'discussion', 'drag-and-drop-v2' ] blocks = get_blocks( request, UsageKey.from_string(subsection_id), nav_depth=2, requested_fields=[ 'children' ], block_types_filter=block_types_filter ) course_completions = BlockCompletion.get_course_completions(user_id, CourseKey.from_string(course_key)) aggregated_completion = get_completion(course_completions, blocks['blocks'], blocks['root']) return Response({"completion": aggregated_completion}, status=status.HTTP_200_OK)
0.004006
def perform_smooth(x_values, y_values, span=None, smoother_cls=None):
    """
    Convenience function to run the basic smoother.

    Parameters
    ----------
    x_values : iterable
        List of x value observations
    y_values : iterable
        List of y value observations
    span : float, optional
        Fraction of data to use as the window
    smoother_cls : Class
        The class of smoother to use to smooth the data

    Returns
    -------
    smoother : object
        The smoother object with results stored on it.
    """
    if smoother_cls is None:
        smoother_cls = DEFAULT_BASIC_SMOOTHER

    smoother = smoother_cls()
    smoother.specify_data_set(x_values, y_values)
    smoother.set_span(span)
    smoother.compute()
    return smoother
0.001294
def cancel_task(self, task_id):
    """Cancel or 'un-schedule' a task.

    :param task_id: identifier of the task to cancel

    :raises NotFoundError: raised when the requested task is not
        found in the registry
    """
    self.registry.remove(task_id)
    self._scheduler.cancel_job_task(task_id)
    logger.info("Task %s canceled", task_id)
0.005195
def _inc_path(self):
    """:returns: The path of the next sibling of a given node path."""
    newpos = self._str2int(self.path[-self.steplen:]) + 1
    key = self._int2str(newpos)
    if len(key) > self.steplen:
        raise PathOverflow(_("Path Overflow from: '%s'" % (self.path, )))
    return '{0}{1}{2}'.format(
        self.path[:-self.steplen],
        self.alphabet[0] * (self.steplen - len(key)),
        key
    )
0.004301
def get_prep_value(self, value):
    """Override the base class so it doesn't cast all values to strings.

    psqlextra supports expressions in hstore fields, so casting all
    values to strings is a bad idea."""
    value = Field.get_prep_value(self, value)

    if isinstance(value, dict):
        prep_value = {}
        for key, val in value.items():
            if isinstance(val, Expression):
                prep_value[key] = val
            elif val is not None:
                prep_value[key] = str(val)
            else:
                prep_value[key] = val
        value = prep_value

    if isinstance(value, list):
        value = [str(item) for item in value]

    return value
0.002587
def stop_cfms(self, stop_cfms):
    '''Set the CFM values for this object's DOF limits.

    Parameters
    ----------
    stop_cfms : float or sequence of float
        A CFM value to set on all degrees of freedom limits, or a list
        containing one such value for each degree of freedom limit.
    '''
    _set_params(self.ode_obj, 'StopCFM', stop_cfms, self.ADOF + self.LDOF)
0.004819
def listChecksumAlgorithms(self, vendorSpecific=None):
    """See Also: listChecksumAlgorithmsResponse()

    Args:
      vendorSpecific:

    Returns:
    """
    response = self.listChecksumAlgorithmsResponse(vendorSpecific)
    return self._read_dataone_type_response(response, 'ChecksumAlgorithmList')
0.008982
def _marker(self, lat, long, text, xmap, color=None, icon=None, text_mark=False, style=None): """ Adds a marker to the default map """ kwargs = {} if icon is not None: kwargs["icon"] = icon if color is not None: kwargs["color"] = color if style is None: style = "font-size:18pt;font-weight:bold;" + \ "color:black;border-radius:0.5" try: xicon1 = folium.Icon(**kwargs) if text_mark is True: xicon = DivIcon( icon_size=(150, 36), icon_anchor=(0, 0), html='<div style="' + style + '">' + text + '</div>', ) folium.Marker([lat, long], popup=text, icon=xicon).add_to(xmap) folium.Marker([lat, long], popup=text, icon=xicon1).add_to(xmap) return xmap except Exception as e: self.err(e, self._marker, "Can not get marker")
0.002778
def _openResources(self):
    """ Uses scipy.io.wavfile to open the underlying file. """
    try:
        rate, data = scipy.io.wavfile.read(self._fileName, mmap=True)
    except Exception as ex:
        logger.warning(ex)
        logger.warning("Unable to read wav with memory mapping. Trying without now.")
        rate, data = scipy.io.wavfile.read(self._fileName, mmap=False)
    self._array = data
    self.attributes['rate'] = rate
0.00625
def fromrdd(rdd, dims=None, nrecords=None, dtype=None, labels=None, ordered=False): """ Load images from a Spark RDD. Input RDD must be a collection of key-value pairs where keys are singleton tuples indexing images, and values are 2d or 3d ndarrays. Parameters ---------- rdd : SparkRDD An RDD containing the images. dims : tuple or array, optional, default = None Image dimensions (if provided will avoid check). nrecords : int, optional, default = None Number of images (if provided will avoid check). dtype : string, default = None Data numerical type (if provided will avoid check) labels : array, optional, default = None Labels for records. If provided, should be one-dimensional. ordered : boolean, optional, default = False Whether or not the rdd is ordered by key """ from .images import Images from bolt.spark.array import BoltArraySpark if dims is None or dtype is None: item = rdd.values().first() dtype = item.dtype dims = item.shape if nrecords is None: nrecords = rdd.count() def process_keys(record): k, v = record if isinstance(k, int): k = (k,) return k, v values = BoltArraySpark(rdd.map(process_keys), shape=(nrecords,) + tuple(dims), dtype=dtype, split=1, ordered=ordered) return Images(values, labels=labels)
0.00208
def get_nlcd_fn():
    """Calls external shell script `get_nlcd.sh` to fetch:

    2011 Land Use Land Cover (nlcd) grids, 30 m

    http://www.mrlc.gov/nlcd11_leg.php
    """
    #This is original filename, which requires ~17 GB
    #nlcd_fn = os.path.join(datadir, 'nlcd_2011_landcover_2011_edition_2014_10_10/nlcd_2011_landcover_2011_edition_2014_10_10.img')
    #get_nlcd.sh now creates a compressed GTiff, which is 1.1 GB
    nlcd_fn = os.path.join(datadir, 'nlcd_2011_landcover_2011_edition_2014_10_10/nlcd_2011_landcover_2011_edition_2014_10_10.tif')
    if not os.path.exists(nlcd_fn):
        cmd = ['get_nlcd.sh',]
        #subprocess.call(cmd)
        sys.exit("Missing nlcd data source. If already downloaded, specify correct datadir. If not, run `%s` to download" % cmd[0])
    return nlcd_fn
0.012376
def inject_documentation(**options):
    """
    Generate configuration documentation in reStructuredText_ syntax.

    :param options: Any keyword arguments are passed on to the
                    :class:`ConfigLoader` initializer.

    This method injects the generated documentation into the output
    generated by cog_.

    .. _cog: https://pypi.python.org/pypi/cogapp
    """
    import cog
    loader = ConfigLoader(**options)
    cog.out("\n" + loader.documentation + "\n\n")
0.002058
def start(self):
    """
    Start the daemon
    """
    # Check for a pidfile to see if the daemon already runs
    try:
        pf = file(self.pidfile, 'r')
        pid = int(pf.read().strip())
        pf.close()
        os.kill(pid, 0)
    except IOError:
        pid = None
    except OSError:
        pid = None

    if pid:
        message = "pidfile %s already exist. Daemon already running?\n"
        sys.stderr.write(message % self.pidfile)
        sys.exit(1)

    # Start the daemon
    self._daemonize()
0.003373
def validate(self):
    """ Validates that all required fields are present """
    if not self.start:
        raise ValueError("Event has no start date")
    if not self.end:
        raise ValueError("Event has no end date")
    if self.end < self.start:
        raise ValueError("Start date is after end date")
    if self.reminder_minutes_before_start and not isinstance(self.reminder_minutes_before_start, int):
        raise TypeError("reminder_minutes_before_start must be of type int")
    if self.is_all_day and not isinstance(self.is_all_day, bool):
        raise TypeError("is_all_day must be of type bool")
0.011475
def setMethod(self, method, override_analyses=False):
    """ Assigns the specified method to the Analyses from the Worksheet.
    The method is only set on an Analysis if doing so keeps its integrity.
    If an analysis has already been assigned to a method, it won't be
    overridden.
    Returns the number of analyses affected.
    """
    analyses = [an for an in self.getAnalyses()
                if (not an.getMethod() or
                    not an.getInstrument() or
                    override_analyses) and an.isMethodAllowed(method)]
    total = 0
    for an in analyses:
        success = False
        if an.isMethodAllowed(method):
            success = an.setMethod(method)
        if success is True:
            total += 1

    self.getField('Method').set(self, method)
    return total
0.002217
def _getitem(item, i):
    """Extract value or values from dicts.

    Covers the case of a single key or multiple keys. If not found,
    return placeholders instead.
    """
    if not isinstance(i, (tuple, list)):
        return item.get(i, _none)
    type_ = list if isinstance(item, list) else tuple
    return type_(item.get(j, _none) for j in i)
0.002833
def remove_nondescendants_of(self, node):
    """Remove all of the non-descendants operation nodes of node."""
    if isinstance(node, int):
        warnings.warn('Calling remove_nondescendants_of() with a node id is deprecated,'
                      ' use a DAGNode instead',
                      DeprecationWarning, 2)
        node = self._id_to_node[node]
    dec = nx.descendants(self._multi_graph, node)
    comp = list(set(self._multi_graph.nodes()) - set(dec))
    for n in comp:
        if n.type == "op":
            self.remove_op_node(n)
0.005042
def nanvar(values, axis=None, skipna=True, ddof=1, mask=None): """ Compute the variance along given axis while ignoring NaNs Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. mask : ndarray[bool], optional nan-mask if known Returns ------- result : float Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, np.nan, 2, 3]) >>> nanops.nanvar(s) 1.0 """ values = com.values_from_object(values) dtype = values.dtype if mask is None: mask = isna(values) if is_any_int_dtype(values): values = values.astype('f8') values[mask] = np.nan if is_float_dtype(values): count, d = _get_counts_nanvar(mask, axis, ddof, values.dtype) else: count, d = _get_counts_nanvar(mask, axis, ddof) if skipna: values = values.copy() np.putmask(values, mask, 0) # xref GH10242 # Compute variance via two-pass algorithm, which is stable against # cancellation errors and relatively accurate for small numbers of # observations. # # See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance avg = _ensure_numeric(values.sum(axis=axis, dtype=np.float64)) / count if axis is not None: avg = np.expand_dims(avg, axis) sqr = _ensure_numeric((avg - values) ** 2) np.putmask(sqr, mask, 0) result = sqr.sum(axis=axis, dtype=np.float64) / d # Return variance as np.float64 (the datatype used in the accumulator), # unless we were dealing with a float array, in which case use the same # precision as the original values array. if is_float_dtype(dtype): result = result.astype(dtype) return _wrap_results(result, values.dtype)
0.000482
def get_query_kwargs(es_defs):
    """
    Reads the es_defs and returns a dict of special kwargs to use when
    querying for data of an instance of a class

    reference: rdfframework.sparl.queries.sparqlAllItemDataTemplate.rq
    """
    rtn_dict = {}
    if es_defs:
        if es_defs.get("kds_esSpecialUnion"):
            rtn_dict['special_union'] = \
                es_defs["kds_esSpecialUnion"][0]
        if es_defs.get("kds_esQueryFilter"):
            rtn_dict['filters'] = \
                es_defs["kds_esQueryFilter"][0]
    return rtn_dict
0.001779
def load_cifar10_dataset(cifar_dir, mode='supervised'): """Load the cifar10 dataset. :param cifar_dir: path to the dataset directory (cPicle format from: https://www.cs.toronto.edu/~kriz/cifar.html) :param mode: 'supervised' or 'unsupervised' mode :return: train, test data: for (X, y) if 'supervised', for (X) if 'unsupervised' """ # Training set trX = None trY = np.array([]) # Test set teX = np.array([]) teY = np.array([]) for fn in os.listdir(cifar_dir): if not fn.startswith('batches') and not fn.startswith('readme'): fo = open(os.path.join(cifar_dir, fn), 'rb') data_batch = pickle.load(fo) fo.close() if fn.startswith('data'): if trX is None: trX = data_batch['data'] trY = data_batch['labels'] else: trX = np.concatenate((trX, data_batch['data']), axis=0) trY = np.concatenate((trY, data_batch['labels']), axis=0) if fn.startswith('test'): teX = data_batch['data'] teY = data_batch['labels'] trX = trX.astype(np.float32) / 255. teX = teX.astype(np.float32) / 255. if mode == 'supervised': return trX, trY, teX, teY elif mode == 'unsupervised': return trX, teX
0.000712
def provides(arg_name=None, annotated_with=None, in_scope=None): """Modifies the binding of a provider method. If arg_name is specified, then the created binding is for that arg name instead of the one gotten from the provider method name (e.g., 'foo' from 'provide_foo'). If annotated_with is specified, then the created binding includes that annotation object. If in_scope is specified, then the created binding is in the scope with that scope ID. At least one of the args must be specified. A provider method may not be decorated with @provides() twice. Args: arg_name: the name of the arg to annotate on the decorated function annotated_with: an annotation object in_scope: a scope ID Returns: a function that will decorate functions passed to it """ if arg_name is None and annotated_with is None and in_scope is None: raise errors.EmptyProvidesDecoratorError(locations.get_back_frame_loc()) return _get_pinject_wrapper(locations.get_back_frame_loc(), provider_arg_name=arg_name, provider_annotated_with=annotated_with, provider_in_scope_id=in_scope)
0.001601
def load_json(self):
    """Load JSON from the request body and store them in
    self.request.arguments, like Tornado does by default for POSTed form
    parameters.

    If JSON cannot be decoded:

    :raises ValueError: JSON could not be decoded
    """
    try:
        self.request.arguments = json.loads(self.request.body)
    except ValueError:
        msg = "Could not decode JSON: %s" % self.request.body
        self.logger.debug(msg)
        self.raise_error(400, msg)
0.003795
def from_env(parser_modules: t.Optional[t.Union[t.List[str], t.Tuple[str]]] = DEFAULT_PARSER_MODULES, env: t.Optional[t.Dict[str, str]] = None, silent: bool = False, suppress_logs: bool = False, extra: t.Optional[dict] = None) -> 'ConfigLoader': """ Creates an instance of :class:`~django_docker_helpers.config.ConfigLoader` with parsers initialized from environment variables. By default it tries to initialize all bundled parsers. Parsers may be customized with ``parser_modules`` argument or ``CONFIG__PARSERS`` environment variable. Environment variable has a priority over the method argument. :param parser_modules: a list of dot-separated module paths :param env: a dict with environment variables, default is ``os.environ`` :param silent: passed to :class:`~django_docker_helpers.config.ConfigLoader` :param suppress_logs: passed to :class:`~django_docker_helpers.config.ConfigLoader` :param extra: pass extra arguments to *every* parser :return: an instance of :class:`~django_docker_helpers.config.ConfigLoader` Example: :: env = { 'CONFIG__PARSERS': 'EnvironmentParser,RedisParser,YamlParser', 'ENVIRONMENTPARSER__SCOPE': 'nested', 'YAMLPARSER__CONFIG': './tests/data/config.yml', 'REDISPARSER__HOST': 'wtf.test', 'NESTED__VARIABLE': 'i_am_here', } loader = ConfigLoader.from_env(env=env) assert [type(p) for p in loader.parsers] == [EnvironmentParser, RedisParser, YamlParser] assert loader.get('variable') == 'i_am_here', 'Ensure env copied from ConfigLoader' loader = ConfigLoader.from_env(parser_modules=['EnvironmentParser'], env={}) """ env = env or os.environ extra = extra or {} environment_parser = EnvironmentParser(scope='config', env=env) silent = environment_parser.get('silent', silent, coerce_type=bool) suppress_logs = environment_parser.get('suppress_logs', suppress_logs, coerce_type=bool) env_parsers = environment_parser.get('parsers', None, coercer=comma_str_to_list) if not env_parsers and not parser_modules: raise ValueError('Must specify `CONFIG__PARSERS` env var or `parser_modules`') if env_parsers: parser_classes = ConfigLoader.import_parsers(env_parsers) else: parser_classes = ConfigLoader.import_parsers(parser_modules) parsers = [] for parser_class in parser_classes: parser_options = ConfigLoader.load_parser_options_from_env(parser_class, env=env) _init_args = inspect.getfullargspec(parser_class.__init__).args # add extra args if parser's __init__ can take it it if 'env' in _init_args: parser_options['env'] = env for k, v in extra.items(): if k in _init_args: parser_options[k] = v parser_instance = parser_class(**parser_options) parsers.append(parser_instance) return ConfigLoader(parsers=parsers, silent=silent, suppress_logs=suppress_logs)
0.006333
def untrain_token(self, word, count):
    """
    Untrains a particular token (decreases the weight/count of it)

    :param word: the token we're going to untrain
    :type word: str
    :param count: the number of occurrences in the sample
    :type count: int
    """
    if word not in self.tokens:
        return

    # If we're trying to untrain more tokens than we have, we end at 0
    count = min(count, self.tokens[word])

    self.tokens[word] -= count
    self.tally -= count
0.003731
def quaternion_about_axis(angle, axis):
    """Return quaternion for rotation about axis.

    >>> q = quaternion_about_axis(0.123, [1, 0, 0])
    >>> np.allclose(q, [0.99810947, 0.06146124, 0, 0])
    True

    """
    q = np.array([0.0, axis[0], axis[1], axis[2]])
    qlen = vector_norm(q)
    if qlen > _EPS:
        q *= math.sin(angle / 2.0) / qlen
    q[0] = math.cos(angle / 2.0)
    return q
0.0025
def decrypt_get_item(decrypt_method, crypto_config_method, read_method, **kwargs): # type: (Callable, Callable, Callable, **Any) -> Dict # TODO: narrow this down """Transparently decrypt an item after getting it from the table. :param callable decrypt_method: Method to use to decrypt item :param callable crypto_config_method: Method that accepts ``kwargs`` and provides a :class:`CryptoConfig` :param callable read_method: Method that reads from the table :param **kwargs: Keyword arguments to pass to ``read_method`` :return: DynamoDB response :rtype: dict """ validate_get_arguments(kwargs) crypto_config, ddb_kwargs = crypto_config_method(**kwargs) response = read_method(**ddb_kwargs) if "Item" in response: response["Item"] = decrypt_method( item=response["Item"], crypto_config=crypto_config.with_item(_item_transformer(decrypt_method)(response["Item"])), ) return response
0.004073
def check_rdd_dtype(rdd, expected_dtype): """Checks if the blocks in the RDD matches the expected types. Parameters: ----------- rdd: splearn.BlockRDD The RDD to check expected_dtype: {type, list of types, tuple of types, dict of types} Expected type(s). If the RDD is a DictRDD the parameter type is restricted to dict. Returns: -------- accept: bool Returns if the types are matched. """ if not isinstance(rdd, BlockRDD): raise TypeError("Expected {0} for parameter rdd, got {1}." .format(BlockRDD, type(rdd))) if isinstance(rdd, DictRDD): if not isinstance(expected_dtype, dict): raise TypeError('Expected {0} for parameter ' 'expected_dtype, got {1}.' .format(dict, type(expected_dtype))) accept = True types = dict(list(zip(rdd.columns, rdd.dtype))) for key, values in expected_dtype.items(): if not isinstance(values, (tuple, list)): values = [values] accept = accept and types[key] in values return accept if not isinstance(expected_dtype, (tuple, list)): expected_dtype = [expected_dtype] return rdd.dtype in expected_dtype
0.000765
def parse(self, buf):
    '''parse a FD FDM buffer'''
    try:
        t = struct.unpack(self.pack_string, buf)
    except struct.error as msg:
        raise fgFDMError('unable to parse - %s' % msg)
    self.values = list(t)
0.008065
def _hour_angle_to_hours(times, hourangle, longitude, equation_of_time):
    """converts hour angles in degrees to hours as a numpy array"""
    naive_times = times.tz_localize(None)  # naive but still localized
    tzs = 1 / NS_PER_HR * (
        naive_times.astype(np.int64) - times.astype(np.int64))
    hours = (hourangle - longitude - equation_of_time / 4.) / 15. + 12. + tzs
    return np.asarray(hours)
0.002445
def is_merc_projection(srs): """ Return true if the map projection matches that used by VEarth, Google, OSM, etc. Is currently necessary for zoom-level shorthand for scale-denominator. """ if srs.lower() == '+init=epsg:900913': return True # observed srs = dict([p.split('=') for p in srs.split() if '=' in p]) # expected # note, common optional modifiers like +no_defs, +over, and +wkt # are not pairs and should not prevent matching gym = '+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null' gym = dict([p.split('=') for p in gym.split() if '=' in p]) for p in gym: if srs.get(p, None) != gym.get(p, None): return False return True
0.007585
def remove(self, **kwargs):
    '''
    :raises: :exc:`~dxpy.exceptions.DXError` if no project is associated with the object

    Permanently removes the associated remote object from the associated
    project.
    '''
    if self._proj is None:
        raise DXError("Remove called when a project ID was not associated with this object handler")

    dxpy.api.project_remove_objects(self._proj, {"objects": [self._dxid]},
                                    **kwargs)

    # Reset internal state
    self._dxid = None
    self._proj = None
    self._desc = {}
0.006515
def notes_master_part(self):
    """
    Return the |NotesMasterPart| object for this presentation. If the
    presentation does not have a notes master, one is created from a
    default template. The same single instance is returned on each call.
    """
    try:
        return self.part_related_by(RT.NOTES_MASTER)
    except KeyError:
        notes_master_part = NotesMasterPart.create_default(self.package)
        self.relate_to(notes_master_part, RT.NOTES_MASTER)
        return notes_master_part
0.003597
def start(self):
    """Begin listening for events from the Client and acting upon them.

    Note:
        If configuration has not already been loaded, it will be loaded
        immediately before starting to listen for events. Calling this
        method without having specified and/or loaded a configuration will
        result in completely default values being used.

    After all modules for this controller are loaded, the STARTUP event
    will be dispatched.
    """
    if not self.config and self.config_path is not None:
        self.load_config()
    self.running = True
    self.process_event("STARTUP", self.client, ())
0.003008
def delete_pool(hostname, username, password, name): ''' Delete an existing pool. hostname The host/address of the bigip device username The iControl REST username password The iControl REST password name The name of the pool which will be deleted ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} if __opts__['test']: return _test_output(ret, 'delete', params={ 'hostname': hostname, 'username': username, 'password': password, 'name': name, } ) #is this pool currently configured? existing = __salt__['bigip.list_pool'](hostname, username, password, name) # if it exists by name if existing['code'] == 200: deleted = __salt__['bigip.delete_pool'](hostname, username, password, name) # did we get rid of it? if deleted['code'] == 200: ret['result'] = True ret['comment'] = 'Pool was successfully deleted.' ret['changes']['old'] = existing['content'] ret['changes']['new'] = {} # something bad happened else: ret = _load_result(deleted, ret) # not found elif existing['code'] == 404: ret['result'] = True ret['comment'] = 'This pool already does not exist. No changes made.' ret['changes']['old'] = {} ret['changes']['new'] = {} else: ret = _load_result(existing, ret) return ret
0.001971
def parse_data(self, selected_region_data, selected_values, full_load=False, extension="gdm"): """ Parses all of the region data :param selected_region_data: the columns of region data that are needed :param selected_values: the selected values to be put in the matrix cells :param full_load: Specifies the method of parsing the data. If False then parser omits the parsing of zero(0) values in order to speed up and save memory. However, while creating the matrix, those zero values are going to be put into the matrix. (unless a row contains "all zero columns". This parsing is strongly recommended for sparse datasets. If the full_load parameter is True then all the zero(0) data are going to be read. :param extension: the extension of the region data files that are going to be parsed. :return: the resulting region dataframe """ regions = list(selected_region_data) if type(selected_values) is list: regions.extend(selected_values) else: regions.append(selected_values) files = self._get_files(extension, self.path) df = pd.DataFrame(dtype=float) cols = self.parse_schema(self.schema) print("Parsing the data files...") list_of_data = [] for f in tqdm(files): data = self.parse_single_data(f, cols, regions, selected_values, full_load) list_of_data.append(data) print("pre-concat") df = pd.concat(list_of_data) return df
0.006361
def trace_region(self, region_index):
    """Retrieves the properties of a trace region.

    Args:
      self (JLink): the ``JLink`` instance.
      region_index (int): the trace region index.

    Returns:
      An instance of ``JLinkTraceRegion`` describing the specified region.
    """
    cmd = enums.JLinkTraceCommand.GET_REGION_PROPS_EX
    region = structs.JLinkTraceRegion()
    region.RegionIndex = int(region_index)
    res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(region))
    if (res == 1):
        raise errors.JLinkException('Failed to get trace region.')
    return region
0.003044
def create(self, vips):
    """
    Method to create vip's

    :param vips: List containing vip's desired to be created on database
    :return: None
    """
    data = {'vips': vips}
    return super(ApiVipRequest, self).post('api/v3/vip-request/', data)
0.007018
def register_regex_entity(self, regex_str, domain=0):
    """
    A regular expression making use of python named group expressions.

    Example: (?P<Artist>.*)

    Args:
        regex_str(str): a string representing a regular expression as defined above
        domain(str): a string representing the domain you wish to add the entity to
    """
    if domain not in self.domains:
        self.register_domain(domain=domain)
    self.domains[domain].register_regex_entity(regex_str=regex_str)
0.007477
def container_fabric(container_maps=None, docker_client=None, clients=None, client_implementation=None): """ :param container_maps: Container map or a tuple / list thereof. :type container_maps: list[dockermap.map.config.main.ContainerMap] | dockermap.map.config.main.ContainerMap :param docker_client: Default Docker client instance. :type docker_client: dockerfabric.base.FabricClientConfiguration or docker.docker.Client :param clients: Optional dictionary of Docker client configuration objects. :type clients: dict[unicode | str, dockerfabric.base.FabricClientConfiguration] :param client_implementation: Client implementation to use (API or CLI). :type client_implementation: unicode | str :return: Container mapping client. :rtype: dockerfabric.base.FabricContainerClient """ ci = client_implementation or env.get('docker_fabric_implementation') or CLIENT_API if ci == CLIENT_API: return ContainerApiFabricClient(container_maps, docker_client, clients) elif ci == CLIENT_CLI: return ContainerCliFabricClient(container_maps, docker_client, clients) raise ValueError("Invalid client implementation.", ci)
0.005046
def set_sequestered(self, sequestered):
    """Sets the sequestered flag.

    arg:    sequestered (boolean): the new sequestered flag
    raise:  InvalidArgument - ``sequestered`` is invalid
    raise:  NoAccess - ``Metadata.isReadOnly()`` is ``true``
    *compliance: mandatory -- This method must be implemented.*

    """
    if sequestered is None:
        raise errors.NullArgument()
    if self.get_sequestered_metadata().is_read_only():
        raise errors.NoAccess()
    if not isinstance(sequestered, bool):
        raise errors.InvalidArgument()
    self._my_map['sequestered'] = sequestered
0.003053
def ensure_str(data: Union[str, bytes]) -> str:
    """
    Convert data to str if data are bytes

    :param data: Data
    :rtype: str
    """
    if isinstance(data, bytes):
        return str(data, 'utf-8')
    return data
0.004405
def height(cls, path):
    """
    Get the locally-stored block height
    """
    if os.path.exists(path):
        sb = os.stat(path)
        h = (sb.st_size / BLOCK_HEADER_SIZE) - 1
        return h
    else:
        return None
0.022222
def findNext(self, name=None, attrs={}, text=None, **kwargs):
    """Returns the first item that matches the given criteria and
    appears after this Tag in the document."""
    return self._findOne(self.findAllNext, name, attrs, text, **kwargs)
0.007752
def slicenet_internal(inputs, targets, target_space, hparams, run_decoder=True): """The slicenet model, main step used for training.""" with tf.variable_scope("slicenet"): # Project to hidden size if necessary if inputs.get_shape().as_list()[-1] != hparams.hidden_size: inputs = common_layers.conv_block( inputs, hparams.hidden_size, [((1, 1), (3, 3))], first_relu=False, padding="SAME", force2d=True) # Flatten inputs and encode. inputs = tf.expand_dims(common_layers.flatten4d3d(inputs), axis=2) inputs_mask = 1.0 - embedding_to_padding(inputs) inputs = common_layers.add_timing_signal(inputs) # Add position info. target_space_emb = embed_target_space(target_space, hparams.hidden_size) extra_layers = int(hparams.num_hidden_layers * 1.5) inputs_encoded = multi_conv_res( inputs, "SAME", "encoder", extra_layers, hparams, mask=inputs_mask) if not run_decoder: return inputs_encoded # Do the middle part. decoder_start, similarity_loss = slicenet_middle( inputs_encoded, targets, target_space_emb, inputs_mask, hparams) # Decode. decoder_final = multi_conv_res( decoder_start, "LEFT", "decoder", hparams.num_hidden_layers, hparams, mask=inputs_mask, source=inputs_encoded) return decoder_final, tf.reduce_mean(similarity_loss)
0.004202
def beacon(config): ''' Poll imgadm and compare available images ''' ret = [] # NOTE: lookup current images current_images = __salt__['imgadm.list'](verbose=True) # NOTE: apply configuration if IMGADM_STATE['first_run']: log.info('Applying configuration for imgadm beacon') _config = {} list(map(_config.update, config)) if 'startup_import_event' not in _config or not _config['startup_import_event']: IMGADM_STATE['images'] = current_images # NOTE: import events for uuid in current_images: event = {} if uuid not in IMGADM_STATE['images']: event['tag'] = "imported/{}".format(uuid) for label in current_images[uuid]: event[label] = current_images[uuid][label] if event: ret.append(event) # NOTE: delete events for uuid in IMGADM_STATE['images']: event = {} if uuid not in current_images: event['tag'] = "deleted/{}".format(uuid) for label in IMGADM_STATE['images'][uuid]: event[label] = IMGADM_STATE['images'][uuid][label] if event: ret.append(event) # NOTE: update stored state IMGADM_STATE['images'] = current_images # NOTE: disable first_run if IMGADM_STATE['first_run']: IMGADM_STATE['first_run'] = False return ret
0.001425
def find_next_word_ending(self, include_current_position=False, count=1, WORD=False): """ Return an index relative to the cursor position pointing to the end of the next word. Return `None` if nothing was found. """ if count < 0: return self.find_previous_word_ending(count=-count, WORD=WORD) if include_current_position: text = self.text_after_cursor else: text = self.text_after_cursor[1:] regex = _FIND_BIG_WORD_RE if WORD else _FIND_WORD_RE iterable = regex.finditer(text) try: for i, match in enumerate(iterable): if i + 1 == count: value = match.end(1) if include_current_position: return value else: return value + 1 except StopIteration: pass
0.003236
def base(self, du): """Return the base CLB for a given DU""" parameter = 'base' if parameter not in self._by: self._by[parameter] = {} for clb in self.upi.values(): if clb.floor == 0: self._by[parameter][clb.du] = clb return self._by[parameter][du]
0.005882
def download_url_with_progress(url, stream, disable_progress): """ Downloads a given url in chunks and writes to the provided stream (can be any io stream). Displays the progress bar for the download. """ resp = requests.get(url, timeout=float(os.environ.get('PIP_TIMEOUT', 2)), stream=True) resp.raw.decode_content = True progress = tqdm(unit="B", unit_scale=True, total=int(resp.headers.get('Content-Length', 0)), disable=disable_progress) for chunk in resp.iter_content(chunk_size=1024): if chunk: progress.update(len(chunk)) stream.write(chunk) progress.close()
0.007321
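One possible way to call download_url_with_progress above; the URL is only a placeholder, and the requests/tqdm/os imports are assumed to be present at module level.

import io

buffer = io.BytesIO()
download_url_with_progress('https://example.com/archive.tar.gz', buffer, disable_progress=True)
print(len(buffer.getvalue()), 'bytes downloaded')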
def split_value(val): """ Splits a value *val* into its significand and decimal exponent (magnitude) and returns them in a 2-tuple. *val* might also be a numpy array. Example: .. code-block:: python split_value(1) # -> (1.0, 0) split_value(0.123) # -> (1.23, -1) split_value(-42.5) # -> (-4.25, 1) a = np.array([1, 0.123, -42.5]) split_value(a) # -> ([1., 1.23, -4.25], [0, -1, 1]) The significand will be a float while magnitude will be an integer. *val* can be reconstructed via ``significand * 10**magnitude``. """ val = ensure_nominal(val) if not is_numpy(val): # handle 0 separately if val == 0: return (0., 0) mag = int(math.floor(math.log10(abs(val)))) sig = float(val) / (10.**mag) else: log = np.zeros(val.shape) np.log10(np.abs(val), out=log, where=(val != 0)) mag = np.floor(log).astype(np.int) sig = val.astype(np.float) / (10.**mag) return (sig, mag)
0.002899
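A self-contained sketch of the scalar branch of split_value, useful for checking the significand * 10**magnitude reconstruction; it deliberately omits the ensure_nominal/is_numpy helpers and the numpy path used by the original.

import math

def split_scalar(val):
    # scalar-only version of the decomposition above
    if val == 0:
        return 0.0, 0
    mag = int(math.floor(math.log10(abs(val))))
    return float(val) / 10 ** mag, mag

for v in (1, 0.123, -42.5):
    sig, mag = split_scalar(v)
    assert math.isclose(sig * 10 ** mag, v)
    print(v, '->', (sig, mag))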
def save(self, identifier=None, is_best=False, save_all=False): """ Stores checkpoint to a file. :param identifier: identifier for periodic checkpoint :param is_best: if True stores checkpoint to 'model_best.pth' :param save_all: if True stores checkpoint after completed training epoch """ def write_checkpoint(state, filename): filename = os.path.join(self.save_path, filename) logging.info(f'Saving model to {filename}') torch.save(state, filename) if self.distributed: model_state = self.model.module.state_dict() else: model_state = self.model.state_dict() state = { 'epoch': self.epoch, 'state_dict': model_state, 'optimizer': self.optimizer.state_dict(), 'scheduler': self.scheduler.state_dict(), 'loss': getattr(self, 'loss', None), } state = dict(list(state.items()) + list(self.save_info.items())) if identifier is not None: filename = self.checkpoint_filename % identifier write_checkpoint(state, filename) if is_best: filename = 'model_best.pth' write_checkpoint(state, filename) if save_all: filename = f'checkpoint_epoch_{self.epoch:03d}.pth' write_checkpoint(state, filename)
0.001407
def unduplicate_field_names(field_names): """Append a number to duplicate field names to make them unique. """ res = [] for k in field_names: if k in res: i = 1 while k + '_' + str(i) in res: i += 1 k += '_' + str(i) res.append(k) return res
0.003077
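A small usage example for unduplicate_field_names; it has no dependencies beyond the function itself.

print(unduplicate_field_names(['id', 'name', 'id', 'id']))
# -> ['id', 'name', 'id_1', 'id_2']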
def QA_indicator_SKDJ(DataFrame, N=9, M=3):
    """
    Slow stochastic (SKDJ) indicator.

    1. A reading above 80 suggests a pullback is likely; below 20, a rebound is likely.
    2. K crossing above D around the 20 level can be read as a buy signal.
    3. K crossing below D around the 80 level can be read as a sell signal.
    4. Signals generated while SKDJ oscillates around 50 carry little weight.
    """
    CLOSE = DataFrame['close']
    LOWV = LLV(DataFrame['low'], N)
    HIGHV = HHV(DataFrame['high'], N)
    RSV = EMA((CLOSE - LOWV) / (HIGHV - LOWV) * 100, M)
    K = EMA(RSV, M)
    D = MA(K, M)
    DICT = {'RSV': RSV, 'SKDJ_K': K, 'SKDJ_D': D}

    return pd.DataFrame(DICT)
0.00432
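A hedged way to exercise QA_indicator_SKDJ outside its own package: LLV/HHV/EMA/MA below are minimal pandas stand-ins for the QUANTAXIS helpers the original imports, so values may not match the library exactly.

import numpy as np
import pandas as pd

def LLV(series, n):   # lowest value over the window
    return series.rolling(n).min()

def HHV(series, n):   # highest value over the window
    return series.rolling(n).max()

def EMA(series, n):   # exponential moving average
    return series.ewm(span=n, adjust=False).mean()

def MA(series, n):    # simple moving average
    return series.rolling(n).mean()

rng = np.random.default_rng(0)
close = 10 + rng.standard_normal(100).cumsum() * 0.1
prices = pd.DataFrame({'close': close, 'high': close + 0.2, 'low': close - 0.2})
print(QA_indicator_SKDJ(prices).tail())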
def connect_get_namespaced_pod_portforward(self, name, namespace, **kwargs): # noqa: E501 """connect_get_namespaced_pod_portforward # noqa: E501 connect GET requests to portforward of Pod # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.connect_get_namespaced_pod_portforward(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the PodPortForwardOptions (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param int ports: List of ports to forward Required when using WebSockets :return: str If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.connect_get_namespaced_pod_portforward_with_http_info(name, namespace, **kwargs) # noqa: E501 else: (data) = self.connect_get_namespaced_pod_portforward_with_http_info(name, namespace, **kwargs) # noqa: E501 return data
0.001576
def _parse_conf(conf_file=_DEFAULT_CONF): ''' Parse a logrotate configuration file. Includes will also be parsed, and their configuration will be stored in the return dict, as if they were part of the main config file. A dict of which configs came from which includes will be stored in the 'include files' dict inside the return dict, for later reference by the user or module. ''' ret = {} mode = 'single' multi_names = [] multi = {} prev_comps = None with salt.utils.files.fopen(conf_file, 'r') as ifile: for line in ifile: line = salt.utils.stringutils.to_unicode(line).strip() if not line: continue if line.startswith('#'): continue comps = line.split() if '{' in line and '}' not in line: mode = 'multi' if len(comps) == 1 and prev_comps: multi_names = prev_comps else: multi_names = comps multi_names.pop() continue if '}' in line: mode = 'single' for multi_name in multi_names: ret[multi_name] = multi multi_names = [] multi = {} continue if mode == 'single': key = ret else: key = multi if comps[0] == 'include': if 'include files' not in ret: ret['include files'] = {} for include in os.listdir(comps[1]): if include not in ret['include files']: ret['include files'][include] = [] include_path = os.path.join(comps[1], include) include_conf = _parse_conf(include_path) for file_key in include_conf: ret[file_key] = include_conf[file_key] ret['include files'][include].append(file_key) prev_comps = comps if len(comps) > 2: key[comps[0]] = ' '.join(comps[1:]) elif len(comps) > 1: key[comps[0]] = _convert_if_int(comps[1]) else: key[comps[0]] = True return ret
0.000428
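A hypothetical round trip through _parse_conf, assuming a SaltStack environment where the module's salt.utils imports resolve; the config content below is made up for illustration.

import tempfile
import textwrap

conf = textwrap.dedent('''\
    weekly
    rotate 4
    /var/log/wtmp {
        monthly
        rotate 1
    }
''')
with tempfile.NamedTemporaryFile('w', suffix='.conf', delete=False) as fh:
    fh.write(conf)
print(_parse_conf(fh.name))
# -> {'weekly': True, 'rotate': 4, '/var/log/wtmp': {'monthly': True, 'rotate': 1}}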
def camelize(word):
    """Convert a word from lower_with_underscores to CamelCase.

    Args:
        word: The string to convert.

    Returns:
        The modified string.
    """
    # The stray '^' characters inside the character class were redundant, and
    # empty fragments are skipped so leading separators cannot raise IndexError.
    return ''.join(w[0].upper() + w[1:]
                   for w in re.sub('[^A-Za-z0-9:]+', ' ', word).split(' ')
                   if w)
0.003333
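Example calls for camelize, showing the behavior of the corrected version above.

print(camelize('device_type'))    # -> 'DeviceType'
print(camelize('foo bar baz'))    # -> 'FooBarBaz'
print(camelize('_private_name'))  # -> 'PrivateName'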
def browse(self, endpoint="hot", category_path="", seed="", q="", timerange="24hr", tag="", offset=0, limit=10): """Fetch deviations from public endpoints :param endpoint: The endpoint from which the deviations will be fetched (hot/morelikethis/newest/undiscovered/popular/tags) :param category_path: category path to fetch from :param q: Search query term :param timerange: The timerange :param tag: The tag to browse :param offset: the pagination offset :param limit: the pagination limit """ if endpoint == "hot": response = self._req('/browse/hot', { "category_path":category_path, "offset":offset, "limit":limit }) elif endpoint == "morelikethis": if seed: response = self._req('/browse/morelikethis', { "seed":seed, "category_path":category_path, "offset":offset, "limit":limit }) else: raise DeviantartError("No seed defined.") elif endpoint == "newest": response = self._req('/browse/newest', { "category_path":category_path, "q":q, "offset":offset, "limit":limit }) elif endpoint == "undiscovered": response = self._req('/browse/undiscovered', { "category_path":category_path, "offset":offset, "limit":limit }) elif endpoint == "popular": response = self._req('/browse/popular', { "category_path":category_path, "q":q, "timerange":timerange, "offset":offset, "limit":limit }) elif endpoint == "tags": if tag: response = self._req('/browse/tags', { "tag":tag, "offset":offset, "limit":limit }) else: raise DeviantartError("No tag defined.") else: raise DeviantartError("Unknown endpoint.") deviations = [] for item in response['results']: d = Deviation() d.from_dict(item) deviations.append(d) return { "results" : deviations, "has_more" : response['has_more'], "next_offset" : response['next_offset'] }
0.011236
def dist(self, src, tar): """Return the NCD between two strings using bzip2 compression. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison Returns ------- float Compression distance Examples -------- >>> cmp = NCDbz2() >>> cmp.dist('cat', 'hat') 0.06666666666666667 >>> cmp.dist('Niall', 'Neil') 0.03125 >>> cmp.dist('aluminum', 'Catalan') 0.17647058823529413 >>> cmp.dist('ATCG', 'TAGC') 0.03125 """ if src == tar: return 0.0 src = src.encode('utf-8') tar = tar.encode('utf-8') src_comp = bz2.compress(src, self._level)[10:] tar_comp = bz2.compress(tar, self._level)[10:] concat_comp = bz2.compress(src + tar, self._level)[10:] concat_comp2 = bz2.compress(tar + src, self._level)[10:] return ( min(len(concat_comp), len(concat_comp2)) - min(len(src_comp), len(tar_comp)) ) / max(len(src_comp), len(tar_comp))
0.001709
def begin_update(self, x_data, h_0=None, drop=0.0): """Return the output of the wrapped PyTorch model for the given input, along with a callback to handle the backward pass. """ x_var = torch.autograd.Variable(xp2torch(x_data), requires_grad=True) # Make prediction out, h_n = self._model(x_var, h_0) # Shapes will be: # out = seq_len, batch, hidden_size * num_directions # h_n = num_layers * num_directions, batch, hidden_size def backward_pytorch_rnn(d_data, sgd=None): dy_data, _ = d_data dout = xp2torch(dy_data) torch.autograd.backward((out,), grad_tensors=(dout,)) if sgd is not None: if self._optimizer is None: self._optimizer = self._create_optimizer(sgd) self._optimizer.step() self._optimizer.zero_grad() return torch2xp(x_var.grad) return (torch2xp(out), h_n), backward_pytorch_rnn
0.001976
def blobs(shape: List[int], porosity: float = 0.5, blobiness: int = 1): """ Generates an image containing amorphous blobs Parameters ---------- shape : list The size of the image to generate in [Nx, Ny, Nz] where N is the number of voxels porosity : float If specified, this will threshold the image to the specified value prior to returning. If ``None`` is specified, then the scalar noise field is converted to a uniform distribution and returned without thresholding. blobiness : int or list of ints(default = 1) Controls the morphology of the blobs. A higher number results in a larger number of small blobs. If a list is supplied then the blobs are anisotropic. Returns ------- image : ND-array A boolean array with ``True`` values denoting the pore space See Also -------- norm_to_uniform """ blobiness = sp.array(blobiness) shape = sp.array(shape) if sp.size(shape) == 1: shape = sp.full((3, ), int(shape)) sigma = sp.mean(shape)/(40*blobiness) im = sp.random.random(shape) im = spim.gaussian_filter(im, sigma=sigma) im = norm_to_uniform(im, scale=[0, 1]) if porosity: im = im < porosity return im
0.000769
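A usage sketch for blobs, assuming the module-level names it relies on (sp for scipy, spim for scipy.ndimage, and a norm_to_uniform helper) are in scope, as they would be inside its home package.

im = blobs(shape=[128, 128], porosity=0.6, blobiness=2)
print(im.shape, im.dtype, im.mean())  # roughly 60% True voxels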
def play_list_detail(id, limit=20):
    """Fetch all songs in a playlist. The curated-playlist listing only exposes
    playlist names and IDs, not their tracks, so this endpoint takes a playlist
    ID and returns every song in that playlist.

    :param id: the playlist ID
    :param limit: (optional) maximum number of rows to return, defaults to 20
    """
    if id is None:
        raise ParamsError()

    r = NCloudBot()
    r.method = 'PLAY_LIST_DETAIL'
    r.data = {'id': id, 'limit': limit, "csrf_token": ""}
    r.send()
    return r.response
0.002618
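A hypothetical call to play_list_detail; the playlist ID below is a placeholder, and NCloudBot/ParamsError are assumed to come from the surrounding module.

resp = play_list_detail(123456789, limit=5)
print(resp)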