text: string (lengths 78 to 104k)
score: float64 (range 0 to 0.18)
def toc(self):
    """
    Returns table of contents as a block_token.List instance.
    """
    from mistletoe.block_token import List

    def get_indent(level):
        if self.omit_title:
            level -= 1
        return ' ' * 4 * (level - 1)

    def build_list_item(heading):
        level, content = heading
        template = '{indent}- {content}\n'
        return template.format(indent=get_indent(level), content=content)

    return List([build_list_item(heading) for heading in self._headings])
0.005338
def is_git_file(cls, path, name):
    """Determine if file is known by git."""
    os.chdir(path)
    p = subprocess.Popen(['git', 'ls-files', '--error-unmatch', name],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    p.wait()
    return p.returncode == 0
0.006515
def l2traceroute_input_protocolType_IP_l4_dest_port(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    l2traceroute = ET.Element("l2traceroute")
    config = l2traceroute
    input = ET.SubElement(l2traceroute, "input")
    protocolType = ET.SubElement(input, "protocolType")
    IP = ET.SubElement(protocolType, "IP")
    l4_dest_port = ET.SubElement(IP, "l4-dest-port")
    l4_dest_port.text = kwargs.pop('l4_dest_port')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
0.003378
def match(self, sampling_req):
    """
    Determines whether or not this sampling rule applies to
    the incoming request based on some of the request's parameters.
    Any ``None`` parameter provided will be considered an implicit match.
    """
    if sampling_req is None:
        return False

    host = sampling_req.get('host', None)
    method = sampling_req.get('method', None)
    path = sampling_req.get('path', None)
    service = sampling_req.get('service', None)
    service_type = sampling_req.get('service_type', None)

    return (not host or wildcard_match(self._host, host)) \
        and (not method or wildcard_match(self._method, method)) \
        and (not path or wildcard_match(self._path, path)) \
        and (not service or wildcard_match(self._service, service)) \
        and (not service_type or wildcard_match(self._service_type, service_type))
0.003175
def read_playlist_file(self, stationFile=''):
    """ Read a csv file

    Returns:
        number x  -  number of stations, or
              -1  -  playlist is malformed
              -2  -  playlist not found
    """
    prev_file = self.stations_file
    prev_format = self.new_format
    self.new_format = False

    ret = 0
    stationFile, ret = self._get_playlist_abspath_from_data(stationFile)
    if ret < 0:
        return ret

    self._reading_stations = []
    with open(stationFile, 'r') as cfgfile:
        try:
            for row in csv.reader(filter(lambda row: row[0] != '#', cfgfile),
                                  skipinitialspace=True):
                if not row:
                    continue
                try:
                    name, url = [s.strip() for s in row]
                    self._reading_stations.append((name, url, ''))
                except:
                    name, url, enc = [s.strip() for s in row]
                    self._reading_stations.append((name, url, enc))
                    self.new_format = True
        except:
            self._reading_stations = []
            self.new_format = prev_format
            return -1

    self.stations = list(self._reading_stations)
    self._reading_stations = []
    self._get_playlist_elements(stationFile)
    self.previous_stations_file = prev_file
    self._is_playlist_in_config_dir()
    self.number_of_stations = len(self.stations)
    self.dirty_playlist = False
    if logger.isEnabledFor(logging.DEBUG):
        if self.new_format:
            logger.debug('Playlist is in new format')
        else:
            logger.debug('Playlist is in old format')
    return self.number_of_stations
0.003257
def toxlsx(tbl, filename, sheet=None, encoding=None):
    """
    Write a table to a new Excel .xlsx file.
    """
    import openpyxl
    if encoding is None:
        encoding = locale.getpreferredencoding()
    wb = openpyxl.Workbook(write_only=True)
    ws = wb.create_sheet(title=sheet)
    for row in tbl:
        ws.append(row)
    wb.save(filename)
0.002801
def get_pulls(self, state=github.GithubObject.NotSet,
              sort=github.GithubObject.NotSet,
              direction=github.GithubObject.NotSet,
              base=github.GithubObject.NotSet,
              head=github.GithubObject.NotSet):
    """
    :calls: `GET /repos/:owner/:repo/pulls <http://developer.github.com/v3/pulls>`_
    :param state: string
    :param sort: string
    :param direction: string
    :param base: string
    :param head: string
    :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.PullRequest.PullRequest`
    """
    assert state is github.GithubObject.NotSet or isinstance(state, (str, unicode)), state
    assert sort is github.GithubObject.NotSet or isinstance(sort, (str, unicode)), sort
    assert direction is github.GithubObject.NotSet or isinstance(direction, (str, unicode)), direction
    assert base is github.GithubObject.NotSet or isinstance(base, (str, unicode)), base
    assert head is github.GithubObject.NotSet or isinstance(head, (str, unicode)), head
    url_parameters = dict()
    if state is not github.GithubObject.NotSet:
        url_parameters["state"] = state
    if sort is not github.GithubObject.NotSet:
        url_parameters["sort"] = sort
    if direction is not github.GithubObject.NotSet:
        url_parameters["direction"] = direction
    if base is not github.GithubObject.NotSet:
        url_parameters["base"] = base
    if head is not github.GithubObject.NotSet:
        url_parameters["head"] = head
    return github.PaginatedList.PaginatedList(
        github.PullRequest.PullRequest,
        self._requester,
        self.url + "/pulls",
        url_parameters
    )
0.00575
def stash(self, storage, url):
    """Stores the uploaded file in a temporary storage location."""
    result = {}
    if self.is_valid():
        upload = self.cleaned_data['upload']
        name = storage.save(upload.name, upload)
        result['filename'] = os.path.basename(name)
        try:
            result['url'] = storage.url(name)
        except NotImplementedError:
            result['url'] = None
        result['stored'] = serialize_upload(name, storage, url)
    return result
0.00369
def put(self, url, params=None, headers=None, content=None, form_content=None):
    # type: (str, Optional[Dict[str, str]], Optional[Dict[str, str]], Any, Optional[Dict[str, Any]]) -> ClientRequest
    """Create a PUT request object.

    :param str url: The request URL.
    :param dict params: Request URL parameters.
    :param dict headers: Headers
    :param dict form_content: Form content
    """
    request = self._request('PUT', url, params, headers, content, form_content)
    return request
0.009294
def query(database, query, **client_args):
    '''
    Execute a query.

    database
        Name of the database to query on.

    query
        InfluxQL query string.
    '''
    client = _client(**client_args)
    _result = client.query(query, database=database)

    if isinstance(_result, collections.Sequence):
        return [_pull_query_results(_query_result)
                for _query_result in _result if _query_result]
    return [_pull_query_results(_result) if _result else {}]
0.004202
def get_indexed_node(manager, prop, value, node_type='Node',
                     lookup_func='CONTAINS', legacy=True):
    """
    :param manager: Neo4jDBSessionManager
    :param prop: Indexed property
    :param value: Indexed value
    :param node_type: Label used for index
    :param lookup_func: STARTS WITH | CONTAINS | ENDS WITH
    :param legacy: Backwards compatibility

    :type manager: Neo4jDBSessionManager
    :type prop: str
    :type value: str
    :type node_type: str
    :type lookup_func: str
    :type legacy: bool

    :return: Dict or Node object
    :rtype: dict|Node
    """
    q = """
        MATCH (n:{label})
        WHERE LOWER(n.{prop}) {lookup_func} LOWER({{value}})
        RETURN n
        """.format(label=node_type, prop=prop, lookup_func=lookup_func)
    with manager.session as s:
        for result in s.run(q, {'value': value}):
            if legacy:
                yield result['n'].properties
            else:
                yield result['n']
0.002062
def get_regularization_penalty(self) -> Union[float, torch.Tensor]:
    """
    Computes the regularization penalty for the model.
    Returns 0 if the model was not configured to use regularization.
    """
    if self._regularizer is None:
        return 0.0
    else:
        return self._regularizer(self)
0.005865
def to_half(b:Collection[Tensor])->Collection[Tensor]:
    "Recursively map lists of tensors in `b` to FP16."
    if is_listy(b): return [to_half(o) for o in b]
    return b.half() if b.dtype not in [torch.int64, torch.int32, torch.int16] else b
0.020325
def remove_token(self, *, payer_id, credit_card_token_id):
    """
    This feature allows you to delete a tokenized credit card register.

    Args:
        payer_id:
        credit_card_token_id:

    Returns:

    """
    payload = {
        "language": self.client.language.value,
        "command": PaymentCommand.REMOVE_TOKEN.value,
        "merchant": {
            "apiLogin": self.client.api_login,
            "apiKey": self.client.api_key
        },
        "removeCreditCardToken": {
            "payerId": payer_id,
            "creditCardTokenId": credit_card_token_id
        },
        "test": self.client.is_test
    }
    return self.client._post(self.url, json=payload)
0.002591
def acctran(tree, character, feature=PARS_STATES):
    """
    ACCTRAN (accelerated transformation) (Farris, 1970) aims at reducing the
    number of ambiguities in the parsimonious result. ACCTRAN forces the state
    changes to be performed as close to the root as possible, and therefore
    prioritises the reverse mutations.

    if N is not a tip:
        L, R <- left and right children of N
        if intersection(S(N), S(L)) is not empty:
            S(L) <- intersection(S(N), S(L))
        if intersection(S(N), S(R)) is not empty:
            S(R) <- intersection(S(N), S(R))
        ACCTRAN(L)
        ACCTRAN(R)

    :param tree: ete3.Tree, the tree of interest
    :param character: str, character for which the parsimonious states are reconstructed
    :return: void, adds get_personalized_feature_name(feature, PARS_STATES) feature to the tree nodes
    """
    ps_feature_down = get_personalized_feature_name(character, BU_PARS_STATES)

    for node in tree.traverse('preorder'):
        if node.is_root():
            node.add_feature(feature, getattr(node, ps_feature_down))
        node_states = getattr(node, feature)
        for child in node.children:
            child_states = getattr(child, ps_feature_down)
            state_intersection = node_states & child_states
            child.add_feature(feature,
                              state_intersection if state_intersection else child_states)
0.004313
def stop():
    '''
    Stop KodeDrive daemon.
    '''
    output, err = cli_syncthing_adapter.sys(exit=True)
    click.echo("%s" % output, err=err)
0.02963
def InputSplines(seq_length, n_bases=10, name=None, **kwargs):
    """Input placeholder for array returned by `encodeSplines`

    Wrapper for: `keras.layers.Input((seq_length, n_bases), name=name, **kwargs)`
    """
    return Input((seq_length, n_bases), name=name, **kwargs)
0.00722
def from_offset(cls, chunk_type, stream_rdr, offset):
    """
    Return a _pHYsChunk instance containing the image resolution
    extracted from the pHYs chunk in *stream* at *offset*.
    """
    horz_px_per_unit = stream_rdr.read_long(offset)
    vert_px_per_unit = stream_rdr.read_long(offset, 4)
    units_specifier = stream_rdr.read_byte(offset, 8)
    return cls(chunk_type, horz_px_per_unit, vert_px_per_unit, units_specifier)
0.004098
def name(self):
    """Get the name associated with these credentials"""
    return self.inquire(name=True, lifetime=False, usage=False, mechs=False).name
0.010526
def get_output(self, buildroot_id):
    """
    Build the 'output' section of the metadata.

    :return: list, Output instances
    """

    def add_buildroot_id(output):
        logfile, metadata = output
        metadata.update({'buildroot_id': buildroot_id})
        return Output(file=logfile, metadata=metadata)

    def add_log_type(output, arch):
        logfile, metadata = output
        metadata.update({'type': 'log', 'arch': arch})
        return Output(file=logfile, metadata=metadata)

    arch = os.uname()[4]
    output_files = [add_log_type(add_buildroot_id(metadata), arch)
                    for metadata in self.get_logs()]

    # Parent of squashed built image is base image
    image_id = self.workflow.builder.image_id
    parent_id = None
    if not self.workflow.builder.base_from_scratch:
        parent_id = self.workflow.builder.base_image_inspect['Id']

    # Read config from the registry using v2 schema 2 digest
    registries = self.workflow.push_conf.docker_registries
    if registries:
        config = copy.deepcopy(registries[0].config)
    else:
        config = {}

    # We don't need container_config section
    if config and 'container_config' in config:
        del config['container_config']

    repositories, typed_digests = self.get_repositories_and_digests()
    tags = set(image.tag for image in self.workflow.tag_conf.images)
    metadata, output = self.get_image_output()
    metadata.update({
        'arch': arch,
        'type': 'docker-image',
        'components': self.get_image_components(),
        'extra': {
            'image': {
                'arch': arch,
            },
            'docker': {
                'id': image_id,
                'parent_id': parent_id,
                'repositories': repositories,
                'layer_sizes': self.workflow.layer_sizes,
                'tags': list(tags),
                'config': config,
                'digests': typed_digests
            },
        },
    })

    if self.workflow.builder.base_from_scratch:
        del metadata['extra']['docker']['parent_id']
    if not config:
        del metadata['extra']['docker']['config']
    if not typed_digests:
        del metadata['extra']['docker']['digests']

    # Add the 'docker save' image to the output
    image = add_buildroot_id(output)
    output_files.append(image)

    # add operator manifests to output
    operator_manifests_path = (self.workflow.postbuild_results
                               .get(PLUGIN_EXPORT_OPERATOR_MANIFESTS_KEY))
    if operator_manifests_path:
        operator_manifests_file = open(operator_manifests_path)
        manifests_metadata = self.get_output_metadata(operator_manifests_path,
                                                      OPERATOR_MANIFESTS_ARCHIVE)
        operator_manifests_output = Output(file=operator_manifests_file,
                                           metadata=manifests_metadata)
        # We use log type here until a more appropriate type name is supported by koji
        operator_manifests_output.metadata.update({'arch': arch, 'type': 'log'})
        operator_manifests = add_buildroot_id(operator_manifests_output)
        output_files.append(operator_manifests)

    return output_files
0.001695
def directory_duplicates(directory, hash_type='md5', **kwargs):
    """
    Find all duplicates in a directory. Will return a list, in that list
    are lists of duplicate files.

    .. code:: python

        dups = reusables.directory_duplicates('C:\\Users\\Me\\Pictures')

        print(len(dups))
        # 56
        print(dups)
        # [['C:\\Users\\Me\\Pictures\\IMG_20161127.jpg',
        #   'C:\\Users\\Me\\Pictures\\Phone\\IMG_20161127.jpg'], ...

    :param directory: Directory to search
    :param hash_type: Type of hash to perform
    :param kwargs: Arguments to pass to find_files to narrow file types
    :return: list of lists of dups"""
    size_map, hash_map = defaultdict(list), defaultdict(list)

    for item in find_files(directory, **kwargs):
        file_size = os.path.getsize(item)
        size_map[file_size].append(item)

    for possible_dups in (v for v in size_map.values() if len(v) > 1):
        for each_item in possible_dups:
            item_hash = file_hash(each_item, hash_type=hash_type)
            hash_map[item_hash].append(each_item)

    return [v for v in hash_map.values() if len(v) > 1]
0.00088
def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
    """Initializes a urllib3 PoolManager.

    This method should not be called from user code, and is only
    exposed for use when subclassing the
    :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

    :param connections: The number of urllib3 connection pools to cache.
    :param maxsize: The maximum number of connections to save in the pool.
    :param block: Block when no free connections are available.
    :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
    """
    # save these values for pickling
    self._pool_connections = connections
    self._pool_maxsize = maxsize
    self._pool_block = block

    self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
                                   block=block, strict=True, **pool_kwargs)
0.004193
def build_swagger_12_endpoints(resource_listing, api_declarations):
    """
    :param resource_listing: JSON representing a Swagger 1.2 resource listing
    :type resource_listing: dict
    :param api_declarations: JSON representing Swagger 1.2 api declarations
    :type api_declarations: dict
    :rtype: iterable of :class:`pyramid_swagger.model.PyramidEndpoint`
    """
    yield build_swagger_12_resource_listing(resource_listing)

    for name, filepath in api_declarations.items():
        with open(filepath) as input_file:
            yield build_swagger_12_api_declaration(
                name, simplejson.load(input_file))
0.001575
def parse_yaml(self, y):
    '''Parse a YAML specification of a condition into this object.'''
    self.sequence = int(y['sequence'])
    self.target_component = \
        TargetExecutionContext().parse_yaml(y['targetComponent'])
    if RTS_EXT_NS_YAML + 'properties' in y:
        for p in y.get(RTS_EXT_NS_YAML + 'properties'):
            if 'value' in p:
                value = p['value']
            else:
                value = None
            self._properties[p['name']] = value
    return self
0.005396
def render_to_console(self, message: str, **kwargs):
    """
    Renders the specified message to the console using Jinja2 template
    rendering with the kwargs as render variables. The message will also
    be dedented prior to rendering in the same fashion as other Cauldron
    template rendering actions.

    :param message: Template string to be rendered.
    :param kwargs: Variables to be used in rendering the template.
    """
    rendered = templating.render(message, **kwargs)
    return self.write_to_console(rendered)
0.003356
def artifacts(self):
    """
    Property for accessing artifact manager of the current job.

    :return: instance of :class:`yagocd.resources.artifact.ArtifactManager`
    :rtype: yagocd.resources.artifact.ArtifactManager
    """
    return ArtifactManager(
        session=self._session,
        pipeline_name=self.pipeline_name,
        pipeline_counter=self.pipeline_counter,
        stage_name=self.stage_name,
        stage_counter=self.stage_counter,
        job_name=self.data.name
    )
0.00365
def name(self):
    """Table name used in requests.

    For example:

    .. literalinclude:: snippets_table.py
        :start-after: [START bigtable_table_name]
        :end-before: [END bigtable_table_name]

    .. note::

        This property will not change if ``table_id`` does not, but the
        return value is not cached.

    The table name is of the form
    ``"projects/../instances/../tables/{table_id}"``

    :rtype: str
    :returns: The table name.
    """
    project = self._instance._client.project
    instance_id = self._instance.instance_id
    table_client = self._instance._client.table_data_client
    return table_client.table_path(
        project=project, instance=instance_id, table=self.table_id
    )
0.002463
def _read_file(self, filename):
    """ read a Python object from a cache file.

    Reads a pickled object from disk and returns it.

    :param filename: Name of the file that should be read.
    :type filename: str
    :rtype: object
    """
    if self.__compression:
        f = gzip.GzipFile(filename, "rb")
    else:
        f = open(filename, "rb")
    res = pickle.load(f)
    f.close()
    return res
0.004246
def cursor(self):
    """
    Create a new ``Cursor`` instance associated with this ``Connection``

    :return: A new ``Cursor`` instance
    """
    self._assert_valid()
    c = Cursor(self.impl.cursor())
    self.cursors.add(c)
    return c
0.007273
def get_splice_data():
    """
    Load the splice dataset, split it into X and y, and then call the label
    encoder to get an integer y column.

    :return:
    """
    df = pd.read_csv('source_data/splice/splice.csv')
    X = df.reindex(columns=[x for x in df.columns.values if x != 'class'])
    X['dna'] = X['dna'].map(lambda x: list(str(x).strip()))
    for idx in range(60):
        X['dna_%d' % (idx, )] = X['dna'].map(lambda x: x[idx])
    del X['dna']

    y = df.reindex(columns=['class'])
    y = preprocessing.LabelEncoder().fit_transform(y.values.reshape(-1, ))

    # this data is truly categorical, with no known concept of ordering
    mapping = None

    return X, y, mapping
0.002882
def check_inclusions(item, included=[], excluded=[]):
    """Everything passes if both are empty, otherwise, we have to check if \
empty or is present."""
    if (len(included) == 0):
        if len(excluded) == 0 or item not in excluded:
            return True
        else:
            return False
    else:
        if item in included:
            return True
    return False
0.002597
def _recursive_terminate_without_psutil(process):
    """Terminate a process and its descendants. """
    try:
        _recursive_terminate(process.pid)
    except OSError as e:
        warnings.warn("Failed to kill subprocesses on this platform. Please "
                      "install psutil: https://github.com/giampaolo/psutil")
        # In case we cannot introspect the children, we fall back to the
        # classic Process.terminate.
        process.terminate()
    process.join()
0.002033
def calculate(self, **state):
    """
    Calculate the density at the specified temperature, pressure, and
    composition.

    :param T: [K] temperature
    :param P: [Pa] pressure
    :param x: [mole fraction] dictionary of compounds and mole fractions

    :returns: [kg/m3] density

    The **state parameter contains the keyword argument(s) specified above\
    that are used to describe the state of the material.
    """
    super().calculate(**state)
    mm_average = 0.0
    for compound, molefraction in state["x"].items():
        mm_average += molefraction * mm(compound)
    mm_average /= 1000.0

    return mm_average * state["P"] / R / state["T"]
0.002751
def get_object(self):
    """Implements the GetObjectMixin interface and calls
    :meth:`DBObjectMixin.get_query`.

    Using this mixin requires usage of a response handler capable of
    serializing SQLAlchemy query result objects.

    :returns: Typically a SQLALchemy Query result.
    :rtype: mixed

    .. seealso::
        :meth:`DBObjectMixin.get_query`
        :meth:`DBObjectMixin.filter_by_id`
        :meth:`DBObjectMixin.get_result`
    """
    query = self.get_query()
    query = self.filter_by_id(query)
    return self.get_result(query)
0.004975
async def start_serving(self, connection_config: ConnectionConfig,
                        loop: Optional[asyncio.AbstractEventLoop] = None) -> None:
    """
    Start serving this :class:`~lahja.endpoint.Endpoint` so that it can
    receive events. Await until the :class:`~lahja.endpoint.Endpoint` is ready.
    """
    self.start_serving_nowait(connection_config, loop)
    await self.wait_until_serving()
0.012931
def _x_credentials_parser(credentials, data):
    """
    We need to override this method to fix Facebooks naming deviation.
    """
    # Facebook returns "expires" instead of "expires_in".
    credentials.expire_in = data.get('expires')
    if data.get('token_type') == 'bearer':
        # TODO: cls is not available here, hardcode for now.
        credentials.token_type = 'Bearer'
    return credentials
0.004484
def set_popup_menu(self, menu):
    '''set a popup menu on the frame'''
    self.popup_menu = menu
    self.in_queue.put(MPImagePopupMenu(menu))
0.012821
def main(argv=None):
    '''TEST ONLY: this is called if run from command line'''
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input_file', required=True)
    parser.add_argument('--input_file_format', default='sequence')
    parser.add_argument('--input_data_type', default='json')
    parser.add_argument('--input_separator', default='\t')
    parser.add_argument('-o', '--output_dir', required=True)
    parser.add_argument('--output_file_format', default='sequence')
    parser.add_argument('--output_data_type', default='json')
    parser.add_argument('--output_separator', default='\t')
    args = parser.parse_args()

    # can be inconvenient to specify tab on the command line
    args.input_separator = "\t" if args.input_separator == 'tab' else args.input_separator
    args.output_separator = "\t" if args.output_separator == 'tab' else args.output_separator

    sc = SparkContext(appName="fileUtil")
    fUtil = FileUtil(sc)

    ## CONFIG LOAD
    input_kwargs = {"file_format": args.input_file_format,
                    "data_type": args.input_data_type}
    parse_kwargs = {"separator": args.input_separator}
    load_kwargs = merge_dicts(input_kwargs, parse_kwargs)

    ## LOAD
    rdd = fUtil.load_file(args.input_file, **load_kwargs)

    ## CONFIG SAVE
    output_kwargs = {"file_format": args.output_file_format,
                     "data_type": args.output_data_type}
    emit_kwargs = {"separator": args.output_separator}
    save_kwargs = merge_dicts(output_kwargs, emit_kwargs)

    ## SAVE
    fUtil.save_file(rdd, args.output_dir, **save_kwargs)
0.00875
def _compare_vector(arr1, arr2, rel_tol):
    """
    Compares two vectors (python lists) for approximate equality.

    Each array contains floats or strings convertible to floats

    This function returns True if both arrays are of the same length
    and each value is within the given relative tolerance.
    """
    length = len(arr1)
    if len(arr2) != length:
        return False

    for i in range(length):
        element_1 = float(arr1[i])
        element_2 = float(arr2[i])

        diff = abs(abs(element_1) - abs(element_2))
        if diff != 0.0:
            rel = _reldiff(element_1, element_2)

            # For a basis set, a relatively coarse comparison
            # should be acceptable
            if rel > rel_tol:
                return False

    return True
0.001267
def iter_transform(filename, key):
    """Generate encrypted file with given key.

    This generator function reads the file in chunks and encrypts them using
    AES-CTR, with the specified key.

    :param filename: The name of the file to encrypt.
    :type filename: str
    :param key: The key used to encrypt the file.
    :type key: str
    :returns: A generator that produces encrypted file chunks.
    :rtype: generator
    """
    # We are not specifying the IV here.
    aes = AES.new(key, AES.MODE_CTR, counter=Counter.new(128))
    with open(filename, 'rb+') as f:
        for chunk in iter(lambda: f.read(CHUNK_SIZE), b''):
            yield aes.encrypt(chunk), f
0.001464
def _get_ngrams(n, text):
    """Calculates n-grams.

    Args:
        n: which n-grams to calculate
        text: An array of tokens

    Returns:
        A set of n-grams
    """
    ngram_set = set()
    text_length = len(text)
    max_index_ngram_start = text_length - n
    for i in range(max_index_ngram_start + 1):
        ngram_set.add(tuple(text[i:i + n]))
    return ngram_set
0.01983
def _ssl_login(self):
    """ Authenticate to the /ssllogin endpoint with Client SSL authentication.

    :returns: deferred that when fired returns a dict from sslLogin
    """
    method = treq.post
    agent = self._ssl_agent()
    return self._request_login(method, agent=agent)
0.006369
def rm_filesystems(name, device, config='/etc/filesystems'):
    '''
    .. versionadded:: 2018.3.3

    Remove the mount point from the filesystems

    CLI Example:

    .. code-block:: bash

        salt '*' mount.rm_filesystems /mnt/foo /dev/sdg
    '''
    modified = False
    view_lines = []

    if 'AIX' not in __grains__['kernel']:
        return modified

    criteria = _FileSystemsEntry(name=name, dev=device)
    try:
        fsys_filedict = _filesystems(config, False)
        for fsys_view in six.viewitems(fsys_filedict):
            try:
                if criteria.match(fsys_view):
                    modified = True
                else:
                    view_lines.append(fsys_view)
            except _FileSystemsEntry.ParseError:
                view_lines.append(fsys_view)
    except (IOError, OSError) as exc:
        raise CommandExecutionError("Couldn't read from {0}: {1}".format(config, exc))

    if modified:
        try:
            with salt.utils.files.fopen(config, 'wb') as ofile:
                for fsys_view in view_lines:
                    entry = fsys_view[1]
                    mystrg = _FileSystemsEntry.dict_to_lines(entry)
                    ofile.writelines(salt.utils.data.encode(mystrg))
        except (IOError, OSError) as exc:
            raise CommandExecutionError("Couldn't write to {0}: {1}".format(config, exc))

    return modified
0.002143
def base_path(main_path, fmt):
    """Given a path and options for a format (ext, suffix, prefix),
    return the corresponding base path"""
    if not fmt:
        return os.path.splitext(main_path)[0]

    fmt = long_form_one_format(fmt)
    fmt_ext = fmt['extension']
    suffix = fmt.get('suffix')
    prefix = fmt.get('prefix')

    base, ext = os.path.splitext(main_path)
    if ext != fmt_ext:
        raise InconsistentPath(u"Notebook path '{}' was expected to have extension '{}'"
                               .format(main_path, fmt_ext))

    if suffix:
        if not base.endswith(suffix):
            raise InconsistentPath(u"Notebook name '{}' was expected to end with suffix '{}'"
                                   .format(base, suffix))
        base = base[:-len(suffix)]

    if not prefix:
        return base

    prefix_dir, prefix_file_name = os.path.split(prefix)
    notebook_dir, notebook_file_name = os.path.split(base)
    sep = base[len(notebook_dir):-len(notebook_file_name)]

    if prefix_file_name:
        if not notebook_file_name.startswith(prefix_file_name):
            raise InconsistentPath(u"Notebook name '{}' was expected to start with prefix '{}'"
                                   .format(notebook_file_name, prefix_file_name))
        notebook_file_name = notebook_file_name[len(prefix_file_name):]

    if prefix_dir:
        if not notebook_dir.endswith(prefix_dir):
            raise InconsistentPath(u"Notebook directory '{}' was expected to end with directory prefix '{}'"
                                   .format(notebook_dir, prefix_dir))
        notebook_dir = notebook_dir[:-len(prefix_dir)]

    if not notebook_dir:
        return notebook_file_name

    # Does notebook_dir ends with a path separator?
    if notebook_dir[-1:] == sep:
        return notebook_dir + notebook_file_name

    return notebook_dir + sep + notebook_file_name
0.003836
def merged_series(cls, *series, **kwargs):
    '''Merge ``series`` and return the results without storing data in
    the backend server.'''
    router, backend = cls.check_router(None, *series)
    if backend:
        target = router.register(cls(), backend)
        router.session().add(target)
        target._merge(*series, **kwargs)
        backend = target.backend
        return backend.execute(
            backend.structure(target).irange_and_delete(),
            target.load_data)
0.003717
def _call(self, target, method, target_class=None, single_result=True,
          raw=False, files=None, **kwargs):
    """
    Low-level call to HasOffers API.

    :param target_class: type of resulting object/objects.
    """
    if target_class is None:
        target_class = target
    params = prepare_query_params(
        NetworkToken=self.network_token,
        NetworkId=self.network_id,
        Target=target,
        Method=method,
        **kwargs
    )
    kwargs = {'url': self.endpoint, 'params': params, 'verify': self.verify, 'method': 'GET'}
    if files:
        kwargs.update({'method': 'POST', 'files': files})
    self.logger.debug('Request parameters: %s', params)
    response = self.session.request(**kwargs)
    self.logger.debug('Response [%s]: %s', response.status_code, response.text)
    response.raise_for_status()
    data = response.json(object_pairs_hook=OrderedDict)
    return self.handle_response(data, target=target_class,
                                single_result=single_result, raw=raw)
0.005566
def compare(referenceOnto, somegraph):
    """
    Desc
    """
    spy1 = Ontology(referenceOnto)
    spy2 = Ontology(somegraph)

    class_comparison = {}
    for x in spy2.allclasses:
        if x not in spy1.allclasses:
            class_comparison[x] = False
        else:
            class_comparison[x] = True

    prop_comparison = {}
    for x in spy2.allinferredproperties:
        if x not in spy1.allinferredproperties:
            prop_comparison[x] = False
        else:
            prop_comparison[x] = True

    return {'stats': {'classes': len(spy2.allclasses),
                      'properties': len(spy2.allinferredproperties),
                      'triples': len(spy2.rdflib_graph)},
            'class_comparison': class_comparison,
            'prop_comparison': prop_comparison}
0.052083
def process_command_thread(self, request):
    """Worker thread to process a command.
    """
    command, data = request
    if multi_thread_enabled():
        try:
            self.process_command(command, data)
        except Exception as e:
            _logger.exception(str(e))
            raise
    else:
        pass
0.00545
def random_draft(card_class: CardClass, exclude=[]):
    """
    Return a deck of 30 random cards for the \a card_class
    """
    from . import cards
    from .deck import Deck

    deck = []
    collection = []
    # hero = card_class.default_hero

    for card in cards.db.keys():
        if card in exclude:
            continue
        cls = cards.db[card]
        if not cls.collectible:
            continue
        if cls.type == CardType.HERO:
            # Heroes are collectible...
            continue
        if cls.card_class and cls.card_class not in [card_class, CardClass.NEUTRAL]:
            # Play with more possibilities
            continue
        collection.append(cls)

    while len(deck) < Deck.MAX_CARDS:
        card = random.choice(collection)
        if deck.count(card.id) < card.max_count_in_deck:
            deck.append(card.id)

    return deck
0.036635
def upgrade():
    """Upgrade database."""
    # Variant types:
    def created():
        """Return instance of a column."""
        return sa.Column(
            'created',
            sa.DateTime().with_variant(mysql.DATETIME(fsp=6), 'mysql'),
            nullable=False
        )

    def updated():
        """Return instance of a column."""
        return sa.Column(
            'updated',
            sa.DateTime().with_variant(mysql.DATETIME(fsp=6), 'mysql'),
            nullable=False
        )

    def uri():
        """Return instance of a column."""
        return sa.Column(
            'uri',
            sa.Text().with_variant(mysql.VARCHAR(255), 'mysql'),
            nullable=True
        )

    def key(nullable=True):
        """Return instance of a column."""
        return sa.Column(
            'key',
            sa.Text().with_variant(mysql.VARCHAR(255), 'mysql'),
            nullable=nullable
        )

    op.create_table(
        'files_files',
        created(),
        updated(),
        sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=False),
        uri(),
        sa.Column('storage_class', sa.String(length=1), nullable=True),
        sa.Column('size', sa.BigInteger(), nullable=True),
        sa.Column('checksum', sa.String(length=255), nullable=True),
        sa.Column('readable', sa.Boolean(name='readable'), nullable=False),
        sa.Column('writable', sa.Boolean(name='writable'), nullable=False),
        sa.Column('last_check_at', sa.DateTime(), nullable=True),
        sa.Column('last_check', sa.Boolean(name='last_check'), nullable=False),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('uri')
    )
    op.create_table(
        'files_location',
        created(),
        updated(),
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=20), nullable=False),
        sa.Column('uri', sa.String(length=255), nullable=False),
        sa.Column('default', sa.Boolean(name='default'), nullable=False),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name')
    )
    op.create_table(
        'files_bucket',
        created(),
        updated(),
        sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=False),
        sa.Column('default_location', sa.Integer(), nullable=False),
        sa.Column('default_storage_class', sa.String(length=1), nullable=False),
        sa.Column('size', sa.BigInteger(), nullable=False),
        sa.Column('quota_size', sa.BigInteger(), nullable=True),
        sa.Column('max_file_size', sa.BigInteger(), nullable=True),
        sa.Column('locked', sa.Boolean(name='locked'), nullable=False),
        sa.Column('deleted', sa.Boolean(name='deleted'), nullable=False),
        sa.ForeignKeyConstraint(
            ['default_location'], [u'files_location.id'], ondelete='RESTRICT'),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_table(
        'files_buckettags',
        sa.Column('bucket_id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=False),
        sa.Column('key', sa.String(length=255), nullable=False),
        sa.Column('value', sa.Text(), nullable=False),
        sa.ForeignKeyConstraint(
            ['bucket_id'], [u'files_bucket.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('bucket_id', 'key')
    )
    op.create_table(
        'files_multipartobject',
        created(),
        updated(),
        sa.Column('upload_id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=False),
        sa.Column('bucket_id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=True),
        key(),
        sa.Column('file_id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=False),
        sa.Column('chunk_size', sa.Integer(), nullable=True),
        sa.Column('size', sa.BigInteger(), nullable=True),
        sa.Column('completed', sa.Boolean(name='completed'), nullable=False),
        sa.ForeignKeyConstraint(
            ['bucket_id'], [u'files_bucket.id'], ondelete='RESTRICT'),
        sa.ForeignKeyConstraint(
            ['file_id'], [u'files_files.id'], ondelete='RESTRICT'),
        sa.PrimaryKeyConstraint('upload_id'),
        sa.UniqueConstraint('upload_id', 'bucket_id', 'key', name='uix_item')
    )
    op.create_table(
        'files_object',
        created(),
        updated(),
        sa.Column('bucket_id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=False),
        key(nullable=False),
        sa.Column('version_id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=False),
        sa.Column('file_id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=True),
        sa.Column('_mimetype', sa.String(length=255), nullable=True),
        sa.Column('is_head', sa.Boolean(name='is_head'), nullable=False),
        sa.ForeignKeyConstraint(
            ['bucket_id'], [u'files_bucket.id'], ondelete='RESTRICT'),
        sa.ForeignKeyConstraint(
            ['file_id'], [u'files_files.id'], ondelete='RESTRICT'),
        sa.PrimaryKeyConstraint('bucket_id', 'key', 'version_id')
    )
    op.create_index(
        op.f('ix_files_object__mimetype'),
        'files_object',
        ['_mimetype'],
        unique=False
    )
    op.create_table(
        'files_multipartobject_part',
        created(),
        updated(),
        sa.Column('upload_id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=False),
        sa.Column('part_number', sa.Integer(), autoincrement=False, nullable=False),
        sa.Column('checksum', sa.String(length=255), nullable=True),
        sa.ForeignKeyConstraint(
            ['upload_id'], [u'files_multipartobject.upload_id'], ondelete='RESTRICT'),
        sa.PrimaryKeyConstraint('upload_id', 'part_number')
    )
0.000157
def recentDF(token='', version=''):
    '''https://iexcloud.io/docs/api/#stats-recent

    Args:
        token (string); Access token
        version (string); API version

    Returns:
        DataFrame: result
    '''
    df = pd.DataFrame(recent(token, version))
    _toDatetime(df)
    _reindex(df, 'date')
    return df
0.003086
def fetch(url, body=None, headers=None):
    """Invoke the fetch method on the default fetcher. Most users
    should need only this method.

    @raises Exception: any exceptions that may be raised by the default fetcher
    """
    fetcher = getDefaultFetcher()
    return fetcher.fetch(url, body, headers)
0.003247
def check(self, results_id):
    """Check for results of a membership request.

    :param str results_id: the ID of a membership request
    :return: successfully created memberships
    :rtype: :class:`list`
    :raises groupy.exceptions.ResultsNotReady: if the results are not ready
    :raises groupy.exceptions.ResultsExpired: if the results have expired
    """
    path = 'results/{}'.format(results_id)
    url = utils.urljoin(self.url, path)
    response = self.session.get(url)
    if response.status_code == 503:
        raise exceptions.ResultsNotReady(response)
    if response.status_code == 404:
        raise exceptions.ResultsExpired(response)
    return response.data['members']
0.002646
def handle_template(bot_or_project, name, target=None, **options):
    """
    Copy either a bot layout template or a Trading-Bots project layout
    template into the specified directory.

    :param bot_or_project: The string 'bot' or 'project'.
    :param name: The name of the bot or project.
    :param target: The directory to which the template should be copied.
    :param options: The additional variables passed to project or bot templates
    """
    bot_or_project = bot_or_project
    paths_to_remove = []
    verbosity = int(options['verbosity'])

    validate_name(name, bot_or_project)

    # if some directory is given, make sure it's nicely expanded
    if target is None:
        top_dir = path.join(os.getcwd(), name)
        try:
            os.makedirs(top_dir)
        except FileExistsError:
            raise click.ClickException("'%s' already exists" % top_dir)
        except OSError as e:
            raise click.ClickException(e)
    else:
        top_dir = os.path.abspath(path.expanduser(target))
        if not os.path.exists(top_dir):
            raise click.ClickException("Destination directory '%s' does not "
                                       "exist, please create it first." % top_dir)

    base_name = '%s_name' % bot_or_project
    base_subdir = '%s_template' % bot_or_project
    base_directory = '%s_directory' % bot_or_project
    target_name = '%s_target' % bot_or_project
    pascal_case_name = 'pascal_case_%s_name' % bot_or_project
    pascal_case_value = stringcase.pascalcase(name)
    snake_case_name = 'snake_case_%s_name' % bot_or_project
    snake_case_value = stringcase.snakecase(name)

    context = {
        **options,
        base_name: name,
        base_directory: top_dir,
        target_name: target,
        pascal_case_name: pascal_case_value,
        snake_case_name: snake_case_value,
        'settings_files': defaults.SETTINGS,
        'version': getattr(trading_bots.__version__, '__version__'),
    }

    # Setup a stub settings environment for template rendering
    settings.configure()
    trading_bots.setup()

    template_dir = path.join(trading_bots.__path__[0], 'conf', base_subdir)
    prefix_length = len(template_dir) + 1

    for root, dirs, files in os.walk(template_dir):

        path_rest = root[prefix_length:]
        relative_dir = path_rest.replace(snake_case_name, snake_case_value)
        if relative_dir:
            target_dir = path.join(top_dir, relative_dir)
            if not path.exists(target_dir):
                os.mkdir(target_dir)

        for dirname in dirs[:]:
            if dirname.startswith('.') or dirname == '__pycache__':
                dirs.remove(dirname)

        for filename in files:
            if filename.endswith(('.pyo', '.pyc', '.py.class')):
                # Ignore some files as they cause various breakages.
                continue
            old_path = path.join(root, filename)
            new_path = path.join(top_dir, relative_dir,
                                 filename.replace(snake_case_name, snake_case_value))
            for old_suffix, new_suffix in rewrite_template_suffixes:
                if new_path.endswith(old_suffix):
                    new_path = new_path[:-len(old_suffix)] + new_suffix
                    break  # Only rewrite once

            if path.exists(new_path):
                raise click.ClickException("%s already exists, overlaying a "
                                           "project or bot into an existing "
                                           "directory won't replace conflicting "
                                           "files" % new_path)

            # Only render the Python files, as we don't want to
            # accidentally render Trading-Bots templates files
            if new_path.endswith(extensions):
                with open(old_path, 'r', encoding='utf-8') as template_file:
                    content = template_file.read()
                template = Template(content, keep_trailing_newline=True)
                content = template.render(**context)
                with open(new_path, 'w', encoding='utf-8') as new_file:
                    new_file.write(content)
            else:
                shutil.copyfile(old_path, new_path)

            if verbosity >= 2:
                click.echo("Creating %s\n" % new_path)
            try:
                shutil.copymode(old_path, new_path)
                make_writeable(new_path)
            except OSError:
                click.echo(
                    "Notice: Couldn't set permission bits on %s. You're "
                    "probably using an uncommon filesystem setup. No "
                    "problem." % new_path)

    if paths_to_remove:
        if verbosity >= 2:
            click.echo("Cleaning up temporary files.\n")
        for path_to_remove in paths_to_remove:
            if path.isfile(path_to_remove):
                os.remove(path_to_remove)
            else:
                shutil.rmtree(path_to_remove)
0.0008
def get_freesurfer_cmap(vis_type):
    """Provides different colormaps for different visualization types."""
    if vis_type in ('cortical_volumetric', 'cortical_contour'):
        LUT = get_freesurfer_cortical_LUT()
        cmap = ListedColormap(LUT)
    elif vis_type in ('labels_volumetric', 'labels_contour'):
        black = np.array([0, 0, 0, 1])
        cmap = plt.get_cmap('hsv')
        # TODO using more than 20 labels might be a problem?
        cmap = cmap(np.linspace(0, 1, 20))
        # prepending black to paint background as black
        colors = np.vstack((black, cmap))
        cmap = ListedColormap(colors, 'my_colormap')
    else:
        raise NotImplementedError(
            'color map for the visualization type {} has not been implemented!'.format(vis_type))

    return cmap
0.003759
def start_capture(self, port_number, output_file, data_link_type="DLT_EN10MB"):
    """
    Starts a packet capture.

    :param port_number: allocated port number
    :param output_file: PCAP destination file for the capture
    :param data_link_type: PCAP data link type (DLT_*), default is DLT_EN10MB
    """
    if not [port["port_number"] for port in self._ports_mapping
            if port_number == port["port_number"]]:
        raise NodeError("Port {port_number} doesn't exist on cloud '{name}'".format(
            name=self.name, port_number=port_number))

    if port_number not in self._nios:
        raise NodeError("Port {} is not connected".format(port_number))

    nio = self._nios[port_number]

    if nio.capturing:
        raise NodeError("Packet capture is already activated on port {port_number}".format(
            port_number=port_number))

    nio.startPacketCapture(output_file)
    bridge_name = "{}-{}".format(self._id, port_number)
    yield from self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(
        name=bridge_name, output_file=output_file))

    log.info("Cloud '{name}' [{id}]: starting packet capture on port {port_number}".format(
        name=self.name, id=self.id, port_number=port_number))
0.00733
def make_doc_id_range(doc_id):
    '''Construct a tuple(begin, end) of one-tuple kvlayer keys from a
    hexdigest doc_id.
    '''
    assert len(doc_id) == 32, 'expecting 32 hex string, not: %r' % doc_id
    bin_docid = base64.b16decode(doc_id.upper())
    doc_id_range = ((bin_docid,), (bin_docid,))
    return doc_id_range
0.003067
def decode_ulid(value: str) -> bytes:
    """
    Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.

    .. note:: This uses an optimized strategy from the `NUlid` project for decoding ULID
        strings specifically and is not meant for arbitrary decoding.

    :param value: String to decode
    :type value: :class:`~str`
    :return: Value decoded from Base32 string
    :rtype: :class:`~bytes`
    :raises ValueError: when value is not 26 characters
    :raises ValueError: when value cannot be encoded in ASCII
    """
    encoded = str_to_bytes(value, 26)
    decoding = DECODING

    return bytes((
        ((decoding[encoded[0]] << 5) | decoding[encoded[1]]) & 0xFF,
        ((decoding[encoded[2]] << 3) | (decoding[encoded[3]] >> 2)) & 0xFF,
        ((decoding[encoded[3]] << 6) | (decoding[encoded[4]] << 1) | (decoding[encoded[5]] >> 4)) & 0xFF,
        ((decoding[encoded[5]] << 4) | (decoding[encoded[6]] >> 1)) & 0xFF,
        ((decoding[encoded[6]] << 7) | (decoding[encoded[7]] << 2) | (decoding[encoded[8]] >> 3)) & 0xFF,
        ((decoding[encoded[8]] << 5) | (decoding[encoded[9]])) & 0xFF,
        ((decoding[encoded[10]] << 3) | (decoding[encoded[11]] >> 2)) & 0xFF,
        ((decoding[encoded[11]] << 6) | (decoding[encoded[12]] << 1) | (decoding[encoded[13]] >> 4)) & 0xFF,
        ((decoding[encoded[13]] << 4) | (decoding[encoded[14]] >> 1)) & 0xFF,
        ((decoding[encoded[14]] << 7) | (decoding[encoded[15]] << 2) | (decoding[encoded[16]] >> 3)) & 0xFF,
        ((decoding[encoded[16]] << 5) | (decoding[encoded[17]])) & 0xFF,
        ((decoding[encoded[18]] << 3) | (decoding[encoded[19]] >> 2)) & 0xFF,
        ((decoding[encoded[19]] << 6) | (decoding[encoded[20]] << 1) | (decoding[encoded[21]] >> 4)) & 0xFF,
        ((decoding[encoded[21]] << 4) | (decoding[encoded[22]] >> 1)) & 0xFF,
        ((decoding[encoded[22]] << 7) | (decoding[encoded[23]] << 2) | (decoding[encoded[24]] >> 3)) & 0xFF,
        ((decoding[encoded[24]] << 5) | (decoding[encoded[25]])) & 0xFF
    ))
0.003925
def create_from_response_pdu(resp_pdu):
    """ Create instance from response PDU.

    :param resp_pdu: Byte array with request PDU.
    :return: Instance of :class:`WriteSingleCoil`.
    """
    write_single_coil = WriteSingleCoil()

    address, value = struct.unpack('>HH', resp_pdu[1:5])
    value = 1 if value == 0xFF00 else value

    write_single_coil.address = address
    write_single_coil.data = value

    return write_single_coil
0.004149
def apply_to_with_tz(self, dttm, timezone):
    """We make sure that after truncating we use the correct timezone, even
    if we 'jump' over a daylight saving time switch.
    I.e. if we apply "@d" to `Sun Oct 30 04:30:00 CET 2016` (1477798200)
    we want to have `Sun Oct 30 00:00:00 CEST 2016` (1477778400)
    but not `Sun Oct 30 00:00:00 CET 2016` (1477782000)
    """
    result = self.apply_to(dttm)
    if self.unit in [DAYS, WEEKS, MONTHS, YEARS]:
        naive_dttm = datetime(result.year, result.month, result.day)
        result = timezone.localize(naive_dttm)
    return result
0.003145
def via(self, *args):
    """
    Creates an empty error to record in the stack trace
    """
    error = None
    if len(self.errors) > 0:
        error = self._err("via", *args)
    return error
0.008621
def product(target, prop1, prop2, **kwargs):
    r"""
    Calculates the product of multiple property values

    Parameters
    ----------
    target : OpenPNM Object
        The object which this model is associated with. This controls the
        length of the calculated array, and also provides access to other
        necessary properties.

    prop1 : string
        The name of the first argument

    prop2 : string
        The name of the second argument

    Notes
    -----
    Additional properties can be specified beyond just ``prop1`` and
    ``prop2`` by including additional arguments in the function call
    (i.e. ``prop3 = 'pore.foo'``).
    """
    value = target[prop1]*target[prop2]
    for item in kwargs.values():
        value *= target[item]
    return value
0.001272
def police_priority_map_exceed_map_pri6_exceed(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    police_priority_map = ET.SubElement(config, "police-priority-map",
                                        xmlns="urn:brocade.com:mgmt:brocade-policer")
    name_key = ET.SubElement(police_priority_map, "name")
    name_key.text = kwargs.pop('name')
    exceed = ET.SubElement(police_priority_map, "exceed")
    map_pri6_exceed = ET.SubElement(exceed, "map-pri6-exceed")
    map_pri6_exceed.text = kwargs.pop('map_pri6_exceed')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
0.004608
def get_broadcast(self, broadcast_guid, **kwargs):
    '''
    Get a specific broadcast by guid
    '''
    params = kwargs
    broadcast = self._call('broadcasts/%s' % broadcast_guid, params=params,
                           content_type='application/json')
    return Broadcast(broadcast)
0.01
def edit(plugins, parent=None, default=None, modal=True):
    """
    Prompts the user to edit the config settings for the input config \
    plugins.

    :param      plugins | [<XConfigPlugin>, ..]
                parent  | <QWidget>
                default | <XConfigPlugin> || None

    :return     <bool> success
    """
    if XConfigDialog._instance:
        XConfigDialog._instance.show()
        XConfigDialog._instance.activateWindow()
        return True

    dlg = XConfigDialog(parent)
    dlg.setPlugins(plugins)
    dlg.setCurrentPlugin(default)

    if not modal:
        XConfigDialog._instance = dlg
        dlg.setAttribute(Qt.WA_DeleteOnClose)
        dlg.show()
        return True

    if dlg.exec_():
        return True
    return False
0.025
def init_widget(self):
    """ Initialize the underlying widget.

    This reads all items declared in the enamldef block for this node
    and sets only the values that have been specified. All other values
    will be left as default. Doing it this way makes atom only create
    the properties that need to be overridden from defaults thus
    greatly reducing the number of initialization checks, saving time
    and memory.

    If you don't want this to happen override `get_declared_keys`
    to return an empty list.
    """
    super(UiKitView, self).init_widget()

    self.widget.yoga.isEnabled = True

    # Initialize the widget by updating only the members that
    # have read expressions declared. This saves a lot of time and
    # simplifies widget initialization code
    for k, v in self.get_declared_items():
        handler = getattr(self, 'set_' + k, None)
        if handler:
            handler(v)
0.002016
def import_from_xml(self, xml):
    '''
    Standard imports for all types of object
    These must fail gracefully, skip if not found
    '''
    self._import_orgid(xml)
    self._import_parents_from_xml(xml)
    self._import_instances_from_xml(xml)
    self._import_common_name(xml)
    self._import_synonyms(xml)
    self._import_dblinks(xml)
0.005195
def reset(self):
    """
    Process everything all over again.
    """
    self.indexCount = 0
    indexDir = self.store.newDirectory(self.indexDirectory)
    if indexDir.exists():
        indexDir.remove()
    for src in self.getSources():
        src.removeReliableListener(self)
        src.addReliableListener(self, style=iaxiom.REMOTE)
0.005249
def extend(self, values):
    """
    Extend the list by appending all elements from the *values*. Raises a
    ValueError if the sort order would be violated.
    """
    _maxes, _lists, _load = self._maxes, self._lists, self._load

    if not isinstance(values, list):
        values = list(values)

    if any(values[pos - 1] > values[pos]
           for pos in range(1, len(values))):
        raise ValueError('given sequence not in sort order')

    offset = 0

    if _maxes:
        if values[0] < _lists[-1][-1]:
            msg = '{0} not in sort order at index {1}'.format(repr(values[0]), self._len)
            raise ValueError(msg)

        if len(_lists[-1]) < self._half:
            _lists[-1].extend(values[:_load])
            _maxes[-1] = _lists[-1][-1]
            offset = _load

    len_lists = len(_lists)

    for idx in range(offset, len(values), _load):
        _lists.append(values[idx:(idx + _load)])
        _maxes.append(_lists[-1][-1])

    _index = self._index

    if len_lists == len(_lists):
        len_index = len(_index)
        if len_index > 0:
            len_values = len(values)
            child = len_index - 1
            while child:
                _index[child] += len_values
                child = (child - 1) >> 1
            _index[0] += len_values
    else:
        del _index[:]

    self._len += len(values)
0.001991
def _format_notes(self, record):
    """
    Extracts notes from a record and reformats them in a simplified format.
    """
    notes = []

    if "notes" in record:
        for note in record["notes"]:
            self._append_note_dict_to_list(notes, "general", note)

    if "language_and_script_notes" in record:
        self._append_note_dict_to_list(
            notes, "language_and_script", record["language_and_script_notes"]
        )

    if "publication_notes" in record:
        self._append_note_dict_to_list(
            notes, "publication_notes", record["publication_notes"]
        )

    if "physical_characteristics_and_technical_requirements" in record:
        self._append_note_dict_to_list(
            notes,
            "physical_condition",
            record["physical_characteristics_and_technical_requirements"],
        )

    return notes
0.003128
def execs(root=None):
    '''
    .. versionadded:: 2014.7.0

    Return a list of all files specified as ``ExecStart`` for all services.

    root
        Enable/disable/mask unit files in the specified root directory

    CLI Example:

        salt '*' service.execs
    '''
    ret = {}
    for service in get_all(root=root):
        data = show(service, root=root)
        if 'ExecStart' not in data:
            continue
        ret[service] = data['ExecStart']['path']
    return ret
0.002045
def yield_module_imports(root, checks=string_imports()):
    """
    Gather all require and define calls from unbundled JavaScript source
    files and yield all module names. The imports can either be of the
    CommonJS or AMD syntax.
    """
    if not isinstance(root, asttypes.Node):
        raise TypeError('provided root must be a node')

    for child in yield_function(root, deep_filter):
        for f, condition in checks:
            if condition(child):
                for name in f(child):
                    yield name
                continue
0.001779
def _Region1(T, P):
    """Basic equation for region 1

    Parameters
    ----------
    T : float
        Temperature, [K]
    P : float
        Pressure, [MPa]

    Returns
    -------
    prop : dict
        Dict with calculated properties. The available properties are:

            * v: Specific volume, [m³/kg]
            * h: Specific enthalpy, [kJ/kg]
            * s: Specific entropy, [kJ/kgK]
            * cp: Specific isobaric heat capacity, [kJ/kgK]
            * cv: Specific isocoric heat capacity, [kJ/kgK]
            * w: Speed of sound, [m/s]
            * alfav: Cubic expansion coefficient, [1/K]
            * kt: Isothermal compressibility, [1/MPa]

    References
    ----------
    IAPWS, Revised Release on the IAPWS Industrial Formulation 1997 for the
    Thermodynamic Properties of Water and Steam August 2007,
    http://www.iapws.org/relguide/IF97-Rev.html, Eq 7

    Examples
    --------
    >>> _Region1(300,3)["v"]
    0.00100215168
    >>> _Region1(300,3)["h"]
    115.331273
    >>> _Region1(300,3)["h"]-3000*_Region1(300,3)["v"]
    112.324818
    >>> _Region1(300,80)["s"]
    0.368563852
    >>> _Region1(300,80)["cp"]
    4.01008987
    >>> _Region1(300,80)["cv"]
    3.91736606
    >>> _Region1(500,3)["w"]
    1240.71337
    >>> _Region1(500,3)["alfav"]
    0.00164118128
    >>> _Region1(500,3)["kt"]
    0.00112892188
    """
    if P < 0:
        P = Pmin

    I = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3,
         4, 4, 4, 5, 8, 8, 21, 23, 29, 30, 31, 32]
    J = [-2, -1, 0, 1, 2, 3, 4, 5, -9, -7, -1, 0, 1, 3, -3, 0, 1, 3, 17, -4,
         0, 6, -5, -2, 10, -8, -11, -6, -29, -31, -38, -39, -40, -41]
    n = [0.14632971213167, -0.84548187169114, -0.37563603672040e1,
         0.33855169168385e1, -0.95791963387872, 0.15772038513228,
         -0.16616417199501e-1, 0.81214629983568e-3, 0.28319080123804e-3,
         -0.60706301565874e-3, -0.18990068218419e-1, -0.32529748770505e-1,
         -0.21841717175414e-1, -0.52838357969930e-4, -0.47184321073267e-3,
         -0.30001780793026e-3, 0.47661393906987e-4, -0.44141845330846e-5,
         -0.72694996297594e-15, -0.31679644845054e-4, -0.28270797985312e-5,
         -0.85205128120103e-9, -0.22425281908000e-5, -0.65171222895601e-6,
         -0.14341729937924e-12, -0.40516996860117e-6, -0.12734301741641e-8,
         -0.17424871230634e-9, -0.68762131295531e-18, 0.14478307828521e-19,
         0.26335781662795e-22, -0.11947622640071e-22, 0.18228094581404e-23,
         -0.93537087292458e-25]

    Tr = 1386/T
    Pr = P/16.53
    g = gp = gpp = gt = gtt = gpt = 0
    for i, j, ni in zip(I, J, n):
        g += ni * (7.1-Pr)**i * (Tr-1.222)**j
        gp -= ni*i * (7.1-Pr)**(i-1) * (Tr-1.222)**j
        gpp += ni*i*(i-1) * (7.1-Pr)**(i-2) * (Tr-1.222)**j
        gt += ni*j * (7.1-Pr)**i * (Tr-1.222)**(j-1)
        gtt += ni*j*(j-1) * (7.1-Pr)**i * (Tr-1.222)**(j-2)
        gpt -= ni*i*j * (7.1-Pr)**(i-1) * (Tr-1.222)**(j-1)

    propiedades = {}
    propiedades["T"] = T
    propiedades["P"] = P
    propiedades["v"] = Pr*gp*R*T/P/1000
    propiedades["h"] = Tr*gt*R*T
    propiedades["s"] = R*(Tr*gt-g)
    propiedades["cp"] = -R*Tr**2*gtt
    propiedades["cv"] = R*(-Tr**2*gtt+(gp-Tr*gpt)**2/gpp)
    propiedades["w"] = sqrt(R*T*1000*gp**2/((gp-Tr*gpt)**2/(Tr**2*gtt)-gpp))
    propiedades["alfav"] = (1-Tr*gpt/gp)/T
    propiedades["kt"] = -Pr*gpp/gp/P
    propiedades["region"] = 1
    propiedades["x"] = 0
    return propiedades
0.000577
def resolve_out(self, ins):
    """
    Determine which stream the output is synchronised with. If the incoming
    streams have different sync values, then it is unknown what
    synchronisation the outgoing stream should have.

    :param ins: dictionary of the incoming streams' sync values
    :return:
    """
    values = set()
    for value in ins.values():
        values.update(value)
    if len(values) > 1:
        msg = 'Unable to resolve sync stream. Consider adding a custom resolver to {}.'
        raise ValueError(msg.format(self.step.name))
    return {key: values for key in self.step.outs}
0.009091
def to_timestamp(dt):
    """Convert a datetime object to a unix timestamp.

    Note that unlike a typical unix timestamp, this is seconds since 1970
    *local time*, not UTC.

    If the passed in object is already a timestamp, then that value is
    simply returned unmodified.
    """
    if isinstance(dt, int):
        return dt
    return int(total_seconds(dt.replace(tzinfo=None) -
                             datetime.datetime(1970, 1, 1)))
0.002288
def load_method(path, method, class_name=None, instance_creator=None):
    '''
    Returns an instance of the method specified.

    Args:
        path: The path to the module containing the method or function.
        method: The name of the function.
        class_name: The name of the class if the function is a method.
        instance_creator: The name of the method to return the class instance.
    '''
    # Load the module
    module = load_module(path)
    if class_name:
        # If a class, create an instance
        class_type = getattr(module, class_name)
        if instance_creator:
            ic_rest = instance_creator
            nxt = module
            while '.' in ic_rest:
                nxt = getattr(nxt, instance_creator.split('.')[0])
                ic_rest = '.'.join(ic_rest.split('.')[1:])
            instance = getattr(module, instance_creator)()
        else:
            instance = class_type()
        return getattr(instance, method)
    else:
        return getattr(module, method)
0.022422
def gx_coords(node):
    """
    Given a KML DOM node, grab its <gx:coord> and <gx:timestamp><when>
    subnodes, and convert them into a dictionary with the keys and values

    - ``'coordinates'``: list of lists of float coordinates
    - ``'times'``: list of timestamps corresponding to the coordinates
    """
    els = get(node, 'gx:coord')
    coordinates = []
    times = []
    coordinates = [gx_coords1(val(el)) for el in els]
    time_els = get(node, 'when')
    times = [val(t) for t in time_els]
    return {
        'coordinates': coordinates,
        'times': times,
    }
0.00346
async def xinfo_consumers(self, name: str, group: str) -> list:
    """
    [NOTICE] Not officially released yet

    XINFO command is an observability interface that can be used
    with sub-commands in order to get information about streams
    or consumer groups.

    :param name: name of the stream
    :param group: name of the consumer group
    """
    return await self.execute_command('XINFO CONSUMERS', name, group)
0.004329
def translate_point(self, point):
    """ Translate world coordinates and return screen coordinates.
    Respects zoom level

    Will be returned as tuple.

    :rtype: tuple
    """
    mx, my = self.get_center_offset()
    if self._zoom_level == 1.0:
        return point[0] + mx, point[1] + my
    else:
        return (int(round((point[0] + mx) * self._real_ratio_x)),
                int(round((point[1] + my) * self._real_ratio_y)))
0.008811
def facet_freq_plot(freq_csv, caller): """Prepare a facet plot of frequencies stratified by variant type and status (TP, FP, FN). Makes a nice plot with the output from validate.freq_summary """ out_file = "%s.png" % os.path.splitext(freq_csv)[0] plt.ioff() sns.set(style='dark') df = pd.read_csv(freq_csv) g = sns.FacetGrid(df, row="vtype", col="valclass", margin_titles=True, col_order=["TP", "FN", "FP"], row_order=["snp", "indel"], sharey=False) g.map(plt.hist, "freq", bins=20, align="left") g.set(xlim=(0.0, 1.0)) g.fig.set_size_inches(8, 6) g.fig.text(.05, .97, caller, horizontalalignment='center', size=14) g.fig.savefig(out_file)
0.002717
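A minimal call sketch; the CSV must carry the vtype, valclass and freq columns the FacetGrid above expects, and the file and caller names are illustrative.

# Writes validate-freqs.png alongside the input CSV.
facet_freq_plot('validate-freqs.csv', 'vardict')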
def select_as_multiple(self, keys, where=None, selector=None, columns=None,
                       start=None, stop=None, iterator=False,
                       chunksize=None, auto_close=False, **kwargs):
    """ Retrieve pandas objects from multiple tables

    Parameters
    ----------
    keys : a list of the tables
    selector : the table to apply the where criteria (defaults to keys[0]
        if not supplied)
    columns : the columns I want back
    start : integer (defaults to None), row number to start selection
    stop  : integer (defaults to None), row number to stop selection
    iterator : boolean, return an iterator, default False
    chunksize : nrows to include in iteration, return an iterator

    Raises
    ------
    raises KeyError if keys or selector is not found or keys is empty
    raises TypeError if keys is not a list or tuple
    raises ValueError if the tables are not ALL THE SAME DIMENSIONS
    """

    # default to single select
    where = _ensure_term(where, scope_level=1)

    if isinstance(keys, (list, tuple)) and len(keys) == 1:
        keys = keys[0]
    if isinstance(keys, str):
        return self.select(key=keys, where=where, columns=columns,
                           start=start, stop=stop, iterator=iterator,
                           chunksize=chunksize, **kwargs)

    if not isinstance(keys, (list, tuple)):
        raise TypeError("keys must be a list/tuple")

    if not len(keys):
        raise ValueError("keys must have a non-zero length")

    if selector is None:
        selector = keys[0]

    # collect the tables
    tbls = [self.get_storer(k) for k in keys]
    s = self.get_storer(selector)

    # validate rows
    nrows = None
    for t, k in itertools.chain([(s, selector)], zip(tbls, keys)):
        if t is None:
            raise KeyError("Invalid table [{key}]".format(key=k))
        if not t.is_table:
            raise TypeError(
                "object [{obj}] is not a table, and cannot be used in all "
                "select as multiple".format(obj=t.pathname)
            )

        if nrows is None:
            nrows = t.nrows
        elif t.nrows != nrows:
            raise ValueError(
                "all tables must have exactly the same nrows!")

    # axis is the concatenation axis
    axis = list({t.non_index_axes[0][0] for t in tbls})[0]

    def func(_start, _stop, _where):

        # retrieve the objs, _where is always passed as a set of
        # coordinates here
        objs = [t.read(where=_where, columns=columns, start=_start,
                       stop=_stop, **kwargs) for t in tbls]

        # concat and return
        return concat(objs, axis=axis,
                      verify_integrity=False)._consolidate()

    # create the iterator
    it = TableIterator(self, s, func, where=where, nrows=nrows,
                       start=start, stop=stop, iterator=iterator,
                       chunksize=chunksize, auto_close=auto_close)

    return it.get_result(coordinates=True)
0.001222
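Typical use goes through pandas.HDFStore, where this method lives; a sketch assuming two appended tables with identical row counts.

import numpy as np
import pandas as pd

df1 = pd.DataFrame(np.random.randn(8, 2), columns=['a', 'b'])
df2 = pd.DataFrame(np.random.randn(8, 2), columns=['c', 'd'])
with pd.HDFStore('demo.h5') as store:
    store.append('df1', df1)  # both frames share index length 8
    store.append('df2', df2)
    combined = store.select_as_multiple(['df1', 'df2'],
                                        where='index < 4',
                                        selector='df1')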
def qteAbort(self, msgObj): """ Restore the original cursor position because the user hit abort. """ self.qteWidget.setCursorPosition(*self.cursorPosOrig) self.qteMain.qtesigAbort.disconnect(self.qteAbort)
0.008163
def _read_channel(stream, num, name, ctype, epoch, start, end, scaled=True, series_class=TimeSeries): """Read a channel from a specific frame in a stream """ data = _get_frdata(stream, num, name, ctype=ctype) return read_frdata(data, epoch, start, end, scaled=scaled, series_class=series_class)
0.002849
def get_queryset(self): """ Retrieve the author by his username and build a queryset of his published entries. """ self.author = get_object_or_404( Author, **{Author.USERNAME_FIELD: self.kwargs['username']}) return self.author.entries_published()
0.006536
def get(self, bucket=None, versions=missing, uploads=missing):
    """Get list of objects in the bucket.

    :param bucket: A :class:`invenio_files_rest.models.Bucket` instance.
    :param versions: If provided, include object versions in the listing.
    :param uploads: If provided, list the bucket's multipart uploads
        instead of its objects.
    :returns: The Flask response.
    """
    if uploads is not missing:
        return self.multipart_listuploads(bucket)
    else:
        return self.listobjects(bucket, versions)
0.005089
def insert(self, name, index, value): """Insert a value at the passed index in the named header.""" return self._sequence[name].insert(index, value)
0.012195
def parse_package_json(): """ Extract the JSPM configuration from package.json. """ with open(locate_package_json()) as pjson: data = json.loads(pjson.read()) return data
0.005051
def height(self):
    """Returns the player's height (in inches).

    :returns: An int representing a player's height in inches, or None
        if the height cannot be parsed.
    """
    doc = self.get_main_doc()
    raw = doc('span[itemprop="height"]').text()
    try:
        feet, inches = map(int, raw.split('-'))
        return feet * 12 + inches
    except ValueError:
        return None
0.005155
def create(self, **kwargs): """Custom create method to accommodate different endpoint behavior.""" self._check_create_parameters(**kwargs) if kwargs['extractFromAllItems'] is False: self._meta_data['minimum_additional_parameters'] = { 'extractFromRegularExpression', 'extractUrlReferences', 'extractFiletypeReferences' } return self._create(**kwargs)
0.004566
def send_image(self, sender, receiver_type, receiver_id, media_id):
    """
    Send an image message

    For details, see
    https://qydev.weixin.qq.com/wiki/index.php?title=企业会话接口说明

    :param sender: the sender
    :param receiver_type: receiver type: single|group, meaning a
        one-on-one chat or a group chat respectively
    :param receiver_id: receiver value: userid|chatid, i.e. a member id
        or a chat session id respectively
    :param media_id: media file id of the image, which can be obtained
        from the media upload API
    :return: the JSON response payload
    """
    data = {
        'receiver': {
            'type': receiver_type,
            'id': receiver_id,
        },
        'sender': sender,
        'msgtype': 'image',
        'image': {
            'media_id': media_id,
        }
    }
    return self._post('chat/send', data=data)
0.002677
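An illustrative call; client is an initialized WeChat Work chat client exposing this method, and media_id comes from the media upload API (both assumed).

client.send_image(sender='zhangsan',
                  receiver_type='single',
                  receiver_id='lisi',
                  media_id=media_id)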
def HuntIDToInt(hunt_id): """Convert hunt id string to an integer.""" # TODO(user): This code is only needed for a brief period of time when we # allow running new rel-db flows with old aff4-based hunts. In this scenario # parent_hunt_id is effectively not used, but it has to be an # integer. Stripping "H:" from hunt ids then makes the rel-db happy. Remove # this code when hunts are rel-db only. if hunt_id.startswith("H:"): hunt_id = hunt_id[2:] try: return int(hunt_id or "0", 16) except ValueError as e: raise HuntIDIsNotAnIntegerError(e)
0.017391
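Two quick checks of the conversion rules spelled out in the comment:

assert HuntIDToInt("H:123ABC") == 0x123ABC  # "H:" stripped, rest is hex
assert HuntIDToInt("") == 0                 # empty ids fall back to zero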
def ticker(self, pair, ignore_invalid=0): """ This method provides all the information about currently active pairs, such as: the maximum price, the minimum price, average price, trade volume, trade volume in currency, the last trade, Buy and Sell price. All information is provided over the past 24 hours. :param str or iterable pair: pair (ex. 'btc_usd' or ['btc_usd', 'eth_usd']) :param int ignore_invalid: ignore non-existing pairs """ return self._public_api_call('ticker', pair=pair, ignore_invalid=ignore_invalid)
0.010274
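An illustrative call; api stands in for an initialized client exposing this public-API mixin (assumed).

tickers = api.ticker(['btc_usd', 'eth_usd'], ignore_invalid=1)
print(tickers['btc_usd']['last'])  # last trade price over the past 24 h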
def redefine_position(self, position): """Redefines the current position to the new position. :param position: The new position. """ cmd = 'MOVE', [Float, Integer] self._write(cmd, position, 2)
0.008511
def instance(): """Returns a global `IOLoop` instance. Most applications have a single, global `IOLoop` running on the main thread. Use this method to get this instance from another thread. To get the current thread's `IOLoop`, use `current()`. """ if not hasattr(IOLoop, "_instance"): with IOLoop._instance_lock: if not hasattr(IOLoop, "_instance"): # New instance after double check IOLoop._instance = IOLoop() return IOLoop._instance
0.003552
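Typical cross-thread use of the singleton; add_callback is Tornado's thread-safe way to hand work to the loop.

loop = IOLoop.instance()
loop.add_callback(lambda: print("runs on the IOLoop's own thread"))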
def posterior_samples_f(self, X, size=10, full_cov=True, **predict_kwargs): """ Samples the posterior TP at the points X. :param X: The points at which to take the samples. :type X: np.ndarray (Nnew x self.input_dim) :param size: the number of a posteriori samples. :type size: int. :param full_cov: whether to return the full covariance matrix, or just the diagonal. :type full_cov: bool. :returns: fsim: set of simulations :rtype: np.ndarray (D x N x samples) (if D==1 we flatten out the first dimension) """ mu, var = self._raw_predict(X, full_cov=full_cov, **predict_kwargs) if self.normalizer is not None: mu, var = self.normalizer.inverse_mean(mu), self.normalizer.inverse_variance(var) def sim_one_dim(m, v): nu = self.nu + 2 + self.num_data v = np.diag(v.flatten()) if not full_cov else v Z = np.random.multivariate_normal(np.zeros(X.shape[0]), v, size).T g = np.tile(np.random.gamma(nu / 2., 2. / nu, size), (X.shape[0], 1)) return m + Z / np.sqrt(g) if self.output_dim == 1: return sim_one_dim(mu, var) else: fsim = np.empty((self.output_dim, self.num_data, size)) for d in range(self.output_dim): if full_cov and var.ndim == 3: fsim[d] = sim_one_dim(mu[:, d], var[:, :, d]) elif (not full_cov) and var.ndim == 2: fsim[d] = sim_one_dim(mu[:, d], var[:, d]) else: fsim[d] = sim_one_dim(mu[:, d], var) return fsim
0.00358
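A sampling sketch, assuming m is a fitted model exposing this method (e.g. a GPy-style TP regression) with a single output dimension.

import numpy as np

Xnew = np.linspace(0.0, 1.0, 50)[:, None]
fsim = m.posterior_samples_f(Xnew, size=5, full_cov=True)
print(fsim.shape)  # (50, 5): one column per posterior draw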
def create_from_targets(self, list_targs):
    """
    Adds to the span the new targets defined in a list
    @type list_targs: list
    @param list_targs: list of Ctargets
    """
    for this_target in list_targs:
        self.node.append(this_target.get_node())
0.010135
def merge_runs(data, digits=None):
    """
    Merge duplicate sequential values. This differs from unique_ordered
    in that values can occur in multiple places in the sequence, but
    only consecutive repeats are removed

    Parameters
    -----------
    data: (n,) float or int
    digits: unused, kept for signature compatibility

    Returns
    --------
    merged: (m,) float or int

    Examples
    ---------
    In [1]: a
    Out[1]:
    array([-1, -1, -1,  0,  0,  1,  1,  2,  0,
            3,  3,  4,  4,  5,  5,  6,  6,  7,
            7,  8,  8,  9,  9,  9])

    In [2]: trimesh.grouping.merge_runs(a)
    Out[2]: array([-1,  0,  1,  2,  0,  3,  4,  5,  6,  7,  8,  9])
    """
    data = np.asanyarray(data)
    mask = np.abs(np.diff(data)) > tol.merge
    mask = np.concatenate((np.array([True]), mask))

    return data[mask]
0.001258
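The contrast with np.unique is the point of the function; only consecutive repeats collapse:

import numpy as np

a = np.array([-1, -1, 0, 0, 1, 0, 0])
merge_runs(a)  # array([-1,  0,  1,  0]) -- the second 0-run survives
np.unique(a)   # array([-1,  0,  1])     -- duplicates removed globally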
def threshold_otsu(image, multiplier=1.0): """Return image thresholded using Otsu's method. """ otsu_value = skimage.filters.threshold_otsu(image) return image > otsu_value * multiplier
0.004975
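A quick sketch on a bundled scikit-image sample; a multiplier above 1 raises the cut-off and shrinks the foreground mask.

import skimage.data

image = skimage.data.camera()
mask = threshold_otsu(image, multiplier=1.1)
print(mask.mean())  # fraction of pixels above the scaled Otsu value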
def show(self, commits=None, encoding='utf-8'): """Show the data of a set of commits. The method returns the output of Git show command for a set of commits using the following options: git show --raw --numstat --pretty=fuller --decorate=full --parents -M -C -c [<commit>...<commit>] When the list of commits is empty, the command will return data about the last commit, like the default behaviour of `git show`. :param commits: list of commits to show data :param encoding: encode the output using this format :returns: a generator where each item is a line from the show output :raises EmptyRepositoryError: when the repository is empty and the action cannot be performed :raises RepositoryError: when an error occurs fetching the show output """ if self.is_empty(): logger.warning("Git %s repository is empty; unable to run show", self.uri) raise EmptyRepositoryError(repository=self.uri) if commits is None: commits = [] cmd_show = ['git', 'show'] cmd_show.extend(self.GIT_PRETTY_OUTPUT_OPTS) cmd_show.extend(commits) for line in self._exec_nb(cmd_show, cwd=self.dirpath, env=self.gitenv): yield line logger.debug("Git show fetched from %s repository (%s)", self.uri, self.dirpath)
0.001349
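An illustrative iteration over the generator; repo is an already-constructed wrapper around a non-empty clone (assumed).

for line in repo.show(commits=['HEAD']):
    if line.startswith('commit'):
        print(line.rstrip())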
def ext_xsect(scatterer, h_pol=True): """Extinction cross section for the current setup, with polarization. Args: scatterer: a Scatterer instance. h_pol: If True (default), use horizontal polarization. If False, use vertical polarization. Returns: The extinction cross section. """ if scatterer.psd_integrator is not None: try: return scatterer.psd_integrator.get_angular_integrated( scatterer.psd, scatterer.get_geometry(), "ext_xsect") except AttributeError: # Fall back to the usual method of computing this from S pass old_geom = scatterer.get_geometry() (thet0, thet, phi0, phi, alpha, beta) = old_geom try: scatterer.set_geometry((thet0, thet0, phi0, phi0, alpha, beta)) S = scatterer.get_S() finally: scatterer.set_geometry(old_geom) if h_pol: return 2 * scatterer.wavelength * S[1,1].imag else: return 2 * scatterer.wavelength * S[0,0].imag
0.005709
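A usage sketch against the pytmatrix-style Scatterer API the function expects; the particle parameters are illustrative.

from pytmatrix.tmatrix import Scatterer

sc = Scatterer(radius=2.0, wavelength=10.0, m=complex(1.5, 0.01))
print(ext_xsect(sc, h_pol=True))  # extinction cross section, H pol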