text: string, lengths 78 to 104k characters
score: float64, range 0 to 0.18
def _set_redistribute_ospf(self, v, load=False): """ Setter method for redistribute_ospf, mapped from YANG variable /rbridge_id/ipv6/router/ospf/redistribute/redistribute_ospf (container) If this variable is read-only (config: false) in the source YANG file, then _set_redistribute_ospf is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_redistribute_ospf() directly. YANG Description: OSPF routes """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=redistribute_ospf.redistribute_ospf, is_container='container', presence=True, yang_name="redistribute-ospf", rest_name="ospf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'OSPF routes', u'alt-name': u'ospf'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """redistribute_ospf must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=redistribute_ospf.redistribute_ospf, is_container='container', presence=True, yang_name="redistribute-ospf", rest_name="ospf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'OSPF routes', u'alt-name': u'ospf'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True)""", }) self.__redistribute_ospf = t if hasattr(self, '_set'): self._set()
0.005637
def _extract_variable_parts(variable_key, variable):
    """Matches a variable to individual parts.

    Args:
      variable_key: String identifier of the variable in the module scope.
      variable: Variable tensor.

    Returns:
      partitioned: Whether the variable is partitioned.
      name: Name of the variable up to the partitioning.
      offset: Offset of the variable into the full variable.

    Raises:
      RuntimeError: In case of unexpected variable format.
    """
    name, offset, partitioned = None, None, False
    # pylint: disable=protected-access
    if variable._save_slice_info:
        name = variable_key[:variable_key.rfind("/")]
        if not variable._save_slice_info.full_name.endswith(name):
            raise RuntimeError("Unexpected handling of partitioned variable.")
        offset = variable._save_slice_info.var_offset[0]
        partitioned = True
    # pylint: enable=protected-access
    return partitioned, name, offset
0.008753
def define_density_matrix(Ne, explicitly_hermitian=False, normalized=False, variables=None): r"""Return a symbolic density matrix. The arguments are Ne (integer): The number of atomic states. explicitly_hermitian (boolean): Whether to make $\rho_{ij}=\bar{\rho}_{ij}$ for $i<j$ normalized (boolean): Whether to make $\rho_{11}=1-\sum_{i>1} \rho_{ii}$ A very simple example: >>> define_density_matrix(2) Matrix([ [rho11, rho12], [rho21, rho22]]) The density matrix can be made explicitly hermitian >>> define_density_matrix(2, explicitly_hermitian=True) Matrix([ [rho11, conjugate(rho21)], [rho21, rho22]]) or normalized >>> define_density_matrix(2, normalized=True) Matrix([ [-rho22 + 1, rho12], [ rho21, rho22]]) or it can be made an explicit function of given variables >>> from sympy import symbols >>> t, z = symbols("t, z", positive=True) >>> define_density_matrix(2, variables=[t, z]) Matrix([ [rho11(t, z), rho12(t, z)], [rho21(t, z), rho22(t, z)]]) """ if Ne > 9: comma = "," name = r"\rho" open_brace = "_{" close_brace = "}" else: comma = "" name = "rho" open_brace = "" close_brace = "" rho = [] for i in range(Ne): row_rho = [] for j in range(Ne): if i == j: row_rho += [define_symbol(name, open_brace, comma, i, j, close_brace, variables, positive=True)] elif i > j: row_rho += [define_symbol(name, open_brace, comma, i, j, close_brace, variables)] else: if explicitly_hermitian: row_rho += [conjugate(define_symbol(name, open_brace, comma, j, i, close_brace, variables))] else: row_rho += [define_symbol(name, open_brace, comma, i, j, close_brace, variables)] rho += [row_rho] if normalized: rho11 = 1-sum([rho[i][i] for i in range(1, Ne)]) rho[0][0] = rho11 rho = Matrix(rho) return rho
0.0004
def nodes(self, nodes):
    """Specify the set of nodes and associated data.

    Must include any nodes referenced in the edge list.

    :param nodes: Nodes and their attributes.
    :type nodes: Pandas dataframe
    :returns: Plotter.
    :rtype: Plotter.

    **Example**
        ::

            import graphistry

            es = pandas.DataFrame({'src': [0,1,2], 'dst': [1,2,0]})
            g = graphistry
                .bind(source='src', destination='dst')
                .edges(es)

            vs = pandas.DataFrame({'v': [0,1,2], 'lbl': ['a', 'b', 'c']})
            g = g.bind(node='v').nodes(vs)

            g.plot()
    """
    res = copy.copy(self)
    res._nodes = nodes
    return res
0.003769
def sscan(self, name, cursor=0, match=None, count=None):
    """
    Incrementally return lists of elements in a set. Also return a
    cursor indicating the scan position.

    ``match`` allows for filtering the keys by pattern

    ``count`` allows for hint the minimum number of returns
    """
    pieces = [name, cursor]
    if match is not None:
        pieces.extend([Token.get_token('MATCH'), match])
    if count is not None:
        pieces.extend([Token.get_token('COUNT'), count])
    return self.execute_command('SSCAN', *pieces)
0.003396
def _to_star(self):
    """Save :class:`~nmrstarlib.nmrstarlib.StarFile` into NMR-STAR or CIF formatted string.

    :return: NMR-STAR string.
    :rtype: :py:class:`str`
    """
    star_str = io.StringIO()
    self.print_file(star_str)
    return star_str.getvalue()
0.010135
def internal_state():
    '''Serve a json representation of internal agentstate as meta data
    '''
    data = {'services': {
        'capture': ServiceStatus.str(get_service_status(Service.CAPTURE)),
        'ingest': ServiceStatus.str(get_service_status(Service.INGEST)),
        'schedule': ServiceStatus.str(get_service_status(Service.SCHEDULE)),
        'agentstate': ServiceStatus.str(get_service_status(Service.AGENTSTATE))
    }}
    return make_response(jsonify({'meta': data}))
0.002012
def set_color(self, red, green, blue):
    """Set backlight color to provided red, green, and blue values.  If PWM
    is enabled then color components can be values from 0.0 to 1.0, otherwise
    components should be zero for off and non-zero for on.
    """
    if self._pwm_enabled:
        # Set duty cycle of PWM pins.
        rdc, gdc, bdc = self._rgb_to_duty_cycle((red, green, blue))
        self._pwm.set_duty_cycle(self._red, rdc)
        self._pwm.set_duty_cycle(self._green, gdc)
        self._pwm.set_duty_cycle(self._blue, bdc)
    else:
        # Set appropriate backlight pins based on polarity and enabled colors.
        self._gpio.output_pins({self._red: self._blpol if red else not self._blpol,
                                self._green: self._blpol if green else not self._blpol,
                                self._blue: self._blpol if blue else not self._blpol})
0.008395
def can_update_repositories(self):
    """Tests if this user can update ``Repositories``.

    A return of true does not guarantee successful authorization. A
    return of false indicates that it is known updating a
    ``Repository`` will result in a ``PermissionDenied``. This is
    intended as a hint to an application that may not wish to offer
    update operations to unauthorized users.

    :return: ``false`` if ``Repository`` modification is not authorized, ``true`` otherwise
    :rtype: ``boolean``

    *compliance: mandatory -- This method must be implemented.*
    """
    url_path = construct_url('authorization', bank_id=self._catalog_idstr)
    return self._get_request(url_path)['objectiveBankHints']['canUpdate']
0.003672
def shelve(self):
    """Return an opened shelve object.
    """
    logger.info('creating shelve data')
    fname = str(self.create_path.absolute())
    inst = sh.open(fname, writeback=self.writeback)
    self.is_open = True
    return inst
0.007407
def parse_as_header(class_, f):
    """
    Parse the Block header from the file-like object
    """
    version, previous_block_hash, merkle_root, height = parse_struct("L##L", f)
    # https://github.com/BTCGPU/BTCGPU/wiki/Technical-Spec
    f.read(28)  # reserved area
    (timestamp, difficulty, nonce, solution) = parse_struct("LL#S", f)
    return class_(version, previous_block_hash, merkle_root, timestamp,
                  difficulty, nonce, height, solution)
0.005941
def update(self, user, **kwargs):
    """If parent resource is not an editable state, should not be able to update"""
    yield self.get_parent()
    if not self.parent.editable:
        err = 'Cannot update child of {} resource'.format(self.parent.state.name)
        raise exceptions.Unauthorized(err)
    yield super(SubResource, self).update(user, **kwargs)
0.010336
async def srandmember(self, name, number=None):
    """
    If ``number`` is None, returns a random member of set ``name``.

    If ``number`` is supplied, returns a list of ``number`` random
    members of set ``name``. Note this is only available when running
    Redis 2.6+.
    """
    args = number and [number] or []
    return await self.execute_command('SRANDMEMBER', name, *args)
0.004751
def sql_context(self, application_name):
    """Create a spark context given the parameters configured in this class.

    The caller is responsible for calling ``.close`` on the resulting spark context

    Parameters
    ----------
    application_name : string

    Returns
    -------
    sc : SparkContext
    """
    sc = self.spark_context(application_name)
    import pyspark
    sqlContext = pyspark.SQLContext(sc)
    return (sc, sqlContext)
0.007952
def priorities(self):
    """Get a list of priority Resources from the server.

    :rtype: List[Priority]
    """
    r_json = self._get_json('priority')
    priorities = [Priority(self._options, self._session, raw_priority_json)
                  for raw_priority_json in r_json]
    return priorities
0.009288
def add_file_handler(logger=None, file_path="out.log", level=logging.INFO,
                     log_format=log_formats.easy_read):
    """
    Adds a newly created file handler to the specified logger

    :param logger: logging name or object to modify, defaults to root logger
    :param file_path: path to file to log to
    :param level: logging level to set handler at
    :param log_format: formatter to use
    """
    if not isinstance(logger, logging.Logger):
        logger = logging.getLogger(logger)
    logger.addHandler(get_file_handler(file_path, level, log_format))
0.001715
def represent_as(self, new_pos, new_vel=None): """ Represent the position and velocity of the orbit in an alternate coordinate system. Supports any of the Astropy coordinates representation classes. Parameters ---------- new_pos : :class:`~astropy.coordinates.BaseRepresentation` The type of representation to generate. Must be a class (not an instance), or the string name of the representation class. new_vel : :class:`~astropy.coordinates.BaseDifferential` (optional) Class in which any velocities should be represented. Must be a class (not an instance), or the string name of the differential class. If None, uses the default differential for the new position class. Returns ------- new_psp : `gala.dynamics.PhaseSpacePosition` """ if self.ndim != 3: raise ValueError("Can only change representation for " "ndim=3 instances.") # get the name of the desired representation if isinstance(new_pos, str): pos_name = new_pos else: pos_name = new_pos.get_name() if isinstance(new_vel, str): vel_name = new_vel elif new_vel is None: vel_name = pos_name else: vel_name = new_vel.get_name() Representation = coord.representation.REPRESENTATION_CLASSES[pos_name] Differential = coord.representation.DIFFERENTIAL_CLASSES[vel_name] new_pos = self.pos.represent_as(Representation) new_vel = self.vel.represent_as(Differential, self.pos) return self.__class__(pos=new_pos, vel=new_vel, frame=self.frame)
0.001653
def GetEmail(prompt): """Prompts the user for their email address and returns it. The last used email address is saved to a file and offered up as a suggestion to the user. If the user presses enter without typing in anything the last used email address is used. If the user enters a new address, it is saved for next time we prompt. """ last_email_file_name = os.path.expanduser("~/.last_codereview_email_address") last_email = "" if os.path.exists(last_email_file_name): try: last_email_file = open(last_email_file_name, "r") last_email = last_email_file.readline().strip("\n") last_email_file.close() prompt += " [%s]" % last_email except IOError, e: pass email = raw_input(prompt + ": ").strip() if email: try: last_email_file = open(last_email_file_name, "w") last_email_file.write(email) last_email_file.close() except IOError, e: pass else: email = last_email return email
0.030075
def _bp_static_url(blueprint):
    """ builds the absolute url path for a blueprint's static folder """
    u = six.u('%s%s' % (blueprint.url_prefix or '', blueprint.static_url_path or ''))
    return u
0.009901
def update_task_ids(self, encoder_vocab_size):
    """Generate task_ids for each problem.

    These ids correspond to the index of the task in the task_list.

    Args:
      encoder_vocab_size: the size of the vocab which is used to compute
        the index offset.
    """
    for idx, task in enumerate(self.task_list):
        task.set_task_id(idx + encoder_vocab_size)
        tf.logging.info("Task %d (%s) has id %d." %
                        (idx, task.name, task.task_id))
0.006289
def find_metadata(model):
    """
    :param model: Model instance
    """
    engine_name = model.get_engine_name()
    engine = engine_manager[engine_name]
    return engine.metadata
0.005435
def for_app(*app_names, **kwargs):
    """Specifies that matching script is for one of the app names."""
    def _for_app(fn, command):
        if is_app(command, *app_names, **kwargs):
            return fn(command)
        else:
            return False
    return decorator(_for_app)
0.003546
def FileFinderOSFromClient(args): """This function expands paths from the args and returns related stat entries. Args: args: An `rdf_file_finder.FileFinderArgs` object. Yields: `rdf_paths.PathSpec` instances. """ stat_cache = filesystem.StatCache() opts = args.action.stat for path in GetExpandedPaths(args): try: content_conditions = conditions.ContentCondition.Parse(args.conditions) for content_condition in content_conditions: with io.open(path, "rb") as fd: result = list(content_condition.Search(fd)) if not result: raise _SkipFileException() # TODO: `opts.resolve_links` has type `RDFBool`, not `bool`. stat = stat_cache.Get(path, follow_symlink=bool(opts.resolve_links)) stat_entry = client_utils.StatEntryFromStatPathSpec( stat, ext_attrs=opts.collect_ext_attrs) yield stat_entry except _SkipFileException: pass
0.015924
def Cvg(self):
    r'''Gas-phase ideal-gas constant-volume heat capacity of the chemical at
    its current temperature, in units of [J/kg/K]. Subtracts R from
    the ideal-gas heat capacity; does not include pressure-compensation
    from an equation of state.

    Examples
    --------
    >>> w = Chemical('water', T=520)
    >>> w.Cvg
    1506.1471795798861
    '''
    Cvgm = self.Cvgm
    if Cvgm:
        return property_molar_to_mass(Cvgm, self.MW)
    return None
0.003781
def iri_to_uri(value, normalize=False): """ Encodes a unicode IRI into an ASCII byte string URI :param value: A unicode string of an IRI :param normalize: A bool that controls URI normalization :return: A byte string of the ASCII-encoded URI """ if not isinstance(value, str_cls): raise TypeError(unwrap( ''' value must be a unicode string, not %s ''', type_name(value) )) scheme = None # Python 2.6 doesn't split properly is the URL doesn't start with http:// or https:// if sys.version_info < (2, 7) and not value.startswith('http://') and not value.startswith('https://'): real_prefix = None prefix_match = re.match('^[^:]*://', value) if prefix_match: real_prefix = prefix_match.group(0) value = 'http://' + value[len(real_prefix):] parsed = urlsplit(value) if real_prefix: value = real_prefix + value[7:] scheme = _urlquote(real_prefix[:-3]) else: parsed = urlsplit(value) if scheme is None: scheme = _urlquote(parsed.scheme) hostname = parsed.hostname if hostname is not None: hostname = hostname.encode('idna') # RFC 3986 allows userinfo to contain sub-delims username = _urlquote(parsed.username, safe='!$&\'()*+,;=') password = _urlquote(parsed.password, safe='!$&\'()*+,;=') port = parsed.port if port is not None: port = str_cls(port).encode('ascii') netloc = b'' if username is not None: netloc += username if password: netloc += b':' + password netloc += b'@' if hostname is not None: netloc += hostname if port is not None: default_http = scheme == b'http' and port == b'80' default_https = scheme == b'https' and port == b'443' if not normalize or (not default_http and not default_https): netloc += b':' + port # RFC 3986 allows a path to contain sub-delims, plus "@" and ":" path = _urlquote(parsed.path, safe='/!$&\'()*+,;=@:') # RFC 3986 allows the query to contain sub-delims, plus "@", ":" , "/" and "?" query = _urlquote(parsed.query, safe='/?!$&\'()*+,;=@:') # RFC 3986 allows the fragment to contain sub-delims, plus "@", ":" , "/" and "?" fragment = _urlquote(parsed.fragment, safe='/?!$&\'()*+,;=@:') if normalize and query is None and fragment is None and path == b'/': path = None # Python 2.7 compat if path is None: path = '' output = urlunsplit((scheme, netloc, path, query, fragment)) if isinstance(output, str_cls): output = output.encode('latin1') return output
0.001813
def is_sqlatype_string(coltype: Union[TypeEngine, VisitableType]) -> bool:
    """
    Is the SQLAlchemy column type a string type?
    """
    coltype = _coltype_to_typeengine(coltype)
    return isinstance(coltype, sqltypes.String)
0.004292
def isfile(path, **kwargs):
    """Check if *path* is a file"""
    import os.path
    return os.path.isfile(path, **kwargs)
0.008065
def bad_signatures(self):  # pragma: no cover
    """
    A generator yielding namedtuples of all signatures that were not verified
    in the operation that returned this instance. The namedtuple has the
    following attributes:

    ``sigsubj.verified`` - ``bool`` of whether the signature verified successfully or not.

    ``sigsubj.by`` - the :py:obj:`~pgpy.PGPKey` that was used in this verify operation.

    ``sigsubj.signature`` - the :py:obj:`~pgpy.PGPSignature` that was verified.

    ``sigsubj.subject`` - the subject that was verified using the signature.
    """
    for s in [i for i in self._subjects if not i.verified]:
        yield s
0.014451
def lastmod(self, tag):
    """Return the last modification of the entry."""
    lastitems = EntryModel.objects.published().order_by(
        '-modification_date').filter(tags=tag).only('modification_date')
    return lastitems[0].modification_date
0.011952
def os_walk_pre_35(top, topdown=True, onerror=None, followlinks=False): """Pre Python 3.5 implementation of os.walk() that doesn't use scandir.""" islink, join, isdir = os.path.islink, os.path.join, os.path.isdir try: names = os.listdir(top) except OSError as err: if onerror is not None: onerror(err) return dirs, nondirs = [], [] for name in names: if isdir(join(top, name)): dirs.append(name) else: nondirs.append(name) if topdown: yield top, dirs, nondirs for name in dirs: new_path = join(top, name) if followlinks or not islink(new_path): for x in os_walk_pre_35(new_path, topdown, onerror, followlinks): yield x if not topdown: yield top, dirs, nondirs
0.001199
def convert(self, newstart: str) -> None:
    """Convert to another list type by replacing starting pattern."""
    match = self._match
    ms = match.start()
    for s, e in reversed(match.spans('pattern')):
        self[s - ms:e - ms] = newstart
    self.pattern = escape(newstart)
0.006515
def write_epw(self): """ Section 8 - Writing new EPW file """ epw_prec = self.epw_precision # precision of epw file input for iJ in range(len(self.UCMData)): # [iJ+self.simTime.timeInitial-8] = increments along every weather timestep in epw # [6 to 21] = column data of epw self.epwinput[iJ+self.simTime.timeInitial-8][6] = "{0:.{1}f}".format( self.UCMData[iJ].canTemp - 273.15, epw_prec) # dry bulb temperature [?C] # dew point temperature [?C] self.epwinput[iJ+self.simTime.timeInitial - 8][7] = "{0:.{1}f}".format(self.UCMData[iJ].Tdp, epw_prec) # relative humidity [%] self.epwinput[iJ+self.simTime.timeInitial - 8][8] = "{0:.{1}f}".format(self.UCMData[iJ].canRHum, epw_prec) self.epwinput[iJ+self.simTime.timeInitial-8][21] = "{0:.{1}f}".format( self.WeatherData[iJ].wind, epw_prec) # wind speed [m/s] # Writing new EPW file epw_new_id = open(self.newPathName, "w") for i in range(8): new_epw_line = '{}\n'.format(reduce(lambda x, y: x+","+y, self._header[i])) epw_new_id.write(new_epw_line) for i in range(len(self.epwinput)): printme = "" for ei in range(34): printme += "{}".format(self.epwinput[i][ei]) + ',' printme = printme + "{}".format(self.epwinput[i][ei]) new_epw_line = "{0}\n".format(printme) epw_new_id.write(new_epw_line) epw_new_id.close() print("New climate file '{}' is generated at {}.".format( self.destinationFileName, self.destinationDir))
0.004953
def listDF(option='mostactive', token='', version=''):
    '''Returns an array of quotes for the top 10 symbols in a specified list.

    https://iexcloud.io/docs/api/#list
    Updated intraday

    Args:
        option (string); Option to query
        token (string); Access token
        version (string); API version

    Returns:
        DataFrame: result
    '''
    df = pd.DataFrame(list(option, token, version))
    _toDatetime(df)
    _reindex(df, 'symbol')
    return df
0.002075
def unframe(packet):
    """
    Strip leading DLE and trailing DLE/ETX from packet.

    :param packet: TSIP packet with leading DLE and trailing DLE/ETX.
    :type packet: Binary string.
    :return: TSIP packet with leading DLE and trailing DLE/ETX removed.
    :raise: ``ValueError`` if `packet` does not start with DLE and end in DLE/ETX.
    """
    if is_framed(packet):
        return packet.lstrip(CHR_DLE).rstrip(CHR_ETX).rstrip(CHR_DLE)
    else:
        raise ValueError('packet does not contain leading DLE and trailing DLE/ETX')
0.005505
def escape_ampersand(string):
    """
    Quick convert unicode ampersand characters not associated with
    a numbered entity or not starting with allowed characters to a
    plain &amp;
    """
    if not string:
        return string
    start_with_match = r"(\#x(....);|lt;|gt;|amp;)"
    # The pattern below is match & that is not immediately followed by #
    string = re.sub(r"&(?!" + start_with_match + ")", '&amp;', string)
    return string
0.002242
def Qk(self, k):
    """
    function (k). Returns noise matrix of dynamic model on iteration k.

    k (iteration number). starts at 0
    """
    return self.Q[:, :, self.index[self.Q_time_var_index, k]]
0.008621
def _get_observed_mmax(catalogue, config): '''Check see if observed mmax values are input, if not then take from the catalogue''' if config['input_mmax']: obsmax = config['input_mmax'] if config['input_mmax_uncertainty']: return config['input_mmax'], config['input_mmax_uncertainty'] else: raise ValueError('Input mmax uncertainty must be specified!') max_location = np.argmax(catalogue['magnitude']) obsmax = catalogue['magnitude'][max_location] cond = isinstance(catalogue['sigmaMagnitude'], np.ndarray) and \ len(catalogue['sigmaMagnitude']) > 0 and not \ np.all(np.isnan(catalogue['sigmaMagnitude'])) if cond: if not np.isnan(catalogue['sigmaMagnitude'][max_location]): return obsmax, catalogue['sigmaMagnitude'][max_location] else: print('Uncertainty not given on observed Mmax\n' 'Taking largest magnitude uncertainty found in catalogue') return obsmax, np.nanmax(catalogue['sigmaMagnitude']) elif config['input_mmax_uncertainty']: return obsmax, config['input_mmax_uncertainty'] else: raise ValueError('Input mmax uncertainty must be specified!')
0.000806
def get_hla_truthset(data): """Retrieve expected truth calls for annotating HLA called output. """ val_csv = tz.get_in(["config", "algorithm", "hlavalidate"], data) out = {} if val_csv and utils.file_exists(val_csv): with open(val_csv) as in_handle: reader = csv.reader(in_handle) next(reader) # header for sample, locus, alleles in (l for l in reader if l): out = tz.update_in(out, [sample, locus], lambda x: [x.strip() for x in alleles.split(";")]) return out
0.007339
def stop_capture(self, slot_number, port_number): """ Stops a packet capture. :param slot_number: slot number :param port_number: port number """ try: adapter = self._slots[slot_number] except IndexError: raise DynamipsError('Slot {slot_number} does not exist on router "{name}"'.format(name=self._name, slot_number=slot_number)) if not adapter.port_exists(port_number): raise DynamipsError("Port {port_number} does not exist in adapter {adapter}".format(adapter=adapter, port_number=port_number)) nio = adapter.get_nio(port_number) if not nio: raise DynamipsError("Port {slot_number}/{port_number} is not connected".format(slot_number=slot_number, port_number=port_number)) yield from nio.unbind_filter("both") log.info('Router "{name}" [{id}]: stopping packet capture on port {slot_number}/{port_number}'.format(name=self._name, id=self._id, nio_name=nio.name, slot_number=slot_number, port_number=port_number))
0.007287
def fso_mkdir(self, path, mode=None):
    'overlays os.mkdir()'
    path = self.deref(path, to_parent=True)
    if self._lexists(path):
        raise OSError(17, 'File exists', path)
    self._addentry(OverlayEntry(self, path, stat.S_IFDIR))
0.008368
def find_page_of_state_m(self, state_m):
    """Return the identifier and page of a given state model

    :param state_m: The state model to be searched
    :return: page containing the state and the state_identifier
    """
    for state_identifier, page_info in list(self.tabs.items()):
        if page_info['state_m'] is state_m:
            return page_info['page'], state_identifier
    return None, None
0.004525
def trim(args): """ %prog trim fasta.screen newfasta take the screen output from `cross_match` (against a vector db, for example), then trim the sequences to remove X's. Will also perform quality trim if fasta.screen.qual is found. The trimming algorithm is based on finding the subarray that maximize the sum """ from jcvi.algorithms.maxsum import max_sum p = OptionParser(trim.__doc__) p.add_option("-c", dest="min_length", type="int", default=64, help="minimum sequence length after trimming") p.add_option("-s", dest="score", default=QUAL, help="quality trimming cutoff [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(p.print_help()) fastafile, newfastafile = args qualfile = get_qual(fastafile) newqualfile = get_qual(newfastafile, check=False) logging.debug("Trim bad sequence from fasta file `%s` to `%s`" % \ (fastafile, newfastafile)) fw = must_open(newfastafile, "w") fw_qual = open(newqualfile, "w") dropped = trimmed = 0 for rec in iter_fasta_qual(fastafile, qualfile, modify=True): qv = [x - opts.score for x in \ rec.letter_annotations["phred_quality"]] msum, trim_start, trim_end = max_sum(qv) score = trim_end - trim_start + 1 if score < opts.min_length: dropped += 1 continue if score < len(rec): trimmed += 1 rec = rec[trim_start:trim_end + 1] write_fasta_qual(rec, fw, fw_qual) print("A total of %d sequences modified." % trimmed, file=sys.stderr) print("A total of %d sequences dropped (length < %d)." % \ (dropped, opts.min_length), file=sys.stderr) fw.close() fw_qual.close()
0.004975
def _canonicalize_fraction(cls, non_repeating, repeating): """ If the same fractional value can be represented by stripping repeating part from ``non_repeating``, do it. :param non_repeating: non repeating part of fraction :type non_repeating: list of int :param repeating: repeating part of fraction :type repeating: list of int :returns: new non_repeating and repeating parts :rtype: tuple of list of int * list of int Complexity: O(len(non_repeating)) """ if repeating == []: return (non_repeating, repeating) repeat_len = len(repeating) # strip all exact matches: # * for [6, 1, 2, 1, 2], [1,2] end is 1 # * for [1, 2, 1, 2], [1,2] end is 0 # * for [6, 2, 1, 2], [1,2] end is 2 indices = range(len(non_repeating), -1, -repeat_len) end = next( # pragma: no cover i for i in indices if non_repeating[(i - repeat_len):i] != repeating ) # for remaining, find partial match and shift repeating # * for [6, 2, 1, 2], [1, 2] initial end is 2, result is [6], [2, 1] indices = range(min(repeat_len - 1, end), 0, -1) index = next( (i for i in indices \ if repeating[-i:] == non_repeating[(end-i):end]), 0 ) return ( non_repeating[:(end - index)], repeating[-index:] + repeating[:-index] )
0.003369
def Transformer(source_vocab_size, target_vocab_size, mode='train', num_layers=6, feature_depth=512, feedforward_depth=2048, num_heads=8, dropout=0.1, shared_embedding=True, max_len=200, return_evals=False): """Transformer model. Args: source_vocab_size: int: source vocab size target_vocab_size: int: target vocab size mode: str: 'train' or 'eval' num_layers: int: number of encoder/decoder layers feature_depth: int: depth of embedding feedforward_depth: int: depth of feed-forward layer num_heads: int: number of attention heads dropout: float: dropout rate (how much to drop out) shared_embedding: bool: specify whether source/target embeddings are tied. max_len: int: maximum symbol length for positional encoding return_evals: bool: whether to generate decode-time evaluation functions Returns: A namedtuple containing model 'init' and 'apply' functions for training and the 'evals' functions that itself returns a namedtuple containing evaluation functions for the trained encoder, decoder, and generator substax. """ # Input embedding and positional encoding inject_position = layers.Serial( layers.Dropout(dropout, mode=mode), layers.PositionalEncoding(feature_depth, max_len=max_len) ) if shared_embedding: assert source_vocab_size == target_vocab_size # Weight-shared Embedding embedding = layers.Share(layers.Embedding(feature_depth, source_vocab_size)) source_embedding_layer = layers.Serial(embedding, inject_position) target_embedding_layer = source_embedding_layer else: source_embedding = layers.Embedding(feature_depth, source_vocab_size) target_embedding = layers.Embedding(feature_depth, target_vocab_size) source_embedding_layer = layers.Serial(source_embedding, inject_position) target_embedding_layer = layers.Serial(target_embedding, inject_position) # Multi-headed Attention and Feed-forward layers multi_attention = layers.MultiHeadedAttention( feature_depth, num_heads=num_heads, dropout=dropout, mode=mode) # Encoder @layers.Lambda def Encoder(source, source_mask): """Transformer encoder stack. Args: source: layer variable: raw source sequences source_mask: layer variable: self-attention mask Returns: Layer variable that outputs encoded source. """ encoder_layer = layers.Serial( # input attends to self layers.Residual(layers.LayerNorm(), layers.Branch(size=4), layers.Parallel(layers.Identity(), # query layers.Identity(), # key layers.Identity(), # value source_mask), # attention mask multi_attention, layers.Dropout(dropout, mode=mode)), # feed-forward ResidualFeedForward( feature_depth, feedforward_depth, dropout, mode=mode), ) return layers.Serial( source, source_embedding_layer, layers.repeat(encoder_layer, num_layers), layers.LayerNorm(), ) # Decoder @layers.Lambda def Decoder(memory, target, target_mask, memory_mask): """Transformer decoder stack. Args: memory: layer variable: encoded source sequences target: layer variable: raw target sequences target_mask: layer variable: self-attention mask memory_mask: layer variable: memory attention mask Returns: Layer variable that outputs encoded source. 
""" decoder_layer = layers.Serial( # target attends to self layers.Residual(layers.LayerNorm(), layers.Branch(size=4), layers.Parallel(layers.Identity(), # query layers.Identity(), # key layers.Identity(), # value target_mask), # attention mask multi_attention, layers.Dropout(dropout, mode=mode)), # target attends to encoded source layers.Residual(layers.LayerNorm(), layers.Branch(size=4), layers.Parallel(layers.Identity(), # query memory, # key memory, # value memory_mask), # attention mask multi_attention, layers.Dropout(dropout, mode=mode)), # feed-forward ResidualFeedForward( feature_depth, feedforward_depth, dropout, mode=mode) ) return layers.Serial( target, target_embedding_layer, layers.repeat(decoder_layer, num_layers), layers.LayerNorm(), ) # The Transformer @layers.Lambda def transformer(source, target, source_mask, target_mask, memory_mask): # pylint: disable=invalid-name encoded_source = Encoder(source, source_mask) return Decoder(encoded_source, target, target_mask, memory_mask) # Finally, bind the generator transform to use later for inference. @layers.Lambda def Generator(encoded_target): return layers.Serial( encoded_target, layers.Dense(target_vocab_size), layers.LogSoftmax ) # Model-Building and Evaluation Functions # Get entire model's the layer pair top_init, top_apply = Generator(transformer) # By default act as a normal constructor and emit an (init, apply) pair. if not return_evals: return (top_init, top_apply) else: raise ValueError('inference in this model is still a work in progress')
0.004736
def _acquire_lock(self, identifier, atime=30, ltime=5): '''Acquire a lock for a given identifier. If the lock cannot be obtained immediately, keep trying at random intervals, up to 3 seconds, until `atime` has passed. Once the lock has been obtained, continue to hold it for `ltime`. :param str identifier: lock token to write :param int atime: maximum time (in seconds) to acquire lock :param int ltime: maximum time (in seconds) to own lock :return: `identifier` if the lock was obtained, :const:`False` otherwise ''' conn = redis.Redis(connection_pool=self.pool) end = time.time() + atime while end > time.time(): if conn.set(self._lock_name, identifier, ex=ltime, nx=True): # logger.debug("won lock %s" % self._lock_name) return identifier sleep_time = random.uniform(0, 3) time.sleep(sleep_time) logger.warn('failed to acquire lock %s for %f seconds', self._lock_name, atime) return False
0.001805
def loadNiiData(lstNiiFls, strPathNiiMask=None, strPathNiiFunc=None): """load nii data. Parameters ---------- lstNiiFls : list, list of str with nii file names strPathNiiMask : str, path to nii file with mask (optional) strPathNiiFunc : str, parent path to nii files (optional) Returns ------- aryFunc : np.array Nii data """ print('---------Loading nii data') # check whether a mask is available if strPathNiiMask is not None: aryMask = nb.load(strPathNiiMask).get_data().astype('bool') # check a parent path is available that needs to be preprended to nii files if strPathNiiFunc is not None: lstNiiFls = [os.path.join(strPathNiiFunc, i) for i in lstNiiFls] aryFunc = [] for idx, path in enumerate(lstNiiFls): print('------------Loading run: ' + str(idx+1)) # Load 4D nii data: niiFunc = nb.load(path).get_data() # append to list if strPathNiiMask is not None: aryFunc.append(niiFunc[aryMask, :]) else: aryFunc.append(niiFunc) # concatenate arrys in list along time dimension aryFunc = np.concatenate(aryFunc, axis=-1) # set to type float32 aryFunc = aryFunc.astype('float32') return aryFunc
0.001479
def as_existing_file(self, filepath): """Return the file class for existing files only""" if os.path.isfile(filepath) and hasattr(self, '__file_class__'): return self.__file_class__(filepath) # pylint: disable=no-member return self.__class__(filepath)
0.006944
def collapse_segments (path): """Remove all redundant segments from the given URL path. Precondition: path is an unquoted url path""" # replace backslashes # note: this is _against_ the specification (which would require # backslashes to be left alone, and finally quoted with '%5C') # But replacing has several positive effects: # - Prevents path attacks on Windows systems (using \.. parent refs) # - Fixes bad URLs where users used backslashes instead of slashes. # This is a far more probable case than users having an intentional # backslash in the path name. path = path.replace('\\', '/') # shrink multiple slashes to one slash path = _slashes_ro.sub("/", path) # collapse redundant path segments path = _thisdir_ro.sub("", path) path = _samedir_ro.sub("/", path) # collapse parent path segments # note: here we exploit the fact that the replacements happen # to be from left to right (see also _parentdir_ro above) newpath = _parentdir_ro.sub("/", path) while newpath != path: path = newpath newpath = _parentdir_ro.sub("/", path) # collapse parent path segments of relative paths # (ie. without leading slash) newpath = _relparentdir_ro.sub("", path) while newpath != path: path = newpath newpath = _relparentdir_ro.sub("", path) return path
0.001439
def find_comprehension_as_statement(node):
    """Finds a comprehension as a statement"""
    return (
        isinstance(node, ast.Expr)
        and isinstance(node.value, (ast.ListComp, ast.DictComp, ast.SetComp))
    )
0.003413
def eci2aer(eci: Tuple[float, float, float], lat0: float, lon0: float, h0: float, t: datetime, useastropy: bool = True) -> Tuple[float, float, float]: """ takes ECI coordinates of point and gives az, el, slant range from Observer Parameters ---------- eci : tuple [meters] Nx3 target ECI location (x,y,z) lat0 : float Observer geodetic latitude lon0 : float Observer geodetic longitude h0 : float observer altitude above geodetic ellipsoid (meters) t : datetime.datetime Observation time Returns ------- az : float azimuth to target el : float elevation to target srange : float slant range [meters] """ ecef = np.atleast_2d(eci2ecef(eci, t, useastropy)) return ecef2aer(ecef[:, 0], ecef[:, 1], ecef[:, 2], lat0, lon0, h0)
0.001103
def p_state(self, state):
    '''state : STATE opttexts'''
    state[0] = State(state[1][0], state[1][1], state[1][2], state[1][3], state[2])
0.020134
def process_core(self): """ The method deals with a core found previously in :func:`get_core`. Clause selectors ``self.core_sels`` and sum assumptions involved in the core are treated separately of each other. This is handled by calling methods :func:`process_sels` and :func:`process_sums`, respectively. Whenever necessary, both methods relax the core literals, which is followed by creating a new totalizer object encoding the sum of the new relaxation variables. The totalizer object can be "exhausted" depending on the option. """ # updating the cost self.cost += self.minw # assumptions to remove self.garbage = set() if len(self.core_sels) != 1 or len(self.core_sums) > 0: # process selectors in the core self.process_sels() # process previously introducded sums in the core self.process_sums() if len(self.rels) > 1: # create a new cardunality constraint t = self.create_sum() # apply core exhaustion if required b = self.exhaust_core(t) if self.exhaust else 1 if b: # save the info about this sum and # add its assumption literal self.set_bound(t, b) else: # impossible to satisfy any of these clauses # they must become hard for relv in self.rels: self.oracle.add_clause([relv]) else: # unit cores are treated differently # (their negation is added to the hard part) self.oracle.add_clause([-self.core_sels[0]]) self.garbage.add(self.core_sels[0]) # remove unnecessary assumptions self.filter_assumps()
0.001016
def call_local_plugin_method(self, chname, plugin_name, method_name, args, kwargs): """ Parameters ---------- chname : str The name of the channel containing the plugin. plugin_name : str The name of the local plugin containing the method to call. method_name : str The name of the method to call. args : list or tuple The positional arguments to the method kwargs : dict The keyword arguments to the method Returns ------- result : return value from calling the method """ channel = self.get_channel(chname) opmon = channel.opmon p_obj = opmon.get_plugin(plugin_name) method = getattr(p_obj, method_name) return self.gui_call(method, *args, **kwargs)
0.00339
def const_shuffle(arr, seed=23980):
    """ Shuffle an array in-place with a fixed seed.
    """
    old_seed = np.random.seed()
    np.random.seed(seed)
    np.random.shuffle(arr)
    np.random.seed(old_seed)
0.004785
def wait(self, timeout=None): """Block until the internal flag is true. If the internal flag is true on entry, return immediately. Otherwise, block until another thread calls set() to set the flag to true, or until the optional timeout occurs. When the timeout argument is present and not None, it should be a floating point number specifying a timeout for the operation in seconds (or fractions thereof). This method returns the internal flag on exit, so it will always return True except if a timeout is given and the operation times out. """ with self.__cond: if not self.__flag: self.__cond.wait(timeout) return self.__flag
0.003953
def overlay_gateway_site_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels") name_key = ET.SubElement(overlay_gateway, "name") name_key.text = kwargs.pop('name') site = ET.SubElement(overlay_gateway, "site") name = ET.SubElement(site, "name") name.text = kwargs.pop('name') callback = kwargs.pop('callback', self._callback) return callback(config)
0.005282
def parse_int(self, buff, start, end):
    '''
    parse an integer from the buffer given the interval of bytes
    :param buff:
    :param start:
    :param end:
    '''
    # num = self.parse_uint(buff, start, end)
    # l = (end - start)
    # return self.twos_comp(num, l * 8)
    return struct.unpack_from(self.structmap[end - start], buff, start)[0]
0.005013
def list(self, page=1, per_page=10):
    """List a page of chats.

    :param int page: which page
    :param int per_page: how many chats per page
    :return: chats with other users
    :rtype: :class:`~groupy.pagers.ChatList`
    """
    return pagers.ChatList(self, self._raw_list, per_page=per_page, page=page)
0.005348
def raw_iter(self, stream=False): """https://github.com/frictionlessdata/datapackage-py#resource """ # Error for inline if self.inline: message = 'Methods raw_iter/raw_read are not supported for inline data' raise exceptions.DataPackageException(message) # Get filelike if self.multipart: filelike = _MultipartSource(self.source, remote=self.remote) elif self.remote: if self.__table_options.get('http_session'): http_session = self.__table_options['http_session'] else: http_session = requests.Session() http_session.headers = config.HTTP_HEADERS res = http_session.get(self.source, stream=True) filelike = res.raw else: filelike = io.open(self.source, 'rb') return filelike
0.003356
def _file_type(self, field): """ Returns file type for given file field. Args: field (str): File field Returns: string. File type """ type = mimetypes.guess_type(self._files[field])[0] return type.encode("utf-8") if isinstance(type, unicode) else str(type)
0.008876
def _tsne(self, P, degrees_of_freedom, n_samples, random_state, X_embedded, neighbors=None, skip_num_points=0): """Runs t-SNE.""" # t-SNE minimizes the Kullback-Leiber divergence of the Gaussians P # and the Student's t-distributions Q. The optimization algorithm that # we use is batch gradient descent with two stages: # * initial optimization with early exaggeration and momentum at 0.5 # * final optimization with momentum at 0.8 params = X_embedded.ravel() opt_args = { "it": 0, "n_iter_check": self._N_ITER_CHECK, "min_grad_norm": self.min_grad_norm, "learning_rate": self.learning_rate, "verbose": self.verbose, "kwargs": dict(skip_num_points=skip_num_points), "args": [P, degrees_of_freedom, n_samples, self.n_components], "n_iter_without_progress": self._EXPLORATION_N_ITER, "n_iter": self._EXPLORATION_N_ITER, "momentum": 0.5, } if self.method == 'barnes_hut': obj_func = _kl_divergence_bh opt_args['kwargs']['angle'] = self.angle # Repeat verbose argument for _kl_divergence_bh opt_args['kwargs']['verbose'] = self.verbose else: obj_func = _kl_divergence # Learning schedule (part 1): do 250 iteration with lower momentum but # higher learning rate controlled via the early exageration parameter P *= self.early_exaggeration params, kl_divergence, it = _gradient_descent(obj_func, params, **opt_args) if self.verbose: print("[t-SNE] KL divergence after %d iterations with early " "exaggeration: %f" % (it + 1, kl_divergence)) # Learning schedule (part 2): disable early exaggeration and finish # optimization with a higher momentum at 0.8 P /= self.early_exaggeration remaining = self.n_iter - self._EXPLORATION_N_ITER if it < self._EXPLORATION_N_ITER or remaining > 0: opt_args['n_iter'] = self.n_iter opt_args['it'] = it + 1 opt_args['momentum'] = 0.8 opt_args['n_iter_without_progress'] = self.n_iter_without_progress params, kl_divergence, it = _gradient_descent(obj_func, params, **opt_args) # Save the final number of iterations self.n_iter_ = it if self.verbose: print("[t-SNE] Error after %d iterations: %f" % (it + 1, kl_divergence)) X_embedded = params.reshape(n_samples, self.n_components) self.kl_divergence_ = kl_divergence return X_embedded
0.001066
def concat(self, formatted_text):
    """:type formatted_text: FormattedText"""
    assert self._is_compatible(formatted_text), "Cannot concat text with different modes"
    self.text += formatted_text.text
    return self
0.012605
def set_basic_params( self, workers=None, zerg_server=None, fallback_node=None, concurrent_events=None, cheap_mode=None, stats_server=None, quiet=None, buffer_size=None, keepalive=None, resubscribe_addresses=None): """ :param int workers: Number of worker processes to spawn. :param str|unicode zerg_server: Attach the router to a zerg server. :param str|unicode fallback_node: Fallback to the specified node in case of error. :param int concurrent_events: Set the maximum number of concurrent events router can manage. Default: system dependent. :param bool cheap_mode: Enables cheap mode. When the router is in cheap mode, it will not respond to requests until a node is available. This means that when there are no nodes subscribed, only your local app (if any) will respond. When all of the nodes go down, the router will return in cheap mode. :param str|unicode stats_server: Router stats server address to run at. :param bool quiet: Do not report failed connections to instances. :param int buffer_size: Set internal buffer size in bytes. Default: page size. :param int keepalive: Allows holding the connection open even if the request has a body. * http://uwsgi.readthedocs.io/en/latest/HTTP.html#http-keep-alive .. note:: See http11 socket type for an alternative. :param str|unicode|list[str|unicode] resubscribe_addresses: Forward subscriptions to the specified subscription server. """ super(RouterHttp, self).set_basic_params(**filter_locals(locals(), drop=[ 'keepalive', 'resubscribe_addresses', ])) self._set_aliased('keepalive', keepalive) self._set_aliased('resubscribe', resubscribe_addresses, multi=True) return self
0.006218
def dump2json(self, obj, filepath, override=False, **kwargs): """ Dump a dictionary into a JSON dictionary. Uses the json.dump() function. Parameters ---------- obj : :class:`dict` A dictionary to be dumpped as JSON file. filepath : :class:`str` The filepath for the dumped file. override : :class:`bool` If True, any file in the filepath will be override. (default=False) """ # We make sure that the object passed by the user is a dictionary. if isinstance(obj, dict): pass else: raise _NotADictionary( "This function only accepts dictionaries as input") # We check if the filepath has a json extenstion, if not we add it. if str(filepath[-4:]) == 'json': pass else: filepath = ".".join((str(filepath), "json")) # First we check if the file already exists. If yes and the override # keyword is False (default), we will raise an exception. Otherwise # the file will be overwritten. if override is False: if os.path.isfile(filepath): raise _FileAlreadyExists( "The file {0} already exists. Use a different filepath, " "or set the 'override' kwarg to True.".format(filepath)) # We dump the object to the json file. Additional kwargs can be passed. with open(filepath, 'w+') as json_file: json.dump(obj, json_file, **kwargs)
0.001273
def get_identity(user):
    """Create an identity for a given user instance.

    Primarily useful for testing.
    """
    identity = Identity(user.id)

    if hasattr(user, 'id'):
        identity.provides.add(UserNeed(user.id))

    for role in getattr(user, 'roles', []):
        identity.provides.add(RoleNeed(role.name))

    identity.user = user

    return identity
0.002688
def mod(cmd, params): """ Mod management command rqalpha mod list \n rqalpha mod install xxx \n rqalpha mod uninstall xxx \n rqalpha mod enable xxx \n rqalpha mod disable xxx \n """ def list(params): """ List all mod configuration """ from tabulate import tabulate from rqalpha.utils.config import get_mod_conf mod_config = get_mod_conf() table = [] for mod_name, mod in six.iteritems(mod_config['mod']): table.append([ mod_name, ("enabled" if mod['enabled'] else "disabled") ]) headers = [ "name", "status" ] six.print_(tabulate(table, headers=headers, tablefmt="psql")) six.print_("You can use `rqalpha mod list/install/uninstall/enable/disable` to manage your mods") def install(params): """ Install third-party Mod """ try: from pip._internal import main as pip_main from pip._internal.commands.install import InstallCommand except ImportError: from pip import main as pip_main from pip.commands.install import InstallCommand params = [param for param in params] options, mod_list = InstallCommand().parse_args(params) mod_list = [mod_name for mod_name in mod_list if mod_name != "."] params = ["install"] + params for mod_name in mod_list: mod_name_index = params.index(mod_name) if mod_name.startswith("rqalpha_mod_sys_"): six.print_('System Mod can not be installed or uninstalled') return if "rqalpha_mod_" in mod_name: lib_name = mod_name else: lib_name = "rqalpha_mod_" + mod_name params[mod_name_index] = lib_name # Install Mod installed_result = pip_main(params) # Export config from rqalpha.utils.config import load_yaml, user_mod_conf_path user_conf = load_yaml(user_mod_conf_path()) if os.path.exists(user_mod_conf_path()) else {'mod': {}} if installed_result == 0: # 如果为0,则说明安装成功 if len(mod_list) == 0: """ 主要是方便 `pip install -e .` 这种方式 本地调试 Mod 使用,需要满足以下条件: 1. `rqalpha mod install -e .` 命令是在对应 自定义 Mod 的根目录下 2. 该 Mod 必须包含 `setup.py` 文件(否则也不能正常的 `pip install -e .` 来安装) 3. 
该 Mod 包名必须按照 RQAlpha 的规范来命名,具体规则如下 * 必须以 `rqalpha-mod-` 来开头,比如 `rqalpha-mod-xxx-yyy` * 对应import的库名必须要 `rqalpha_mod_` 来开头,并且需要和包名后半部分一致,但是 `-` 需要替换为 `_`, 比如 `rqalpha_mod_xxx_yyy` """ mod_name = _detect_package_name_from_dir(params) mod_name = mod_name.replace("-", "_").replace("rqalpha_mod_", "") mod_list.append(mod_name) for mod_name in mod_list: if "rqalpha_mod_" in mod_name: mod_name = mod_name.replace("rqalpha_mod_", "") if "==" in mod_name: mod_name = mod_name.split('==')[0] user_conf['mod'][mod_name] = {} user_conf['mod'][mod_name]['enabled'] = False dump_config(user_mod_conf_path(), user_conf) return installed_result def uninstall(params): """ Uninstall third-party Mod """ try: from pip._internal import main as pip_main from pip._internal.commands.uninstall import UninstallCommand except ImportError: # be compatible with pip < 10.0 from pip import main as pip_main from pip.commands.uninstall import UninstallCommand params = [param for param in params] options, mod_list = UninstallCommand().parse_args(params) params = ["uninstall"] + params for mod_name in mod_list: mod_name_index = params.index(mod_name) if mod_name.startswith("rqalpha_mod_sys_"): six.print_('System Mod can not be installed or uninstalled') return if "rqalpha_mod_" in mod_name: lib_name = mod_name else: lib_name = "rqalpha_mod_" + mod_name params[mod_name_index] = lib_name # Uninstall Mod uninstalled_result = pip_main(params) # Remove Mod Config from rqalpha.utils.config import user_mod_conf_path, load_yaml user_conf = load_yaml(user_mod_conf_path()) if os.path.exists(user_mod_conf_path()) else {'mod': {}} for mod_name in mod_list: if "rqalpha_mod_" in mod_name: mod_name = mod_name.replace("rqalpha_mod_", "") del user_conf['mod'][mod_name] dump_config(user_mod_conf_path(), user_conf) return uninstalled_result def enable(params): """ enable mod """ mod_name = params[0] if "rqalpha_mod_" in mod_name: mod_name = mod_name.replace("rqalpha_mod_", "") # check whether is installed module_name = "rqalpha_mod_" + mod_name if module_name.startswith("rqalpha_mod_sys_"): module_name = "rqalpha.mod." + module_name try: import_module(module_name) except ImportError: installed_result = install([module_name]) if installed_result != 0: return from rqalpha.utils.config import user_mod_conf_path, load_yaml user_conf = load_yaml(user_mod_conf_path()) if os.path.exists(user_mod_conf_path()) else {'mod': {}} try: user_conf['mod'][mod_name]['enabled'] = True except KeyError: user_conf['mod'][mod_name] = {'enabled': True} dump_config(user_mod_conf_path(), user_conf) def disable(params): """ disable mod """ mod_name = params[0] if "rqalpha_mod_" in mod_name: mod_name = mod_name.replace("rqalpha_mod_", "") from rqalpha.utils.config import user_mod_conf_path, load_yaml user_conf = load_yaml(user_mod_conf_path()) if os.path.exists(user_mod_conf_path()) else {'mod': {}} try: user_conf['mod'][mod_name]['enabled'] = False except KeyError: user_conf['mod'][mod_name] = {'enabled': False} dump_config(user_mod_conf_path(), user_conf) locals()[cmd](params)
0.00122
def _function_handler(function, args, kwargs, pipe):
    """Runs the actual function in separate process and returns its result."""
    signal.signal(signal.SIGINT, signal.SIG_IGN)

    result = process_execute(function, *args, **kwargs)
    send_result(pipe, result)
0.003731
def _copy_if_necessary(self, local_path, overwrite): """ Return cached path to local file, copying it to the cache if necessary. """ local_path = abspath(local_path) if not exists(local_path): raise MissingLocalFile(local_path) elif not self.copy_local_files_to_cache: return local_path else: cached_path = self.cached_path(local_path) if exists(cached_path) and not overwrite: return cached_path copy2(local_path, cached_path) return cached_path
0.00339
def _getattr(self, attri, fname=None, numtype='cycNum'): ''' Private method for getting an attribute, called from get.''' if str(fname.__class__)=="<type 'list'>": isList=True else: isList=False data=[] if fname==None: fname=self.files numtype='file' isList=True if isList: for i in range(len(fname)): if attri in self.cattrs: data.append(self.getCycleData(attri,fname[i],numtype)) elif attri in self.dcols: data.append(self.getColData(attri,fname[i],numtype)) elif attri in self.get('ISOTP',fname,numtype): data.append(self.getElement(attri,fname[i],numtype)) else: print('Attribute '+attri+ ' does not exist') print('Returning none') return None else: if attri in self.cattrs: return self.getCycleData(attri,fname,numtype) elif attri in self.dcols: return self.getColData(attri,fname,numtype) elif attri in self.get('ISOTP',fname,numtype): return self.getElement(attri,fname,numtype) else: print('Attribute '+attri+ ' does not exist') print('Returning none') return None return data
0.019986
def select_army(self, shift):
    """Select the entire army."""
    action = sc_pb.Action()
    action.action_ui.select_army.selection_add = shift
    return action
0.006098
def parse_root(raw):
    "Efficiently parses the root element of a *raw* XML document, returning a tuple of its qualified name and attribute dictionary."
    if sys.version < '3':
        fp = StringIO(raw)
    else:
        fp = BytesIO(raw.encode('UTF-8'))
    for event, element in etree.iterparse(fp, events=('start',)):
        return (element.tag, element.attrib)
0.00542
def encode_multipart_formdata(self, fields, files, baseName, verbose=False): """ Fields is a sequence of (name, value) elements for regular form fields. Files is a sequence of (name, filename, value) elements for data to be uploaded as files Returns: (content_type, body) ready for httplib.HTTP instance """ BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$' CRLF = '\r\n' content_type = 'multipart/form-data; boundary=%s' % BOUNDARY if verbose is True: print(('Encoding ' + baseName + ' for upload...')) fin = self._open(files, 'rb') fout = self._open(files + '.b64', 'wb') fout.write(bytearray('--' + BOUNDARY + CRLF, 'utf-8')) fout.write(bytearray('Content-Disposition: form-data' '; name="file"; filename="' + baseName + '"' + CRLF, "utf-8")) fout.write(bytearray('Content-Type: application/octet-stream' + CRLF, 'utf-8')) fout.write(bytearray(CRLF, 'utf-8')) shutil.copyfileobj(fin, fout) fout.write(bytearray(CRLF, 'utf-8')) fout.write(bytearray('--' + BOUNDARY + '--' + CRLF, 'utf-8')) fout.write(bytearray(CRLF, 'utf-8')) fout.close() fin.close() return content_type
0.002266
def find_image_position(origin='origin.png', query='query.png', outfile=None): ''' find all image positions @return None if not found else a tuple: (origin.shape, query.shape, postions) might raise Exception ''' img1 = cv2.imread(query, 0) # query image(small) img2 = cv2.imread(origin, 0) # train image(big) # Initiate SIFT detector sift = cv2.SIFT() # find the keypoints and descriptors with SIFT kp1, des1 = sift.detectAndCompute(img1,None) kp2, des2 = sift.detectAndCompute(img2,None) print len(kp1), len(kp2) FLANN_INDEX_KDTREE = 0 index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5) search_params = dict(checks = 50) # flann flann = cv2.FlannBasedMatcher(index_params, search_params) matches = flann.knnMatch(des1, des2, k=2) # store all the good matches as per Lowe's ratio test. good = [] for m,n in matches: if m.distance < 0.7*n.distance: good.append(m) print len(kp1), len(kp2), 'good cnt:', len(good) if len(good)*1.0/len(kp1) < 0.5: #if len(good)<MIN_MATCH_COUNT: print "Not enough matches are found - %d/%d" % (len(good),MIN_MATCH_COUNT) return img2.shape, img1.shape, [] queryPts = [] trainPts = [] for dm in good: queryPts.append(kp1[dm.queryIdx]) trainPts.append(kp2[dm.trainIdx]) img3 = cv2.drawKeypoints(img1, queryPts) cv2.imwrite('image/query.png', img3) img3 = cv2.drawKeypoints(img2, trainPts) point = _middlePoint(trainPts) print 'position in', point if outfile: edge = 10 top_left = (point[0]-edge, point[1]-edge) bottom_right = (point[0]+edge, point[1]+edge) cv2.rectangle(img3, top_left, bottom_right, 255, 2) cv2.imwrite(outfile, img3) return img2.shape, img1.shape, [point]
0.009164
def make_log_metric(level=logging.INFO, msg="%d items in %.2f seconds"): """Make a new metric function that logs at the given level :arg int level: logging level, defaults to ``logging.INFO`` :arg string msg: logging message format string, taking ``count`` and ``elapsed`` :rtype: function """ def log_metric(name, count, elapsed): log_name = 'instrument.{}'.format(name) if name else 'instrument' logging.getLogger(log_name).log(level, msg, count, elapsed) return log_metric
0.003846
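A usage sketch; the metric name and the measurements are made up, and it assumes make_log_metric above is importable.

import logging

logging.basicConfig(level=logging.INFO)
log_rows = make_log_metric(msg="%d rows in %.2f seconds")
log_rows("csv_import", 1500, 0.42)  # logs via logger 'instrument.csv_import'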
def ensure_sink(self): """Ensure the log sink and its pub sub topic exist.""" topic_info = self.pubsub.ensure_topic() scope, sink_path, sink_info = self.get_sink(topic_info) client = self.session.client('logging', 'v2', '%s.sinks' % scope) try: sink = client.execute_command('get', {'sinkName': sink_path}) except HttpError as e: if e.resp.status != 404: raise sink = client.execute_command('create', sink_info) else: delta = delta_resource(sink, sink_info['body']) if delta: sink_info['updateMask'] = ','.join(delta) sink_info['sinkName'] = sink_path sink_info.pop('parent') sink = client.execute_command('update', sink_info) else: return sink_path self.pubsub.ensure_iam(publisher=sink['writerIdentity']) return sink_path
0.002075
def assign(self, dst_addr_ast): """ Assign a new region for under-constrained symbolic execution. :param dst_addr_ast: the symbolic AST which address of the new allocated region will be assigned to. :return: as ast of memory address that points to a new region """ if dst_addr_ast.uc_alloc_depth > self._max_alloc_depth: raise SimUCManagerAllocationError('Current allocation depth %d is greater than the cap (%d)' % \ (dst_addr_ast.uc_alloc_depth, self._max_alloc_depth)) abs_addr = self._region_base + self._pos ptr = self.state.solver.BVV(abs_addr, self.state.arch.bits) self._pos += self._region_size self._alloc_depth_map[(abs_addr - self._region_base) // self._region_size] = dst_addr_ast.uc_alloc_depth l.debug("Assigned new memory region %s", ptr) return ptr
0.00783
def deprecated(replacement_description): """States that method is deprecated. :param replacement_description: Describes what must be used instead. :return: the original method with modified docstring. """ def decorate(fn_or_class): if isinstance(fn_or_class, type): pass # Can't change __doc__ of type objects else: try: fn_or_class.__doc__ = "This API point is obsolete. %s\n\n%s" % ( replacement_description, fn_or_class.__doc__, ) except AttributeError: pass # For Cython method descriptors, etc. return fn_or_class return decorate
0.002809
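A usage sketch with a made-up function, showing the docstring rewrite the decorator performs.

@deprecated("Use parse_config_v2() instead.")
def parse_config(path):
    """Parse a legacy config file."""
    with open(path) as fh:
        return fh.read()

print(parse_config.__doc__.splitlines()[0])
# This API point is obsolete. Use parse_config_v2() instead.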
def receive(self, diff, paths): """ Return Context Manager for a file-like (stream) object to store a diff. """ path = self.selectReceivePath(paths) keyName = self._keyName(diff.toUUID, diff.fromUUID, path) if self._skipDryRun(logger)("receive %s in %s", keyName, self): return None progress = _BotoProgress(diff.size) if self.showProgress is True else None return _Uploader(self.bucket, keyName, progress)
0.008547
def calculate2d(self, force=True): """ recalculate 2d coordinates. currently rings can be calculated badly. :param force: ignore existing coordinates of atoms """ for ml in (self.__reagents, self.__reactants, self.__products): for m in ml: m.calculate2d(force) self.fix_positions()
0.005587
def _jx_expression(expr, lang): """ WRAP A JSON EXPRESSION WITH OBJECT REPRESENTATION """ if is_expression(expr): # CONVERT TO lang new_op = lang[expr.id] if not new_op: # CAN NOT BE FOUND, TRY SOME PARTIAL EVAL return language[expr.id].partial_eval() return expr # return new_op(expr.args) # THIS CAN BE DONE, BUT IT NEEDS MORE CODING, AND I WOULD EXPECT IT TO BE SLOW if expr is None: return TRUE elif expr in (True, False, None) or expr == None or isinstance(expr, (float, int, Decimal, Date)): return Literal(expr) elif is_text(expr): return Variable(expr) elif is_sequence(expr): return lang[TupleOp([_jx_expression(e, lang) for e in expr])] # expr = wrap(expr) try: items = items_(expr) for op, term in items: # ONE OF THESE IS THE OPERATOR full_op = operators.get(op) if full_op: class_ = lang.ops[full_op.id] if class_: return class_.define(expr) # THIS LANGUAGE DOES NOT SUPPORT THIS OPERATOR, GOTO BASE LANGUAGE AND GET THE MACRO class_ = language[op.id] output = class_.define(expr).partial_eval() return _jx_expression(output, lang) else: if not items: return NULL raise Log.error("{{instruction|json}} is not known", instruction=items) except Exception as e: Log.error("programmer error expr = {{value|quote}}", value=expr, cause=e)
0.004334
def illum(target, et, abcorr, obsrvr, spoint): """ Deprecated: This routine has been superseded by the CSPICE routine ilumin. This routine is supported for purposes of backward compatibility only. Find the illumination angles at a specified surface point of a target body. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/illum_c.html :param target: Name of target body. :type target: str :param et: Epoch in ephemeris seconds past J2000. :type et: float :param abcorr: Desired aberration correction. :type abcorr: str :param obsrvr: Name of observing body. :type obsrvr: str :param spoint: Body-fixed coordinates of a target surface point. :type spoint: 3-Element Array of floats :return: Phase angle, Solar incidence angle, and Emission angle at the surface point. :rtype: tuple """ target = stypes.stringToCharP(target) et = ctypes.c_double(et) abcorr = stypes.stringToCharP(abcorr) obsrvr = stypes.stringToCharP(obsrvr) spoint = stypes.toDoubleVector(spoint) phase = ctypes.c_double(0) solar = ctypes.c_double(0) emissn = ctypes.c_double(0) libspice.illum_c(target, et, abcorr, obsrvr, spoint, ctypes.byref(phase), ctypes.byref(solar), ctypes.byref(emissn)) return phase.value, solar.value, emissn.value
0.000717
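A hedged usage sketch via SpiceyPy; the meta-kernel name, epoch, target/observer pair and surface point are placeholders, and the call only works once the appropriate SPICE kernels are loaded (illum itself is deprecated in favour of ilumin).

import spiceypy as spice

spice.furnsh("metakernel.tm")                 # placeholder kernel list
et = spice.str2et("2004 JAN 01 12:00:00 TDB")
spoint = [0.0, 0.0, 3390.42]                  # illustrative body-fixed point, km
phase, solar, emissn = spice.illum("MARS", et, "LT+S", "EARTH", spoint)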
def _do_resumable_upload( self, client, stream, content_type, size, num_retries, predefined_acl ): """Perform a resumable upload. Assumes ``chunk_size`` is not :data:`None` on the current blob. The content type of the upload will be determined in order of precedence: - The value passed in to this method (if not :data:`None`) - The value stored on the current blob - The default value ('application/octet-stream') :type client: :class:`~google.cloud.storage.client.Client` :param client: (Optional) The client to use. If not passed, falls back to the ``client`` stored on the blob's bucket. :type stream: IO[bytes] :param stream: A bytes IO object open for reading. :type content_type: str :param content_type: Type of content being uploaded (or :data:`None`). :type size: int :param size: The number of bytes to be uploaded (which will be read from ``stream``). If not provided, the upload will be concluded once ``stream`` is exhausted (or :data:`None`). :type num_retries: int :param num_retries: Number of upload retries. (Deprecated: This argument will be removed in a future release.) :type predefined_acl: str :param predefined_acl: (Optional) predefined access control list :rtype: :class:`~requests.Response` :returns: The "200 OK" response object returned after the final chunk is uploaded. """ upload, transport = self._initiate_resumable_upload( client, stream, content_type, size, num_retries, predefined_acl=predefined_acl, ) while not upload.finished: response = upload.transmit_next_chunk(transport) return response
0.001531
def post(self, uri, data, **kwargs): """POST the provided data to the specified path See :meth:`request` for additional details. The `data` parameter here is expected to be a string type. """ return self.request("POST", uri, data=data, **kwargs)
0.010453
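A self-contained toy sketch of the delegation; FakeClient and its echoing request() are made up purely to show how the extra keyword arguments flow through.

import json

class FakeClient:
    def request(self, method, uri, **kwargs):
        # Stand-in for the real transport: just echo what would be sent.
        return (method, uri, kwargs)

    def post(self, uri, data, **kwargs):
        return self.request("POST", uri, data=data, **kwargs)

print(FakeClient().post("/v1/widgets", data=json.dumps({"name": "example"})))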
def data(self, filtered=False, no_empty=False): r""" Return (filtered) file data. The returned object is a list, each item is a sub-list corresponding to a row of data; each item in the sub-lists contains data corresponding to a particular column :param filtered: Filtering type :type filtered: :ref:`CsvFiltered` :param no_empty: Flag that indicates whether rows with empty columns should be filtered out (True) or not (False) :type no_empty: bool :rtype: list .. [[[cog cog.out(exobj.get_sphinx_autodoc()) ]]] .. Auto-generated exceptions documentation for .. pcsv.csv_file.CsvFile.data :raises: * RuntimeError (Argument \`filtered\` is not valid) * RuntimeError (Argument \`no_empty\` is not valid) .. [[[end]]] """ return self._apply_filter(filtered, no_empty)
0.00209
def get_host(self, host_id, **kwargs):
        """Get details about a dedicated host.

        :param int host_id: the host ID
        :returns: A dictionary containing host information.

        Example::

            # Print out host ID 12345.
            dh = mgr.get_host(12345)
            print(dh)

            # Print out only name and backendRouter for instance 12345
            object_mask = "mask[name,backendRouter[id]]"
            dh = mgr.get_host(12345, mask=object_mask)
            print(dh)
        """
        if 'mask' not in kwargs:
            kwargs['mask'] = ('''
                id,
                name,
                cpuCount,
                memoryCapacity,
                diskCapacity,
                createDate,
                modifyDate,
                backendRouter[
                    id,
                    hostname,
                    domain
                ],
                billingItem[
                    id,
                    nextInvoiceTotalRecurringAmount,
                    children[
                        categoryCode,
                        nextInvoiceTotalRecurringAmount
                    ],
                    orderItem[
                        id,
                        order.userRecord[
                            username
                        ]
                    ]
                ],
                datacenter[
                    id,
                    name,
                    longName
                ],
                guests[
                    id,
                    hostname,
                    domain,
                    uuid
                ],
                guestCount
            ''')

        return self.host.getObject(id=host_id, **kwargs)
0.001155
def community_post_subscriptions(self, post_id, **kwargs): "https://developer.zendesk.com/rest_api/docs/help_center/subscriptions#list-post-subscriptions" api_path = "/api/v2/community/posts/{post_id}/subscriptions.json" api_path = api_path.format(post_id=post_id) return self.call(api_path, **kwargs)
0.009009
def _describe_fields(cls): """ Return a dictionary for the class fields description. Fields should NOT be wrapped by _precomputed_field, if necessary """ dispatch_table = { 'ShortestPathModel': 'sssp', 'GraphColoringModel': 'graph_coloring', 'PagerankModel': 'pagerank', 'ConnectedComponentsModel': 'connected_components', 'TriangleCountingModel': 'triangle_counting', 'KcoreModel': 'kcore', 'DegreeCountingModel': 'degree_count', 'LabelPropagationModel': 'label_propagation' } try: toolkit_name = dispatch_table[cls.__name__] toolkit = _tc.extensions._toolkits.graph.__dict__[toolkit_name] return toolkit.get_model_fields({}) except: raise RuntimeError('Model %s does not have fields description' % cls.__name__)
0.004343
def got_arbiter_module_type_defined(self, module_type): """Check if a module type is defined in one of the arbiters Also check the module name :param module_type: module type to search for :type module_type: str :return: True if mod_type is found else False :rtype: bool TODO: Factorize it with got_broker_module_type_defined: """ for arbiter in self.arbiters: # Do like the linkify will do after.... for module in getattr(arbiter, 'modules', []): # So look at what the arbiter try to call as module module_name = module.get_name() # Ok, now look in modules... for mod in self.modules: # try to see if this module is the good type if getattr(mod, 'python_name', '').strip() == module_type.strip(): # if so, the good name? if getattr(mod, 'name', '').strip() == module_name: return True return False
0.00277
def compress_histogram_proto(histo, bps=NORMAL_HISTOGRAM_BPS): """Creates fixed size histogram by adding compression to accumulated state. This routine transforms a histogram at a particular step by interpolating its variable number of buckets to represent their cumulative weight at a constant number of compression points. This significantly reduces the size of the histogram and makes it suitable for a two-dimensional area plot where the output of this routine constitutes the ranges for a single x coordinate. Args: histo: A HistogramProto object. bps: Compression points represented in basis points, 1/100ths of a percent. Defaults to normal distribution. Returns: List of values for each basis point. """ # See also: Histogram::Percentile() in core/lib/histogram/histogram.cc if not histo.num: return [CompressedHistogramValue(b, 0.0) for b in bps] bucket = np.array(histo.bucket) bucket_limit = list(histo.bucket_limit) weights = (bucket * bps[-1] / (bucket.sum() or 1.0)).cumsum() values = [] j = 0 while j < len(bps): i = np.searchsorted(weights, bps[j], side='right') while i < len(weights): cumsum = weights[i] cumsum_prev = weights[i - 1] if i > 0 else 0.0 if cumsum == cumsum_prev: # prevent lerp divide by zero i += 1 continue if not i or not cumsum_prev: lhs = histo.min else: lhs = max(bucket_limit[i - 1], histo.min) rhs = min(bucket_limit[i], histo.max) weight = _lerp(bps[j], cumsum_prev, cumsum, lhs, rhs) values.append(CompressedHistogramValue(bps[j], weight)) j += 1 break else: break while j < len(bps): values.append(CompressedHistogramValue(bps[j], histo.max)) j += 1 return values
0.012842
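The _lerp helper used above is not shown; a plausible sketch (an assumption, not the actual TensorBoard source) linearly maps the target basis point between the two cumulative weights onto the corresponding bucket interval.

def _lerp(x, x0, x1, y0, y1):
    # Assumed behaviour: map x from [x0, x1] linearly onto [y0, y1].
    return y0 + (x - x0) * float(y1 - y0) / (x1 - x0)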
def clean_list_of_twitter_list(list_of_twitter_lists, sent_tokenize, _treebank_word_tokenize, tagger, lemmatizer, lemmatize, stopset, first_cap_re, all_cap_re, digits_punctuation_whitespace_re, pos_set): """ Extracts the sets of keywords for each Twitter list. Inputs: - list_of_twitter_lists: A python list of Twitter lists in json format. - lemmatizing: A string containing one of the following: "porter", "snowball" or "wordnet". Output: - list_of_keyword_sets: A list of sets of keywords (i.e. not a bag-of-words) in python set format. - list_of_lemma_to_keywordbags: List of python dicts that map stems/lemmas to original topic keywords. """ list_of_keyword_sets = list() append_keyword_set = list_of_keyword_sets.append list_of_lemma_to_keywordbags = list() append_lemma_to_keywordbag = list_of_lemma_to_keywordbags.append if list_of_twitter_lists is not None: for twitter_list in list_of_twitter_lists: if twitter_list is not None: keyword_set, lemma_to_keywordbag = clean_twitter_list(twitter_list, sent_tokenize, _treebank_word_tokenize, tagger, lemmatizer, lemmatize, stopset, first_cap_re, all_cap_re, digits_punctuation_whitespace_re, pos_set) append_keyword_set(keyword_set) append_lemma_to_keywordbag(lemma_to_keywordbag) return list_of_keyword_sets, list_of_lemma_to_keywordbags
0.005441
def contained(self, key, include_self=True, exclusive=False, biggest_first=True, only=None): """Get all locations that are completely within this location. If the ``resolved_row`` context manager is not used, ``RoW`` doesn't have a spatial definition. Therefore, ``.contained("RoW")`` returns a list with either ``RoW`` or nothing. """ if 'RoW' not in self: if key == 'RoW': return ['RoW'] if 'RoW' in (only or []) else [] elif only and 'RoW' in only: only.pop(only.index('RoW')) possibles = self.topology if only is None else {k: self[k] for k in only} faces = self[key] lst = [ (k, len(v)) for k, v in possibles.items() if v and faces.issuperset(v) ] return self._finish_filter(lst, key, include_self, exclusive, biggest_first)
0.006689
def addInputPort(self, node, name, i: Union[Value, RtlSignalBase], side=PortSide.WEST): """ Add and connect input port on subnode :param node: node where to add input port :param name: name of newly added port :param i: input value :param side: side where input port should be added """ root = self.node port = node.addPort(name, PortType.INPUT, side) netCtxs = self.netCtxs if isinstance(i, LPort): root.addEdge(i, port) elif isConst(i): i = i.staticEval() c, wasThereBefore = self.netCtxs.getDefault(i) if not wasThereBefore: v = ValueAsLNode(root, i).east[0] c.addDriver(v) c.addEndpoint(port) elif i.hidden: # later connect driver of this signal to output port ctx, wasThereBefore = netCtxs.getDefault(i) if not wasThereBefore: self.lazyLoadNet(i) ctx.addEndpoint(port) else: portCtx = self.portCtx rootCtx, _ = self.rootNetCtxs.getDefault(i) if self.isVirtual: # later connect signal in root to input port or input port of # wrap node rootCtx.addEndpoint(port) else: # spot input port on this wrap node if required isNewlySpotted = (i, PortType.INPUT) not in portCtx.data src = portCtx.register(i, PortType.INPUT) # connect input port on wrap node with specified output port ctx, _ = netCtxs.getDefault(i) ctx.addDriver(src) ctx.addEndpoint(port) if isNewlySpotted: # get input port from parent view _port = portCtx.getOutside(i, PortType.INPUT) rootCtx.addEndpoint(_port)
0.00253
def get_provider_fn_decorations(provider_fn, default_arg_names): """Retrieves the provider method-relevant info set by decorators. If any info wasn't set by decorators, then defaults are returned. Args: provider_fn: a (possibly decorated) provider function default_arg_names: the (possibly empty) arg names to use if none were specified via @provides() Returns: a sequence of ProviderDecoration """ if hasattr(provider_fn, _IS_WRAPPER_ATTR): provider_decorations = getattr(provider_fn, _PROVIDER_DECORATIONS_ATTR) if provider_decorations: expanded_provider_decorations = [] for provider_decoration in provider_decorations: # TODO(kurts): seems like default scope should be done at # ProviderDecoration instantiation time. if provider_decoration.in_scope_id is None: provider_decoration.in_scope_id = scoping.DEFAULT_SCOPE if provider_decoration.arg_name is not None: expanded_provider_decorations.append(provider_decoration) else: expanded_provider_decorations.extend( [ProviderDecoration(default_arg_name, provider_decoration.annotated_with, provider_decoration.in_scope_id) for default_arg_name in default_arg_names]) return expanded_provider_decorations return [ProviderDecoration(default_arg_name, annotated_with=None, in_scope_id=scoping.DEFAULT_SCOPE) for default_arg_name in default_arg_names]
0.000568
def output_spectrum(self, spectrum, filepath, header={}): """ Prints a file of the given spectrum to an ascii file with specified filepath. Parameters ---------- spectrum: int, sequence The id from the SPECTRA table or a [w,f,e] sequence filepath: str The path of the file to print the data to. header: dict A dictionary of metadata to add of update in the header """ # If an integer is supplied, get the spectrum from the SPECTRA table if isinstance(spectrum, int): data = self.query("SELECT * FROM spectra WHERE id={}".format(spectrum), fetch='one', fmt='dict') try: data['header'] = list(map(list, data['spectrum'].header.cards)) + [[k, v, ''] for k, v in header.items()] except: data['header'] = '' # If a [w,f,e] sequence is supplied, make it into a Spectrum object elif isinstance(spectrum, (list, tuple, np.ndarray)): data = {'spectrum': Spectrum(spectrum, header=header), 'wavelength_units': '', 'flux_units': ''} try: data['header'] = list(map(list, data['spectrum'].header.cards)) except: data['header'] = '' if data: fn = filepath if filepath.endswith('.txt') else filepath + 'spectrum.txt' # Write the header if data['header']: for n, line in enumerate(data['header']): data['header'][n] = ['# {}'.format(str(line[0])).ljust(10)[:10], '{:50s} / {}'.format(*map(str, line[1:]))] try: ii.write([np.asarray(i) for i in np.asarray(data['header']).T], fn, delimiter='\t', format='no_header') except IOError: pass # Write the data names = ['# wavelength [{}]'.format(data['wavelength_units']), 'flux [{}]'.format(data['flux_units'])] if len(data['spectrum'].data) == 3: if type(data['spectrum'].data[2]) in [np.ndarray, list]: names += ['unc [{}]'.format(data['flux_units'])] else: data['spectrum'].data = data['spectrum'].data[:2] with open(fn, mode='a') as f: ii.write([np.asarray(i, dtype=np.float64) for i in data['spectrum'].data], f, names=names, delimiter='\t') else: print("Could not output spectrum: {}".format(spectrum))
0.005551
def get_rate(self, currency, date): """Get the exchange rate for ``currency`` against ``_INTERNAL_CURRENCY`` If implementing your own backend, you should probably override :meth:`_get_rate()` rather than this. """ if str(currency) == defaults.INTERNAL_CURRENCY: return Decimal(1) cached = cache.get(_cache_key(currency, date)) if cached: return Decimal(cached) else: # Expect self._get_rate() to implement caching return Decimal(self._get_rate(currency, date))
0.006969
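A hedged sketch of a concrete backend supplying _get_rate; the class name and the hard-coded rates are illustrative only, and a real backend would also cache what it fetches.

from decimal import Decimal

class FixedRateBackend:  # would subclass / mix into the class that owns get_rate above
    RATES = {"EUR": Decimal("0.92"), "GBP": Decimal("0.79")}

    def _get_rate(self, currency, date):
        return self.RATES[str(currency)]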
def logoff_session(session_id): ''' Initiate the logoff of a session. .. versionadded:: 2016.11.0 :param session_id: The numeric Id of the session. :return: A boolean representing whether the logoff succeeded. CLI Example: .. code-block:: bash salt '*' rdp.logoff_session session_id salt '*' rdp.logoff_session 99 ''' try: win32ts.WTSLogoffSession(win32ts.WTS_CURRENT_SERVER_HANDLE, session_id, True) except PyWinError as error: _LOG.error('Error calling WTSLogoffSession: %s', error) return False return True
0.003339
def get_verb_function(data, verb): """ Return function that implements the verb for given data type """ try: module = type_lookup[type(data)] except KeyError: # Some guess work for subclasses for type_, mod in type_lookup.items(): if isinstance(data, type_): module = mod break try: return getattr(module, verb) except (NameError, AttributeError): msg = "Data source of type '{}' is not supported." raise TypeError(msg.format(type(data)))
0.001792
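A self-contained toy sketch of the dispatch-by-type registry; the verb classes and the type_lookup contents are made up for illustration.

class ListVerbs:
    @staticmethod
    def head(data, n=3):
        return data[:n]

class DictVerbs:
    @staticmethod
    def head(data, n=3):
        return dict(list(data.items())[:n])

type_lookup = {list: ListVerbs, dict: DictVerbs}

def get_verb_function(data, verb):
    for type_, mod in type_lookup.items():
        if isinstance(data, type_):
            return getattr(mod, verb)
    raise TypeError("Data source of type '{}' is not supported.".format(type(data)))

print(get_verb_function([5, 4, 3, 2, 1], 'head')([5, 4, 3, 2, 1]))  # [5, 4, 3]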
def find(self, name, menu=None): """ Finds a menu item by name and returns it. :param name: The menu item name. """ menu = menu or self.menu for i in menu: if i.name == name: return i else: if i.childs: ret_item = self.find(name, menu=i.childs) if ret_item: return ret_item
0.004292
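A self-contained toy sketch of the recursive lookup; the MenuItem class and the sample menu are made up for illustration.

class MenuItem:
    def __init__(self, name, childs=None):
        self.name = name
        self.childs = childs or []

def find(name, menu):
    # Depth-first search mirroring the method above.
    for item in menu:
        if item.name == name:
            return item
        if item.childs:
            found = find(name, item.childs)
            if found:
                return found

menu = [MenuItem('File', [MenuItem('Open'), MenuItem('Save')]), MenuItem('Edit')]
print(find('Save', menu).name)  # Save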
def ParserUnparserFactory(module_name, *unparser_names): """ Produce a new parser/unparser object from the names provided. """ parse_callable = import_module(PKGNAME + '.parsers.' + module_name).parse unparser_module = import_module(PKGNAME + '.unparsers.' + module_name) return RawParserUnparserFactory(module_name, parse_callable, *[ getattr(unparser_module, name) for name in unparser_names])
0.002336
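A self-contained sketch of the underlying import-and-getattr pattern, using a standard-library module instead of the package's own parser/unparser modules (PKGNAME and the factory class are not reproduced here).

from importlib import import_module

def load_callables(module_name, *attr_names):
    # Import a module by dotted name and pull out the requested attributes.
    module = import_module(module_name)
    return [getattr(module, name) for name in attr_names]

dumps, loads = load_callables('json', 'dumps', 'loads')
print(loads(dumps({'ok': True})))  # {'ok': True}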
def _register_bindings(self, data): """ connection_handler method which is called when we connect to pusher. Responsible for binding callbacks to channels before we connect. :return: """ self._register_diff_order_book_channels() self._register_live_orders_channels() self._register_live_trades_channels() self._register_order_book_channels()
0.004843