text: string (lengths 78 to 104k)
score: float64 (range 0 to 0.18)
def infinity_norm(A): """Infinity norm of a matrix (maximum absolute row sum). Parameters ---------- A : csr_matrix, csc_matrix, sparse, or numpy matrix Sparse or dense matrix Returns ------- n : float Infinity norm of the matrix Notes ----- - This serves as an upper bound on spectral radius. - csr and csc avoid a deep copy - dense calls scipy.linalg.norm See Also -------- scipy.linalg.norm : dense matrix norms Examples -------- >>> import numpy as np >>> from scipy.sparse import spdiags >>> from pyamg.util.linalg import infinity_norm >>> n=10 >>> e = np.ones((n,1)).ravel() >>> data = [ -1*e, 2*e, -1*e ] >>> A = spdiags(data,[-1,0,1],n,n) >>> print infinity_norm(A) 4.0 """ if sparse.isspmatrix_csr(A) or sparse.isspmatrix_csc(A): # avoid copying index and ptr arrays abs_A = A.__class__((np.abs(A.data), A.indices, A.indptr), shape=A.shape) return (abs_A * np.ones((A.shape[1]), dtype=A.dtype)).max() elif sparse.isspmatrix(A): return (abs(A) * np.ones((A.shape[1]), dtype=A.dtype)).max() else: return np.dot(np.abs(A), np.ones((A.shape[1],), dtype=A.dtype)).max()
0.000755
def handle_valid(self, form=None, *args, **kwargs): """ Called after the form has validated. """ # Take a chance and try to save a subclass of a ModelForm. if hasattr(form, 'save'): form.save() # Also try to call the handle_valid method of the form itself. if hasattr(form, 'handle_valid'): form.handle_valid(*args, **kwargs)
0.005013
def safe_dump_pk(obj, abspath, pk_protocol=pk_protocol, compress=False, enable_verbose=True): """A stable version of dump_pk, silently overwriting the existing file. When your program is interrupted, you lose nothing. Typically, if your program is interrupted for any reason, it only leaves an incomplete file. If you use replace=True, then you also lose your old file. So a better way is to: 1. dump pickle to a temp file. 2. when it's done, rename it to #abspath, overwrite the old one. This guarantees an atomic write. :param obj: Picklable Python Object. :param abspath: ``save as`` path, file extension has to be ``.pickle`` or ``.gz`` (for compressed Pickle). :type abspath: string :param pk_protocol: (default your python version) use 2, to make a py2.x/3.x compatible pickle file. But 3 is faster. :type pk_protocol: int :param compress: (default False) If ``True``, use GNU program gzip to compress the Pickle file. Disk usage can be greatly reduced. But you have to use :func:`load_pk(abspath, compress=True)<load_pk>` in loading. :type compress: boolean :param enable_verbose: (default True) Trigger for message. :type enable_verbose: boolean Usage:: >>> from weatherlab.lib.dataIO.pk import safe_dump_pk >>> pk = {"a": 1, "b": 2} >>> safe_dump_pk(pk, "test.pickle") Dumping to test.pickle... Complete! Elapse 0.001763 sec **中文文档** 在对文件进行写入时, 如果程序中断, 则会留下一个不完整的文件。如果你使用了覆盖式 写入, 则你同时也丢失了原文件。所以为了保证写操作的原子性(要么全部完成, 要么全部 都不完成), 更好的方法是: 首先将文件写入一个临时文件中, 完成后再将文件重命名, 覆盖旧文件。这样即使中途程序被中断, 也仅仅是留下了一个未完成的临时文件而已, 不会 影响原文件。 参数列表 :param obj: 可Pickle化的Python对象 :param abspath: 写入文件的路径。扩展名必须为 ``.pickle`` 或 ``.gz`` , 其中gz用于被压 缩的Pickle :type abspath: ``字符串`` :param pk_protocol: (默认 等于你Python的大版本号) 使用2可以使得保存的文件能被 py2.x/3.x都能读取。但是协议3的速度更快, 体积更小, 性能更高。 :type pk_protocol: ``整数`` :param compress: (默认 False) 当为 ``True`` 时, 使用开源压缩标准gzip压缩Pickle文件。 通常能让文件大小缩小10-20倍不等。如要读取文件, 则需要使用函数 :func:`load_pk(abspath, compress=True)<load_pk>`. :type compress: ``布尔值`` :param enable_verbose: (默认 True) 是否打开信息提示开关, 批处理时建议关闭. :type enable_verbose: ``布尔值`` """ abspath = str(abspath) # try to stringify the path temp_abspath = "%s.tmp" % abspath dump_pk(obj, temp_abspath, pk_protocol=pk_protocol, replace=True, compress=compress, enable_verbose=enable_verbose) shutil.move(temp_abspath, abspath)
0.00078
def iter_tokens(cls, blob): """ Iterate over tokens found in blob contents :param blob: Input string with python file contents :return: token iterator """ readline_func = io.StringIO(blob.decode('utf-8')).readline return tokenize.generate_tokens(readline_func)
0.003521
def load_EROS_lc(filename='lm0010n22323.time'): """ Read an EROS light curve and return its data. Parameters ---------- filename : str, optional A light-curve filename. Returns ------- dates : numpy.ndarray An array of dates. magnitudes : numpy.ndarray An array of magnitudes. errors : numpy.ndarray An array of magnitude errors. """ module_path = dirname(__file__) file_path = join(module_path, 'lightcurves', filename) data = np.loadtxt(file_path) date = data[:, 0] mag = data[:, 1] err = data[:, 2] return date, mag, err
0.00158
def unquote(s): """Unquote the indicated string.""" # Ignore the left- and rightmost chars (which should be quotes). # Use the Python engine to decode the escape sequence i, N = 1, len(s) - 1 ret = [] while i < N: if s[i] == '\\' and i < N - 1: ret.append(UNQUOTE_MAP.get(s[i+1], s[i+1])) i += 2 else: ret.append(s[i]) i += 1 return ''.join(ret)
0.030691
def add_lvl_to_ui(self, level, header): """Insert the level and header into the ui. :param level: a newly created level :type level: :class:`jukeboxcore.gui.widgets.browser.AbstractLevel` :param header: a newly created header :type header: QtCore.QWidget|None :returns: None :rtype: None :raises: None """ lay = self.layout() rc = lay.rowCount() lay.addWidget(level, rc+1, 1) if header is not None: lay.addWidget(header, rc+1, 0) lay.setColumnStretch(1,1)
0.005172
def add_scalar(self, name, value, step): """Log a scalar variable.""" self.writer.add_scalar(name, value, step)
0.015748
def get_output(self): ''' Execute a command through system shell. First checks to see if the requested command is executable. Returns (returncode, stdout, 0) ''' if self.is_hostname: # short circuit for hostname with internal method return determine_hostname() # all commands should timeout after a long interval so the client does not hang # prepend native nix 'timeout' implementation timeout_command = 'timeout -s KILL %s %s' % ( self.config.cmd_timeout, self.command) # ensure consistent locale for collected command output cmd_env = {'LC_ALL': 'C', 'PATH': '/sbin:/bin:/usr/sbin:/usr/bin', 'PYTHONPATH': os.getenv('PYTHONPATH')} args = shlex.split(timeout_command) # never execute this stuff if set.intersection(set(args), constants.command_blacklist): raise RuntimeError("Command Blacklist: " + self.command) try: logger.debug('Executing: %s', args) proc0 = Popen(args, shell=False, stdout=PIPE, stderr=STDOUT, bufsize=-1, env=cmd_env, close_fds=True) except OSError as err: if err.errno == errno.ENOENT: logger.debug('Command %s not found', self.command) return else: raise err dirty = False cmd = "sed -rf " + constants.default_sed_file sedcmd = Popen(shlex.split(cmd), stdin=proc0.stdout, stdout=PIPE) proc0.stdout.close() proc0 = sedcmd if self.exclude is not None: exclude_file = NamedTemporaryFile() exclude_file.write("\n".join(self.exclude).encode('utf-8')) exclude_file.flush() cmd = "grep -F -v -f %s" % exclude_file.name proc1 = Popen(shlex.split(cmd), stdin=proc0.stdout, stdout=PIPE) proc0.stdout.close() stderr = None if self.pattern is None or len(self.pattern) == 0: stdout, stderr = proc1.communicate() # always log return codes for debug logger.debug('Proc1 Status: %s', proc1.returncode) logger.debug('Proc1 stderr: %s', stderr) proc0 = proc1 dirty = True if self.pattern is not None and len(self.pattern): pattern_file = NamedTemporaryFile() pattern_file.write("\n".join(self.pattern).encode('utf-8')) pattern_file.flush() cmd = "grep -F -f %s" % pattern_file.name proc2 = Popen(shlex.split(cmd), stdin=proc0.stdout, stdout=PIPE) proc0.stdout.close() stdout, stderr = proc2.communicate() # always log return codes for debug logger.debug('Proc2 Status: %s', proc2.returncode) logger.debug('Proc2 stderr: %s', stderr) proc0 = proc2 dirty = True if not dirty: stdout, stderr = proc0.communicate() # Required hack while we still pass shell=True to Popen; a Popen # call with shell=False for a non-existent binary will raise OSError. if proc0.returncode == 126 or proc0.returncode == 127: stdout = "Could not find cmd: %s" % self.command logger.debug("Proc0 Status: %s", proc0.returncode) logger.debug("Proc0 stderr: %s", stderr) return stdout.decode('utf-8', 'ignore').strip()
0.000824
async def set_contents(self, **params): """Writes user content to the database Accepts: - public key (required) - content (required) - description - price - address """ if params.get("message"): params = json.loads(params.get("message", "{}")) if not params: return {"error":400, "reason":"Missed required fields"} txid = params.get("txid") public_key = params.get("public_key") _hash = params.get("hash") coinid = params.get("coinid") access = params.get("access") cid = params.get("cid") # Try to get account account = await self.collection.find_one({"public_key":public_key}) # Return an error if the account does not exist if not account: return {"error":404, "reason":"Account was not found"} database = client[coinid] content_collection = database[access] await content_collection.insert_one({ "owner": public_key, "cid":cid, "txid": txid, "hash": _hash }) success = await content_collection.find_one({"txid":txid}) if not success: return {"error":500, "reason":"Error while writing content to database"} else: return {"result":"ok"}
0.044658
def info(self): """get information about this term Parameters ---------- Returns ------- dict containing information to duplicate this term """ info = super(TensorTerm, self).info info.update({'terms':[term.info for term in self._terms]}) return info
0.009036
def raw_section_content_identifier(self, value): """ Setter for **self. __raw_section_content_identifier** attribute. :param value: Attribute value. :type value: unicode """ if value is not None: assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format( "raw_section_content_identifier", value) self.__raw_section_content_identifier = value
0.006593
def _retransmit(self, transaction, message, future_time, retransmit_count): """ Thread function to retransmit the message in the future :param transaction: the transaction that owns the message that needs retransmission :param message: the message that needs the retransmission task :param future_time: the amount of time to wait before a new attempt :param retransmit_count: the number of retransmissions """ with transaction: while retransmit_count < defines.MAX_RETRANSMIT and (not message.acknowledged and not message.rejected) \ and not self.stopped.isSet(): transaction.retransmit_stop.wait(timeout=future_time) if not message.acknowledged and not message.rejected and not self.stopped.isSet(): retransmit_count += 1 future_time *= 2 self.send_datagram(message) if message.acknowledged or message.rejected: message.timeouted = False else: logger.warning("Give up on message {message}".format(message=message.line_print)) message.timeouted = True if message.observe is not None: self._observeLayer.remove_subscriber(message) try: self.to_be_stopped.remove(transaction.retransmit_stop) except ValueError: pass transaction.retransmit_stop = None transaction.retransmit_thread = None
0.003836
def _make_output(value, output_script, version=None): ''' byte-like, byte-like -> TxOut ''' if 'decred' in riemann.get_current_network_name(): return tx.DecredTxOut( value=value, version=version, output_script=output_script) return tx.TxOut(value=value, output_script=output_script)
0.00289
def delete_app_info(app_id): """ delete app info from local db """ try: conn = get_conn() c = conn.cursor() c.execute("DELETE FROM container WHERE app_id='{0}'".format(app_id)) c.execute("DELETE FROM app WHERE id='{0}'".format(app_id)) conn.commit() #print 'clear old app %s in db succeed!' % app_id except Exception, e: raise RuntimeError('clear old app %s in db failed! %s' % (app_id,e))
0.006452
def remove_host(self, host): """ Called when the control connection observes that a node has left the ring. Intended for internal use only. """ if host and self.metadata.remove_host(host): log.info("Cassandra host %s removed", host) self.on_remove(host)
0.006289
def load_job_from_container(self, container_path, config_string=None): """ Load the job from the given :class:`aeneas.container.Container` object. If ``config_string`` is ``None``, the container must contain a configuration file; otherwise use the provided config string (i.e., the wizard case). :param string container_path: the path to the input container :param string config_string: the configuration string (from wizard) :raises: :class:`~aeneas.executejob.ExecuteJobInputError`: if the given container does not contain a valid :class:`~aeneas.job.Job` """ self.log(u"Loading job from container...") # create working directory where the input container # will be decompressed self.working_directory = gf.tmp_directory(root=self.rconf[RuntimeConfiguration.TMP_PATH]) self.log([u"Created working directory '%s'", self.working_directory]) try: self.log(u"Decompressing input container...") input_container = Container(container_path, logger=self.logger) input_container.decompress(self.working_directory) self.log(u"Decompressing input container... done") except Exception as exc: self.clean() self.log_exc(u"Unable to decompress container '%s': %s" % (container_path, exc), None, True, ExecuteJobInputError) try: self.log(u"Creating job from working directory...") working_container = Container( self.working_directory, logger=self.logger ) analyzer = AnalyzeContainer(working_container, logger=self.logger) self.job = analyzer.analyze(config_string=config_string) self.log(u"Creating job from working directory... done") except Exception as exc: self.clean() self.log_exc(u"Unable to analyze container '%s': %s" % (container_path, exc), None, True, ExecuteJobInputError) if self.job is None: self.log_exc(u"The container '%s' does not contain a valid Job" % (container_path), None, True, ExecuteJobInputError) try: # set absolute path for text file and audio file # for each task in the job self.log(u"Setting absolute paths for tasks...") for task in self.job.tasks: task.text_file_path_absolute = gf.norm_join( self.working_directory, task.text_file_path ) task.audio_file_path_absolute = gf.norm_join( self.working_directory, task.audio_file_path ) self.log(u"Setting absolute paths for tasks... done") self.log(u"Loading job from container: succeeded") except Exception as exc: self.clean() self.log_exc(u"Error while setting absolute paths for tasks", exc, True, ExecuteJobInputError)
0.002641
def write(self, font, feaFile, compiler=None): """Write features and class definitions for this font to a feaLib FeatureFile object. Returns True if feature file was modified, False if no new features were generated. """ self.setContext(font, feaFile, compiler=compiler) try: if self.shouldContinue(): return self._write() else: return False finally: del self.context
0.004008
def add_arguments(cls, parser, sys_arg_list=None): """ Arguments for the TCP health monitor plugin. """ parser.add_argument('--tcp_check_interval', dest='tcp_check_interval', required=False, default=2, type=float, help="TCP health-test interval in seconds, " "default 2 " "(only for 'tcp' health monitor plugin)") parser.add_argument('--tcp_check_port', dest='tcp_check_port', required=False, default=22, type=int, help="Port for TCP health-test, default 22 " "(only for 'tcp' health monitor plugin)") return ["tcp_check_interval", "tcp_check_port"]
0.002312
def size(cell): """ Return the size (maximum cardinality) of a SPICE cell of any data type. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/size_c.html :param cell: Input cell. :type cell: spiceypy.utils.support_types.SpiceCell :return: The size of the input cell. :rtype: int """ assert isinstance(cell, stypes.SpiceCell) return libspice.size_c(ctypes.byref(cell))
0.002387
def set_viewup(self, vector): """ sets camera viewup vector """ if isinstance(vector, np.ndarray): if vector.ndim != 1: vector = vector.ravel() self.camera.SetViewUp(vector) self._render()
0.008065
def button_input(self, title, message, buttons, default, timeout=None, dimensions=None): ''' Function to accept input in the form of a button click. ''' # Create the dialog box self.response = default self.top = tkinter.Tk() self.top.title(title) # Use dimensions if passed if dimensions is not None: self.top.minsize(width=dimensions[0], height=dimensions[1]) self.top.maxsize(width=dimensions[0], height=dimensions[1]) # Display a message labelString = tkinter.StringVar() labelString.set(message) label = tkinter.Label(self.top, textvariable=labelString, relief=tkinter.RAISED) label.pack(ipadx=100, ipady=10) # Populate dialog box with buttons for key in buttons.keys(): button = tkinter.Button(self.top, text=buttons[key], command=lambda key=key: self.selected(key)) button.pack(fill='both', pady=5, padx=10) # Destroy the dialog box if there has been no button click within the timeout period if timeout != None: try: self.top.after(timeout, lambda: self.top.destroy()) except: pass self.top.mainloop() return self.response
0.006159
def ensure_contiguity_in_observation_rows(obs_id_vector): """ Ensures that all rows pertaining to a given choice situation are located next to one another. Raises a helpful ValueError otherwise. This check is needed because the hessian calculation function requires the design matrix to have contiguity in rows with the same observation id. Parameters ---------- obs_id_vector : 1D ndarray of ints. Should contain the id (i.e. a unique integer) that corresponds to each choice situation in the dataset. Returns ------- None. """ # Check that the choice situation id for each row is larger than or equal # to the choice situation id of the preceding row. contiguity_check_array = (obs_id_vector[1:] - obs_id_vector[:-1]) >= 0 if not contiguity_check_array.all(): problem_ids = obs_id_vector[np.where(~contiguity_check_array)] msg_1 = "All rows pertaining to a given choice situation must be " msg_2 = "contiguous. \nRows pertaining to the following observation " msg_3 = "id's are not contiguous: \n{}" raise ValueError(msg_1 + msg_2 + msg_3.format(problem_ids.tolist())) else: return None
0.000729
def _handle_amqp_frame(self, data_in): """Unmarshal a single AMQP frame and return the result. :param data_in: socket data :return: data_in, channel_id, frame """ if not data_in: return data_in, None, None try: byte_count, channel_id, frame_in = pamqp_frame.unmarshal(data_in) return data_in[byte_count:], channel_id, frame_in except pamqp_exception.UnmarshalingException: pass except specification.AMQPFrameError as why: LOGGER.error('AMQPFrameError: %r', why, exc_info=True) except ValueError as why: LOGGER.error(why, exc_info=True) self.exceptions.append(AMQPConnectionError(why)) return data_in, None, None
0.002574
def _set_remote(self, stream=False): """ Call :py:meth:`~._args_for_remote`; if the return value is not None, execute 'terraform remote config' with those arguments and ensure it exits 0. :param stream: whether or not to stream TF output in realtime :type stream: bool """ args = self._args_for_remote() if args is None: logger.debug('_args_for_remote() returned None; not configuring ' 'terraform remote') return logger.warning('Setting terraform remote config: %s', ' '.join(args)) args = ['config'] + args self._run_tf('remote', cmd_args=args, stream=stream) logger.info('Terraform remote configured.')
0.002635
def wrapped(f): """ Decorator to append routed docstrings """ import inspect def extract(func): append = "" args = inspect.getargspec(func) # parameters without defaults come first; defaults apply to the trailing parameters for i, a in enumerate(args.args): if i < (len(args.args) - len(args.defaults)): append += str(a) + ", " else: default = args.defaults[i - (len(args.args) - len(args.defaults))] if hasattr(default, "__name__"): default = default.__name__ else: default = str(default) append += str(a) + "=" + default + ", " append = append[:-2] + ")" return append doc = f.__doc__ + "\n" doc += " local -> array(" + extract(getattr(ConstructLocal, f.__name__)) + "\n" doc += " spark -> array(" + extract(getattr(ConstructSpark, f.__name__)) + "\n" f.__doc__ = doc return f
0.003333
def partition_services(self, **args): """Partitions two connected services. Not two sets of services (TODO) Expects usual arguments and srcprobability and dstprobability, that indicates probability of terminating connections from source to dest and vice versa """ rule = args.copy() assert 'source' in rule and 'dest' in rule #assert 'srcprobability' in rule and 'dstprobability' in rule assert rule['source'] != "" and rule['dest'] != "" #check if the two services are connected assert rule['dest'] in self.app.get_dependencies(rule['source']) rule['errorcode'] = rule.pop('errorcode', -1) or -1 rule['abortprobability'] = rule.pop('srcprobability', 1) or 1 self.abort_requests(**rule) rule['abortprobability'] = rule.pop('dstprobability', 1) or 1 temp = rule['source'] rule['source'] = rule['dest'] rule['dest'] = temp self.abort_requests(**rule)
0.00603
def schedule_function(self, func, date_rule=None, time_rule=None, half_days=True, calendar=None): """Schedules a function to be called according to some timed rules. Parameters ---------- func : callable[(context, data) -> None] The function to execute when the rule is triggered. date_rule : EventRule, optional The rule for the dates to execute this function. time_rule : EventRule, optional The rule for the times to execute this function. half_days : bool, optional Should this rule fire on half days? calendar : Sentinel, optional Calendar used to reconcile date and time rules. See Also -------- :class:`zipline.api.date_rules` :class:`zipline.api.time_rules` """ # When the user calls schedule_function(func, <time_rule>), assume that # the user meant to specify a time rule but no date rule, instead of # a date rule and no time rule as the signature suggests if isinstance(date_rule, (AfterOpen, BeforeClose)) and not time_rule: warnings.warn('Got a time rule for the second positional argument ' 'date_rule. You should use keyword argument ' 'time_rule= when calling schedule_function without ' 'specifying a date_rule', stacklevel=3) date_rule = date_rule or date_rules.every_day() time_rule = ((time_rule or time_rules.every_minute()) if self.sim_params.data_frequency == 'minute' else # If we are in daily mode the time_rule is ignored. time_rules.every_minute()) # Check the type of the algorithm's schedule before pulling calendar # Note that the ExchangeTradingSchedule is currently the only # TradingSchedule class, so this is unlikely to be hit if calendar is None: cal = self.trading_calendar elif calendar is calendars.US_EQUITIES: cal = get_calendar('XNYS') elif calendar is calendars.US_FUTURES: cal = get_calendar('us_futures') else: raise ScheduleFunctionInvalidCalendar( given_calendar=calendar, allowed_calendars=( '[calendars.US_EQUITIES, calendars.US_FUTURES]' ), ) self.add_event( make_eventrule(date_rule, time_rule, cal, half_days), func, )
0.002596
def close(self): """Close the poll instance.""" if self._poll is None: return self._poll.close() self._poll = None self._readers = 0 self._writers = 0 self._events = 0 clear_callbacks(self)
0.007547
def set_dialog_position(self): """Positions the tab switcher in the top-center of the editor.""" left = self.editor.geometry().width()/2 - self.width()/2 top = self.editor.tabs.tabBar().geometry().height() self.move(self.editor.mapToGlobal(QPoint(left, top)))
0.006734
def expand_families(stmts_in, **kwargs): """Expand FamPlex Agents to individual genes. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements to expand. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. Returns ------- stmts_out : list[indra.statements.Statement] A list of expanded statements. """ from indra.tools.expand_families import Expander logger.info('Expanding families on %d statements...' % len(stmts_in)) expander = Expander(hierarchies) stmts_out = expander.expand_families(stmts_in) logger.info('%d statements after expanding families...' % len(stmts_out)) dump_pkl = kwargs.get('save') if dump_pkl: dump_statements(stmts_out, dump_pkl) return stmts_out
0.001195
def get_relationships_by_genus_type(self, relationship_genus_type=None): """Gets a ``RelationshipList`` corresponding to the given relationship genus ``Type`` which does not include relationships of types derived from the specified ``Type``. arg: relationship_genus_type (osid.type.Type): a relationship genus type return: (osid.relationship.RelationshipList) - the returned ``Relationship list`` raise: NullArgument - ``relationship_genus_type`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ if relationship_genus_type is None: raise NullArgument() url_path = ('/handcar/services/relationship/families/' + self._catalog_idstr + '/relationships?genustypeid=' + relationship_genus_type.get_identifier()) return objects.RelationshipList(self._get_request(url_path))
0.003687
def get_pos(self, sector): """Get index (into the raw data of the disk image) of start of sector This base class method assumes the sectors are one after another, in order starting from the beginning of the raw data. """ if not self.sector_is_valid(sector): raise ByteNotInFile166("Sector %d out of range" % sector) pos = sector * self.sector_size + self.header_offset size = self.sector_size return pos, size
0.004115
def OnPasteFormat(self, event): """Paste format event handler""" with undo.group(_("Paste format")): self.grid.actions.paste_format() self.grid.ForceRefresh() self.grid.update_attribute_toolbar() self.grid.actions.zoom()
0.007299
def read(self, nbytes: int = 0) -> bytes: """ read *at most* ``nbytes`` """ if nbytes == 0 or len(self.output) <= nbytes: data = bytes(self.output) del self.output[:] return data data = bytes(self.output[:nbytes]) del self.output[:nbytes] return data
0.005848
def import_module_or_none(module_label): """ Imports the module with the given name. Returns None if the module doesn't exist, but it does propagate import errors in deeper modules. """ try: # On Python 3, importlib has much more functionality compared to Python 2. return importlib.import_module(module_label) except ImportError: # Based on code from django-oscar: # There are 2 reasons why there could be an ImportError: # # 1. Module does not exist. In that case, we ignore the import and return None # 2. Module exists but another ImportError occurred when trying to import the module. # In that case, it is important to propagate the error. # # ImportError does not provide an easy way to distinguish those two cases. # Fortunately, the traceback of the ImportError starts at __import__ # statement. If the traceback has more than one frame, it means that # application was found and ImportError originates within the local app __, __, exc_traceback = sys.exc_info() frames = traceback.extract_tb(exc_traceback) frames = [f for f in frames if f[0] != "<frozen importlib._bootstrap>" and # Python 3.6 f[0] != IMPORT_PATH_IMPORTLIB and not f[0].endswith(IMPORT_PATH_GEVENT) and not IMPORT_PATH_PYDEV in f[0]] if len(frames) > 1: raise return None
0.003318
def real_time_scheduling(self, availability, oauth, event, target_calendars=()): """Generates a real time scheduling link to start the OAuth process with an event to be automatically upserted :param dict availability: - A dict describing the availability details for the event: :participants - A dict stating who is required for the availability call :required_duration - A dict stating the length of time the event will last for :available_periods - A dict stating the available periods for the event :start_interval - An Integer representing the start_interval of the event :buffer - A dict representing the buffer for the event :param dict oauth: - A dict describing the OAuth flow required: :scope - A String representing the scopes to ask for within the OAuth flow :redirect_uri - A String containing a url to redirect the user to after completing the OAuth flow. :state - A String representing additional state to be passed within the OAuth flow. :param dict event: - A dict describing the event :param list target_calendars: - A list of dicts stating into which calendars to insert the created event See http://www.cronofy.com/developers/api#upsert-event for reference. """ args = { 'oauth': oauth, 'event': event, 'target_calendars': target_calendars } if availability: options = {} options['participants'] = self.map_availability_participants(availability.get('participants', None)) options['required_duration'] = self.map_availability_required_duration(availability.get('required_duration', None)) options['start_interval'] = self.map_availability_required_duration(availability.get('start_interval', None)) options['buffer'] = self.map_availability_buffer(availability.get('buffer', None)) self.translate_available_periods(availability['available_periods']) options['available_periods'] = availability['available_periods'] args['availability'] = options return self.request_handler.post(endpoint='real_time_scheduling', data=args, use_api_key=True).json()
0.005873
def create(self, password, username, realm=None): """ Creates a storage password. A `StoragePassword` can be identified by <username>, or by <realm>:<username> if the optional realm parameter is also provided. :param password: The password for the credentials - this is the only part of the credentials that will be stored securely. :type password: ``string`` :param username: The username for the credentials. :type username: ``string`` :param realm: The credential realm. (optional) :type realm: ``string`` :return: The :class:`StoragePassword` object created. """ if not isinstance(username, basestring): raise ValueError("Invalid name: %s" % repr(username)) if realm is None: response = self.post(password=password, name=username) else: response = self.post(password=password, realm=realm, name=username) if response.status != 201: raise ValueError("Unexpected status code %s returned from creating a stanza" % response.status) entries = _load_atom_entries(response) state = _parse_atom_entry(entries[0]) storage_password = StoragePassword(self.service, self._entity_path(state), state=state, skip_refresh=True) return storage_password
0.004488
def encode(self, lname, max_length=4, german=False): """Calculate the PSHP Soundex/Viewex Coding of a last name. Parameters ---------- lname : str The last name to encode max_length : int The length of the code returned (defaults to 4) german : bool Set to True if the name is German (different rules apply) Returns ------- str The PSHP Soundex/Viewex Coding Examples -------- >>> pe = PSHPSoundexLast() >>> pe.encode('Smith') 'S530' >>> pe.encode('Waters') 'W350' >>> pe.encode('James') 'J500' >>> pe.encode('Schmidt') 'S530' >>> pe.encode('Ashcroft') 'A225' """ lname = unicode_normalize('NFKD', text_type(lname.upper())) lname = lname.replace('ß', 'SS') lname = ''.join(c for c in lname if c in self._uc_set) # A. Prefix treatment if lname[:3] == 'VON' or lname[:3] == 'VAN': lname = lname[3:].strip() # The rule implemented below says "MC, MAC become 1". I believe it # meant to say they become M except in German data (where superscripted # 1 indicates "except in German data"). It doesn't make sense for them # to become 1 (BPFV -> 1) or to apply outside German. Unfortunately, # both articles have this error(?). if not german: if lname[:3] == 'MAC': lname = 'M' + lname[3:] elif lname[:2] == 'MC': lname = 'M' + lname[2:] # The non-German-only rule to strip ' is unnecessary due to filtering if lname[:1] in {'E', 'I', 'O', 'U'}: lname = 'A' + lname[1:] elif lname[:2] in {'GE', 'GI', 'GY'}: lname = 'J' + lname[1:] elif lname[:2] in {'CE', 'CI', 'CY'}: lname = 'S' + lname[1:] elif lname[:3] == 'CHR': lname = 'K' + lname[1:] elif lname[:1] == 'C' and lname[:2] != 'CH': lname = 'K' + lname[1:] if lname[:2] == 'KN': lname = 'N' + lname[1:] elif lname[:2] == 'PH': lname = 'F' + lname[1:] elif lname[:3] in {'WIE', 'WEI'}: lname = 'V' + lname[1:] if german and lname[:1] in {'W', 'M', 'Y', 'Z'}: lname = {'W': 'V', 'M': 'N', 'Y': 'J', 'Z': 'S'}[lname[0]] + lname[ 1: ] code = lname[:1] # B. Postfix treatment if german: # moved from end of postfix treatment due to blocking if lname[-3:] == 'TES': lname = lname[:-3] elif lname[-2:] == 'TS': lname = lname[:-2] if lname[-3:] == 'TZE': lname = lname[:-3] elif lname[-2:] == 'ZE': lname = lname[:-2] if lname[-1:] == 'Z': lname = lname[:-1] elif lname[-2:] == 'TE': lname = lname[:-2] if lname[-1:] == 'R': lname = lname[:-1] + 'N' elif lname[-2:] in {'SE', 'CE'}: lname = lname[:-2] if lname[-2:] == 'SS': lname = lname[:-2] elif lname[-1:] == 'S': lname = lname[:-1] if not german: l5_repl = {'STOWN': 'SAWON', 'MPSON': 'MASON'} l4_repl = { 'NSEN': 'ASEN', 'MSON': 'ASON', 'STEN': 'SAEN', 'STON': 'SAON', } if lname[-5:] in l5_repl: lname = lname[:-5] + l5_repl[lname[-5:]] elif lname[-4:] in l4_repl: lname = lname[:-4] + l4_repl[lname[-4:]] if lname[-2:] in {'NG', 'ND'}: lname = lname[:-1] if not german and lname[-3:] in {'GAN', 'GEN'}: lname = lname[:-3] + 'A' + lname[-2:] # C. Infix Treatment lname = lname.replace('CK', 'C') lname = lname.replace('SCH', 'S') lname = lname.replace('DT', 'T') lname = lname.replace('ND', 'N') lname = lname.replace('NG', 'N') lname = lname.replace('LM', 'M') lname = lname.replace('MN', 'M') lname = lname.replace('WIE', 'VIE') lname = lname.replace('WEI', 'VEI') # D. Soundexing # code for X & Y are unspecified, but presumably are 2 & 0 lname = lname.translate(self._trans) lname = self._delete_consecutive_repeats(lname) code += lname[1:] code = code.replace('0', '') # rule 1 if max_length != -1: if len(code) < max_length: code += '0' * (max_length - len(code)) else: code = code[:max_length] return code
0.000416
def innerHTML(self, html: str) -> None: # type: ignore """Set innerHTML both on this node and related browser node.""" df = self._parse_html(html) if self.connected: self._set_inner_html_web(df.html) self._empty() self._append_child(df)
0.00692
def list(self, product, store_view=None, identifierType=None): """ Retrieve product image list :param product: ID or SKU of product :param store_view: Code or ID of store view :param identifierType: Defines whether the product or SKU value is passed in the "product" parameter. :return: `list` of `dict` """ return self.call('catalog_product_attribute_media.list', [product, store_view, identifierType])
0.005814
def connected_sets(C, directed=True): r"""Compute connected components for a directed graph with weights represented by the given count matrix. Parameters ---------- C : scipy.sparse matrix or numpy ndarray square matrix specifying edge weights. directed : bool, optional Whether to compute connected components for a directed or undirected graph. Default is True. Returns ------- cc : list of arrays of integers Each entry is an array containing all vertices (states) in the corresponding connected component. """ M = C.shape[0] """ Compute connected components of C. nc is the number of components, indices contain the component labels of the states """ nc, indices = csgraph.connected_components(C, directed=directed, connection='strong') states = np.arange(M) # Discrete states """Order indices""" ind = np.argsort(indices) indices = indices[ind] """Order states""" states = states[ind] """ The state index tuple is now of the following form (states, indices)=([s_23, s_17,...,s_3, s_2, ...], [0, 0, ..., 1, 1, ...]) """ """Find number of states per component""" count = np.bincount(indices) """Cumulative sum of count gives start and end indices of components""" csum = np.zeros(len(count) + 1, dtype=int) csum[1:] = np.cumsum(count) """Generate list containing components, sort each component by increasing state label""" cc = [] for i in range(nc): cc.append(np.sort(states[csum[i]:csum[i + 1]])) """Sort by size of component - largest component first""" cc = sorted(cc, key=lambda x: -len(x)) return cc
0.001165
def hz_to_octs(frequencies, A440=440.0): """Convert frequencies (Hz) to (fractional) octave numbers. Examples -------- >>> librosa.hz_to_octs(440.0) 4. >>> librosa.hz_to_octs([32, 64, 128, 256]) array([ 0.219, 1.219, 2.219, 3.219]) Parameters ---------- frequencies : number >0 or np.ndarray [shape=(n,)] or float scalar or vector of frequencies A440 : float frequency of A440 (in Hz) Returns ------- octaves : number or np.ndarray [shape=(n,)] octave number for each frequency See Also -------- octs_to_hz """ return np.log2(np.asanyarray(frequencies) / (float(A440) / 16))
0.001433
def points(ys, width=None): '''Usage: import scipy.stats def walk(steps, position=0): for step in steps: position += step yield position positions = list(walk(scipy.stats.norm.rvs(size=1000))) points(positions) ''' if width is None: width = terminal.width() ys = np.array(ys) n = len(ys) y_min, y_max = np.min(ys), np.max(ys) n_bins = min(width, n) bins_per_n = float(n_bins) / float(n) # print n, n_bins, n_per_bin, bins_per_n sums = np.zeros(n_bins) counts = np.zeros(n_bins) for i, y in enumerate(ys): bin = int(i * bins_per_n) sums[bin] += y counts[bin] += 1 bin_means = sums / counts # we want the lowest bin_height to be 0.0, and highest bin_height to be 1.0 bin_heights = normalize(bin_means) bin_chars = (bin_heights * (len(terminal.bars) - 1)).astype(int) # print sums, counts, bin_means cells = [terminal.bars[bin_char] for bin_char in bin_chars] print '[%+f]' % y_max print u''.join(cells) print '[%+f]' % y_min
0.00092
def gen(name, data): """Generate dataentry *name* from *data*.""" return '---- dataentry %s ----\n%s\n----' % (name, '\n'.join( '%s:%s' % (attr, value) for attr, value in data.items()))
0.00939
def on_kill(self): """ Cancel the submitted athena query """ if self.query_execution_id: self.log.info('⚰️⚰️⚰️ Received a kill Signal. Time to Die') self.log.info( 'Stopping Query with executionId - %s', self.query_execution_id ) response = self.hook.stop_query(self.query_execution_id) http_status_code = None try: http_status_code = response['ResponseMetadata']['HTTPStatusCode'] except Exception as ex: self.log.error('Exception while cancelling query', ex) finally: if http_status_code is None or http_status_code != 200: self.log.error('Unable to request query cancel on athena. Exiting') else: self.log.info( 'Polling Athena for query with id %s to reach final state', self.query_execution_id ) self.hook.poll_query_status(self.query_execution_id)
0.004695
def create_domain_record(self, domain_id, record_type, data, name=None, priority=None, port=None, weight=None): """ This method creates a new domain name with an A record for the specified [ip_address]. Required parameters domain_id: Integer or Domain Name (e.g. domain.com), specifies the domain for which to create a record. record_type: String, the type of record you would like to create. 'A', 'CNAME', 'NS', 'TXT', 'MX' or 'SRV' data: String, this is the value of the record Optional parameters name: String, required for 'A', 'CNAME', 'TXT' and 'SRV' records priority: Integer, required for 'SRV' and 'MX' records port: Integer, required for 'SRV' records weight: Integer, required for 'SRV' records """ params = dict(record_type=record_type, data=data) if name: params.update({'name': name}) if priority: params.update({'priority': priority}) if port: params.update({'port': port}) if weight: params.update({'weight': weight}) json = self.request('/domains/%s/records/new' % domain_id, method='GET', params=params) status = json.get('status') if status == 'OK': domain_record_json = json.get('domain_record') domain_record = Record.from_json(domain_record_json) return domain_record else: message = json.get('message') raise DOPException('[%s]: %s' % (status, message))
0.002823
def _parsed_cmd(self): """ We need to take into account two cases: - ['python code.py foo bar']: Used mainly with dvc as a library - ['echo', 'foo bar']: List of arguments received from the CLI The second case would need quoting, as it was passed through: dvc run echo "foo bar" """ if len(self.args.command) < 2: return " ".join(self.args.command) return " ".join(self._quote_argument(arg) for arg in self.args.command)
0.003883
def verify_response(response, status_code, content_type=None): """Verifies that a response has the expected status and content type. Args: response: The ResponseTuple to be checked. status_code: An int, the HTTP status code to be compared with response status. content_type: A string with the acceptable Content-Type header value. None allows any content type. Returns: True if both status_code and content_type match, else False. """ status = int(response.status.split(' ', 1)[0]) if status != status_code: return False if content_type is None: return True for header, value in response.headers: if header.lower() == 'content-type': return value == content_type # If we fall through to here, the verification has failed, so return False. return False
0.00464
def guest_config_minidisks(self, userid, disk_info): """Punch the script that used to process additional disks to vm :param str userid: the user id of the vm :param disk_info: a list contains disks info for the guest. It contains dictionaries that describes disk info for each disk. Each dictionary has 3 keys, format is required, vdev and mntdir are optional. For example, if vdev is not specified, it will start from the next vdev of CONF.zvm.user_root_vdev, eg. if CONF.zvm.user_root_vdev is 0100, zvmsdk will use 0101 as the vdev for first additional disk in disk_info, and if mntdir is not specified, zvmsdk will use /mnt/ephemeral0 as the mount point of first additional disk Here are some examples: [{'vdev': '0101', 'format': 'ext3', 'mntdir': '/mnt/ephemeral0'}] In this case, the zvmsdk will treat 0101 as additional disk's vdev, and it's formatted with ext3, and will be mounted to /mnt/ephemeral0 [{'format': 'ext3'}, {'format': 'ext4'}] In this case, if CONF.zvm.user_root_vdev is 0100, zvmsdk will configure the first additional disk as 0101, mount it to /mnt/ephemeral0 with ext3, and configure the second additional disk 0102, mount it to /mnt/ephemeral1 with ext4. """ action = "config disks for userid '%s'" % userid with zvmutils.log_and_reraise_sdkbase_error(action): self._vmops.guest_config_minidisks(userid, disk_info)
0.001162
def is_contiguous(self): """Return offset and size of contiguous data, else None. Excludes prediction and fill_order. """ if (self.compression != 1 or self.bitspersample not in (8, 16, 32, 64)): return None if 'TileWidth' in self.tags: if (self.imagewidth != self.tilewidth or self.imagelength % self.tilelength or self.tilewidth % 16 or self.tilelength % 16): return None if ('ImageDepth' in self.tags and 'TileDepth' in self.tags and (self.imagelength != self.tilelength or self.imagedepth % self.tiledepth)): return None offsets = self.dataoffsets bytecounts = self.databytecounts if len(offsets) == 1: return offsets[0], bytecounts[0] if self.is_stk or all((offsets[i] + bytecounts[i] == offsets[i+1] or bytecounts[i+1] == 0) # no data/ignore offset for i in range(len(offsets)-1)): return offsets[0], sum(bytecounts) return None
0.001714
def to_dict(self): """Returns OrderedDict whose keys are self.attrs""" ret = OrderedDict() for attrname in self.attrs: ret[attrname] = self.__getattribute__(attrname) return ret
0.00885
def make_epub_base(location): """ Creates the base structure for an EPUB file in a specified location. This function creates constant components for the structure of the EPUB in a specified directory location. Parameters ---------- location : str A path string to a local directory in which the EPUB is to be built """ log.info('Making EPUB base files in {0}'.format(location)) with open(os.path.join(location, 'mimetype'), 'w') as out: # mimetype file out.write('application/epub+zip') # Create EPUB and META-INF directories os.mkdir(os.path.join(location, 'META-INF')) os.mkdir(os.path.join(location, 'EPUB')) os.mkdir(os.path.join(location, 'EPUB', 'css')) with open(os.path.join(location, 'META-INF', 'container.xml'), 'w') as out: out.write('''\ <?xml version="1.0" encoding="UTF-8"?> <container version="1.0" xmlns="urn:oasis:names:tc:opendocument:xmlns:container"> <rootfiles> <rootfile full-path="EPUB/package.opf" media-type="application/oebps-package+xml"/> </rootfiles> </container>''') with open(os.path.join(location, 'EPUB', 'css', 'default.css') ,'wb') as out: out.write(bytes(DEFAULT_CSS, 'UTF-8'))
0.00571
def brightness(level=100, group=0): """ Assumes level is out of 100 """ if level not in range(0,101): raise Exception("Brightness must be value between 0 and 100") b = int(floor(level / 4.0) + 2) #lights want values 2-27 return (COMMANDS['ON'][group], Command(0x4E, b))
0.013652
def readObject(self): """ Reads an anonymous object from the data stream. @rtype: L{ASObject<pyamf.ASObject>} """ obj = pyamf.ASObject() self.context.addObject(obj) obj.update(self.readObjectAttributes(obj)) return obj
0.007018
def fetchall_textdefs(ont): """ fetch all text defs for an ontology """ logging.info("fetching text defs for: "+ont) namedGraph = get_named_graph(ont) query = """ prefix IAO: <http://purl.obolibrary.org/obo/IAO_> SELECT * WHERE {{ GRAPH <{g}> {{ ?c IAO:0000115 ?d }} FILTER (!isBlank(?c)) }} """.format(g=namedGraph) bindings = run_sparql(query) rows = [(r['c']['value'], r['d']['value']) for r in bindings] return rows
0.002041
def lookup_future_symbol(self, symbol): """Lookup a future contract by symbol. Parameters ---------- symbol : str The symbol of the desired contract. Returns ------- future : Future The future contract referenced by ``symbol``. Raises ------ SymbolNotFound Raised when no contract named 'symbol' is found. """ data = self._select_asset_by_symbol(self.futures_contracts, symbol)\ .execute().fetchone() # If no data found, raise an exception if not data: raise SymbolNotFound(symbol=symbol) return self.retrieve_asset(data['sid'])
0.002774
def _logsumexp(ary, *, b=None, b_inv=None, axis=None, keepdims=False, out=None, copy=True): """Stable logsumexp when b >= 0 and b is scalar. b_inv overwrites b unless b_inv is None. """ # check dimensions for result arrays ary = np.asarray(ary) if ary.dtype.kind == "i": ary = ary.astype(np.float64) dtype = ary.dtype.type shape = ary.shape shape_len = len(shape) if isinstance(axis, Sequence): axis = tuple(axis_i if axis_i >= 0 else shape_len + axis_i for axis_i in axis) agroup = axis else: axis = axis if (axis is None) or (axis >= 0) else shape_len + axis agroup = (axis,) shape_max = ( tuple(1 for _ in shape) if axis is None else tuple(1 if i in agroup else d for i, d in enumerate(shape)) ) # create result arrays if out is None: if not keepdims: out_shape = ( tuple() if axis is None else tuple(d for i, d in enumerate(shape) if i not in agroup) ) else: out_shape = shape_max out = np.empty(out_shape, dtype=dtype) if b_inv == 0: return np.full_like(out, np.inf, dtype=dtype) if out.shape else np.inf if b_inv is None and b == 0: return np.full_like(out, -np.inf) if out.shape else -np.inf ary_max = np.empty(shape_max, dtype=dtype) # calculations ary.max(axis=axis, keepdims=True, out=ary_max) if copy: ary = ary.copy() ary -= ary_max np.exp(ary, out=ary) ary.sum(axis=axis, keepdims=keepdims, out=out) np.log(out, out=out) if b_inv is not None: ary_max -= np.log(b_inv) elif b: ary_max += np.log(b) out += ary_max.squeeze() if not keepdims else ary_max # transform to scalar if possible return out if out.shape else dtype(out)
0.001604
def parse_form(self, req, name, field): """Pull a form value from the request.""" try: return core.get_value(req.form, name, field) except AttributeError: pass return core.missing
0.008511
def zipfiles(self, path=None, arcdirname='data'): """Returns a .zip archive of selected rasters.""" if path: fp = open(path, 'w+b') else: prefix = '%s-' % arcdirname fp = tempfile.NamedTemporaryFile(prefix=prefix, suffix='.zip') with zipfile.ZipFile(fp, mode='w') as zf: for obj in self: img = obj.image arcname = os.path.join(arcdirname, os.path.basename(img.name)) try: zf.write(img.path, arcname=arcname) except OSError: img.seek(0) zf.writestr(arcname, img.read()) img.close() fp.seek(0) zobj = self.model(image=fp) return [zobj]
0.002561
def configure_filters(app): """ Configure application filters (jinja2) """ for (name, filter) in _filters.iteritems(): app.jinja_env.filters[name] = filter
0.005814
def translate(value): """ Translates given schema from "pythonic" syntax to a validator. Usage:: >>> translate(str) IsA(str) >>> translate('hello') IsA(str, default='hello') """ if isinstance(value, BaseValidator): return value if value is None: return Anything() if isinstance(value, type): return IsA(value) if type(value) in compat.func_types: real_value = value() return IsA(type(real_value), default=real_value) if isinstance(value, list): if value == []: # no inner spec, just an empty list as the default value return IsA(list) elif len(value) == 1: # the only item as spec for each item of the collection return ListOf(translate(value[0])) else: raise StructureSpecificationError( 'Expected a list containing exactly 1 item; ' 'got {cnt}: {spec}'.format(cnt=len(value), spec=value)) if isinstance(value, dict): if not value: return IsA(dict) items = [] for k, v in value.items(): if isinstance(k, BaseValidator): k_validator = k else: k_validator = translate(k) default = k_validator.get_default_for(None) if default is not None: k_validator = Equals(default) v_validator = translate(v) items.append((k_validator, v_validator)) return DictOf(items) return IsA(type(value), default=value)
0.00062
def base_path(self): """Base absolute path of container.""" return os.path.join(self.container.base_path, self.name)
0.015152
def _keep_alive(self): """ Send keep alive messages continuously to bridge. """ send_next_keep_alive_at = 0 while not self.is_closed: if not self.is_ready: self._reconnect() continue if time.monotonic() > send_next_keep_alive_at: command = KEEP_ALIVE_COMMAND_PREAMBLE + [self.wb1, self.wb2] self._send_raw(command) need_response_by = time.monotonic() + KEEP_ALIVE_TIME # Wait for responses timeout = max(0, need_response_by - time.monotonic()) ready = select.select([self._socket], [], [], timeout) if ready[0]: try: response = bytearray(12) self._socket.recv_into(response) if response[:5] == bytearray(KEEP_ALIVE_RESPONSE_PREAMBLE): send_next_keep_alive_at = need_response_by except (socket.error, socket.timeout): with self._lock: self.is_ready = False elif send_next_keep_alive_at < need_response_by: # Acquire the lock to make sure we don't change self.is_ready # while _consume() is sending commands with self._lock: self.is_ready = False
0.00145
def get_repositories(self, digests): """ Build the repositories metadata :param digests: dict, image -> digests """ if self.workflow.push_conf.pulp_registries: # If pulp was used, only report pulp images registries = self.workflow.push_conf.pulp_registries else: # Otherwise report all the images we pushed registries = self.workflow.push_conf.all_registries output_images = [] for registry in registries: image = self.pullspec_image.copy() image.registry = registry.uri pullspec = image.to_str() output_images.append(pullspec) digest_list = digests.get(image.to_str(registry=False), ()) for digest in digest_list: digest_pullspec = image.to_str(tag=False) + "@" + digest output_images.append(digest_pullspec) return output_images
0.002086
def _expand_terms(self, terms): """ Expands terms of the dataset to the appropriate fields. It will parse the search phrase and return only the search term components that are applicable to a Dataset query. Args: terms (dict or str): Returns: dict: keys are field names, values are query strings """ ret = { 'keywords': list(), 'doc': list()} if not isinstance(terms, dict): stp = SearchTermParser() terms = stp.parse(terms, term_join=self.backend._and_join) if 'about' in terms: ret['doc'].append(terms['about']) if 'source' in terms: ret['keywords'].append(terms['source']) return ret
0.005202
def get_canonical_block_hash(self, block_number: BlockNumber) -> Hash32: """ Returns the block hash for the canonical block at the given number. Raises BlockNotFound if there's no block header with the given number in the canonical chain. """ return self._get_canonical_block_hash(self.db, block_number)
0.008523
def isobaric_expansion_g(self): r'''Isobaric (constant-pressure) expansion of the gas phase of the chemical at its current temperature and pressure, in units of [1/K]. .. math:: \beta = \frac{1}{V}\left(\frac{\partial V}{\partial T} \right)_P Utilizes the temperature-derivative method of :obj:`thermo.VolumeGas` to perform the actual calculation. The derivatives are all numerical. Examples -------- >>> Chemical('Hexachlorobenzene', T=900).isobaric_expansion_g 0.001151869741981048 ''' dV_dT = self.VolumeGas.TP_dependent_property_derivative_T(self.T, self.P) Vm = self.Vmg if dV_dT and Vm: return isobaric_expansion(V=Vm, dV_dT=dV_dT)
0.003866
def ipython_paste(self,e): u"""Paste windows clipboard. If enable_ipython_paste_list_of_lists is True then try to convert tabseparated data to repr of list of lists or repr of array. If enable_ipython_paste_for_paths==True then change \\ to / and spaces to \space""" if self.enable_win32_clipboard: txt=clipboard.get_clipboard_text_and_convert( self.enable_ipython_paste_list_of_lists) if self.enable_ipython_paste_for_paths: if len(txt)<300 and ("\t" not in txt) and ("\n" not in txt): txt=txt.replace("\\","/").replace(" ",r"\ ") self.insert_text(txt) self.finalize()
0.021711
def print_table(seqs, id2name, name): """ print table of results # seqs[id] = [gene, model, [[i-gene_pos, i-model_pos, i-length, iseq, [orfs], [introns], orfs?, introns?], ...]] """ itable = open('%s.itable' % (name.rsplit('.', 1)[0]), 'w') print('\t'.join(['#sequence', 'gene', 'model', 'insertion', 'gene position', 'model position', 'length', 'orf?', 'intron?', 'orf?intron?', 'insertion', 'orf', 'intron']), file=itable) for seq, info in list(seqs.items()): gene, model, insertions = info name = id2name[seq] for i, ins in enumerate(insertions, 1): gene_pos, model_pos, length, iseq, \ orfs, introns, orfs_b, introns_b, orf_annotations = ins # append annotation to orf header for orf in orfs: parts = orf[0].split() annotation = orf_annotations[parts[0].split('>')[1]] orf[0] = '%s %s %s' % (parts[0], annotation, ' '.join(parts[1:])) # get orf position gene_pos = '-'.join([str(j) for j in gene_pos]) # check if orf, intron is present if orfs_b is True or introns_b is True: orfs_introns_b = True else: orfs_introns_b = False out = [name, gene, model, i, gene_pos, model_pos, length, orfs_b, introns_b, orfs_introns_b] out.append('|'.join(iseq)) out.append('|'.join(['|'.join(orf) for orf in orfs])) out.append('|'.join(['|'.join(intron) for intron in introns])) print('\t'.join([str(i) for i in out]), file=itable) itable.close()
0.003036
def do_metric_name_list(mc, args): '''List names of metrics.''' fields = {} if args.dimensions: fields['dimensions'] = utils.format_dimensions_query(args.dimensions) if args.limit: fields['limit'] = args.limit if args.offset: fields['offset'] = args.offset if args.tenant_id: fields['tenant_id'] = args.tenant_id try: metric_names = mc.metrics.list_names(**fields) except (osc_exc.ClientException, k_exc.HttpError) as he: raise osc_exc.CommandError('%s\n%s' % (he.message, he.details)) else: if args.json: print(utils.json_formatter(metric_names)) return if isinstance(metric_names, list): utils.print_list(metric_names, ['Name'], formatters={'Name': lambda x: x['name']})
0.002472
def scroll(self, rect, dx, dy, attr=None, fill=' '): u'''Scroll a rectangle.''' if attr is None: attr = self.attr x0, y0, x1, y1 = rect source = SMALL_RECT(x0, y0, x1 - 1, y1 - 1) dest = self.fixcoord(x0 + dx, y0 + dy) style = CHAR_INFO() style.Char.AsciiChar = ensure_str(fill[0]) style.Attributes = attr return self.ScrollConsoleScreenBufferW(self.hout, byref(source), byref(source), dest, byref(style))
0.007339
def whois_domains(self, domains): """Calls WHOIS domain end point Args: domains: An enumerable of domains Returns: A dict of {domain: domain_result} """ api_name = 'opendns-whois-domain' fmt_url_path = u'whois/{0}' return self._multi_get(api_name, fmt_url_path, domains)
0.005698
def urls(order_by: Optional[str] = None): """List all URLs registered with the app.""" url_rules: List[Rule] = current_app.url_map._rules # sort the rules. by default they're sorted by priority, # ie in the order they were registered with the app if order_by == 'view': url_rules = sorted(url_rules, key=lambda rule: _get_rule_view(rule)) elif order_by != 'priority': url_rules = sorted(url_rules, key=lambda rule: getattr(rule, order_by)) headings = ('Method(s)', 'Rule', 'Endpoint', 'View', 'Options') print_table(headings, [(_get_http_methods(url_rule), url_rule.rule if url_rule.strict_slashes else url_rule.rule.rstrip('/') + '[/]', url_rule.endpoint, _get_rule_view(url_rule), _format_rule_options(url_rule), ) for url_rule in url_rules], ['<' if i > 0 else '>' for i, col in enumerate(headings)], primary_column_idx=1)
0.001914
async def download_artifacts(context, file_urls, parent_dir=None, session=None,
                             download_func=download_file, valid_artifact_task_ids=None):
    """Download artifacts in parallel after validating their URLs.

    Valid ``taskId``s for download include the task's dependencies and the
    ``taskGroupId``, which by convention is the ``taskId`` of the decision task.

    Args:
        context (scriptworker.context.Context): the scriptworker context.
        file_urls (list): the list of artifact urls to download.
        parent_dir (str, optional): the path of the directory to download the
            artifacts into. If None, defaults to ``work_dir``. Default is None.
        session (aiohttp.ClientSession, optional): the session to use to download.
            If None, defaults to context.session. Default is None.
        download_func (function, optional): the function to call to download the
            files. default is ``download_file``.
        valid_artifact_task_ids (list, optional): the list of task ids that are
            valid to download from. If None, defaults to all task dependencies
            plus the decision taskId. Defaults to None.

    Returns:
        list: the full paths to the files downloaded

    Raises:
        scriptworker.exceptions.BaseDownloadError: on download failure after
            any applicable retries.

    """
    parent_dir = parent_dir or context.config['work_dir']
    session = session or context.session

    tasks = []
    files = []
    valid_artifact_rules = context.config['valid_artifact_rules']
    # XXX when chain of trust is on everywhere, hardcode the chain of trust task list
    valid_artifact_task_ids = valid_artifact_task_ids or list(context.task['dependencies'] + [get_decision_task_id(context.task)])
    for file_url in file_urls:
        rel_path = validate_artifact_url(valid_artifact_rules, valid_artifact_task_ids, file_url)
        abs_file_path = os.path.join(parent_dir, rel_path)
        files.append(abs_file_path)
        tasks.append(
            asyncio.ensure_future(
                retry_async(
                    download_func, args=(context, file_url, abs_file_path),
                    retry_exceptions=(DownloadError, aiohttp.ClientError, asyncio.TimeoutError),
                    kwargs={'session': session},
                )
            )
        )

    await raise_future_exceptions(tasks)
    return files
0.004085
def frame_apply(obj, func, axis=0, broadcast=None,
                raw=False, reduce=None, result_type=None,
                ignore_failures=False,
                args=None, kwds=None):
    """ construct and return a row or column based frame apply object """

    axis = obj._get_axis_number(axis)
    if axis == 0:
        klass = FrameRowApply
    elif axis == 1:
        klass = FrameColumnApply

    return klass(obj, func, broadcast=broadcast,
                 raw=raw, reduce=reduce, result_type=result_type,
                 ignore_failures=ignore_failures,
                 args=args, kwds=kwds)
0.001653
def ensure_dir(dir_path):
    """
    If DIR_PATH does not exist, makes it. Failing that, raises Exception.
    Returns True if dir already existed; False if it had to be made.
    """
    exists = dir_exists(dir_path)
    if not exists:
        try:
            os.makedirs(dir_path)
        except (Exception, RuntimeError) as e:
            raise Exception("Unable to create directory %s. Cause %s" %
                            (dir_path, e))
    return exists
0.004348
def _from_json_list(cls, response_raw, wrapper=None):
    """
    :type response_raw: client.BunqResponseRaw
    :type wrapper: str|None

    :rtype: client.BunqResponse[list[cls]]
    """
    json = response_raw.body_bytes.decode()
    obj = converter.json_to_class(dict, json)
    array = obj[cls._FIELD_RESPONSE]
    array_deserialized = []

    for item in array:
        item_unwrapped = item if wrapper is None else item[wrapper]
        item_deserialized = converter.deserialize(cls, item_unwrapped)
        array_deserialized.append(item_deserialized)

    pagination = converter.deserialize(client.Pagination, obj[cls._FIELD_PAGINATION])

    return client.BunqResponse(array_deserialized, response_raw.headers, pagination)
0.002296
def get_window_size(self, window):
    """
    Get a window's size.
    """
    w_ret = ctypes.c_uint(0)
    h_ret = ctypes.c_uint(0)
    _libxdo.xdo_get_window_size(self._xdo, window, ctypes.byref(w_ret),
                                ctypes.byref(h_ret))
    return window_size(w_ret.value, h_ret.value)
0.0059
def disable_contact_svc_notifications(self, contact):
    """Disable service notifications for a contact

    Format of the line that triggers function call::

        DISABLE_CONTACT_SVC_NOTIFICATIONS;<contact_name>

    :param contact: contact to disable
    :type contact: alignak.objects.contact.Contact
    :return: None
    """
    if contact.service_notifications_enabled:
        contact.modified_attributes |= DICT_MODATTR["MODATTR_NOTIFICATIONS_ENABLED"].value
        contact.service_notifications_enabled = False
        self.send_an_element(contact.get_update_status_brok())
0.004792
def __load_countries(self, db):
    """Load the list of countries"""

    try:
        countries = self.__read_countries_file()
    except IOError as e:
        raise LoadError(str(e))

    try:
        with db.connect() as session:
            for country in countries:
                session.add(country)
    except Exception as e:
        raise LoadError(str(e))
0.004878
def equivalent_diameter(target, pore_volume='pore.volume',
                        pore_shape='sphere'):
    r"""
    Calculates the diameter of a sphere or edge-length of a cube with same
    volume as the pore.

    Parameters
    ----------
    target : OpenPNM Geometry Object
        The Geometry object which this model is associated with. This
        controls the length of the calculated array, and also provides
        access to other necessary geometric properties.

    pore_volume : string
        The dictionary key containing the pore volume values

    pore_shape : string
        The shape of the pore body to assume when back-calculating from
        volume. Options are 'sphere' (default) or 'cube'.

    """
    from scipy.special import cbrt
    pore_vols = target[pore_volume]
    if pore_shape.startswith('sph'):
        value = cbrt(6*pore_vols/_np.pi)
    elif pore_shape.startswith('cub'):
        value = cbrt(pore_vols)
    return value
0.001035
def discharge(self):
    """Discharge per unit length"""
    Q = np.zeros(self.aq.naq)
    Q[self.layers] = self.parameters[:, 0]
    return Q
0.012658
def register(name: str = None) -> type:
    """
    Register classes that could be initialized from JSON configuration file.
    If name is not passed, the class name is converted to snake-case.
    """

    def decorate(model_cls: type, reg_name: str = None) -> type:
        model_name = reg_name or short_name(model_cls)
        global _REGISTRY
        cls_name = model_cls.__module__ + ':' + model_cls.__name__
        if model_name in _REGISTRY and _REGISTRY[model_name] != cls_name:
            logger.warning('Registry name "{}" has been already registered and will be overwritten.'.format(model_name))
        _REGISTRY[model_name] = cls_name
        return model_cls

    return lambda model_cls_name: decorate(model_cls_name, name)
0.002699
def validateStringInput(input_key, input_data, read=False):
    """
    To check if a string has the required format. This is only used for POST APIs.
    """
    log = clog.error_log
    func = None
    if '*' in input_data or '%' in input_data:
        func = validationFunctionWildcard.get(input_key)
        if func is None:
            func = searchstr
    elif input_key == 'migration_input':
        if input_data.find('#') != -1:
            func = block
        else:
            func = dataset
    else:
        if not read:
            func = validationFunction.get(input_key)
            if func is None:
                func = namestr
        else:
            if input_key == 'dataset':
                func = reading_dataset_check
            elif input_key == 'block_name':
                func = reading_block_check
            elif input_key == 'logical_file_name':
                func = reading_lfn_check
            else:
                func = namestr
    try:
        func(input_data)
    except AssertionError as ae:
        serverLog = str(ae) + " key-value pair (%s, %s) cannot pass input checking" % (input_key, input_data)
        #print serverLog
        dbsExceptionHandler("dbsException-invalid-input2",
                            message="Invalid Input Data %s...: Not Match Required Format" % input_data[:10],
                            logger=log.error, serverError=serverLog)
    return input_data
0.010957
def month_boundaries(dt=None):
    '''
    Return a 2-tuple containing the datetime instances for the first and last
    dates of the current month or using ``dt`` as a reference.
    '''
    dt = dt or date.today()
    wkday, ndays = calendar.monthrange(dt.year, dt.month)
    start = datetime(dt.year, dt.month, 1)
    return (start, start + timedelta(ndays - 1))
0.002732
def update_from_defaultParams(self, defaultParams=None, plotters=True):
    """Update from a dictionary like the :attr:`defaultParams`

    Parameters
    ----------
    defaultParams: dict
        The :attr:`defaultParams` like dictionary. If None, the
        :attr:`defaultParams` attribute will be updated
    plotters: bool
        If True, ``'project.plotters'`` will be updated too"""
    if defaultParams is None:
        defaultParams = self.defaultParams
    self.update({key: val[0] for key, val in defaultParams.items()
                 if plotters or key != 'project.plotters'})
0.004431
def create_case(self, name, email, subject, description, businessImpact,
                priority, phone):
    """
    Send a case creation to Salesforce to create a ticket.

    @param name of the person creating the case.
    @param email of the person creating the case.
    @param subject of the case.
    @param description of the case.
    @param businessImpact of the case.
    @param priority of the case.
    @param phone of the person creating the case.
    @return Nothing if this is ok.
    @raise ServerError when something goes wrong.
    @raise ValueError when data passed in are invalid
    """
    if not('@' in parseaddr(email)[1]):
        raise ValueError('invalid email: {}'.format(email))
    if '' == name or name is None:
        raise ValueError('empty name')
    if '' == subject or subject is None:
        raise ValueError('empty subject')
    if '' == description or description is None:
        raise ValueError('empty description')
    if '' == businessImpact or businessImpact is None:
        raise ValueError('empty business impact')
    if priority is None:
        raise ValueError('Ensure the priority is from the set of '
                         'known priorities')
    if '' == phone or phone is None:
        raise ValueError('empty phone')
    try:
        r = requests.post(self.url,
                          data={
                              'orgid': self.orgId,
                              'recordType': self.recordType,
                              'name': name,
                              'email': email,
                              'subject': subject,
                              'description': description,
                              self.BUSINESS_IMPACT: businessImpact,
                              'priority': priority,
                              'phone': phone,
                              'external': 1
                          },
                          timeout=self.timeout)
        r.raise_for_status()
    except Timeout:
        message = 'Request timed out: {url} timeout: {timeout}'
        message = message.format(url=self.url, timeout=self.timeout)
        log.error(message)
        raise ServerError(message)
    except RequestException as err:
        log.info('cannot create case: {}'.format(err))
        raise ServerError(
            'cannot create case: {}'.format(err))
0.00129
def _merge_ws(self, item, container):  # type: (Item, Container) -> bool
    """
    Merges the given Item with the last one currently in the given Container
    if both are whitespace items.

    Returns True if the items were merged.
    """
    last = container.last_item()
    if not last:
        return False

    if not isinstance(item, Whitespace) or not isinstance(last, Whitespace):
        return False

    start = self._idx - (len(last.s) + len(item.s))
    container.body[-1] = (
        container.body[-1][0],
        Whitespace(self._src[start : self._idx]),
    )

    return True
0.007564
def get_int(self, key, default=UndefinedKey):
    """Return int representation of value found at key

    :param key: key to use (dot separated). E.g., a.b.c
    :type key: basestring
    :param default: default value if key not found
    :type default: int
    :return: int value
    :type return: int
    """
    value = self.get(key, default)
    try:
        return int(value) if value is not None else None
    except (TypeError, ValueError):
        raise ConfigException(
            u"{key} has type '{type}' rather than 'int'".format(key=key, type=type(value).__name__))
0.004724
def reinterpret_bits_to_hstruct(sigOrVal, hStructT):
    """
    Reinterpret signal of type Bits to signal of type HStruct
    """
    container = hStructT.fromPy(None)
    offset = 0
    for f in hStructT.fields:
        t = f.dtype
        width = t.bit_length()
        if f.name is not None:
            s = sigOrVal[(width + offset):offset]
            s = s._reinterpret_cast(t)
            setattr(container, f.name, s)

        offset += width

    return container
0.002114
def validate(self):
    """
    Validate attributes by running all self._validate_*() methods.

    :raises TypeError: if an attribute has invalid type
    :raises ValueError: if an attribute contains invalid value
    """
    method_names = sorted([i for i in dir(self)
                           if i.startswith("_validate") and callable(getattr(self, i))])
    for method_name in method_names:
        method = getattr(self, method_name)
        method()
0.006438
def calculate_dielectric_properties(dielectric, properties, average=True):
    r"""Calculate optical properties from the dielectric function

    Supported properties:

    Absorption
    ~~~~~~~~~~

    The unit of alpha is :math:`\mathrm{cm}^{-1}`.

    Refractive index :math:`n` has real and imaginary parts:

    .. math::

        n = [(e^\prime + ie^{\prime\prime}) / e_0]^{1/2}
          = n^\prime + in^{\prime\prime}

    Relationship between :math:`a` and imaginary :math:`n^{\prime\prime}`:

    .. math::

        a = 4 \pi n^{\prime\prime} / \lambda

    Where:

    .. math:: \lambda = hc/E

    Args:
        dielectric (tuple): The high-frequency dielectric data, following
            the same format as :obj:`pymatgen.io.vasp.Vasprun.dielectric`.
            This is a :obj:`tuple` containing the energy, the real part of the
            dielectric tensor, and the imaginary part of the tensor, as a
            :obj:`list` of :obj:`floats`. E.g.::

                (
                    [energies],
                    [[real_xx, real_yy, real_zz, real_xy, real_yz, real_xz]],
                    [[imag_xx, imag_yy, imag_zz, imag_xy, imag_yz, imag_xz]]
                )

        properties (set):
            The set of properties to return. Intermediate properties will be
            calculated as needed. Accepted values: 'eps_real', 'eps_imag',
            'absorption', 'loss', 'n_real', 'n_imag'

        average (:obj:`bool`, optional): Average the dielectric response across
            the xx, yy, zz directions and calculate properties with scalar
            maths. Defaults to ``True``. If False, solve dielectric matrix to
            obtain directional properties, returning xx, yy, zz components.
            This may be significantly slower!

    Returns:
        :obj:`tuple` of :obj:`list` of :obj:`float`: The optical absorption in
        :math:`\mathrm{cm}^{-1}`. If ``average`` is ``True``, the data will be
        returned as::

            ([energies], [property]).

        If ``average`` is ``False``, the data will be returned as::

            ([energies], [property_xx, property_yy, property_zz]).
    """
    results = {}

    def _update_results(keys_vals):
        """Update results dict with selected properties only"""
        results.update({prop: (energies, data)
                        for prop, data in keys_vals.items()
                        if (prop in properties)})
        return results

    energies = np.array(dielectric[0])
    real_eps = np.array(dielectric[1])
    imag_eps = np.array(dielectric[2])

    if average:
        real_eps = np.average(real_eps[:, :3], axis=1)
        imag_eps = np.average(imag_eps[:, :3], axis=1)

        results = _update_results({'eps_real': real_eps,
                                   'eps_imag': imag_eps})

        eps = real_eps + 1j * imag_eps

        if 'loss' in properties:
            loss = -np.imag(1/eps)
            _update_results({'loss': loss})

        if properties.intersection({'n_real', 'n_imag', 'absorption'}):
            n = np.sqrt(eps)
            _update_results({'n_real': n.real,
                             'n_imag': n.imag})

            if 'absorption' in properties:
                alpha = n.imag * energies * 4 * np.pi / 1.23984212E-4
                _update_results({'absorption': alpha})

    else:
        # Work with eps as complex numbers in 9-column 'flattened' matrix
        # First interpret 6-column data as symmetric matrix
        # Input form xx yy zz xy yz xz
        # Indices    0  1  2  3  4  5
        n_rows = real_eps.shape[0]
        eps = real_eps + 1j * imag_eps

        eps = np.array([eps[:, 0], eps[:, 3], eps[:, 5],
                        eps[:, 3], eps[:, 1], eps[:, 4],
                        eps[:, 5], eps[:, 4], eps[:, 2]]).T

        _update_results(
            {'eps_real': eps.real[:, [0, 4, 8]],
             'eps_imag': eps.imag[:, [0, 4, 8]]})

        # Invert epsilon to obtain energy-loss function
        if 'loss' in properties:
            def matrix_loss_func(eps_row):
                eps_matrix = eps_row.reshape(3, 3)
                return -np.linalg.inv(eps_matrix).imag.flatten()

            loss = np.array([matrix_loss_func(row) for row in eps])
            _update_results({'loss': loss[:, [0, 4, 8]]})

        if properties.intersection({'n_real', 'n_imag', 'absorption'}):
            def matrix_n(eps_row):
                eps_matrix = eps_row.reshape(3, 3)
                eigenvals, v = np.linalg.eig(eps_matrix)
                d = np.diag(eigenvals)
                n = v @ np.sqrt(d) @ np.linalg.inv(v)  # Py3.5 matrix mult @ =D
                return n.flatten()

            n = np.array([matrix_n(row) for row in eps])
            _update_results({'n_real': n.real[:, [0, 4, 8]],
                             'n_imag': n.imag[:, [0, 4, 8]]})

            if 'absorption' in properties:
                alpha = (n.imag * energies.reshape(n_rows, 1)
                         * 4 * np.pi / 1.23984212E-4)
                _update_results({'absorption': alpha[:, [0, 4, 8]]})

    return results
0.000195
def _merge_dimensions(dimension, preferred=None, dont_extend=False):
    """
    Take the LayoutDimension from this `Window` class and the received
    preferred size from the `UIControl` and return a `LayoutDimension` to
    report to the parent container.
    """
    dimension = dimension or LayoutDimension()

    # When a preferred dimension was explicitly given to the Window,
    # ignore the UIControl.
    if dimension.preferred_specified:
        preferred = dimension.preferred

    # When a 'preferred' dimension is given by the UIControl, make sure
    # that it stays within the bounds of the Window.
    if preferred is not None:
        if dimension.max:
            preferred = min(preferred, dimension.max)

        if dimension.min:
            preferred = max(preferred, dimension.min)

    # When a `dont_extend` flag has been given, use the preferred dimension
    # also as the max dimension.
    if dont_extend and preferred is not None:
        max_ = min(dimension.max, preferred)
    else:
        max_ = dimension.max

    return LayoutDimension(
        min=dimension.min, max=max_,
        preferred=preferred, weight=dimension.weight)
0.001576
def decrypt(data, key):
    '''decrypt the data with the key'''
    data_len = len(data)
    data = ffi.from_buffer(data)
    key = ffi.from_buffer(__tobytes(key))
    out_len = ffi.new('size_t *')
    result = lib.xxtea_decrypt(data, data_len, key, out_len)
    ret = ffi.buffer(result, out_len[0])[:]
    lib.free(result)
    return ret
0.002959
def _rolling_window(self, array, size):
    """
    Compute rolling windows of width ``size`` of the given array.

    Return a numpy 2D stride array, where rows are the windows,
    each of ``size`` elements.

    :param array: the data array
    :type  array: numpy 1D array (n)
    :param int size: the width of each window
    :rtype: numpy 2D stride array (n // size, size)
    """
    shape = array.shape[:-1] + (array.shape[-1] - size + 1, size)
    strides = array.strides + (array.strides[-1],)
    return numpy.lib.stride_tricks.as_strided(array, shape=shape, strides=strides)
0.004732
def headers(self):
    """
    The contig ID must be twenty characters or fewer. The names of the headers
    created following SPAdes assembly are usually far too long. This renames
    them as the sample name
    """
    for sample in self.metadata.samples:
        # Create an attribute to store the path/file name of the fasta file with fixed headers
        sample.general.fixedheaders = sample.general.bestassemblyfile.replace('.fasta', '.ffn')
        sample.general.fixedheaders = os.path.abspath(sample.general.fixedheaders)
        # A list of contigs with modified record.id values
        fixedheaders = list()
        # Only do this if the file with fixed headers hasn't previously been created
        if not os.path.isfile(sample.general.fixedheaders):
            # Refseq genomes don't necessarily have underscores (or contig numbers) in the headers
            count = 0
            formatcount = '{:04d}'.format(count)
            for record in SeqIO.parse(open(sample.general.bestassemblyfile, "rU"), "fasta"):
                # Split off anything following the contig number
                # >2013-SEQ-0129_1_length_303005_cov_13.1015_ID_17624 becomes
                # >2013-SEQ-0129_1
                record.id = record.id.split('_length')[0]
                # Prokka has a requirement that the header is unique and less than or equal to 20 characters
                if len(record.id) > 20:
                    # Extract the contig number from the string - assumption is that this number is the final
                    # entry in the string, and that there are underscore separating the different components
                    contignumber = record.id.split('_')[-1] if '_' in record.id else formatcount
                    # Subtract the length of the contig number (and an additional one for the underscore) from
                    # 20 for the string slice, and add the contig number at the end
                    record.id = record.id[:(20 - len(contignumber) - 1)] + '_{}'.format(formatcount)
                # Clear the name and description attributes of the record
                record.name = ''
                record.description = ''
                # Add this record to our list
                fixedheaders.append(record)
            # Open the filtered assembly file
            with open(sample.general.fixedheaders, 'w') as formatted:
                # Write the records in the list to the file
                SeqIO.write(fixedheaders, formatted, 'fasta')
0.006425
def transition(trname='', field='', check=None, before=None, after=None):
    """Decorator to declare a function as a transition implementation."""

    if is_callable(trname):
        raise ValueError(
            "The @transition decorator should be called as "
            "@transition(['transition_name'], **kwargs)")
    if check or before or after:
        warnings.warn(
            "The use of check=, before= and after= in @transition decorators is "
            "deprecated in favor of @transition_check, @before_transition and "
            "@after_transition decorators.",
            DeprecationWarning,
            stacklevel=2)
    return TransitionWrapper(trname, field=field, check=check, before=before, after=after)
0.004098
def _parse_numbered_syllable(unparsed_syllable):
    """Return the syllable and tone of a numbered Pinyin syllable."""
    tone_number = unparsed_syllable[-1]
    if not tone_number.isdigit():
        syllable, tone = unparsed_syllable, '5'
    elif tone_number == '0':
        syllable, tone = unparsed_syllable[:-1], '5'
    elif tone_number in '12345':
        syllable, tone = unparsed_syllable[:-1], tone_number
    else:
        raise ValueError("Invalid syllable: %s" % unparsed_syllable)
    return syllable, tone
0.001919
def _get_line(self):
    """Get a line or raise StopIteration"""
    line = self._f.readline()
    if len(line) == 0:
        raise StopIteration
    return line
0.01105