text: string (lengths 78 to 104k)
score: float64 (range 0 to 0.18)
def normalize_path_out(self, path):
    """Normalizes path sent to client

    :param path: path to normalize
    :return: normalized path
    """
    if path.startswith(self._CWD):
        normalized_path = path[len(self._CWD):]
    else:
        normalized_path = path
    # For remote debugging prepend client CWD
    if self._CLIENT_CWD:
        normalized_path = os.path.join(self._CLIENT_CWD, normalized_path)
    _logger.p_debug("normalize_path_out('%s') => %s", path, normalized_path)
    return normalized_path
0.005254
def load(self, d3mds):
    """Load X, y and context from D3MDS."""
    X, y = d3mds.get_data()
    return Dataset(d3mds.dataset_id, X, y)
0.013333
def wildcards_overlap(name1, name2):
    """Return True if two wildcard patterns can match the same string."""
    if not name1 and not name2:
        return True
    if not name1 or not name2:
        return False
    for matched1, matched2 in _character_matches(name1, name2):
        if wildcards_overlap(name1[matched1:], name2[matched2:]):
            return True
    return False
0.002597
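A usage sketch for wildcards_overlap above; it depends on a _character_matches helper not shown in the snippet, so these calls illustrate the intended behavior rather than a standalone run.

    assert wildcards_overlap("a*", "*b")    # both can match "ab"
    assert not wildcards_overlap("a", "b")  # no common string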
def create_folder(dirpath, overwrite=False):
    """Will create dirpath folder.

    If dirpath already exists and overwrite is False, will append a '+'
    suffix to dirpath until dirpath does not exist.
    """
    if not overwrite:
        while op.exists(dirpath):
            dirpath += '+'
    os.makedirs(dirpath, exist_ok=overwrite)
    return dirpath
0.007916
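A minimal usage sketch for create_folder above, assuming op is the usual os.path alias used by the snippet; the directory names are illustrative.

    import os
    import os.path as op  # alias assumed by the snippet

    d1 = create_folder('out')  # creates and returns 'out'
    d2 = create_folder('out')  # 'out' now exists, so returns 'out+'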
def conference_kick(self, call_params):
    """REST Conference Kick helper"""
    path = '/' + self.api_version + '/ConferenceKick/'
    method = 'POST'
    return self.request(path, method, call_params)
0.008772
def _partition_all_internal(s, sep):
    """Uses str.partition() to split every occurrence of sep in s.
    The returned list does not contain empty strings.

    :param s: The string to split.
    :param sep: A separator string.
    :return: A list of parts split by sep
    """
    parts = list(s.partition(sep))
    # if sep was found, recurse on the remainder
    if parts[1] == sep:
        new_parts = partition_all(parts[2], sep)
        parts.pop()
        parts.extend(new_parts)
        return [p for p in parts if p]
    else:
        if parts[0]:
            return [parts[0]]
        else:
            return []
0.003356
def get_port(self):
    """Return a port to use to talk to this cluster."""
    if len(self.client_nodes) > 0:
        node = self.client_nodes[0]
    else:
        node = self.nodes[0]
    return node.get_port()
0.008403
def _comprise(dict1, dict2):
    '''Return True if every key/value pair of dict2 is also in dict1.

    dict1 = {'a':1,'b':2,'c':3,'d':4}
    dict2 = {'b':2,'c':3}
    _comprise(dict1,dict2)
    '''
    if len(dict2) > len(dict1):
        return False
    # every pair in dict2 must match; fail fast on the first mismatch
    for k2, v2 in dict2.items():
        if k2 not in dict1 or dict1[k2] != v2:
            return False
    return True
0.005618
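A short worked example of _comprise above, with the loop checking every key/value pair:

    dict1 = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
    assert _comprise(dict1, {'b': 2, 'c': 3})  # subset of items -> True
    assert not _comprise(dict1, {'b': 9})      # value differs -> False
    assert not _comprise(dict1, {'z': 0})      # key missing -> False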
def integrate_drag_sphere(D, rhop, rho, mu, t, V=0, Method=None,
                          distance=False):
    r'''Integrates the velocity and distance traveled by a particle moving
    at a speed which will converge to its terminal velocity.

    Performs an integration of the following expression for acceleration:

    .. math::
        a = \frac{g(\rho_p-\rho_f)}{\rho_p} - \frac{3C_D \rho_f u^2}{4D \rho_p}

    Parameters
    ----------
    D : float
        Diameter of the sphere, [m]
    rhop : float
        Particle density, [kg/m^3]
    rho : float
        Density of the surrounding fluid, [kg/m^3]
    mu : float
        Viscosity of the surrounding fluid [Pa*s]
    t : float
        Time to integrate the particle to, [s]
    V : float
        Initial velocity of the particle, [m/s]
    Method : string, optional
        A string of the function name to use, as in the dictionary
        drag_sphere_correlations
    distance : bool, optional
        Whether or not to calculate the distance traveled and return it as
        well

    Returns
    -------
    v : float
        Velocity of falling sphere after time `t` [m/s]
    x : float, returned only if `distance` == True
        Distance traveled by the falling sphere in time `t`, [m]

    Notes
    -----
    This can be relatively slow as drag correlations can be complex.

    There are analytical solutions available for the Stokes law regime
    (Re < 0.3). They were obtained from Wolfram Alpha. [1]_ was not used in
    the derivation, but also describes the derivation fully.

    .. math::
        V(t) = \frac{\exp(-at) (V_0 a + b(\exp(at) - 1))}{a}

    .. math::
        x(t) = \frac{\exp(-a t)\left[V_0 a(\exp(a t) - 1) + b\exp(a t)(a t-1)
        + b\right]}{a^2}

    .. math::
        a = \frac{18\mu_f}{D^2\rho_p}

    .. math::
        b = \frac{g(\rho_p-\rho_f)}{\rho_p}

    The analytical solution will automatically be used if the initial and
    terminal velocities show the particle's behavior to be laminar. Note
    that this behavior requires that the terminal velocity of the particle
    be solved for - this adds slight (1%) overhead for the cases where
    particles are not laminar.

    Examples
    --------
    >>> integrate_drag_sphere(D=0.001, rhop=2200., rho=1.2, mu=1.78E-5,
    ... t=0.5, V=30, distance=True)
    (9.686465044053476, 7.8294546436299175)

    References
    ----------
    .. [1] Timmerman, Peter, and Jacobus P. van der Weele. "On the Rise and
       Fall of a Ball with Linear or Quadratic Drag." American Journal of
       Physics 67, no. 6 (June 1999): 538-46.
       https://doi.org/10.1119/1.19320.
    '''
    laminar_initial = Reynolds(V=V, rho=rho, D=D, mu=mu) < 0.01
    v_laminar_end_assumed = v_terminal(D=D, rhop=rhop, rho=rho, mu=mu,
                                       Method=Method)
    laminar_end = Reynolds(V=v_laminar_end_assumed, rho=rho, D=D,
                           mu=mu) < 0.01
    if Method == 'Stokes' or (laminar_initial and laminar_end
                              and Method is None):
        try:
            t1 = 18.0*mu/(D*D*rhop)
            t2 = g*(rhop-rho)/rhop
            V_end = exp(-t1*t)*(t1*V + t2*(exp(t1*t) - 1.0))/t1
            x_end = exp(-t1*t)*(V*t1*(exp(t1*t) - 1.0)
                                + t2*exp(t1*t)*(t1*t - 1.0) + t2)/(t1*t1)
            if distance:
                return V_end, x_end
            else:
                return V_end
        except OverflowError:
            # It is only necessary to integrate to terminal velocity
            t_to_terminal = time_v_terminal_Stokes(D, rhop, rho, mu, V0=V,
                                                   tol=1e-9)
            if t_to_terminal > t:
                raise Exception('Should never happen')
            V_end, x_end = integrate_drag_sphere(
                D=D, rhop=rhop, rho=rho, mu=mu, t=t_to_terminal, V=V,
                Method='Stokes', distance=True)
            # terminal velocity has been reached - V does not change, but
            # x does. No reason to believe this isn't working even though
            # it isn't matching the ode solver.
            if distance:
                return V_end, x_end + V_end*(t - t_to_terminal)
            else:
                return V_end
            # This is a serious problem for small diameters. It would be
            # possible to step slowly, using smaller increments of time to
            # avoid overflows. However, this unfortunately quickly gets
            # much, exponentially, slower than just using odeint because,
            # for example, solving 10000 seconds might require steps of
            # .0001 seconds at a diameter of 1e-7 meters.
            # x = 0.0
            # subdivisions = 10
            # dt = t/subdivisions
            # for i in range(subdivisions):
            #     V, dx = integrate_drag_sphere(D=D, rhop=rhop, rho=rho,
            #                                   mu=mu, t=dt, V=V,
            #                                   distance=True, Method=Method)
            #     x += dx
            # if distance:
            #     return V, x
            # else:
            #     return V

    Re_ish = rho*D/mu
    c1 = g*(rhop-rho)/rhop
    c2 = -0.75*rho/(D*rhop)

    def dv_dt(V, t):
        if V == 0:
            # 64/Re goes to infinity, but gets multiplied by 0 squared.
            t2 = 0.0
        else:
            # t2 = c2*V*V*Stokes(Re_ish*V)
            t2 = c2*V*V*drag_sphere(Re_ish*V, Method=Method)
        return c1 + t2

    # Number of intervals for the solution to be solved for; the integrator
    # doesn't care what we give it, but a large number of intervals are
    # needed for an accurate integration of the particle's distance traveled
    pts = 1000 if distance else 2
    ts = np.linspace(0, t, pts)

    # Delayed import of necessary functions
    from scipy.integrate import odeint, cumtrapz

    # Perform the integration
    Vs = odeint(dv_dt, [V], ts)
    V_end = float(Vs[-1])
    if distance:
        # Calculate the distance traveled
        x = float(cumtrapz(np.ravel(Vs), ts)[-1])
        return V_end, x
    else:
        return V_end
0.004172
def recoverMemory(buffer, size):
    """parse an XML in-memory block and build a tree. In the case the
    document is not Well Formed, an attempt to build a tree is tried
    anyway
    """
    ret = libxml2mod.xmlRecoverMemory(buffer, size)
    if ret is None:
        raise treeError('xmlRecoverMemory() failed')
    return xmlDoc(_obj=ret)
0.008955
def reset(target, settings):
    """Reset settings.

    :param target:
    :param settings:
    :return:
    """
    target_settings = _import_module(target)
    for k, v in settings.items():
        if hasattr(target_settings, k):
            setattr(target_settings, k,
                    _get_value(getattr(target_settings, k), v))
        else:
            logger.debug('AttributeError {target} has no attribute {k}'.format(
                target=target, k=k))
    return target_settings
0.006667
def generate(converter, input_file, format='xml', encoding='utf8'): """ Given a converter (as returned by compile()), this function reads the given input file and converts it to the requested output format. Supported output formats are 'xml', 'yaml', 'json', or 'none'. :type converter: compiler.Context :param converter: The compiled converter. :type input_file: str :param input_file: Name of a file to convert. :type format: str :param format: The output format. :type encoding: str :param encoding: Character encoding of the input file. :rtype: str :return: The resulting output. """ with codecs.open(input_file, encoding=encoding) as thefile: return generate_string(converter, thefile.read(), format=format)
0.001264
def get_relation_type(self, relation_type):
    """
    Get all the items of the given relationship type related to this item.
    """
    qs = self.get_queryset()
    return qs.filter(relation_type=relation_type)
0.008584
def get_content_metadata(id, version, cursor): """Return metadata related to the content from the database.""" # Do the module lookup args = dict(id=id, version=version) # FIXME We are doing two queries here that can hopefully be # condensed into one. cursor.execute(SQL['get-module-metadata'], args) try: result = cursor.fetchone()[0] # version is what we want to return, but in the sql we're using # current_version because otherwise there's a "column reference is # ambiguous" error result['version'] = result.pop('current_version') # FIXME We currently have legacy 'portal_type' names in the database. # Future upgrades should replace the portal type with a mimetype # of 'application/vnd.org.cnx.(module|collection|folder|<etc>)'. # Until then we will do the replacement here. result['mediaType'] = portaltype_to_mimetype(result['mediaType']) return result except (TypeError, IndexError,): # None returned raise httpexceptions.HTTPNotFound()
0.000908
def remap_status(val):
    """Convert status integer value to appropriate bitmask."""
    status = 0
    bad = BAD_DATA if val & 0xF0 else 0
    val &= 0x0F
    if val == 0:
        status = START_ELEVATION
    elif val == 1:
        status = 0
    elif val == 2:
        status = END_ELEVATION
    elif val == 3:
        status = START_ELEVATION | START_VOLUME
    elif val == 4:
        status = END_ELEVATION | END_VOLUME
    elif val == 5:
        status = START_ELEVATION | LAST_ELEVATION
    return status | bad
0.001931
def execute(self, env, args): """ Removes a task. `env` Runtime ``Environment`` instance. `args` Arguments object from arg parser. """ # extract args task_name = args.task_name force = args.force if env.task.active and env.task.name == task_name: raise errors.ActiveTask if not env.task.exists(task_name): raise errors.TaskNotFound(task_name) if force: env.task.remove(task_name) else: try: while True: prompt = ('Are you sure you want to delete "{0}" (y/n)? ' .format(task_name)) resp = env.io.prompt(prompt, newline=False).lower() if resp in ('y', 'n'): if resp == 'y': env.task.remove(task_name) break except KeyboardInterrupt: pass
0.001936
def scroll_event(self, widget, event): """ Called when a mouse is turned in the widget (and maybe for finger scrolling in the trackpad). Adjust method signature as appropriate for callback. """ x, y = event.x, event.y num_degrees = 0 direction = 0 # x, y = coordinates of mouse self.last_win_x, self.last_win_y = x, y # calculate number of degrees of scroll and direction of scroll # both floats in the 0-359.999 range # num_degrees = # direction = self.logger.debug("scroll deg=%f direction=%f" % ( num_degrees, direction)) data_x, data_y = self.check_cursor_location() return self.make_ui_callback('scroll', direction, num_degrees, data_x, data_y)
0.002387
def datetime2ole(dt):
    """converts from datetime object to ole datetime float"""
    delta = dt - OLE_TIME_ZERO
    delta_float = delta / datetime.timedelta(days=1)  # trick from SO
    return delta_float
0.004831
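A self-contained sketch of datetime2ole above. OLE_TIME_ZERO is not shown in the snippet; it is assumed here to be the conventional OLE automation epoch, 1899-12-30.

    import datetime

    OLE_TIME_ZERO = datetime.datetime(1899, 12, 30)  # assumed epoch

    # One and a half days after the epoch maps to 1.5
    print(datetime2ole(datetime.datetime(1899, 12, 31, 12)))  # 1.5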
def one_of(s):
    '''Parse a char from the specified string.'''
    @Parser
    def one_of_parser(text, index=0):
        if index < len(text) and text[index] in s:
            return Value.success(index + 1, text[index])
        else:
            return Value.failure(index, 'one of {}'.format(s))
    return one_of_parser
0.003115
def start(self): """ The main method that starts the service. This is blocking. """ self._initial_setup() self.on_service_start() self.app = self.make_tornado_app() enable_pretty_logging() self.app.listen(self.port, address=self.host) self._start_periodic_tasks() # starts the event handlers self._initialize_event_handlers() self._start_event_handlers() try: self.io_loop.start() except RuntimeError: # TODO : find a way to check if the io_loop is running before trying to start it # this method to check if the loop is running is ugly pass
0.004255
def apply_units_to_cache(self, data): """ Apply units to :class:`ParameterizedXLS` data reader. """ # parameter parameter_name = self.parameters['parameter']['name'] parameter_units = str(self.parameters['parameter']['units']) data[parameter_name] *= UREG(parameter_units) # data self.parameters.pop('parameter') return super(ParameterizedXLS, self).apply_units_to_cache(data)
0.004386
def _convert(cls, name, module, filter, source=None): """ Create a new Enum subclass that replaces a collection of global constants """ # convert all constants from source (or module) that pass filter() to # a new Enum called name, and export the enum and its members back to # module; # also, replace the __reduce_ex__ method so unpickling works in # previous Python versions module_globals = vars(_sys.modules[module]) if source: source = vars(source) else: source = module_globals members = dict((name, value) for name, value in source.items() if filter(name)) cls = cls(name, members, module=module) cls.__reduce_ex__ = _reduce_ex_by_name module_globals.update(cls.__members__) module_globals[name] = cls return cls
0.002491
def update_port_postcommit(self, context): """Update port non-database commit event.""" vlan_segment, vxlan_segment = self._get_segments( context.top_bound_segment, context.bottom_bound_segment) orig_vlan_segment, orig_vxlan_segment = self._get_segments( context.original_top_bound_segment, context.original_bottom_bound_segment) if (self._is_vm_migrating(context, vlan_segment, orig_vlan_segment) or self._is_status_down(context.current)): vni = (self._port_action_vxlan( context.original, orig_vxlan_segment, self._delete_nve_member) if orig_vxlan_segment else 0) self._port_action_vlan(context.original, orig_vlan_segment, self._delete_switch_entry, vni) elif self._is_supported_deviceowner(context.current): if nexus_help.is_baremetal(context.current): all_switches, active_switches = ( self._get_baremetal_switches(context.current)) else: host_id = context.current.get(bc.portbindings.HOST_ID) all_switches, active_switches = ( self._get_host_switches(host_id)) # if switches not active but host_id is valid if not active_switches and all_switches: raise excep.NexusConnectFailed( nexus_host=all_switches[0], config="None", exc="Update Port Failed: Nexus Switch " "is down or replay in progress") vni = self._port_action_vxlan(context.current, vxlan_segment, self._configure_nve_member) if vxlan_segment else 0 self._port_action_vlan(context.current, vlan_segment, self._configure_port_entries, vni)
0.001592
def _outside_contraction_fn(objective_function, simplex, objective_values, face_centroid, best_index, worst_index, reflected, objective_at_reflected, contraction, shrinkage, batch_evaluate_objective): """Creates the condition function pair for an outside contraction.""" def _contraction(): """Performs a contraction.""" contracted = face_centroid + contraction * (reflected - face_centroid) objective_at_contracted = objective_function(contracted) is_contracted_acceptable = objective_at_contracted <= objective_at_reflected def _accept_contraction(): next_simplex = _replace_at_index(simplex, worst_index, contracted) objective_at_next_simplex = _replace_at_index( objective_values, worst_index, objective_at_contracted) return (False, next_simplex, objective_at_next_simplex, 1) def _reject_contraction(): return _shrink_towards_best(objective_function, simplex, best_index, shrinkage, batch_evaluate_objective) return prefer_static.cond(is_contracted_acceptable, _accept_contraction, _reject_contraction) return _contraction
0.006112
def ingest_zip(path, engine=None): """Given a path to a zip file created with `export()`, recreate the database with the data stored in the included .csv files. """ import_order = [ "network", "participant", "node", "info", "notification", "question", "transformation", "vector", "transmission", ] with ZipFile(path, "r") as archive: filenames = archive.namelist() for name in import_order: filename = [f for f in filenames if name in f][0] model_name = name.capitalize() model = getattr(models, model_name) file = archive.open(filename) if six.PY3: file = io.TextIOWrapper(file, encoding="utf8", newline="") ingest_to_model(file, model, engine)
0.001183
def is_won(grid):
    "Did the latest move win the game?"
    p, q = grid
    return any(way == (way & q) for way in ways_to_win)
0.007752
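A minimal sketch for is_won above, assuming a common 3x3 bitboard encoding (one 9-bit mask per winning line, with q being the side that just moved); the encoding below is illustrative, not taken from the source.

    # Assumed encoding: bit 8 = top-left square, bit 0 = bottom-right.
    ways_to_win = (0o700, 0o070, 0o007,   # rows
                   0o444, 0o222, 0o111,   # columns
                   0o421, 0o124)          # diagonals

    grid = (0o000, 0o700)  # q (the mover) owns the whole top row
    print(is_won(grid))    # True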
def eclean_dist(destructive=False, package_names=False, size_limit=0, time_limit=0, fetch_restricted=False, exclude_file='/etc/eclean/distfiles.exclude'): ''' Clean obsolete portage sources destructive Only keep minimum for reinstallation package_names Protect all versions of installed packages. Only meaningful if used with destructive=True size_limit <size> Don't delete distfiles bigger than <size>. <size> is a size specification: "10M" is "ten megabytes", "200K" is "two hundreds kilobytes", etc. Units are: G, M, K and B. time_limit <time> Don't delete distfiles files modified since <time> <time> is an amount of time: "1y" is "one year", "2w" is "two weeks", etc. Units are: y (years), m (months), w (weeks), d (days) and h (hours). fetch_restricted Protect fetch-restricted files. Only meaningful if used with destructive=True exclude_file Path to exclusion file. Default is /etc/eclean/distfiles.exclude This is the same default eclean-dist uses. Use None if this file exists and you want to ignore. Returns a dict containing the cleaned, saved, and deprecated dists: .. code-block:: python {'cleaned': {<dist file>: <size>}, 'deprecated': {<package>: <dist file>}, 'saved': {<package>: <dist file>}, 'total_cleaned': <size>} CLI Example: .. code-block:: bash salt '*' gentoolkit.eclean_dist destructive=True ''' if exclude_file is None: exclude = None else: try: exclude = _parse_exclude(exclude_file) except excludemod.ParseExcludeFileException as e: ret = {e: 'Invalid exclusion file: {0}'.format(exclude_file)} return ret if time_limit != 0: time_limit = cli.parseTime(time_limit) if size_limit != 0: size_limit = cli.parseSize(size_limit) clean_size = 0 engine = search.DistfilesSearch(lambda x: None) clean_me, saved, deprecated = engine.findDistfiles( destructive=destructive, package_names=package_names, size_limit=size_limit, time_limit=time_limit, fetch_restricted=fetch_restricted, exclude=exclude) cleaned = dict() def _eclean_progress_controller(size, key, *args): cleaned[key] = _pretty_size(size) return True if clean_me: cleaner = clean.CleanUp(_eclean_progress_controller) clean_size = cleaner.clean_dist(clean_me) ret = {'cleaned': cleaned, 'saved': saved, 'deprecated': deprecated, 'total_cleaned': _pretty_size(clean_size)} return ret
0.000367
def _change_sel_color(self, event): """Respond to motion of the color selection cross.""" (r, g, b), (h, s, v), color = self.square.get() self.red.set(r) self.green.set(g) self.blue.set(b) self.saturation.set(s) self.value.set(v) self.hexa.delete(0, "end") self.hexa.insert(0, color.upper()) if self.alpha_channel: self.alphabar.set_color((r, g, b)) self.hexa.insert('end', ("%2.2x" % self.alpha.get()).upper()) self._update_preview()
0.003478
def reference(self, symbol, count=1):
    """
    However, if referenced, ensure that the counter is applied to the
    catch symbol.
    """
    if symbol == self.catch_symbol:
        self.catch_symbol_usage += count
    else:
        self.parent.reference(symbol, count)
0.006536
def copy(self):
    """Return a shallow copy of this object."""
    new = self.__class__()
    new.__dict__ = dict(self.__dict__)
    return new
0.011765
def get_public_key(self):
    """Get the PublicKey for this PrivateKey."""
    return PublicKey.from_verifying_key(
        self._private_key.get_verifying_key(),
        network=self.network, compressed=self.compressed)
0.008475
def multiply(lhs, rhs): """Returns element-wise product of the input arrays with broadcasting. Equivalent to ``lhs * rhs`` and ``mx.nd.broadcast_mul(lhs, rhs)`` when shapes of lhs and rhs do not match. If lhs.shape == rhs.shape, this is equivalent to ``mx.nd.elemwise_mul(lhs, rhs)`` .. note:: If the corresponding dimensions of two arrays have the same size or one of them has size 1, then the arrays are broadcastable to a common shape. Parameters ---------- lhs : scalar or mxnet.ndarray.sparse.array First array to be multiplied. rhs : scalar or mxnet.ndarray.sparse.array Second array to be multiplied. If ``lhs.shape != rhs.shape``, they must be broadcastable to a common shape. Returns ------- NDArray The element-wise multiplication of the input arrays. Examples -------- >>> x = mx.nd.ones((2,3)).tostype('csr') >>> y = mx.nd.arange(2).reshape((2,1)) >>> z = mx.nd.arange(3) >>> x.asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> y.asnumpy() array([[ 0.], [ 1.]], dtype=float32) >>> z.asnumpy() array([ 0., 1., 2.], dtype=float32) >>> (x*2).asnumpy() array([[ 2., 2., 2.], [ 2., 2., 2.]], dtype=float32) >>> (x*y).asnumpy() array([[ 0., 0., 0.], [ 1., 1., 1.]], dtype=float32) >>> mx.nd.sparse.multiply(x, y).asnumpy() array([[ 0., 0., 0.], [ 1., 1., 1.]], dtype=float32) >>> (x*z).asnumpy() array([[ 0., 1., 2.], [ 0., 1., 2.]], dtype=float32) >>> mx.nd.sparse.multiply(x, z).asnumpy() array([[ 0., 1., 2.], [ 0., 1., 2.]], dtype=float32) >>> z = z.reshape((1, 3)) >>> z.asnumpy() array([[ 0., 1., 2.]], dtype=float32) >>> (x*z).asnumpy() array([[ 0., 1., 2.], [ 0., 1., 2.]], dtype=float32) >>> mx.nd.sparse.multiply(x, z).asnumpy() array([[ 0., 1., 2.], [ 0., 1., 2.]], dtype=float32) """ # pylint: disable= no-member, protected-access if isinstance(lhs, NDArray) and isinstance(rhs, NDArray) and lhs.shape == rhs.shape: return _ufunc_helper( lhs, rhs, op.elemwise_mul, operator.mul, _internal._mul_scalar, None) return _ufunc_helper( lhs, rhs, op.broadcast_mul, operator.mul, _internal._mul_scalar, None)
0.001176
def __set_route(self, type_route, route): """ Sets the given type_route and route to the route mapping :rtype: object """ if type_route in self.__routes: if not self.verify_route_already_bound(type_route, route): self.__routes[type_route].append(route) else: self.__routes[type_route] = [route] return RouteMapping
0.004878
def createuser(self, email, name='', password=''): """ Return a bugzilla User for the given username :arg email: The email address to use in bugzilla :kwarg name: Real name to associate with the account :kwarg password: Password to set for the bugzilla account :raises XMLRPC Fault: Code 501 if the username already exists Code 500 if the email address isn't valid Code 502 if the password is too short Code 503 if the password is too long :return: User record for the username """ self._proxy.User.create(email, name, password) return self.getuser(email)
0.002976
def evolve(self, rho: Density) -> Density: """Apply the action of this channel upon a density""" N = rho.qubit_nb qubits = rho.qubits indices = list([qubits.index(q) for q in self.qubits]) + \ list([qubits.index(q) + N for q in self.qubits]) tensor = bk.tensormul(self.tensor, rho.tensor, indices) return Density(tensor, qubits, rho.memory)
0.004975
def solve(self):
    """Start (or re-start) optimisation. This method implements the
    framework for the alternation between `X` and `D` updates in a
    dictionary learning algorithm. There is sufficient flexibility in
    specifying the two updates that it calls that it is usually not
    necessary to override this method in derived classes.

    If option ``Verbose`` is ``True``, the progress of the optimisation
    is displayed at every iteration. At termination of this method,
    attribute :attr:`itstat` is a list of tuples representing statistics
    of each iteration.

    Attribute :attr:`timer` is an instance of :class:`.util.Timer` that
    provides the following labelled timers:

    ``init``: Time taken for object initialisation by :meth:`__init__`

    ``solve``: Total time taken by call(s) to :meth:`solve`

    ``solve_wo_func``: Total time taken by call(s) to :meth:`solve`,
    excluding time taken to compute functional value and related
    iteration statistics

    ``solve_wo_rsdl``: Total time taken by call(s) to :meth:`solve`,
    excluding time taken to compute functional value and related
    iteration statistics as well as time taken to compute residuals and
    implement the ``AutoRho`` mechanism
    """
    # Print header and separator strings
    if self.opt['Verbose'] and self.opt['StatusHeader']:
        self.isc.printheader()

    # Reset timer
    self.timer.start(['solve', 'solve_wo_eval'])

    # Main optimisation iterations
    for self.j in range(self.j, self.j + self.opt['MaxMainIter']):

        # X update
        self.xstep.solve()
        self.post_xstep()

        # D update
        self.dstep.solve()
        self.post_dstep()

        # Evaluate functional
        self.timer.stop('solve_wo_eval')
        evl = self.evaluate()
        self.timer.start('solve_wo_eval')

        # Record elapsed time
        t = self.timer.elapsed(self.opt['IterTimer'])

        # Extract and record iteration stats
        xitstat = self.xstep.itstat[-1] if self.xstep.itstat else \
            self.xstep.IterationStats(
                *([0.0, ] * len(self.xstep.IterationStats._fields)))
        ditstat = self.dstep.itstat[-1] if self.dstep.itstat else \
            self.dstep.IterationStats(
                *([0.0, ] * len(self.dstep.IterationStats._fields)))
        itst = self.isc.iterstats(self.j, t, xitstat, ditstat, evl)
        self.itstat.append(itst)

        # Display iteration stats if Verbose option enabled
        if self.opt['Verbose']:
            self.isc.printiterstats(itst)

        # Call callback function if defined
        if self.opt['Callback'] is not None:
            if self.opt['Callback'](self):
                break

    # Increment iteration count
    self.j += 1

    # Record solve time
    self.timer.stop(['solve', 'solve_wo_eval'])

    # Print final separator string if Verbose option enabled
    if self.opt['Verbose'] and self.opt['StatusHeader']:
        self.isc.printseparator()

    # Return final dictionary
    return self.getdict()
0.002082
def download_api(branch=None) -> str: """download API documentation from _branch_ of Habitica\'s repo on Github""" habitica_github_api = 'https://api.github.com/repos/HabitRPG/habitica' if not branch: branch = requests.get(habitica_github_api + '/releases/latest').json()['tag_name'] curl = local['curl']['-sL', habitica_github_api + '/tarball/{}'.format(branch)] tar = local['tar'][ 'axzf', '-', '--wildcards', '*/website/server/controllers/api-v3/*', '--to-stdout'] grep = local['grep']['@api'] sed = local['sed']['-e', 's/^[ */]*//g', '-e', 's/ / /g', '-'] return (curl | tar | grep | sed)()
0.007776
def ObjectInitializedEventHandler(obj, event):
    """Object has been created
    """
    # only snapshot supported objects
    if not supports_snapshots(obj):
        return
    # object has already snapshots
    if has_snapshots(obj):
        return
    # take a new snapshot
    take_snapshot(obj, action="create")
0.003125
def EncodeForCSV(x):
    "Encodes one value for CSV."
    k = x.encode('utf-8')
    if ',' in k or '"' in k:
        return '"%s"' % k.replace('"', '""')
    else:
        return k
0.030488
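EncodeForCSV above is Python 2-style code (under Python 3, str.encode returns bytes and the ',' in k test would raise TypeError). A Python 2 usage sketch:

    print(EncodeForCSV(u'plain'))     # plain
    print(EncodeForCSV(u'a,b'))       # "a,b"
    print(EncodeForCSV(u'say "hi"'))  # "say ""hi"""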
def MultiDestroyFlowStates(self, session_ids, request_limit=None):
    """Deletes all requests and responses for the given flows.

    Args:
      session_ids: A list of flows to destroy.
      request_limit: A limit on the number of requests to delete.

    Returns:
      A list of requests that were deleted.
    """
    subjects = [session_id.Add("state") for session_id in session_ids]
    to_delete = []
    deleted_requests = []
    for subject, values in self.MultiResolvePrefix(
            subjects, self.FLOW_REQUEST_PREFIX, limit=request_limit):
        for _, serialized, _ in values:
            request = rdf_flow_runner.RequestState.FromSerializedString(serialized)
            deleted_requests.append(request)

            # Drop all responses to this request.
            response_subject = self.GetFlowResponseSubject(request.session_id,
                                                           request.id)
            to_delete.append(response_subject)

        # Mark the request itself for deletion.
        to_delete.append(subject)

    # Drop them all at once.
    self.DeleteSubjects(to_delete, sync=True)
    return deleted_requests
0.004421
def purge_old_user_tasks():
    """
    Delete any UserTaskStatus and UserTaskArtifact records older than
    ``settings.USER_TASKS_MAX_AGE``.

    Intended to be run as a scheduled task.
    """
    limit = now() - settings.USER_TASKS_MAX_AGE
    # UserTaskArtifacts will also be removed via deletion cascading
    UserTaskStatus.objects.filter(created__lt=limit).delete()
0.00542
def bulk_change_and_save(iterable, update_only_changed_fields=False,
                         save_kwargs=None, **changed_fields):
    """
    Changes a given `changed_fields` on each object in a given `iterable`,
    saves objects and returns the changed objects.
    """
    return [
        change_and_save(obj,
                        update_only_changed_fields=update_only_changed_fields,
                        save_kwargs=save_kwargs, **changed_fields)
        for obj in iterable
    ]
0.008989
def get(self, key, alt=None):
    """If dictionary contains _key_ return the associated value,
    otherwise return _alt_.
    """
    with self.lock:
        if key in self:
            return self.getitem(key)
        else:
            return alt
0.007117
def _handle_value(self, value): """ Given a value string, unquote, remove comment, handle lists. (including empty and single member lists) """ if self._inspec: # Parsing a configspec so don't handle comments return (value, '') # do we look for lists in values ? if not self.list_values: mat = self._nolistvalue.match(value) if mat is None: raise SyntaxError() # NOTE: we don't unquote here return mat.groups() # mat = self._valueexp.match(value) if mat is None: # the value is badly constructed, probably badly quoted, # or an invalid list raise SyntaxError() (list_values, single, empty_list, comment) = mat.groups() if (list_values == '') and (single is None): # change this if you want to accept empty values raise SyntaxError() # NOTE: note there is no error handling from here if the regex # is wrong: then incorrect values will slip through if empty_list is not None: # the single comma - meaning an empty list return ([], comment) if single is not None: # handle empty values if list_values and not single: # FIXME: the '' is a workaround because our regex now matches # '' at the end of a list if it has a trailing comma single = None else: single = single or '""' single = self._unquote(single) if list_values == '': # not a list value return (single, comment) the_list = self._listvalueexp.findall(list_values) the_list = [self._unquote(val) for val in the_list] if single is not None: the_list += [single] return (the_list, comment)
0.001033
def gen_req_sfc(lat_x, lon_x, start, end, grid=[0.125, 0.125], scale=0): '''generate a dict of reqs kwargs for (lat_x,lon_x) spanning [start, end] Parameters ---------- lat_x : [type] [description] lon_x : [type] [description] start : [type] [description] end : [type] [description] grid : list, optional [description] (the default is [0.125, 0.125], which [default_description]) scale : int, optional [description] (the default is 0, which [default_description]) Returns ------- [type] [description] Examples -------- >>> gen_req_sfc(28, 116, '2015-01', '2015-01-31 23', grid=[0.125, 0.125], scale=0) ''' # scale is a factor to rescale grid size size = grid[0]*scale # generate pd.Series for timestamps ser_datetime = pd.date_range(start, end, freq='1h').to_series() # surface requests lat_c, lon_c = (roundPartial(x, grid[0]) for x in [lat_x, lon_x]) area = [lat_c+size, lon_c-size, lat_c-size, lon_c+size] dict_req_sfc = { 'variable': list_var_sfc, 'product_type': 'reanalysis', 'area': area, 'grid': grid, 'format': 'netcdf' } list_dict_req_sfc = [ {**dict_req_sfc, **dict_dt} for dict_dt in list(gen_dict_dt_sub(ser_datetime).values()) ] dict_req_sfc = { gen_fn(dict_req): gen_dict_proc(dict_req) for dict_req in list_dict_req_sfc } return dict_req_sfc
0.001976
def p_expression_cond(self, p):
    'expression : expression COND expression COLON expression'
    p[0] = Cond(p[1], p[3], p[5], lineno=p.lineno(1))
    p.set_lineno(0, p.lineno(1))
0.010363
def analyzeAll(self):
    """analyze every unanalyzed ABF in the folder."""
    searchableData = str(self.files2)
    self.log.debug("considering analysis for %d ABFs", len(self.IDs))
    for ID in self.IDs:
        if ID + "_" not in searchableData:
            self.log.debug("%s needs analysis", ID)
            try:
                self.analyzeABF(ID)
            except:
                print("EXCEPTION! " * 100)
        else:
            self.log.debug("%s has existing analysis, not overwriting", ID)
    self.log.debug("verified analysis of %d ABFs", len(self.IDs))
0.01461
def nonoverlap(item_a, time_a, item_b, time_b, max_value): """ Percentage of pixels in each object that do not overlap with the other object Args: item_a: STObject from the first set in ObjectMatcher time_a: Time integer being evaluated item_b: STObject from the second set in ObjectMatcher time_b: Time integer being evaluated max_value: Maximum distance value used as scaling value and upper constraint. Returns: Distance value between 0 and 1. """ return np.minimum(1 - item_a.count_overlap(time_a, item_b, time_b), max_value) / float(max_value)
0.006431
def check_type(self): """Make sure each stochastic has a correct type, and identify discrete stochastics.""" self.isdiscrete = {} for stochastic in self.stochastics: if stochastic.dtype in integer_dtypes: self.isdiscrete[stochastic] = True elif stochastic.dtype in bool_dtypes: raise ValueError( 'Binary stochastics not supported by AdaptativeMetropolis.') else: self.isdiscrete[stochastic] = False
0.007605
def to_jd(year, month, day):
    '''Convert a Positivist date to Julian day count.'''
    legal_date(year, month, day)
    gyear = year + YEAR_EPOCH - 1
    return (
        gregorian.EPOCH - 1 + (365 * (gyear - 1)) +
        floor((gyear - 1) / 4) + (-floor((gyear - 1) / 100)) +
        floor((gyear - 1) / 400) + (month - 1) * 28 + day
    )
0.002899
def init_argparser_working_dir( self, argparser, explanation='', help_template=( 'the working directory; %(explanation)s' 'default is current working directory (%(cwd)s)'), ): """ Subclass could an extra expanation on how this is used. Arguments explanation Explanation text for the default help template help_template A standard help message for this option. """ cwd = self.toolchain.join_cwd() argparser.add_argument( '--working-dir', dest=WORKING_DIR, metavar=metavar(WORKING_DIR), default=cwd, help=help_template % {'explanation': explanation, 'cwd': cwd}, )
0.002535
def from_parfiles(cls,pst,parfile_names,real_names=None): """ create a parameter ensemble from parfiles. Accepts parfiles with less than the parameters in the control (get NaNs in the ensemble) or extra parameters in the parfiles (get dropped) Parameters: pst : pyemu.Pst parfile_names : list of str par file names real_names : str optional list of realization names. If None, a single integer counter is used Returns: pyemu.ParameterEnsemble """ if isinstance(pst,str): pst = pyemu.Pst(pst) dfs = {} if real_names is not None: assert len(real_names) == len(parfile_names) else: real_names = np.arange(len(parfile_names)) for rname,pfile in zip(real_names,parfile_names): assert os.path.exists(pfile), "ParameterEnsemble.read_parfiles() error: " + \ "file: {0} not found".format(pfile) df = read_parfile(pfile) #check for scale differences - I don't who is dumb enough #to change scale between par files and pst... diff = df.scale - pst.parameter_data.scale if diff.apply(np.abs).sum() > 0.0: warnings.warn("differences in scale detected, applying scale in par file", PyemuWarning) #df.loc[:,"parval1"] *= df.scale dfs[rname] = df.parval1.values df_all = pd.DataFrame(data=dfs).T df_all.columns = df.index if len(pst.par_names) != df_all.shape[1]: #if len(pst.par_names) < df_all.shape[1]: # raise Exception("pst is not compatible with par files") pset = set(pst.par_names) dset = set(df_all.columns) diff = pset.difference(dset) if len(diff) > 0: warnings.warn("the following parameters are not in the par files (getting NaNs) :{0}". format(','.join(diff)),PyemuWarning) blank_df = pd.DataFrame(index=df_all.index,columns=diff) df_all = pd.concat([df_all,blank_df],axis=1) diff = dset.difference(pset) if len(diff) > 0: warnings.warn("the following par file parameters are not in the control (being dropped):{0}". format(','.join(diff)),PyemuWarning) df_all = df_all.loc[:, pst.par_names] return ParameterEnsemble.from_dataframe(df=df_all,pst=pst)
0.010282
def exception(message): """Exception method convenience wrapper.""" def decorator(method): """Inner decorator so we can accept arguments.""" @wraps(method) def wrapper(self, *args, **kwargs): """Innermost decorator wrapper - this is confusing.""" if self.messages: kwargs['message'] = args[0] if args else kwargs.get('message', message) else: kwargs['message'] = None kwargs['prefix'] = self.prefix kwargs['statsd'] = self.statsd return method(self, **kwargs) return wrapper return decorator
0.003106
def run(self):
    """
    Mounts the various shares, asking the user for a domain username
    and password.
    """
    logging.info('start run with "{}" at {}'.format(
        self.username, datetime.datetime.now()))
    progress = Progress(text="Checking software requirements...",
                        pulsate=True, auto_close=True)
    progress(1)
    try:
        self.requirements()
    except LockFailedException as lfe:
        ErrorMessage('Error "{}": user {} probably does not have '
                     'administrator rights'.format(lfe, self.username))
        sys.exit(20)
    except Exception as e:
        ErrorMessage("A generic error occurred: {}".format(e))
        sys.exit(21)
    progress(100)
    self.set_shares()
    # ask for the domain username
    insert_msg = "Enter the Domain/E-mail username"
    default_username = (self.host_username if self.host_username
                        else os.environ['USER'])
    self.domain_username = GetText(text=insert_msg,
                                   entry_text=self.username)
    if self.domain_username is None or len(self.domain_username) == 0:
        error_msg = "An empty domain username was entered"
        ErrorMessage(self.msg_error % error_msg)
        sys.exit(2)
    # ask for the domain password
    insert_msg = u"Enter the Domain/E-mail password"
    self.domain_password = GetText(text=insert_msg, entry_text='password',
                                   password=True)
    if self.domain_password is None or len(self.domain_password) == 0:
        error_msg = u"An empty domain password was entered"
        ErrorMessage(self.msg_error % error_msg)
        sys.exit(3)
    progress_msg = u"Connecting network drives..."
    progress = Progress(text=progress_msg, pulsate=True, auto_close=True)
    progress(1)
    # loop to mount all the shares
    result = []
    for share in self.samba_shares:
        if 'mountpoint' not in share.keys():
            # build the string for the local mount point
            mountpoint = os.path.expanduser(
                '~%s/%s/%s' % (self.host_username, share['hostname'],
                               share['share']))
            share.update({'mountpoint': mountpoint})
        elif not share['mountpoint'].startswith('/'):
            mountpoint = os.path.expanduser(
                '~%s/%s' % (self.host_username, share['mountpoint']))
            share.update({'mountpoint': mountpoint})
        share.update({
            'host_username': self.host_username,
            'domain_username': share.get('username', self.domain_username),
            'domain_password': share.get('password', self.domain_password)})
        # check that the local mount point exists, otherwise create it
        if not os.path.exists(share['mountpoint']):
            if self.verbose:
                logging.warning('Mountpoint "%s" not exist.'
                                % share['mountpoint'])
            if not self.dry_run:
                os.makedirs(share['mountpoint'])
        # unmount the share before re-mounting it
        umont_cmd = self.cmd_umount % share
        if self.verbose:
            logging.warning("Umount command: %s" % umont_cmd)
        if not self.dry_run:
            umount_p = subprocess.Popen(umont_cmd, shell=True)
            returncode = umount_p.wait()
            time.sleep(2)
        mount_cmd = self.cmd_mount % share
        if self.verbose:
            placeholder = ",password="
            logging.warning("Mount command: %s%s" % (
                mount_cmd.split(placeholder)[0], placeholder + "******\""))
        if not self.dry_run:
            # mount the share
            p_mnt = subprocess.Popen(mount_cmd, shell=True,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
            returncode = p_mnt.wait()
            result.append({'share': share['share'],
                           'returncode': returncode,
                           'stdout': p_mnt.stdout.read(),
                           'stderr': p_mnt.stderr.read()})
    progress(100)
    if self.verbose:
        logging.warning("Results: %s" % result)
0.000391
def task(func):
    """Decorator to run the decorated function as a Task
    """
    def task_wrapper(*args, **kwargs):
        return spawn(func, *args, **kwargs)
    return task_wrapper
0.005348
def get_sphinx_ref(self, url, label=None): """ Get an internal sphinx cross reference corresponding to `url` into the online docs, associated with a link with label `label` (if not None). """ # A url is assumed to correspond to a citation if it contains # 'zreferences.html#' if 'zreferences.html#' in url: key = url.partition('zreferences.html#')[2] ref = ':cite:`%s`' % key else: # If the url does not correspond to a citation, try to look it # up in each of the IntersphinxInventory objects in our list ref = None # Iterate over IntersphinxInventory objects in our list for ii in self.invlst: # If the baseurl for the current IntersphinxInventory # object matches the url, try to look up the reference # from the url and terminate the loop of the look up # succeeds if ii.matching_base_url(url): ref = ii.get_sphinx_ref(url, label) break if ref is None: raise KeyError('no match found for url %s' % url) return ref
0.001623
def from_key_bytes(cls, algorithm, key_bytes): """Builds a `Signer` from an algorithm suite and a raw signing key. :param algorithm: Algorithm on which to base signer :type algorithm: aws_encryption_sdk.identifiers.Algorithm :param bytes key_bytes: Raw signing key :rtype: aws_encryption_sdk.internal.crypto.Signer """ key = serialization.load_der_private_key(data=key_bytes, password=None, backend=default_backend()) return cls(algorithm, key)
0.005894
def char2features(sentence, i):
    '''Returns features of char at position `i` in given sentence

    When possible, the resulting features list also includes features for
    2 characters ahead and behind the current position (like bigrams, or
    something like it)

    Currently, the features used are:

    1. lower-cased value of character
    2. result of calling `character.isupper()` method
    3. result of calling `character.isnumeric()` method
    '''
    char = sentence[i]
    length = len(sentence)
    features = [
        'lower={0}'.format(char.lower()),
        'isupper={0}'.format(char.isupper()),
        'isnumeric={0}'.format(char.isnumeric()),
    ]
    if i == 0:
        features.extend(['BOS'])
    if i > 0:
        char = sentence[i - 1]
        features.extend([
            '-1:lower={0}'.format(char.lower()),
            '-1:isupper={0}'.format(char.isupper()),
            '-1:isnumeric={0}'.format(char.isnumeric()),
        ])
    if i > 1:
        char = sentence[i - 2]
        features.extend([
            '-2:lower={0}'.format(char.lower()),
            '-2:isupper={0}'.format(char.isupper()),
            '-2:isnumeric={0}'.format(char.isnumeric()),
        ])
    if i < length - 1:
        char = sentence[i + 1]
        features.extend([
            '+1:lower={0}'.format(char.lower()),
            '+1:isupper={0}'.format(char.isupper()),
            '+1:isnumeric={0}'.format(char.isnumeric()),
        ])
    if i < length - 2:
        char = sentence[i + 2]
        features.extend([
            '+2:lower={0}'.format(char.lower()),
            '+2:isupper={0}'.format(char.isupper()),
            '+2:isnumeric={0}'.format(char.isnumeric()),
        ])
    if i == length - 1:
        features.extend(['EOS'])
    return features
0.001127
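char2features above has no external dependencies, so its window features can be checked directly:

    feats = char2features("Ab1", 1)
    print(feats[:3])  # ['lower=b', 'isupper=False', 'isnumeric=False']
    print([f for f in feats if f.startswith('-1:')])
    # ['-1:lower=a', '-1:isupper=True', '-1:isnumeric=False']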
def _lookup_hashes(self, full_hashes): """Lookup URL hash in blacklists Returns names of lists it was found in. """ full_hashes = list(full_hashes) cues = [fh[0:4] for fh in full_hashes] result = [] matching_prefixes = {} matching_full_hashes = set() is_potential_threat = False # First lookup hash prefixes which match full URL hash for (hash_prefix, negative_cache_expired) in self.storage.lookup_hash_prefix(cues): for full_hash in full_hashes: if full_hash.startswith(hash_prefix): is_potential_threat = True # consider hash prefix negative cache as expired if it is expired in at least one threat list matching_prefixes[hash_prefix] = matching_prefixes.get(hash_prefix, False) or negative_cache_expired matching_full_hashes.add(full_hash) # if none matches, URL hash is clear if not is_potential_threat: return [] # if there is non-expired full hash, URL is blacklisted matching_expired_threat_lists = set() for threat_list, has_expired in self.storage.lookup_full_hashes(matching_full_hashes): if has_expired: matching_expired_threat_lists.add(threat_list) else: result.append(threat_list) if result: return result # If there are no matching expired full hash entries # and negative cache is still current for all prefixes, consider it safe if len(matching_expired_threat_lists) == 0 and sum(map(int, matching_prefixes.values())) == 0: log.info('Negative cache hit.') return [] # Now we can assume that there are expired matching full hash entries and/or # cache prefix entries with expired negative cache. Both require full hash sync. self._sync_full_hashes(matching_prefixes.keys()) # Now repeat full hash lookup for threat_list, has_expired in self.storage.lookup_full_hashes(matching_full_hashes): if not has_expired: result.append(threat_list) return result
0.004957
def install_signal_handlers(self): """ Handle events like Ctrl-C from the command line. """ self.graceful_stop = False def request_shutdown_now(): self.shutdown_now() def request_shutdown_graceful(): # Second time CTRL-C, shutdown now if self.graceful_stop: self.shutdown_now() else: self.graceful_stop = True self.shutdown_graceful() # First time CTRL-C, try to shutdown gracefully gevent.signal(signal.SIGINT, request_shutdown_graceful) # User (or Heroku) requests a stop now, just mark tasks as interrupted. gevent.signal(signal.SIGTERM, request_shutdown_now)
0.002747
def create_mon_path(path, uid=-1, gid=-1):
    """create the mon path if it does not exist"""
    if not os.path.exists(path):
        os.makedirs(path)
        os.chown(path, uid, gid)
0.010753
def validateState(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, excMsg=None, returnStateName=False): """Raises ValidationException if value is not a USA state. Returns the capitalized state abbreviation, unless returnStateName is True in which case it returns the titlecased state name. * value (str): The value being validated as an email address. * blank (bool): If True, a blank string will be accepted. Defaults to False. * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped. * allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers. * blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation. * excMsg (str): A custom message to use in the raised ValidationException. * returnStateName (bool): If True, the full state name is returned, i.e. 'California'. Otherwise, the abbreviation, i.e. 'CA'. Defaults to False. >>> import pysimplevalidate as pysv >>> pysv.validateState('tx') 'TX' >>> pysv.validateState('california') 'CA' >>> pysv.validateState('WASHINGTON') 'WA' >>> pysv.validateState('WASHINGTON', returnStateName=True) 'Washington' """ # TODO - note that this is USA-centric. I should work on trying to make this more international. # Validate parameters. _validateGenericParameters(blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes) returnNow, value = _prevalidationCheck(value, blank, strip, allowlistRegexes, blocklistRegexes, excMsg) if returnNow: return value if value.upper() in USA_STATES_UPPER.keys(): # check if value is a state abbreviation if returnStateName: return USA_STATES[value.upper()] # Return full state name. else: return value.upper() # Return abbreviation. elif value.title() in USA_STATES.values(): # check if value is a state name if returnStateName: return value.title() # Return full state name. else: return USA_STATES_REVERSED[value.title()] # Return abbreviation. _raiseValidationException(_('%r is not a state.') % (_errstr(value)), excMsg)
0.007404
def _numeric_handler_factory(charset, transition, assertion, illegal_before_underscore, parse_func, illegal_at_end=(None,), ion_type=None, append_first_if_not=None, first_char=None): """Generates a handler co-routine which tokenizes a numeric component (a token or sub-token). Args: charset (sequence): Set of ordinals of legal characters for this numeric component. transition (callable): Called upon termination of this component (i.e. when a character not in ``charset`` is found). Accepts the previous character ordinal, the current character ordinal, the current context, and the previous transition. Returns a Transition if the component ends legally; otherwise, raises an error. assertion (callable): Accepts the first character's ordinal and the current context. Returns True if this is a legal start to the component. illegal_before_underscore (sequence): Set of ordinals of illegal characters to precede an underscore for this component. parse_func (callable): Called upon ending the numeric value. Accepts the current token value and returns a thunk that lazily parses the token. illegal_at_end (Optional[sequence]): Set of ordinals of characters that may not legally end the value. ion_type (Optional[IonType]): The type of the value if it were to end on this component. append_first_if_not (Optional[int]): The ordinal of a character that should not be appended to the token if it occurs first in this component (e.g. an underscore in many cases). first_char (Optional[int]): The ordinal of the character that should be appended instead of the character that occurs first in this component. This is useful for preparing the token for parsing in the case where a particular character is peculiar to the Ion format (e.g. 'd' to denote the exponent of a decimal value should be replaced with 'e' for compatibility with python's Decimal type). """ @coroutine def numeric_handler(c, ctx): assert assertion(c, ctx) if ion_type is not None: ctx.set_ion_type(ion_type) val = ctx.value if c != append_first_if_not: first = c if first_char is None else first_char val.append(first) prev = c c, self = yield trans = ctx.immediate_transition(self) while True: if _ends_value(c): if prev == _UNDERSCORE or prev in illegal_at_end: _illegal_character(c, ctx, '%s at end of number.' % (_chr(prev),)) trans = ctx.event_transition(IonThunkEvent, IonEventType.SCALAR, ctx.ion_type, parse_func(ctx.value)) if c == _SLASH: trans = ctx.immediate_transition(_number_slash_end_handler(c, ctx, trans)) else: if c == _UNDERSCORE: if prev == _UNDERSCORE or prev in illegal_before_underscore: _illegal_character(c, ctx, 'Underscore after %s.' % (_chr(prev),)) else: if c not in charset: trans = transition(prev, c, ctx, trans) else: val.append(c) prev = c c, _ = yield trans return numeric_handler
0.007026
def wrap_iterable(obj):
    """
    Returns:
        wrapped_obj, was_scalar
    """
    was_scalar = not isiterable(obj)
    wrapped_obj = [obj] if was_scalar else obj
    return wrapped_obj, was_scalar
0.004926
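wrap_iterable above relies on an isiterable() helper from its module; a plausible stand-in is assumed below so the sketch runs on its own.

    def isiterable(obj):  # assumed helper, not from the source
        return hasattr(obj, '__iter__')

    print(wrap_iterable(3))       # ([3], True)
    print(wrap_iterable([3, 4]))  # ([3, 4], False)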
def keypress(self, size, key):
    """allow subclasses to intercept keystrokes"""
    key = self.__super.keypress(size, key)
    if key:
        key = self.unhandled_keys(size, key)
    return key
0.009259
def resolve_using_maximal_coverage(matches): """Given a list of matches, select a subset of matches such that there are no overlaps and the total number of covered characters is maximal. Parameters ---------- matches: list of Match Returns -------- list of Match """ if len(matches) == 0: return matches matches.sort() N = len(matches) scores = [len(match) for match in matches] prev = [-1] * N for i in range(1, N): bestscore = -1 bestprev = -1 j = i while j >= 0: # if matches do not overlap if matches[j].is_before(matches[i]): l = scores[j] + len(matches[i]) if l >= bestscore: bestscore = l bestprev = j else: # in case of overlapping matches l = scores[j] - len(matches[j]) + len(matches[i]) if l >= bestscore: bestscore = l bestprev = prev[j] j = j - 1 scores[i] = bestscore prev[i] = bestprev # first find the matching with highest combined score bestscore = max(scores) bestidx = len(scores) - scores[-1::-1].index(bestscore) -1 # then backtrack the non-conflicting matchings that should be kept keepidxs = [bestidx] bestidx = prev[bestidx] while bestidx != -1: keepidxs.append(bestidx) bestidx = prev[bestidx] # filter the matches return [matches[idx] for idx in reversed(keepidxs)]
0.003812
def insert_rows(self, row, no_rows=1):
    """Adds no_rows rows before row, appends if row > maxrows
    and marks grid as changed
    """
    # Mark content as changed
    post_command_event(self.main_window, self.ContentChangedMsg)
    tab = self.grid.current_table
    self.code_array.insert(row, no_rows, axis=0, tab=tab)
0.005587
def set_options_values(self, options, parse=False, strict=False): """ Set the options from a dict of values (in string). :param option_values: the values of options (in format `{"opt_name": "new_value"}`) :type option_values: dict :param parse: whether to parse the given value :type parse: bool :param strict: if True the given `option_values` dict should only contains existing options (no other key) :type strict: bool """ if strict: for opt_name in options.keys(): if not self.has_option(opt_name): raise ValueError("'%s' is not a option of the component" % opt_name) elif self.option_is_hidden(opt_name): raise ValueError("'%s' is hidden, you can't set it" % opt_name) for opt_name, opt in self._options.items(): if opt.hidden: continue if opt_name in options: opt.set(options[opt_name], parse=parse)
0.006705
def find_files(globs): """Find files to include.""" last_cwd = os.getcwd() os.chdir(config.cwd) gex, gin = separate_globs(globs) # Find excluded files exclude = [] for glob in gex: parse_glob(glob, exclude) files = [] include = [] order = 0 # Find included files and removed excluded files for glob in gin: order += 1 array = parse_glob(glob, include) base = find_base(glob) for file in array: if file not in exclude: files.append((order, base, file)) os.chdir(last_cwd) return files
0.042226
def log_transform(rates):
    """log transform a numeric value, unless it is zero, or negative"""
    transformed = []
    for key in ['missense', 'nonsense', 'splice_lof', 'splice_region',
                'synonymous']:
        try:
            value = math.log10(rates[key])
        except ValueError:
            value = "NA"
        except KeyError:
            continue
        transformed.append(value)
    return '\t'.join(map(str, transformed))
0.012448
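A worked example for log_transform above (math must be imported; zero or negative rates become "NA", missing keys are skipped):

    import math

    rates = {'missense': 0.001, 'nonsense': 0.0, 'synonymous': 0.01}
    print(log_transform(rates))  # '-3.0\tNA\t-2.0' (tab-separated)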
def _config_win32_nameservers(self, nameservers):
    """Configure a NameServer registry entry."""
    # we call str() on nameservers to convert it from unicode to ascii
    nameservers = str(nameservers)
    split_char = self._determine_split_char(nameservers)
    ns_list = nameservers.split(split_char)
    for ns in ns_list:
        if ns not in self.nameservers:
            self.nameservers.append(ns)
0.006834
def read_bim(file_name): """Reads the BIM file to gather marker names. :param file_name: the name of the ``bim`` file. :type file_name: str :returns: a :py:class:`dict` containing the chromosomal location of each marker on the sexual chromosomes. It uses the :py:func:`encode_chr` to encode the chromosomes from ``X`` and ``Y`` to ``23`` and ``24``, respectively. """ marker_names_chr = None with open(file_name, 'r') as input_file: marker_names_chr = dict([ (i[1], encode_chr(i[0])) for i in [ j.rstrip("\r\n").split("\t") for j in input_file.readlines() ] if encode_chr(i[0]) in {23, 24} ]) return marker_names_chr
0.001346
def get(self, name=None): """Print a list of all jupyterHubs.""" # Print a list of hubs. if name is None: hubs = self.get_hubs() print("Running Jupyterhub Deployments (by name):") for hub_name in hubs: hub = Hub(namespace=hub_name) data = hub.get_description() url = data['LoadBalancer Ingress'] print(f' - Name: {hub_name}') print(f' Url: {url}') else: hub = Hub(namespace=name) hub.get()
0.00354
def usearch_chimera_filter_de_novo( fasta_filepath, output_chimera_filepath=None, output_non_chimera_filepath=None, abundance_skew=2.0, log_name="uchime_de_novo_chimera_filtering.log", usersort=False, HALT_EXEC=False, save_intermediate_files=False, remove_usearch_logs=False, working_dir=None): """ Chimera filter de novo, output chimeras and non-chimeras to fastas fasta_filepath = input fasta file, generally a dereplicated fasta output_chimera_filepath = output chimera filepath output_non_chimera_filepath = output non chimera filepath abundance_skew = abundance skew setting for de novo filtering. usersort = Enable if input fasta not sorted by length purposefully, lest usearch will raise an error. HALT_EXEC: Used for debugging app controller save_intermediate_files: Preserve all intermediate files created. """ if not output_chimera_filepath: _, output_chimera_filepath = mkstemp(prefix='uchime_chimeras_', suffix='.fasta') if not output_non_chimera_filepath: _, output_non_chimera_filepath = mkstemp(prefix='uchime_non_chimeras_', suffix='.fasta') log_filepath = join(working_dir, log_name) params = {'--abskew': abundance_skew} app = Usearch(params, WorkingDir=working_dir, HALT_EXEC=HALT_EXEC) if usersort: app.Parameters['--usersort'].on() data = {'--uchime': fasta_filepath, '--chimeras': output_chimera_filepath, '--nonchimeras': output_non_chimera_filepath } if not remove_usearch_logs: data['--log'] = log_filepath app_result = app(data) if not save_intermediate_files: remove_files([output_chimera_filepath]) return app_result, output_non_chimera_filepath
0.000521
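A hedged invocation sketch for the wrapper above; it assumes the usearch binary is installed and on PATH, and that the input FASTA is already sorted by abundance as the docstring requires.

# Hypothetical paths; output filepaths left as None are created via mkstemp.
app_result, non_chimeras_fp = usearch_chimera_filter_de_novo(
    'seqs_dereplicated.fasta',
    abundance_skew=2.0,
    working_dir='/tmp/uchime_run')
print(non_chimeras_fp)   # path to the chimera-free FASTA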
def parse(self, data=None, table_name=None):
    """Parse the lines from index i

    :param data: optional, store the parsed result to it when specified
    :param table_name: when inside a table array, it is the table name
    """
    temp = self.dict_()
    sub_table = None
    is_array = False
    line = ''
    while True:
        line = self._readline()
        if not line:
            self._store_table(sub_table, temp, is_array, data=data)
            break   # EOF
        if BLANK_RE.match(line):
            continue
        if TABLE_RE.match(line):
            next_table = self.split_string(
                TABLE_RE.match(line).group(1), '.', False)
            if table_name and not contains_list(next_table, table_name):
                self._store_table(sub_table, temp, is_array, data=data)
                break
            table = cut_list(next_table, table_name)
            if sub_table == table:
                raise TomlDecodeError(self.lineno, 'Duplicate table name '
                                      'in origin: %r' % sub_table)
            else:       # different table name
                self._store_table(sub_table, temp, is_array, data=data)
                sub_table = table
                is_array = False
        elif TABLE_ARRAY_RE.match(line):
            next_table = self.split_string(
                TABLE_ARRAY_RE.match(line).group(1), '.', False)
            if table_name and not contains_list(next_table, table_name):
                # Out of current loop
                # write current data dict to table dict
                self._store_table(sub_table, temp, is_array, data=data)
                break
            table = cut_list(next_table, table_name)
            if sub_table == table and not is_array:
                raise TomlDecodeError(self.lineno, 'Duplicate name of '
                                      'table and array of table: %r'
                                      % sub_table)
            else:       # Begin a nested loop
                # Write any temp data to table dict
                self._store_table(sub_table, temp, is_array, data=data)
                sub_table = table
                is_array = True
                self.parse(temp, next_table)
        elif KEY_RE.match(line):
            m = KEY_RE.match(line)
            keys = self.split_string(m.group(1), '.')
            value = self.converter.convert(line[m.end():])
            if value is None:
                raise TomlDecodeError(self.lineno, 'Value is missing')
            self._store_table(keys[:-1], {keys[-1]: value}, data=temp)
        else:
            raise TomlDecodeError(self.lineno,
                                  'Pattern is not recognized: %r' % line)
    # Rollback to the last line for the next parse
    # This will do nothing if EOF is hit
    self.instream.seek(self.instream.tell() - len(line))
    self.lineno -= 1
0.000637
def _parse_authors(html_chunk): """ Parse authors of the book. Args: html_chunk (obj): HTMLElement containing slice of the page with details. Returns: list: List of :class:`structures.Author` objects. Blank if no author \ found. """ authors_tags = html_chunk.match( ["div", {"class": "polozka_autor"}], "a" ) authors = [] for author_tag in authors_tags: # get name name = author_tag.getContent().strip() # skip tags without name if not name: continue # get url - if not found, set it to None url = author_tag.params.get("href", None) if url: url = normalize_url(BASE_URL, url) authors.append( Author(name, url) ) return authors
0.002415
def JsonResponseModel(self): """In this context, return raw JSON instead of proto.""" old_model = self.response_type_model self.__response_type_model = 'json' yield self.__response_type_model = old_model
0.00823
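The bare yield suggests this method is wrapped with contextlib.contextmanager on the client class; a usage sketch under that assumption (client and the service call are hypothetical):

# Temporarily ask the client for raw JSON instead of protobuf messages.
with client.JsonResponseModel():
    raw_json = client.projects.Get(request)   # hypothetical service call
# On exit, the previous response_type_model is restored.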
def _store(self, offset, value, size=1): """Stores value in memory as a big endian""" self.memory.write_BE(offset, value, size) for i in range(size): self._publish('did_evm_write_memory', offset + i, Operators.EXTRACT(value, (size - i - 1) * 8, 8))
0.010563
def redraw(self, whence=0): """Redraw the canvas. Parameters ---------- whence See :meth:`get_rgb_object`. """ with self._defer_lock: whence = min(self._defer_whence, whence) if not self.defer_redraw: if self._hold_redraw_cnt == 0: self._defer_whence = self._defer_whence_reset self.redraw_now(whence=whence) else: self._defer_whence = whence return elapsed = time.time() - self.time_last_redraw # If there is no redraw scheduled, or we are overdue for one: if (not self._defer_flag) or (elapsed > self.defer_lagtime): # If more time than defer_lagtime has passed since the # last redraw then just do the redraw immediately if elapsed > self.defer_lagtime: if self._hold_redraw_cnt > 0: #self._defer_flag = True self._defer_whence = whence return self._defer_whence = self._defer_whence_reset self.logger.debug("lagtime expired--forced redraw") self.redraw_now(whence=whence) return # Indicate that a redraw is necessary and record whence self._defer_flag = True self._defer_whence = whence # schedule a redraw by the end of the defer_lagtime secs = self.defer_lagtime - elapsed self.logger.debug("defer redraw (whence=%.2f) in %.f sec" % ( whence, secs)) self.reschedule_redraw(secs) else: # A redraw is already scheduled. Just record whence. self._defer_whence = whence self.logger.debug("update whence=%.2f" % (whence))
0.001521
def restore_model(cls, data): """Returns instance of ``cls`` with attributed loaded from ``data`` dict. """ obj = cls() for field in data: setattr(obj, field, data[field][Field.VALUE]) return obj
0.004386
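A self-contained sketch for restore_model above, assuming Field.VALUE is the dict key each serialized field stores its value under, and that User subclasses the (unnamed) base class defining the classmethod.

class Field:   # stand-in for the real constants; an assumption
    VALUE = 'value'

data = {'name': {Field.VALUE: 'ada'}, 'age': {Field.VALUE: 36}}
user = User.restore_model(data)   # hypothetical model class
print(user.name, user.age)        # ada 36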
def update(self, new_vals): """Add a dictionary of values to the current step without writing it to disk. """ for k, v in six.iteritems(new_vals): k = k.strip() if k in self.row: warnings.warn("Adding history key ({}) that is already set in this step".format( k), wandb.WandbWarning) self.row[k] = v
0.010127
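A sketch of update above on an already-constructed wandb History object (an assumption); keys are stripped, and re-setting a key within the same step raises the WandbWarning seen in the code.

history.update({'loss': 0.42, 'acc ': 0.91})   # 'acc ' is stripped to 'acc'
history.update({'loss': 0.40})                 # warns: 'loss' already set this step
print(history.row)                             # {'loss': 0.40, 'acc': 0.91}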
def list_subdomains_previous_page(self): """ When paging through subdomain results, this will return the previous page, using the same limit. If there are no more results, a NoMoreResults exception will be raised. """ uri = self._paging.get("subdomain", {}).get("prev_uri") if uri is None: raise exc.NoMoreResults("There are no previous pages of subdomains " "to list.") return self._list_subdomains(uri)
0.008
def add_forwarding_rules(self, forwarding_rules):
    """
    Adds new forwarding rules to a LoadBalancer.

    Args:
        forwarding_rules (obj:`list`): A list of `ForwardingRule` objects
    """
    rules_dict = [rule.__dict__ for rule in forwarding_rules]

    return self.get_data(
        "load_balancers/%s/forwarding_rules/" % self.id,
        type=POST,
        params={"forwarding_rules": rules_dict}
    )
0.004329
def _get_sghead(expnum):
    """
    Use the data web service to retrieve Stephen's astrometric header.

    :param expnum: CFHT exposure number you want the header for
    :rtype: list of astropy.io.fits.Header objects
    """
    version = 'p'
    key = "{}{}".format(expnum, version)
    if key in sgheaders:
        return sgheaders[key]
    url = "http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/data/pub/CFHTSG/{}{}.head".format(expnum, version)
    logging.getLogger("requests").setLevel(logging.ERROR)
    logging.debug("Attempting to retrieve {}".format(url))
    resp = requests.get(url)
    if resp.status_code != 200:
        raise IOError(errno.ENOENT, "Could not get {}".format(url))
    header_str_list = re.split('END \n', resp.content)
    # make the first entry in the list None
    headers = [None]
    for header_str in header_str_list:
        headers.append(fits.Header.fromstring(header_str, sep='\n'))
        logging.debug(headers[-1].get('EXTVER', -1))
    sgheaders[key] = headers
    return sgheaders[key]
0.001908
def inference(self, kern, X, likelihood, Y, Y_metadata=None):
    """
    Returns a GridPosterior class containing essential quantities of the posterior
    """
    N = X.shape[0]  # number of training points
    D = X.shape[1]  # number of dimensions

    Kds = np.zeros(D, dtype=object)  # vector holding the covariance per dimension
    Qs = np.zeros(D, dtype=object)   # vector holding the eigenvectors of the covariance per dimension
    QTs = np.zeros(D, dtype=object)  # vector holding the transposed eigenvectors per dimension
    V_kron = 1                       # Kronecker product of eigenvalues

    # retrieve the one-dimensional variation of the designated kernel
    oneDkernel = kern.get_one_dimensional_kernel(D)

    for d in range(D):
        xg = list(set(X[:, d]))  # extract unique values for a dimension
        xg = np.reshape(xg, (len(xg), 1))
        oneDkernel.lengthscale = kern.lengthscale[d]
        Kds[d] = oneDkernel.K(xg)
        [V, Q] = np.linalg.eig(Kds[d])
        V_kron = np.kron(V_kron, V)
        Qs[d] = Q
        QTs[d] = Q.T

    noise = likelihood.variance + 1e-8

    alpha_kron = self.kron_mvprod(QTs, Y)
    V_kron = V_kron.reshape(-1, 1)
    alpha_kron = alpha_kron / (V_kron + noise)
    alpha_kron = self.kron_mvprod(Qs, alpha_kron)

    log_likelihood = -0.5 * (np.dot(Y.T, alpha_kron) +
                             np.sum((np.log(V_kron + noise))) +
                             N * log_2_pi)

    # compute derivatives wrt the parameters Theta
    derivs = np.zeros(D + 2, dtype='object')
    for t in range(len(derivs)):
        dKd_dTheta = np.zeros(D, dtype='object')
        gamma = np.zeros(D, dtype='object')
        gam = 1
        for d in range(D):
            xg = list(set(X[:, d]))
            xg = np.reshape(xg, (len(xg), 1))
            oneDkernel.lengthscale = kern.lengthscale[d]
            if t < D:
                dKd_dTheta[d] = oneDkernel.dKd_dLen(xg, (t == d),
                                                    lengthscale=kern.lengthscale[t])  # derivative wrt lengthscale
            elif t == D:
                dKd_dTheta[d] = oneDkernel.dKd_dVar(xg)  # derivative wrt variance
            else:
                dKd_dTheta[d] = np.identity(len(xg))  # derivative wrt noise

            gamma[d] = np.diag(np.dot(np.dot(QTs[d], dKd_dTheta[d].T), Qs[d]))
            gam = np.kron(gam, gamma[d])

        gam = gam.reshape(-1, 1)
        kappa = self.kron_mvprod(dKd_dTheta, alpha_kron)
        derivs[t] = 0.5 * np.dot(alpha_kron.T, kappa) - 0.5 * np.sum(gam / (V_kron + noise))

    # separate derivatives
    dL_dLen = derivs[:D]
    dL_dVar = derivs[D]
    dL_dThetaL = derivs[D + 1]

    return GridPosterior(alpha_kron=alpha_kron, QTs=QTs, Qs=Qs, V_kron=V_kron), \
        log_likelihood, {'dL_dLen': dL_dLen, 'dL_dVar': dL_dVar, 'dL_dthetaL': dL_dThetaL}
0.014428
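The inference above leans on self.kron_mvprod, which multiplies a Kronecker product of per-dimension matrices by a vector without ever materializing the full N x N matrix. Below is a standalone NumPy reconstruction of that standard identity, offered for illustration; it is not the class's own implementation.

import numpy as np

def kron_mvprod(As, b):
    # Compute (A_1 kron ... kron A_D) @ b using only the small factors;
    # column-major reshapes mirror the usual MATLAB formulation.
    x = np.asarray(b).ravel()
    for A in reversed(As):
        X = x.reshape((A.shape[0], -1), order='F')
        x = (A @ X).T.ravel(order='F')
    return x.reshape(-1, 1)

rng = np.random.default_rng(0)
A1, A2 = rng.standard_normal((3, 3)), rng.standard_normal((4, 4))
b = rng.standard_normal((12, 1))
assert np.allclose(kron_mvprod([A1, A2], b), np.kron(A1, A2) @ b)

This turns an O(N^2) matrix-vector product into work linear in N times the sum of the per-dimension grid sizes, which is what makes grid-based GP inference tractable.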
def event_return(events): ''' Return event data via SMTP ''' for event in events: ret = event.get('data', False) if ret: returner(ret)
0.005556
def set_status(self, value): """ Set the status of the motor to the specified value if not already set. """ if not self._status == value: old = self._status self._status = value logger.info("{} changing status from {} to {}".format(self, old.name, value.name)) self._statusChanged(old, value)
0.008065
def create(self):
    """
    Creates a new record for a domain.

    The request is built from the following instance attributes (this
    method itself takes no arguments):

        type (str): The type of the DNS record (e.g. A, CNAME, TXT).
        name (str): The host name, alias, or service being defined by the
                    record.
        data (int): Variable data depending on record type.
        priority (int): The priority for SRV and MX records.
        port (int): The port for SRV records.
        ttl (int): The time to live for the record, in seconds.
        weight (int): The weight for SRV records.
        flags (int): An unsigned integer between 0-255 used for CAA records.
        tags (string): The parameter tag for CAA records. Valid values are
                       "issue", "wildissue", or "iodef"
    """
    input_params = {
        "type": self.type,
        "data": self.data,
        "name": self.name,
        "priority": self.priority,
        "port": self.port,
        "ttl": self.ttl,
        "weight": self.weight,
        "flags": self.flags,
        "tags": self.tags
    }

    data = self.get_data(
        "domains/%s/records" % (self.domain),
        type=POST,
        params=input_params,
    )

    if data:
        self.id = data['domain_record']['id']
0.002266
def filter_from_options(key, options): """ :param key: Key str in options :param options: Mapping object :return: New mapping object from 'options' in which the item with 'key' filtered >>> filter_from_options('a', dict(a=1, b=2)) {'b': 2} """ return anyconfig.utils.filter_options([k for k in options.keys() if k != key], options)
0.00241
def managed(name, port, services=None, user=None, password=None, bypass_domains=None, network_service='Ethernet'):
    '''
    Manages proxy settings for this minion

    name
        The proxy server to use

    port
        The port used by the proxy server

    services
        A list of the services that should use the given proxy settings, valid services include http, https and ftp.
        If no service is given all of the valid services will be used.

    user
        The username to use for the proxy server if required

    password
        The password to use for the proxy server if required

    bypass_domains
        An array of the domains that should bypass the proxy

    network_service
        The network service to apply the changes to, this is only necessary
        on macOS
    '''
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}}

    valid_services = ['http', 'https', 'ftp']

    if services is None:
        services = valid_services

    # Darwin
    if __grains__['os'] in ['MacOS', 'Darwin']:
        ret['changes'] = {'new': []}

        for service in services:
            current_settings = __salt__['proxy.get_{0}_proxy'.format(service)]()

            if current_settings.get('server') == name and current_settings.get('port') == six.text_type(port):
                ret['comment'] += '{0} proxy settings already set.\n'.format(service)
            elif __salt__['proxy.set_{0}_proxy'.format(service)](name, port, user, password, network_service):
                ret['comment'] += '{0} proxy settings updated correctly\n'.format(service)
                ret['changes']['new'].append({'service': service, 'server': name, 'port': port, 'user': user})
            else:
                ret['result'] = False
                ret['comment'] += 'Failed to set {0} proxy settings.\n'.format(service)

        if bypass_domains is not None:

            current_domains = __salt__['proxy.get_proxy_bypass']()

            if len(set(current_domains).intersection(bypass_domains)) == len(bypass_domains):
                ret['comment'] += 'Proxy bypass domains are already set correctly.\n'
            elif __salt__['proxy.set_proxy_bypass'](bypass_domains, network_service):
                ret['comment'] += 'Proxy bypass domains updated correctly\n'
                ret['changes']['new'].append({'bypass_domains': list(set(bypass_domains).difference(current_domains))})
            else:
                ret['result'] = False
                ret['comment'] += 'Failed to set bypass proxy domains.\n'

        if not ret['changes']['new']:
            del ret['changes']['new']

        return ret

    # Windows - Needs its own branch as all settings need to be set at the same time
    if __grains__['os'] in ['Windows']:
        changes_needed = False
        current_settings = __salt__['proxy.get_proxy_win']()
        current_domains = __salt__['proxy.get_proxy_bypass']()

        if current_settings.get('enabled', False) is True:
            for service in services:
                # We need to update one of our proxy servers
                if service not in current_settings:
                    changes_needed = True
                    break

                if current_settings[service]['server'] != name or current_settings[service]['port'] != six.text_type(port):
                    changes_needed = True
                    break
        else:
            # Proxy settings aren't enabled
            changes_needed = True

        # We need to update our bypass domains; guard against bypass_domains
        # being None, which would make the set intersection raise a TypeError
        if bypass_domains is not None and len(set(current_domains).intersection(bypass_domains)) != len(bypass_domains):
            changes_needed = True

        if changes_needed:
            if __salt__['proxy.set_proxy_win'](name, port, services, bypass_domains):
                ret['comment'] = 'Proxy settings updated correctly'
            else:
                ret['result'] = False
                ret['comment'] = 'Failed to set proxy settings.'
        else:
            ret['comment'] = 'Proxy settings already correct.'

    return ret
0.00416
def _trade(self, event):
    "Internal helper: drain the pending order queue and execute trades."
    print('==================================market engine: trade')
    print(self.order_handler.order_queue.pending)
    print('==================================')
    self.order_handler._trade()
    print('done')
0.007273
def _parse_request_method(request: web.Request): """Parse Access-Control-Request-Method header of the preflight request """ method = request.headers.get(hdrs.ACCESS_CONTROL_REQUEST_METHOD) if method is None: raise web.HTTPForbidden( text="CORS preflight request failed: " "'Access-Control-Request-Method' header is not specified") # FIXME: validate method string (ABNF: method = token), if parsing # fails, raise HTTPForbidden. return method
0.00365
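A quick check of the parser above using aiohttp's mocked-request helper; hdrs.ACCESS_CONTROL_REQUEST_METHOD is the 'Access-Control-Request-Method' header.

from aiohttp import web
from aiohttp.test_utils import make_mocked_request

req = make_mocked_request(
    'OPTIONS', '/api', headers={'Access-Control-Request-Method': 'PUT'})
assert _parse_request_method(req) == 'PUT'

try:
    _parse_request_method(make_mocked_request('OPTIONS', '/api'))
except web.HTTPForbidden:
    print('request without the header is rejected, as expected')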
def _append_custom(self, insert, input, before_prompt=False): """ A low-level method for appending content to the end of the buffer. If 'before_prompt' is enabled, the content will be inserted before the current prompt, if there is one. """ # Determine where to insert the content. cursor = self._control.textCursor() if before_prompt and (self._reading or not self._executing): cursor.setPosition(self._append_before_prompt_pos) else: cursor.movePosition(QtGui.QTextCursor.End) start_pos = cursor.position() # Perform the insertion. result = insert(cursor, input) # Adjust the prompt position if we have inserted before it. This is safe # because buffer truncation is disabled when not executing. if before_prompt and not self._executing: diff = cursor.position() - start_pos self._append_before_prompt_pos += diff self._prompt_pos += diff return result
0.00289
def create_connection():
    """Create a StrictRedis connection from the oz settings, reusing a
    cached connection when redis_cache_connections is enabled."""
    global _cached_connection
    settings = oz.settings

    if settings["redis_cache_connections"] and _cached_connection is not None:
        return _cached_connection
    else:
        conn = redis.StrictRedis(
            host=settings["redis_host"],
            port=settings["redis_port"],
            db=settings["redis_db"],
            password=settings["redis_password"],
            decode_responses=settings["redis_decode_responses"],
            ssl=settings["redis_use_ssl"],
            ssl_keyfile=settings["redis_ssl_keyfile"],
            ssl_certfile=settings["redis_ssl_certfile"],
            ssl_cert_reqs=settings["redis_ssl_cert_reqs"],
            ssl_ca_certs=settings["redis_ssl_ca_certs"]
        )

        if settings["redis_cache_connections"]:
            _cached_connection = conn

        return conn
0.00223
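A sketch of the settings dict that drives create_connection above; oz.settings is populated by the framework, so the keys below simply mirror the ones the function reads (values are illustrative, not defaults).

# Illustrative values for the oz settings read by create_connection.
settings_sketch = {
    "redis_host": "localhost",
    "redis_port": 6379,
    "redis_db": 0,
    "redis_password": None,
    "redis_decode_responses": True,
    "redis_use_ssl": False,
    "redis_ssl_keyfile": None,
    "redis_ssl_certfile": None,
    "redis_ssl_cert_reqs": None,
    "redis_ssl_ca_certs": None,
    "redis_cache_connections": True,   # reuse one StrictRedis across calls
}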
def clean_addresses(addresses): """ Cleans email address. :param addresses: List of strings (email addresses) :return: List of strings (cleaned email addresses) """ if addresses is None: return [] addresses = addresses.replace("\'", "") address_list = re.split('[,;]', addresses) clean_list = [] for address in address_list: temp_clean_address = clean_address(address) clean_list.append(temp_clean_address) return clean_list
0.002033
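A usage sketch for clean_addresses above; the per-address normalization happens in clean_address, which is defined elsewhere, so the exact output below is an assumption.

mixed = "'Ada' <ada@example.com>; bob@example.com,carol@example.com"
print(clean_addresses(mixed))
# plausibly: ['ada@example.com', 'bob@example.com', 'carol@example.com']
print(clean_addresses(None))   # []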
def get_acc_list(self):
    """
    :return: (ret, data)
    """
    query_processor = self._get_sync_query_processor(
        GetAccountList.pack_req, GetAccountList.unpack_rsp)

    kargs = {
        'user_id': self.get_login_user_id(),
        'conn_id': self.get_sync_conn_id()
    }

    ret_code, msg, acc_list = query_processor(**kargs)

    if ret_code != RET_OK:
        return RET_ERROR, msg

    # record the account list for the current market
    self.__last_acc_list = []

    for record in acc_list:
        trdMkt_list = record["trdMarket_list"]
        if self.__trd_mkt in trdMkt_list:
            self.__last_acc_list.append({
                "trd_env": record["trd_env"],
                "acc_id": record["acc_id"]})

    col_list = ["acc_id", "trd_env"]

    acc_table = pd.DataFrame(copy(self.__last_acc_list), columns=col_list)

    return RET_OK, acc_table
0.002137
def Y(self, value): """ sets the Y coordinate """ if isinstance(value, (int, float, long, types.NoneType)): self._y = value
0.01105
def output_thread(log, stdout, stderr, timeout_event, is_alive, quit,
        stop_output_event):
    """ this function is run in a separate thread.  it reads from the
    process's stdout stream (a streamreader), and waits for it to claim that
    it's done """

    poller = Poller()
    if stdout is not None:
        poller.register_read(stdout)
    if stderr is not None:
        poller.register_read(stderr)

    # this is our poll loop for polling stdout or stderr that is ready to
    # be read and processed.  if one of those streamreaders indicate that it
    # is done altogether being read from, we remove it from our list of
    # things to poll.  when no more things are left to poll, we leave this
    # loop and clean up
    while poller:
        changed = no_interrupt(poller.poll, 0.1)
        for f, events in changed:
            if events & (POLLER_EVENT_READ | POLLER_EVENT_HUP):
                log.debug("%r ready to be read from", f)
                done = f.read()
                if done:
                    poller.unregister(f)
            elif events & POLLER_EVENT_ERROR:
                # for some reason, we have to just ignore streams that have
                # had an error.  i'm not exactly sure why, but don't remove
                # this until we figure that out, and create a test for it
                pass

        if timeout_event and timeout_event.is_set():
            break

        if stop_output_event.is_set():
            break

    # we need to wait until the process is guaranteed dead before closing our
    # outputs, otherwise we risk a SIGPIPE
    alive, _ = is_alive()
    while alive:
        quit.wait(1)
        alive, _ = is_alive()

    if stdout:
        stdout.close()

    if stderr:
        stderr.close()
0.00227
def _check_fpos(self, fp_, fpos, offset, block):
    """Check that the current file position plus the given offset matches
    the expected position for the named header block; warn if not."""
    if fp_.tell() + offset != fpos:
        warnings.warn("Actual "+block+" header size does not match expected")
    return
0.012552
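A sketch of the check above against an in-memory file; reader stands in for an instance of the class that defines _check_fpos (the method otherwise ignores self).

import io
import warnings

fp = io.BytesIO(b'\x00' * 64)
fp.seek(8)
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    reader._check_fpos(fp, fpos=20, offset=8, block='data')   # 8 + 8 != 20
print(len(caught))   # 1 -> the mismatch warning fired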
def resizeEvent(self, event): """Override from QAbstractScrollArea. Resize the viewer widget when the viewport is resized.""" vp = self.viewport() rect = vp.geometry() x1, y1, x2, y2 = rect.getCoords() width = x2 - x1 + 1 height = y2 - y1 + 1 self.v_w.resize(width, height)
0.005917