Columns: text — string (length 78 to 104k characters); score — float64 (range 0 to 0.18).
def select(table, cols="*", where=(), group="", order=(), limit=(), **kwargs):
    """Convenience wrapper for database SELECT."""
    where = dict(where, **kwargs).items()
    sql, args = makeSQL("SELECT", table, cols, where, group, order, limit)
    return execute(sql, args)
0.003571
def ToJsonString(self):
    """Converts Timestamp to RFC 3339 date string format.

    Returns:
      A string converted from timestamp. The string is always Z-normalized
      and uses 3, 6 or 9 fractional digits as required to represent the
      exact time. Example of the return format: '1972-01-01T10:00:20.021Z'
    """
    nanos = self.nanos % _NANOS_PER_SECOND
    total_sec = self.seconds + (self.nanos - nanos) // _NANOS_PER_SECOND
    seconds = total_sec % _SECONDS_PER_DAY
    days = (total_sec - seconds) // _SECONDS_PER_DAY
    dt = datetime(1970, 1, 1) + timedelta(days, seconds)

    result = dt.isoformat()
    if (nanos % 1e9) == 0:
        # If there are 0 fractional digits, the fractional
        # point '.' should be omitted when serializing.
        return result + 'Z'
    if (nanos % 1e6) == 0:
        # Serialize 3 fractional digits.
        return result + '.%03dZ' % (nanos / 1e6)
    if (nanos % 1e3) == 0:
        # Serialize 6 fractional digits.
        return result + '.%06dZ' % (nanos / 1e3)
    # Serialize 9 fractional digits.
    return result + '.%09dZ' % nanos
0.00736
def mchirp_sampler_imf(**kwargs):
    ''' Draw chirp mass samples for power-law model

    Parameters
    ----------
    **kwargs: string
        Keyword arguments as model parameters and number of samples

    Returns
    -------
    mchirp-astro: array
        The chirp mass samples for the population
    '''
    m1, m2 = draw_imf_samples(**kwargs)
    mchirp_astro = mchirp_from_mass1_mass2(m1, m2)

    return mchirp_astro
0.002183
def generic_api_view(injector):
    """Create DRF generic class-based API view from injector class."""
    handler = create_handler(GenericAPIView, injector)
    apply_http_methods(handler, injector)
    apply_api_view_methods(handler, injector)
    apply_generic_api_view_methods(handler, injector)
    return injector.let(as_view=handler.as_view)
0.002865
def _diversity_metric(solution, population):
    """Return diversity value for solution compared to given population.

    Metric is sum of distance between solution and each solution in
    population, normalized to [0.0, 1.0].
    """
    # Edge case for empty population
    # If there are no other solutions, the given solution has maximum diversity
    if population == []:
        return 1.0

    return (
        sum([_manhattan_distance(solution, other) for other in population])
        # Normalize (assuming each value in solution is in range [0.0, 1.0])
        # NOTE: len(solution) is maximum manhattan distance
        / (len(population) * len(solution)))
0.001495
def do_not_disturb(self):
    """Get if do not disturb is enabled."""
    return bool(strtobool(str(self._settings_json.get(
        CONST.SETTINGS_DO_NOT_DISTURB))))
0.011236
def create_asymmetric_key_pair(self, algorithm, length): """ Create an asymmetric key pair. Args: algorithm(CryptographicAlgorithm): An enumeration specifying the algorithm for which the created keys will be compliant. length(int): The length of the keys to be created. This value must be compliant with the constraints of the provided algorithm. Returns: dict: A dictionary containing the public key data, with at least the following key/value fields: * value - the bytes of the key * format - a KeyFormatType enumeration for the bytes format dict: A dictionary containing the private key data, identical in structure to the one above. Raises: InvalidField: Raised when the algorithm is unsupported or the length is incompatible with the algorithm. CryptographicFailure: Raised when the key generation process fails. Example: >>> engine = CryptographyEngine() >>> key = engine.create_asymmetric_key( ... CryptographicAlgorithm.RSA, 2048) """ if algorithm not in self._asymmetric_key_algorithms.keys(): raise exceptions.InvalidField( "The cryptographic algorithm ({0}) is not a supported " "asymmetric key algorithm.".format(algorithm) ) engine_method = self._asymmetric_key_algorithms.get(algorithm) return engine_method(length)
0.001242
def _i2c_read_bytes(self, length=1): """Read the specified number of bytes from the I2C bus. Length is the number of bytes to read (must be 1 or more). """ for i in range(length-1): # Read a byte and send ACK. self._command.append('\x20\x00\x00\x13\x00\x00') # Make sure pins are back in idle state with clock low and data high. self._ft232h.output_pins({0: GPIO.LOW, 1: GPIO.HIGH}, write=False) self._command.append(self._ft232h.mpsse_gpio()) # Read last byte and send NAK. self._command.append('\x20\x00\x00\x13\x00\xFF') # Make sure pins are back in idle state with clock low and data high. self._ft232h.output_pins({0: GPIO.LOW, 1: GPIO.HIGH}, write=False) self._command.append(self._ft232h.mpsse_gpio()) # Increase expected number of bytes. self._expected += length
0.003264
def find_credentials():
    '''
    Cycle through all the possible credentials and return the first one that
    works.
    '''
    # if the username and password were already found don't go through the
    # connection process again
    if 'username' in DETAILS and 'password' in DETAILS:
        return DETAILS['username'], DETAILS['password']

    passwords = DETAILS['passwords']
    for password in passwords:
        DETAILS['password'] = password
        if not __salt__['vsphere.test_vcenter_connection']():
            # We are unable to authenticate
            continue
        # If we have data returned from above, we've successfully authenticated.
        return DETAILS['username'], password
    # We've reached the end of the list without successfully authenticating.
    raise salt.exceptions.VMwareConnectionError('Cannot complete login due to '
                                                'incorrect credentials.')
0.002134
def freeze(self, no_etag=False):
    """Call this method if you want to make your response object ready for
    pickling.  This buffers the generator if there is one.  This also
    sets the etag unless `no_etag` is set to `True`.
    """
    if not no_etag:
        self.add_etag()
    super(ETagResponseMixin, self).freeze()
0.005634
def _close(self, status, do_callbacks=True):
    """
    Takes the status on which it should leave the connection
    and an optional boolean parameter to dispatch the disconnected
    and close callbacks if there are any.
    """
    if self.is_closed:
        self._status = status
        return
    self._status = Client.CLOSED

    # Stop background tasks
    yield self._end_flusher_loop()

    if self._ping_timer is not None and self._ping_timer.is_running():
        self._ping_timer.stop()

    if not self.io.closed():
        self.io.close()

    # Cleanup subscriptions since not reconnecting so no need
    # to replay the subscriptions anymore.
    # Iterate over a copy so the dict can be mutated safely inside the loop.
    for ssid, sub in list(self._subs.items()):
        self._subs.pop(ssid, None)
        self._remove_subscription(sub)

    if do_callbacks:
        if self._disconnected_cb is not None:
            self._disconnected_cb()
        if self._closed_cb is not None:
            self._closed_cb()
0.001908
def plural(self):
    '''
    Tries to scrape the plural version from uitmuntend.nl.
    '''
    element = self._first('NN')
    if element:
        element = element.split('\r\n')[0]
        if ' | ' in element:
            # This means there is a plural
            singular, plural = element.split(' | ')
            return [plural.split(' ')[1]]
        else:
            # This means there is no plural
            return ['']
    return [None]
0.036939
def describe_features(self, traj):
    """
    Returns a sliced version of the feature descriptor

    Parameters
    ----------
    traj : MDtraj trajectory object

    Returns
    -------
    list of sliced dictionaries describing each feature.
    """
    features_list = self.feat.describe_features(traj)
    return [features_list[i] for i in self.indices]
0.004963
def resample_returns( returns, func, seed=0, num_trials=100 ): """ Resample the returns and calculate any statistic on every new sample. https://en.wikipedia.org/wiki/Resampling_(statistics) :param returns (Series, DataFrame): Returns :param func: Given the resampled returns calculate a statistic :param seed: Seed for random number generator :param num_trials: Number of times to resample and run the experiment :return: Series of resampled statistics """ # stats = [] if type(returns) is pd.Series: stats = pd.Series(index=range(num_trials)) elif type(returns) is pd.DataFrame: stats = pd.DataFrame( index=range(num_trials), columns=returns.columns ) else: raise(TypeError("returns needs to be a Series or DataFrame!")) n = returns.shape[0] for i in range(num_trials): random_indices = resample(returns.index, n_samples=n, random_state=seed + i) stats.loc[i] = func(returns.loc[random_indices]) return stats
0.001847
def delete_channel(self, channel_id):
    """Deletes channel
    """
    req = requests.delete(self.channel_path(channel_id))
    return req
0.012821
def get_unique_backends():
    """Gets the unique backends that are available.

    Returns:
        list: Unique available backends.

    Raises:
        QiskitError: No backends available.
    """
    backends = IBMQ.backends()
    unique_hardware_backends = []
    unique_names = []
    for back in backends:
        if back.name() not in unique_names and not back.configuration().simulator:
            unique_hardware_backends.append(back)
            unique_names.append(back.name())
    if not unique_hardware_backends:
        raise QiskitError('No backends available.')
    return unique_hardware_backends
0.003257
def prune_feed_map(meta_graph, feed_map):
    """Function to prune the feedmap of nodes which no longer exist."""
    node_names = [x.name + ":0" for x in meta_graph.graph_def.node]
    keys_to_delete = []
    for k, _ in feed_map.items():
        if k not in node_names:
            keys_to_delete.append(k)
    for k in keys_to_delete:
        del feed_map[k]
0.020772
def stopping_function(results, args=None, rstate=None, M=None, return_vals=False): """ The default stopping function utilized by :class:`DynamicSampler`. Zipped parameters are passed to the function via :data:`args`. Assigns the run a stopping value based on a weighted average of the stopping values for the posterior and evidence:: stop = pfrac * stop_post + (1.- pfrac) * stop_evid The evidence stopping value is based on the estimated evidence error (i.e. standard deviation) relative to a given threshold:: stop_evid = evid_std / evid_thresh The posterior stopping value is based on the fractional error (i.e. standard deviation / mean) in the Kullback-Leibler (KL) divergence relative to a given threshold:: stop_post = (kld_std / kld_mean) / post_thresh Estimates of the mean and standard deviation are computed using `n_mc` realizations of the input using a provided `'error'` keyword (either `'jitter'` or `'simulate'`, which call related functions :meth:`jitter_run` and :meth:`simulate_run` in :mod:`dynesty.utils`, respectively, or `'sim_approx'`, which boosts `'jitter'` by a factor of two). Returns the boolean `stop <= 1`. If `True`, the :class:`DynamicSampler` will stop adding new samples to our results. Parameters ---------- results : :class:`Results` instance :class:`Results` instance. args : dictionary of keyword arguments, optional Arguments used to set the stopping values. Default values are `pfrac = 1.0`, `evid_thresh = 0.1`, `post_thresh = 0.02`, `n_mc = 128`, `error = 'sim_approx'`, and `approx = True`. rstate : `~numpy.random.RandomState`, optional `~numpy.random.RandomState` instance. M : `map` function, optional An alias to a `map`-like function. This allows users to pass functions from pools (e.g., `pool.map`) to compute realizations in parallel. By default the standard `map` function is used. return_vals : bool, optional Whether to return the stopping value (and its components). Default is `False`. Returns ------- stop_flag : bool Boolean flag indicating whether we have passed the desired stopping criteria. stop_vals : tuple of shape (3,), optional The individual stopping values `(stop_post, stop_evid, stop)` used to determine the stopping criteria. """ # Initialize values. if args is None: args = dict({}) if rstate is None: rstate = np.random if M is None: M = map # Initialize hyperparameters. pfrac = args.get('pfrac', 1.0) if not 0. <= pfrac <= 1.: raise ValueError("The provided `pfrac` {0} is not between 0. and 1." .format(pfrac)) evid_thresh = args.get('evid_thresh', 0.1) if pfrac < 1. and evid_thresh < 0.: raise ValueError("The provided `evid_thresh` {0} is not non-negative " "even though `1. - pfrac` is {1}." .format(evid_thresh, 1. - pfrac)) post_thresh = args.get('post_thresh', 0.02) if pfrac > 0. and post_thresh < 0.: raise ValueError("The provided `post_thresh` {0} is not non-negative " "even though `pfrac` is {1}." .format(post_thresh, pfrac)) n_mc = args.get('n_mc', 128) if n_mc <= 1: raise ValueError("The number of realizations {0} must be greater " "than 1.".format(n_mc)) elif n_mc < 20: warnings.warn("Using a small number of realizations might result in " "excessively noisy stopping value estimates.") error = args.get('error', 'sim_approx') if error not in {'jitter', 'simulate', 'sim_approx'}: raise ValueError("The chosen `'error'` option {0} is not valid." .format(error)) if error == 'sim_approx': error = 'jitter' boost = 2. else: boost = 1. approx = args.get('approx', True) # Compute realizations of ln(evidence) and the KL divergence. 
rlist = [results for i in range(n_mc)] error_list = [error for i in range(n_mc)] approx_list = [approx for i in range(n_mc)] args = zip(rlist, error_list, approx_list) outputs = list(M(_kld_error, args)) kld_arr, lnz_arr = np.array([(kld[-1], res.logz[-1]) for kld, res in outputs]).T # Evidence stopping value. lnz_std = np.std(lnz_arr) stop_evid = np.sqrt(boost) * lnz_std / evid_thresh # Posterior stopping value. kld_mean, kld_std = np.mean(kld_arr), np.std(kld_arr) stop_post = boost * (kld_std / kld_mean) / post_thresh # Effective stopping value. stop = pfrac * stop_post + (1. - pfrac) * stop_evid if return_vals: return stop <= 1., (stop_post, stop_evid, stop) else: return stop <= 1.
0.0002
def get_files_to_commit(autooptions): """ Look through the local directory to pick up files to check """ workingdir = autooptions['working-directory'] includes = autooptions['track']['includes'] excludes = autooptions['track']['excludes'] # transform glob patterns to regular expressions # print("Includes ", includes) includes = r'|'.join([fnmatch.translate(x) for x in includes]) excludes = r'|'.join([fnmatch.translate(x) for x in excludes]) or r'$.' matched_files = [] for root, dirs, files in os.walk(workingdir): # print("Looking at ", files) # exclude dirs # dirs[:] = [os.path.join(root, d) for d in dirs] dirs[:] = [d for d in dirs if not re.match(excludes, d)] # exclude/include files files = [f for f in files if not re.match(excludes, f)] #print("Files after excludes", files) #print(includes) files = [f for f in files if re.match(includes, f)] #print("Files after includes", files) files = [os.path.join(root, f) for f in files] matched_files.extend(files) return matched_files
0.006092
def total(self, xbin1=1, xbin2=-2): """ Return the total yield and its associated statistical and systematic uncertainties. """ integral, stat_error = self.hist.integral( xbin1=xbin1, xbin2=xbin2, error=True) # sum systematics in quadrature ups = [0] dns = [0] for sys_name in self.sys_names(): sys_low, sys_high = self.sys_hist(sys_name) up = sys_high.integral(xbin1=xbin1, xbin2=xbin2) - integral dn = sys_low.integral(xbin1=xbin1, xbin2=xbin2) - integral if up > 0: ups.append(up**2) else: dns.append(up**2) if dn > 0: ups.append(dn**2) else: dns.append(dn**2) syst_error = (sqrt(sum(ups)), sqrt(sum(dns))) return integral, stat_error, syst_error
0.00223
def group_dict_set(iterator: Iterable[Tuple[A, B]]) -> Mapping[A, Set[B]]:
    """Make a dict that accumulates the values for each key in an iterator of doubles."""
    d = defaultdict(set)
    for key, value in iterator:
        d[key].add(value)
    return dict(d)
0.007519
def deserialize_assign(self, workflow, start_node): """ Reads the "pre-assign" or "post-assign" tag from the given node. start_node -- the xml node (xml.dom.minidom.Node) """ name = start_node.getAttribute('name') attrib = start_node.getAttribute('field') value = start_node.getAttribute('value') kwargs = {} if name == '': _exc('name attribute required') if attrib != '' and value != '': _exc('Both, field and right-value attributes found') elif attrib == '' and value == '': _exc('field or value attribute required') elif value != '': kwargs['right'] = value else: kwargs['right_attribute'] = attrib return operators.Assign(name, **kwargs)
0.002457
def fake_exc_info(exc_info, filename, lineno): """Helper for `translate_exception`.""" exc_type, exc_value, tb = exc_info # figure the real context out if tb is not None: real_locals = tb.tb_frame.f_locals.copy() ctx = real_locals.get('context') if ctx: locals = ctx.get_all() else: locals = {} for name, value in real_locals.iteritems(): if name.startswith('l_') and value is not missing: locals[name[2:]] = value # if there is a local called __jinja_exception__, we get # rid of it to not break the debug functionality. locals.pop('__jinja_exception__', None) else: locals = {} # assamble fake globals we need globals = { '__name__': filename, '__file__': filename, '__jinja_exception__': exc_info[:2], # we don't want to keep the reference to the template around # to not cause circular dependencies, but we mark it as Jinja # frame for the ProcessedTraceback '__jinja_template__': None } # and fake the exception code = compile('\n' * (lineno - 1) + raise_helper, filename, 'exec') # if it's possible, change the name of the code. This won't work # on some python environments such as google appengine try: if tb is None: location = 'template' else: function = tb.tb_frame.f_code.co_name if function == 'root': location = 'top-level template code' elif function.startswith('block_'): location = 'block "%s"' % function[6:] else: location = 'template' code = CodeType(0, code.co_nlocals, code.co_stacksize, code.co_flags, code.co_code, code.co_consts, code.co_names, code.co_varnames, filename, location, code.co_firstlineno, code.co_lnotab, (), ()) except: pass # execute the code and catch the new traceback try: exec code in globals, locals except: exc_info = sys.exc_info() new_tb = exc_info[2].tb_next # return without this frame return exc_info[:2] + (new_tb,)
0.001293
def coda_output(pymc_object, name=None, chain=-1): """Generate output files that are compatible with CODA :Arguments: pymc_object : Model or Node A PyMC object containing MCMC output. """ print_() print_("Generating CODA output") print_('=' * 50) if name is None: name = pymc_object.__name__ # Open trace file trace_file = open(name + '_coda.out', 'w') # Open index file index_file = open(name + '_coda.ind', 'w') variables = [pymc_object] if hasattr(pymc_object, 'stochastics'): variables = pymc_object.stochastics # Initialize index index = 1 # Loop over all parameters for v in variables: vname = v.__name__ print_("Processing", vname) try: index = _process_trace( trace_file, index_file, v.trace(chain=chain), vname, index) except TypeError: pass # Close files trace_file.close() index_file.close()
0.000943
def returner(ret):
    '''
    Send a slack message with the data through a webhook

    :param ret: The Salt return
    :return: The result of the post
    '''
    _options = _get_options(ret)

    webhook = _options.get('webhook', None)
    show_tasks = _options.get('show_tasks')
    author_icon = _options.get('author_icon')

    if not webhook or webhook == '':
        log.error('%s.webhook not defined in salt config', __virtualname__)
        return

    report = _generate_report(ret, show_tasks)

    if report.get('success'):
        title = _options.get('success_title')
    else:
        title = _options.get('failure_title')

    slack = _post_message(webhook, author_icon, title, report)

    return slack
0.001389
def register_messages_from_checker(self, checker):
    """Register all messages from a checker.

    :param BaseChecker checker:
    """
    checker.check_consistency()
    for message in checker.messages:
        self.register_message(message)
0.007463
def dice_pairwise_und(a1, a2):
    '''
    Calculates pairwise dice similarity for each vertex between two
    matrices. Treats the matrices as binary and undirected.

    Parameters
    ----------
    A1 : NxN np.ndarray
        Matrix 1
    A2 : NxN np.ndarray
        Matrix 2

    Returns
    -------
    D : Nx1 np.ndarray
        dice similarity vector
    '''
    a1 = binarize(a1, copy=True)
    a2 = binarize(a2, copy=True)  # ensure matrices are binary

    n = len(a1)
    np.fill_diagonal(a1, 0)
    np.fill_diagonal(a2, 0)  # set diagonals to 0

    d = np.zeros((n,))  # dice similarity

    # calculate the common neighbors for each vertex
    for i in range(n):
        d[i] = 2 * (np.sum(np.logical_and(a1[:, i], a2[:, i])) /
                    (np.sum(a1[:, i]) + np.sum(a2[:, i])))

    return d
0.001227
def _postprocess_options(dbg, opts): ''' Handle options (`opts') that feed into the debugger (`dbg')''' # Set dbg.settings['printset'] print_events = [] if opts.fntrace: print_events = ['c_call', 'c_return', 'call', 'return'] if opts.linetrace: print_events += ['line'] if len(print_events): dbg.settings['printset'] = frozenset(print_events) pass for setting in ('annotate', 'basename', 'different'): dbg.settings[setting] = getattr(opts, setting) pass if getattr(opts, 'highlight'): dbg.settings['highlight'] = opts.highlight else: dbg.settings['highlight'] = 'plain' # if getattr(opts, 'style') and opts.style != 'none': # dbg.settings['style'] = opts.style # else: # dbg.settings['style'] = None dbg.settings['style'] = None # Normally we want to set Mdebugger.debugger_obj so that one can # put trepan.debugger breakpoints in a program and not have more # than one debugger running. More than one debugger may confuse # users, e.g. set different might stop at the same line once for # each debugger. if not opts.private: Mdebugger.debugger_obj = dbg pass # if opts.errors: # try: # dbg.stderr = open(opts.errors, 'w') # except IOError, (errno, strerror): # print "I/O in opening debugger output file %s" % opts.errors # print "error(%s): %s" % (errno, strerror) # except ValueError: # print "Could not convert data to an integer." # except: # print "Unexpected error in opening debugger output " # "file %s" % opts.errors # print sys.exc_info()[0] # sys.exit(2) # if opts.execute: # dbg.cmdqueue = list(opts.execute.split(';;')) if opts.post_mortem: Mapi.debugger_on_post_mortem() pass return
0.001506
def upgrade(refresh=False, root=None, **kwargs): ''' .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0 On minions running systemd>=205, `systemd-run(1)`_ is now used to isolate commands which modify installed packages from the ``salt-minion`` daemon's control group. This is done to keep systemd from killing any pacman commands spawned by Salt when the ``salt-minion`` service is restarted. (see ``KillMode`` in the `systemd.kill(5)`_ manpage for more information). If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html Run a full system upgrade, a pacman -Syu refresh Whether or not to refresh the package database before installing. Returns a dictionary containing the changes: .. code-block:: python {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Example: .. code-block:: bash salt '*' pkg.upgrade ''' ret = {'changes': {}, 'result': True, 'comment': ''} old = list_pkgs() cmd = [] if salt.utils.systemd.has_scope(__context__) \ and __salt__['config.get']('systemd.scope', True): cmd.extend(['systemd-run', '--scope']) cmd.extend(['pacman', '-Su', '--noprogressbar', '--noconfirm']) if salt.utils.data.is_true(refresh): cmd.append('-y') if root is not None: cmd.extend(('-r', root)) result = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if result['retcode'] != 0: raise CommandExecutionError( 'Problem encountered upgrading packages', info={'changes': ret, 'result': result} ) return ret
0.001331
def prepare_video_params(self, title=None, tags='Others', description='', copyright_type='original', public_type='all', category=None, watch_password=None, latitude=None, longitude=None, shoot_time=None ): """ util method for create video params to upload. Only need to provide a minimum of two essential parameters: title and tags, other video params are optional. All params spec see: http://cloud.youku.com/docs?id=110#create . Args: title: string, 2-50 characters. tags: string, 1-10 tags joind with comma. description: string, less than 2000 characters. copyright_type: string, 'original' or 'reproduced' public_type: string, 'all' or 'friend' or 'password' watch_password: string, if public_type is password. latitude: double. longitude: double. shoot_time: datetime. Returns: dict params that upload/create method need. """ params = {} if title is None: title = self.file_name elif len(title) > 80: title = title[:80] if len(description) > 2000: description = description[0:2000] params['title'] = title params['tags'] = tags params['description'] = description params['copyright_type'] = copyright_type params['public_type'] = public_type if category: params['category'] = category if watch_password: params['watch_password'] = watch_password if latitude: params['latitude'] = latitude if longitude: params['longitude'] = longitude if shoot_time: params['shoot_time'] = shoot_time return params
0.003132
def get_gdf(stop=True): """Returns a string containing a GDF file. Setting stop to True will cause the trace to stop. """ ret = ['nodedef>name VARCHAR, label VARCHAR, hits INTEGER, ' + \ 'calls_frac DOUBLE, total_time_frac DOUBLE, ' + \ 'total_time DOUBLE, color VARCHAR, width DOUBLE'] for func, hits in func_count.items(): calls_frac, total_time_frac, total_time = _frac_calculation(func, hits) col = settings['node_colour'](calls_frac, total_time_frac) color = ','.join([str(round(float(c) * 255)) for c in col.split()]) ret.append('%s,%s,%s,%s,%s,%s,\'%s\',%s' % (func, func, hits, \ calls_frac, total_time_frac, total_time, color, \ math.log(hits * 10))) ret.append('edgedef>node1 VARCHAR, node2 VARCHAR, color VARCHAR') for fr_key, fr_val in call_dict.items(): if fr_key == '': continue for to_key, to_val in fr_val.items(): calls_frac, total_time_frac, total_time = \ _frac_calculation(to_key, to_val) col = settings['edge_colour'](calls_frac, total_time_frac) color = ','.join([str(round(float(c) * 255)) for c in col.split()]) ret.append('%s,%s,\'%s\'' % (fr_key, to_key, color)) ret = '\n'.join(ret) return ret
0.005952
def walkRelocatables(self, shouldRelocateCommand=_shouldRelocateCommand): """ for all relocatable commands yield (command_index, command_name, filename) """ for (idx, (lc, cmd, data)) in enumerate(self.commands): if shouldRelocateCommand(lc.cmd): name = _RELOCATABLE_NAMES[lc.cmd] ofs = cmd.name - sizeof(lc.__class__) - sizeof(cmd.__class__) yield idx, name, data[ofs:data.find(B('\x00'), ofs)].decode( sys.getfilesystemencoding())
0.003597
def expand_filename_pattern(self, pattern, base_dir, sourcefile=None): """ The function expand_filename_pattern expands a filename pattern to a sorted list of filenames. The pattern can contain variables and wildcards. If base_dir is given and pattern is not absolute, base_dir and pattern are joined. """ # replace vars like ${benchmark_path}, # with converting to list and back, we can use the function 'substitute_vars()' expandedPattern = substitute_vars([pattern], self, sourcefile) assert len(expandedPattern) == 1 expandedPattern = expandedPattern[0] if expandedPattern != pattern: logging.debug("Expanded variables in expression %r to %r.", pattern, expandedPattern) fileList = util.expand_filename_pattern(expandedPattern, base_dir) # sort alphabetical, fileList.sort() if not fileList: logging.warning("No files found matching %r.", pattern) return fileList
0.004757
def attach_run_command(cmd):
    """
    Run a command when attaching

    Please do not call directly, this will execvp the command.
    This is to be used in conjunction with the attach method
    of a container.
    """
    if isinstance(cmd, tuple):
        return _lxc.attach_run_command(cmd)
    elif isinstance(cmd, list):
        return _lxc.attach_run_command((cmd[0], cmd))
    else:
        return _lxc.attach_run_command((cmd, [cmd]))
0.002165
def execute(self, **kwargs): """ Execute the interactive guessing procedure. :param show: Whether or not to show the figure. Useful for testing. :type show: bool :param block: Blocking call to matplotlib :type show: bool Any additional keyword arguments are passed to matplotlib.pyplot.show(). """ show = kwargs.pop('show') if show: # self.fig.show() # Apparently this does something else, # see https://github.com/matplotlib/matplotlib/issues/6138 plt.show(**kwargs)
0.003367
def etau_madau(wave, z, **kwargs): """Madau 1995 extinction for a galaxy at given redshift. This is the Lyman-alpha prescription from the photo-z code BPZ. The Lyman-alpha forest approximately has an effective "throughput" which is a function of redshift and rest-frame wavelength. One would multiply the SEDs by this factor before passing it through an instrument filter. This approximation is from Footnote 3 of :ref:`Madau et al. (1995) <synphot-ref-madau1995>`. This is claimed accurate to 5%. The scatter in this factor (due to different lines of sight) is huge, as shown in Madau's Fig. 3 (top panel); The figure's bottom panel shows a redshifted version of the "exact" prescription. Parameters ---------- wave : array-like or `~astropy.units.quantity.Quantity` Redshifted wavelength values. Non-redshifted wavelength is ``wave / (1 + z)``. z : number Redshift. kwargs : dict Equivalencies for unit conversion, see :func:`~synphot.units.validate_quantity`. Returns ------- extcurve : `ExtinctionCurve` Extinction curve to apply to the redshifted spectrum. """ if not isinstance(z, numbers.Real): raise exceptions.SynphotError( 'Redshift must be a real scalar number.') if np.isscalar(wave) or len(wave) <= 1: raise exceptions.SynphotError('Wavelength has too few data points') wave = units.validate_quantity(wave, u.AA, **kwargs).value ll = 912.0 c = np.array([3.6e-3, 1.7e-3, 1.2e-3, 9.3e-4]) el = np.array([1216, 1026, 973, 950], dtype=np.float) # noqa tau = np.zeros_like(wave, dtype=np.float) xe = 1.0 + z # Lyman series for i in range(len(el)): tau = np.where(wave <= el[i] * xe, tau + c[i] * (wave / el[i]) ** 3.46, tau) # Photoelectric absorption xc = wave / ll xc3 = xc ** 3 tau = np.where(wave <= ll * xe, (tau + 0.25 * xc3 * (xe ** 0.46 - xc ** 0.46) + 9.4 * xc ** 1.5 * (xe ** 0.18 - xc ** 0.18) - 0.7 * xc3 * (xc ** (-1.32) - xe ** (-1.32)) - 0.023 * (xe ** 1.68 - xc ** 1.68)), tau) thru = np.where(tau > 700., 0., np.exp(-tau)) meta = {'descrip': 'Madau 1995 extinction for z={0}'.format(z)} return ExtinctionCurve(ExtinctionModel1D, points=wave, lookup_table=thru, meta=meta)
0.000396
def parse_readme(): """ Crude parsing of modules/README.md returns a dict of {<module_name>: <documentation>} """ name = None re_mod = re.compile(r'^\#\#\# <a name="(?P<name>[a-z_0-9]+)"></a>') readme_file = os.path.join(modules_directory(), "README.md") modules_dict = {} with open(readme_file) as f: for row in f.readlines(): match = re_mod.match(row) if match: name = match.group("name") modules_dict[name] = [] continue if row.startswith("---"): name = None continue if name: modules_dict[name].append(row) return modules_dict
0.001385
def progress_iter(progress):
    '''
    Initialize and return a progress bar iter
    '''
    widgets = [progressbar.Percentage(), ' ',
               progressbar.Bar(), ' ',
               progressbar.Timer(), ' Returns: [',
               progressbar.Counter(), '/{0}]'.format(progress['minion_count'])]
    bar = progressbar.ProgressBar(widgets=widgets,
                                  maxval=progress['minion_count'])
    bar.start()
    return bar
0.007979
def get(self, sid):
    """
    Constructs a TerminatingSipDomainContext

    :param sid: The unique string that identifies the resource

    :returns: twilio.rest.trunking.v1.trunk.terminating_sip_domain.TerminatingSipDomainContext
    :rtype: twilio.rest.trunking.v1.trunk.terminating_sip_domain.TerminatingSipDomainContext
    """
    return TerminatingSipDomainContext(self._version, trunk_sid=self._solution['trunk_sid'], sid=sid, )
0.010753
def _poll_loop(self): """At self.poll_period poll for changes""" next_poll = time.time() while True: next_poll += self._poll_period timeout = next_poll - time.time() if timeout < 0: timeout = 0 try: return self._stop_queue.get(timeout=timeout) except TimeoutError: # No stop, no problem pass try: self.handle_changes(self.client.get_changes()) except Exception: # TODO: should fault here? self.log.exception("Error while getting changes")
0.00304
def getResources(self,ep,noResp=False,cacheOnly=False): """ Get list of resources on an endpoint. :param str ep: Endpoint to get the resources of :param bool noResp: Optional - specify no response necessary from endpoint :param bool cacheOnly: Optional - get results from cache on connector, do not wake up endpoint :return: list of resources :rtype: asyncResult """ # load query params if set to other than defaults q = {} result = asyncResult() result.endpoint = ep if noResp or cacheOnly: q['noResp'] = 'true' if noResp == True else 'false' q['cacheOnly'] = 'true' if cacheOnly == True else 'false' # make query self.log.debug("ep = %s, query=%s",ep,q) data = self._getURL("/endpoints/"+ep, query=q) result.fill(data) # check sucess of call if data.status_code == 200: # sucess result.error = False self.log.debug("getResources sucess, status_code = `%s`, content = `%s`", str(data.status_code),data.content) else: # fail result.error = response_codes("get_resources",data.status_code) self.log.debug("getResources failed with error code `%s`" %str(data.status_code)) result.is_done = True return result
0.040956
def _parse_da(self): """Extract font name, size and color from default appearance string (/DA object). Equivalent to 'pdf_parse_default_appearance' function in MuPDF's 'pdf-annot.c'. """ if not self._text_da: return font = "Helv" fsize = 0 col = (0, 0, 0) dat = self._text_da.split() # split on any whitespace for i, item in enumerate(dat): if item == "Tf": font = dat[i - 2][1:] fsize = float(dat[i - 1]) dat[i] = dat[i-1] = dat[i-2] = "" continue if item == "g": # unicolor text col = [(float(dat[i - 1]))] dat[i] = dat[i-1] = "" continue if item == "rg": # RGB colored text col = [float(f) for f in dat[i - 3:i]] dat[i] = dat[i-1] = dat[i-2] = dat[i-3] = "" continue self.text_font = font self.text_fontsize = fsize self.text_color = col self._text_da = " ".join([c for c in dat if c != ""]) return
0.004333
def calc_regenerated(self, lastvotetime):
    '''
    Uses math formula to calculate the amount of steem power
    that would have been regenerated given a certain datetime object
    '''
    delta = datetime.utcnow() - datetime.strptime(lastvotetime, '%Y-%m-%dT%H:%M:%S')
    td = delta.days
    ts = delta.seconds
    tt = (td * 86400) + ts
    return tt * 10000 / 86400 / 5
0.009732
def get_out_degrees(self):
    '''
    API: get_out_degrees(self)
    Description:
    Returns out-degrees of nodes in dictionary format.
    Return:
    Returns a dictionary of node out-degrees. Keys are node names,
    values are corresponding out-degrees.
    '''
    degree = {}
    if self.attr['type'] is not DIRECTED_GRAPH:
        print('This function only works for directed graphs')
        return
    for n in self.get_node_list():
        degree[n] = len(self.get_out_neighbors(n))
    return degree
0.00346
def _indent(x):
    """Indent a string by 4 characters."""
    lines = x.splitlines()
    for i, line in enumerate(lines):
        lines[i] = '    ' + line
    return '\n'.join(lines)
0.005464
def _run_cortex(fastq, indexes, params, out_base, dirs, config): """Run cortex_var run_calls.pl, producing a VCF variant file. """ print(out_base) fastaq_index = "{0}.fastaq_index".format(out_base) se_fastq_index = "{0}.se_fastq".format(out_base) pe_fastq_index = "{0}.pe_fastq".format(out_base) reffasta_index = "{0}.list_ref_fasta".format(out_base) with open(se_fastq_index, "w") as out_handle: out_handle.write(fastq + "\n") with open(pe_fastq_index, "w") as out_handle: out_handle.write("") with open(fastaq_index, "w") as out_handle: out_handle.write("{0}\t{1}\t{2}\t{2}\n".format(params["sample"], se_fastq_index, pe_fastq_index)) with open(reffasta_index, "w") as out_handle: for x in indexes["fasta"]: out_handle.write(x + "\n") os.environ["PERL5LIB"] = "{0}:{1}:{2}".format( os.path.join(dirs["cortex"], "scripts/calling"), os.path.join(dirs["cortex"], "scripts/analyse_variants/bioinf-perl/lib"), os.environ.get("PERL5LIB", "")) kmers = sorted(params["kmers"]) kmer_info = ["--first_kmer", str(kmers[0])] if len(kmers) > 1: kmer_info += ["--last_kmer", str(kmers[-1]), "--kmer_step", str(kmers[1] - kmers[0])] subprocess.check_call(["perl", os.path.join(dirs["cortex"], "scripts", "calling", "run_calls.pl"), "--fastaq_index", fastaq_index, "--auto_cleaning", "yes", "--bc", "yes", "--pd", "yes", "--outdir", os.path.dirname(out_base), "--outvcf", os.path.basename(out_base), "--ploidy", str(config["algorithm"].get("ploidy", 2)), "--stampy_hash", indexes["stampy"], "--stampy_bin", os.path.join(dirs["stampy"], "stampy.py"), "--refbindir", os.path.dirname(indexes["cortex"][0]), "--list_ref_fasta", reffasta_index, "--genome_size", str(params["genome_size"]), "--max_read_len", "30000", #"--max_var_len", "4000", "--format", "FASTQ", "--qthresh", "5", "--do_union", "yes", "--mem_height", "17", "--mem_width", "100", "--ref", "CoordinatesAndInCalling", "--workflow", "independent", "--vcftools_dir", dirs["vcftools"], "--logfile", "{0}.logfile,f".format(out_base)] + kmer_info) final = glob.glob(os.path.join(os.path.dirname(out_base), "vcfs", "{0}*FINALcombined_BC*decomp.vcf".format(os.path.basename(out_base)))) # No calls, need to setup an empty file if len(final) != 1: print("Did not find output VCF file for {0}".format(out_base)) return None else: return final[0]
0.004286
def onMessage(self, payload, is_binary): """ Called when a client sends a message """ if not is_binary: payload = payload.decode('utf-8') logger.debug("Incoming message ({peer}) : {message}".format( peer=self.peer, message=payload)) # Publish ON_RECEIVE message self.factory.mease.publisher.publish( message_type=ON_RECEIVE, client_id=self._client_id, client_storage=self.storage, message=payload)
0.003578
def getInstalledThemes(self, store): """ Collect themes from all offerings installed on this store, or (if called multiple times) return the previously collected list. """ if not store in self._getInstalledThemesCache: self._getInstalledThemesCache[store] = (self. _realGetInstalledThemes(store)) return self._getInstalledThemesCache[store]
0.013363
def base64encode(_input=None):
    """Return base64 encoded representation of a string."""
    if PY2:  # pragma: no cover
        return base64.b64encode(_input)
    elif PY3:  # pragma: no cover
        if isinstance(_input, bytes):
            return base64.b64encode(_input).decode('UTF-8')
        elif isinstance(_input, str):
            return base64.b64encode(bytearray(_input, encoding='UTF-8')).decode('UTF-8')
0.004751
def docker(klass, container_id, shell, script, interval, deregister=None):
    """
    Invoke *script* packaged within a running docker container with
    *container_id* at a specified *interval* on the configured
    *shell* using the Docker Exec API.  Optional *deregister* after which a
    failing service will be automatically deregistered.
    """
    ret = {
        'docker_container_id': container_id,
        'shell': shell,
        'script': script,
        'interval': interval
    }
    if deregister:
        ret['DeregisterCriticalServiceAfter'] = deregister
    return ret
0.003096
def concepts(self):
    """ Return all existing concepts, i.e. dimensions, measures and
    attributes within the model. """
    for measure in self.measures:
        yield measure
    for aggregate in self.aggregates:
        yield aggregate
    for dimension in self.dimensions:
        yield dimension
        for attribute in dimension.attributes:
            yield attribute
0.004773
def createGroup(self, group, vendorSpecific=None):
    """See Also: createGroupResponse()

    Args:
      group:
      vendorSpecific:

    Returns:
    """
    response = self.createGroupResponse(group, vendorSpecific)
    return self._read_boolean_response(response)
0.006623
def gen_stm(src, dst):
    """Return a STM instruction.
    """
    return ReilBuilder.build(ReilMnemonic.STM, src, ReilEmptyOperand(), dst)
0.019737
def _get_LMv2_response(user_name, password, domain_name, server_challenge, client_challenge): """ [MS-NLMP] v28.0 2016-07-14 2.2.2.4 LMv2_RESPONSE The LMv2_RESPONSE structure defines the NTLM v2 authentication LmChallengeResponse in the AUTHENTICATE_MESSAGE. This response is used only when NTLM v2 authentication is configured. :param user_name: The user name of the user we are trying to authenticate with :param password: The password of the user we are trying to authenticate with :param domain_name: The domain name of the user account we are authenticated with :param server_challenge: A random 8-byte response generated by the server in the CHALLENGE_MESSAGE :param client_challenge: A random 8-byte response generated by the client for the AUTHENTICATE_MESSAGE :return response: LmChallengeResponse to the server challenge """ nt_hash = comphash._ntowfv2(user_name, password, domain_name) lm_hash = hmac.new(nt_hash, (server_challenge + client_challenge)).digest() response = lm_hash + client_challenge return response
0.008584
def _get_logger(self, handler): ''' Initialize a PCAP stream for logging data ''' log_file = self._get_log_file(handler) if not os.path.isdir(os.path.dirname(log_file)): os.makedirs(os.path.dirname(log_file)) handler['log_rot_time'] = time.gmtime() return pcap.open(log_file, mode='a')
0.0059
def active_element(self): """ Returns the element with focus, or BODY if nothing has focus. :Usage: :: element = driver.switch_to.active_element """ if self._driver.w3c: return self._driver.execute(Command.W3C_GET_ACTIVE_ELEMENT)['value'] else: return self._driver.execute(Command.GET_ACTIVE_ELEMENT)['value']
0.007299
def open(self, file, mode='r', perm=0o0644): """ Opens a file on the node :param file: file path to open :param mode: open mode :param perm: file permission in octet form mode: 'r' read only 'w' write only (truncate) '+' read/write 'x' create if not exist 'a' append :return: a file descriptor """ args = { 'file': file, 'mode': mode, 'perm': perm, } return self._client.json('filesystem.open', args)
0.003466
def cwd(self):
    """
    Return a UNIX FS type string of the current working 'directory'.
    """
    l_cwd = self.l_cwd[:]
    str_cwd = '/'.join(l_cwd)
    if len(str_cwd) > 1:
        str_cwd = str_cwd[1:]
    return str_cwd
0.022013
def process_messages_loop_internal(self):
    """
    Busy loop that processes incoming WorkRequest messages via functions
    specified by add_command.
    Terminates if a command runs shutdown method
    """
    logging.info("Starting work queue loop.")
    self.connection.receive_loop_with_callback(self.queue_name, self.process_message)
0.01108
def tempogram(y=None, sr=22050, onset_envelope=None, hop_length=512, win_length=384, center=True, window='hann', norm=np.inf): '''Compute the tempogram: local autocorrelation of the onset strength envelope. [1]_ .. [1] Grosche, Peter, Meinard Müller, and Frank Kurth. "Cyclic tempogram - A mid-level tempo representation for music signals." ICASSP, 2010. Parameters ---------- y : np.ndarray [shape=(n,)] or None Audio time series. sr : number > 0 [scalar] sampling rate of `y` onset_envelope : np.ndarray [shape=(n,) or (m, n)] or None Optional pre-computed onset strength envelope as provided by `onset.onset_strength`. If multi-dimensional, tempograms are computed independently for each band (first dimension). hop_length : int > 0 number of audio samples between successive onset measurements win_length : int > 0 length of the onset autocorrelation window (in frames/onset measurements) The default settings (384) corresponds to `384 * hop_length / sr ~= 8.9s`. center : bool If `True`, onset autocorrelation windows are centered. If `False`, windows are left-aligned. window : string, function, number, tuple, or np.ndarray [shape=(win_length,)] A window specification as in `core.stft`. norm : {np.inf, -np.inf, 0, float > 0, None} Normalization mode. Set to `None` to disable normalization. Returns ------- tempogram : np.ndarray [shape=(win_length, n) or (m, win_length, n)] Localized autocorrelation of the onset strength envelope. If given multi-band input (`onset_envelope.shape==(m,n)`) then `tempogram[i]` is the tempogram of `onset_envelope[i]`. Raises ------ ParameterError if neither `y` nor `onset_envelope` are provided if `win_length < 1` See Also -------- librosa.onset.onset_strength librosa.util.normalize librosa.core.stft Examples -------- >>> # Compute local onset autocorrelation >>> y, sr = librosa.load(librosa.util.example_audio_file()) >>> hop_length = 512 >>> oenv = librosa.onset.onset_strength(y=y, sr=sr, hop_length=hop_length) >>> tempogram = librosa.feature.tempogram(onset_envelope=oenv, sr=sr, ... hop_length=hop_length) >>> # Compute global onset autocorrelation >>> ac_global = librosa.autocorrelate(oenv, max_size=tempogram.shape[0]) >>> ac_global = librosa.util.normalize(ac_global) >>> # Estimate the global tempo for display purposes >>> tempo = librosa.beat.tempo(onset_envelope=oenv, sr=sr, ... hop_length=hop_length)[0] >>> import matplotlib.pyplot as plt >>> plt.figure(figsize=(8, 8)) >>> plt.subplot(4, 1, 1) >>> plt.plot(oenv, label='Onset strength') >>> plt.xticks([]) >>> plt.legend(frameon=True) >>> plt.axis('tight') >>> plt.subplot(4, 1, 2) >>> # We'll truncate the display to a narrower range of tempi >>> librosa.display.specshow(tempogram, sr=sr, hop_length=hop_length, >>> x_axis='time', y_axis='tempo') >>> plt.axhline(tempo, color='w', linestyle='--', alpha=1, ... label='Estimated tempo={:g}'.format(tempo)) >>> plt.legend(frameon=True, framealpha=0.75) >>> plt.subplot(4, 1, 3) >>> x = np.linspace(0, tempogram.shape[0] * float(hop_length) / sr, ... num=tempogram.shape[0]) >>> plt.plot(x, np.mean(tempogram, axis=1), label='Mean local autocorrelation') >>> plt.plot(x, ac_global, '--', alpha=0.75, label='Global autocorrelation') >>> plt.xlabel('Lag (seconds)') >>> plt.axis('tight') >>> plt.legend(frameon=True) >>> plt.subplot(4,1,4) >>> # We can also plot on a BPM axis >>> freqs = librosa.tempo_frequencies(tempogram.shape[0], hop_length=hop_length, sr=sr) >>> plt.semilogx(freqs[1:], np.mean(tempogram[1:], axis=1), ... 
label='Mean local autocorrelation', basex=2) >>> plt.semilogx(freqs[1:], ac_global[1:], '--', alpha=0.75, ... label='Global autocorrelation', basex=2) >>> plt.axvline(tempo, color='black', linestyle='--', alpha=.8, ... label='Estimated tempo={:g}'.format(tempo)) >>> plt.legend(frameon=True) >>> plt.xlabel('BPM') >>> plt.axis('tight') >>> plt.grid() >>> plt.tight_layout() ''' from ..onset import onset_strength if win_length < 1: raise ParameterError('win_length must be a positive integer') ac_window = get_window(window, win_length, fftbins=True) if onset_envelope is None: if y is None: raise ParameterError('Either y or onset_envelope must be provided') onset_envelope = onset_strength(y=y, sr=sr, hop_length=hop_length) else: # Force row-contiguity to avoid framing errors below onset_envelope = np.ascontiguousarray(onset_envelope) if onset_envelope.ndim > 1: # If we have multi-band input, iterate over rows return np.asarray([tempogram(onset_envelope=oe_subband, hop_length=hop_length, win_length=win_length, center=center, window=window, norm=norm) for oe_subband in onset_envelope]) # Center the autocorrelation windows n = len(onset_envelope) if center: onset_envelope = np.pad(onset_envelope, int(win_length // 2), mode='linear_ramp', end_values=[0, 0]) # Carve onset envelope into frames odf_frame = util.frame(onset_envelope, frame_length=win_length, hop_length=1) # Truncate to the length of the original signal if center: odf_frame = odf_frame[:, :n] # Window, autocorrelate, and normalize return util.normalize(autocorrelate(odf_frame * ac_window[:, np.newaxis], axis=0), norm=norm, axis=0)
0.001603
def wrap_str(self, s=None, wrapper=None):
    """ Wrap a string in self.wrapper, with some extra handling for
    empty/None strings.
    If `wrapper` is set, use it instead.
    """
    wrapper = wrapper or (self.wrapper or ('', ''))
    return str('' if s is None else s).join(wrapper)
0.00627
def fetch_data(self, url):
    '''
    Fetches data from specific url.

    :return: The response.
    :rtype: dict
    '''
    return self.http._post_data(url, None, self.http._headers_with_access_token())
0.017316
def _reaction_po_to_dict(tokens) -> Reaction:
    """Convert a reaction parse object to a DSL.

    :type tokens: ParseResult
    """
    return Reaction(
        reactants=_reaction_part_po_to_dict(tokens[REACTANTS]),
        products=_reaction_part_po_to_dict(tokens[PRODUCTS]),
    )
0.003497
def effective_genome_size(fasta, read_length, nb_cores, tmpdir="/tmp"): # type: (str, int, int, str) -> None """Compute effective genome size for genome.""" idx = Fasta(fasta) genome_length = sum([len(c) for c in idx]) logging.info("Temporary directory: " + tmpdir) logging.info("File analyzed: " + fasta) logging.info("Genome length: " + str(genome_length)) print("File analyzed: ", fasta) print("Genome length: ", genome_length) chromosomes = ", ".join([c.name for c in idx]) if "_" in chromosomes: print("Warning. The following chromosomes are part of your genome:\n", chromosomes.replace(">", "") + "\n", file=sys.stderr) print( "You probably want to remove all chromosomes in your fasta containing '_' for the effective genome size computation to be accurate.", file=sys.stderr) # if tmpdir is None: # try: # tmpdir = os.environ['TMPDIR'] # except KeyError: # tmpdir = '/tmp' output_file = os.path.join(tmpdir, '{1}.jf'.format(read_length, basename(fasta))) atexit.register( lambda: call("rm {output_file}".format(output_file=output_file), shell=True)) call( "jellyfish count -t {nb_cores} -m {read_length} -s {genome_length} -L 1 -U 1 --out-counter-len 1 --counter-len 1 {fasta} -o {output_file}".format( **vars()), shell=True) stats = check_output("jellyfish stats {output_file}".format( output_file=output_file), shell=True) unique_kmers = int(stats.split()[1]) effective_genome_size = unique_kmers / genome_length logging.info("Number unique {read_length}-mers: ".format( read_length=read_length) + str(unique_kmers)) logging.info("Effective genome size: " + str(effective_genome_size)) print("Number unique {read_length}-mers: ".format(read_length=read_length), unique_kmers) print("Effective genome size: ", effective_genome_size) assert effective_genome_size < 1, "Something wrong happened, effective genome size over 1!"
0.002287
def _parse_authors(details): """ Parse authors of the book. Args: details (obj): HTMLElement containing slice of the page with details. Returns: list: List of :class:`structures.Author` objects. Blank if no author \ found. """ authors = details.find( "tr", {"id": "ctl00_ContentPlaceHolder1_tblRowAutor"} ) if not authors: return [] # book with unspecified authors # parse authors from HTML and convert them to Author objects author_list = [] for author in authors[0].find("a"): author_obj = Author(author.getContent()) if "href" in author.params: author_obj.URL = author.params["href"] author_list.append(author_obj) return author_list
0.001279
def generate_span_requests(self, span_datas):
    """Span request generator.

    :type span_datas: list of
        :class:`~opencensus.trace.span_data.SpanData`
    :param span_datas: SpanData tuples to convert to protobuf spans
        and send to opensensusd agent

    :rtype: list of
        `~gen.opencensus.agent.trace.v1.trace_service_pb2.ExportTraceServiceRequest`
    :returns: List of span export requests.
    """
    pb_spans = [
        utils.translate_to_trace_proto(span_data)
        for span_data in span_datas
    ]

    # TODO: send node once per channel
    yield trace_service_pb2.ExportTraceServiceRequest(
        node=self.node,
        spans=pb_spans)
0.002571
def append_some(ol, *eles, **kwargs):
    '''
    from elist.elist import *
    ol = [1,2,3,4]
    id(ol)
    append_some(ol,5,6,7,8,mode="original")
    ol
    id(ol)
    ####
    ol = [1,2,3,4]
    id(ol)
    new = append_some(ol,5,6,7,8)
    new
    id(new)
    '''
    if('mode' in kwargs):
        mode = kwargs["mode"]
    else:
        mode = "new"
    return(extend(ol, list(eles), mode=mode))
0.011236
def _connect_model(self, model): """ Used internally to connect the property into the model, and register self as a value observer for that property""" parts = self._prop_name.split(".") if len(parts) > 1: # identifies the model models = parts[:-1] Intermediate(model, models, self) for name in models: model = getattr(model, name) if not isinstance(model, Model): raise TypeError("Attribute '" + name + "' was expected to be a Model, but found: " + str(model)) prop = parts[-1] else: prop = parts[0] # prop is inside model? if not hasattr(model, prop): raise ValueError("Attribute '" + prop + "' not found in model " + str(model)) # is it observable? if model.has_property(prop): # we need to create an observing method before registering meth = types.MethodType(self._get_observer_fun(prop), self) setattr(self, meth.__name__, meth) self._prop = getattr(model, prop) self._prop_name = prop # registration of model: self._model = model self.observe_model(model)
0.002967
def gen403(request, baseURI, reason, project=None): """Return a 403 error""" orgas = None public_ask = False if not settings.PIAPI_STANDALONE: from organizations.models import Organization if project and project.plugItLimitOrgaJoinable: orgas = project.plugItOrgaJoinable.order_by('name').all() else: orgas = Organization.objects.order_by('name').all() rorgas = [] # Find and exclude the visitor orga for o in orgas: if str(o.pk) == settings.VISITOR_ORGA_PK: public_ask = True else: rorgas.append(o) orgas = rorgas return HttpResponseForbidden(render_to_response('plugIt/403.html', {'context': { 'reason': reason, 'orgas': orgas, 'public_ask': public_ask, 'ebuio_baseUrl': baseURI, 'ebuio_userMode': request.session.get('plugit-standalone-usermode', 'ano'), }, 'project': project }, context_instance=RequestContext(request)))
0.003721
def find(self, vid=None, pid=None, serial=None, interface=None, \ path=None, release_number=None, manufacturer=None, product=None, usage=None, usage_page=None): """ Attempts to open a device in this `Enumeration` object. Optional arguments can be provided to filter the resulting list based on various parameters of the HID devices. Args: vid: filters by USB Vendor ID pid: filters by USB Product ID serial: filters by USB serial string (.iSerialNumber) interface: filters by interface number (bInterfaceNumber) release_number: filters by the USB release number (.bcdDevice) manufacturer: filters by USB manufacturer string (.iManufacturer) product: filters by USB product string (.iProduct) usage: filters by HID usage usage_page: filters by HID usage_page path: filters by HID API path. """ result = [] for dev in self.device_list: if vid not in [0, None] and dev.vendor_id != vid: continue if pid not in [0, None] and dev.product_id != pid: continue if serial and dev.serial_number != serial: continue if path and dev.path != path: continue if manufacturer and dev.manufacturer_string != manufacturer: continue if product and dev.product_string != product: continue if release_number != None and dev.release_number != release_number: continue if interface != None and dev.interface_number != interface: continue if usage != None and dev.usage != usage: continue if usage_page != None and dev.usage_page != usage_page: continue result.append(dev) return result
0.004564
def edit_wiki_page(self, subreddit, page, content, reason=''):
    """Create or edit a wiki page with title `page` for `subreddit`.

    :returns: The json response from the server.
    """
    data = {'content': content,
            'page': page,
            'r': six.text_type(subreddit),
            'reason': reason}
    evict = self.config['wiki_page'].format(
        subreddit=six.text_type(subreddit), page=page.lower())
    self.evict(evict)
    return self.request_json(self.config['wiki_edit'], data=data)
0.003565
def rouge_n(eval_sentences, ref_sentences, n=2): """Computes ROUGE-N f1 score of two text collections of sentences. Source: https://www.microsoft.com/en-us/research/publication/ rouge-a-package-for-automatic-evaluation-of-summaries/ Args: eval_sentences: Predicted sentences. ref_sentences: Sentences from the reference set n: Size of ngram. Defaults to 2. Returns: f1 score for ROUGE-N """ f1_scores = [] for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences): eval_ngrams = _get_ngrams(n, eval_sentence) ref_ngrams = _get_ngrams(n, ref_sentence) ref_count = len(ref_ngrams) eval_count = len(eval_ngrams) # Count the overlapping ngrams between evaluated and reference overlapping_ngrams = eval_ngrams.intersection(ref_ngrams) overlapping_count = len(overlapping_ngrams) # Handle edge case. This isn't mathematically correct, but it's good enough if eval_count == 0: precision = 0.0 else: precision = float(overlapping_count) / eval_count if ref_count == 0: recall = 0.0 else: recall = float(overlapping_count) / ref_count f1_scores.append(2.0 * ((precision * recall) / (precision + recall + 1e-8))) # return overlapping_count / reference_count return np.mean(f1_scores, dtype=np.float32)
0.008321
def add_state_machine(widget, event=None):
    """Create a new state-machine when the user clicks on the '+' next to the tabs"""
    logger.debug("Creating new state-machine...")
    root_state = HierarchyState("new root state")
    state_machine = StateMachine(root_state)
    rafcon.core.singleton.state_machine_manager.add_state_machine(state_machine)
0.008475
def numConnects(self, layerName):
    """
    Number of incoming weights, including bias. Assumes fully connected.
    """
    count = 0
    if self[layerName].active:
        count += 1  # 1 = bias
    for connection in self.connections:
        if connection.active and connection.fromLayer.active and connection.toLayer.name == layerName:
            count += connection.fromLayer.size
    return count
0.013605
def report_accounts(self, path, per_region=True, per_capita=False, pic_size=1000, format='rst', **kwargs):
    """ Generates a report to the given path for all extensions

    This method calls .report_accounts for all extensions

    Notes
    -----
        This looks prettier with the seaborn module
        (import seaborn before calling this method)

    Parameters
    ----------
    path : string
        Root path for the report
    per_region : boolean, optional
        If true, reports the accounts per region
    per_capita : boolean, optional
        If true, reports the accounts per capita
        If per_capita and per_region are False, nothing will be done
    pic_size : int, optional
        size for the figures in px, 1000 by default
    format : string, optional
        file format of the report:
        'rst'(default), 'html', 'latex', ...
        except for rst all depend on the module docutils (all writer_name
        from docutils can be used as format)
    ffname : string, optional
        root file name (without extension, per_capita or per_region will be
        attached) and folder names. If None gets passed (default), self.name
        will be modified to get a valid name for the operating system
        without blanks
    **kwargs : key word arguments, optional
        This will be passed directly to the pd.DataFrame.plot method
        (through the self.plot_account method)

    """
    for ext in self.get_extensions(data=True):
        ext.report_accounts(path=path,
                            per_region=per_region,
                            per_capita=per_capita,
                            pic_size=pic_size,
                            format=format,
                            **kwargs)
0.002059
def _search_pn(self, href=None, limit=None, embed_items=None, embed_tracks=None, embed_metadata=None, embed_insights=None): """Function called to retrieve pages 2-n.""" url_components = urlparse(href) path = url_components.path data = parse_qs(url_components.query) # Change all lists into discrete values. for key in data.keys(): data[key] = data[key][0] # Deal with limit overriding. if limit is not None: data['limit'] = limit # Deal with embeds overriding. final_embed = helper.process_embed_override(data.get('embed'), embed_items, embed_tracks, embed_metadata, embed_insights) if final_embed is not None: data['embed'] = final_embed raw_result = self.get(path, data) if raw_result.status < 200 or raw_result.status > 202: raise APIException(raw_result.status, raw_result.json) else: result = raw_result.json return result
0.003175
def Close(self): """Closes the database file object. Raises: IOError: if the close failed. OSError: if the close failed. """ if self._connection: self._cursor = None self._connection.close() self._connection = None # TODO: move this to a central temp file manager and have it track errors. # https://github.com/log2timeline/dfvfs/issues/92 try: os.remove(self._temp_file_path) except (IOError, OSError): pass self._temp_file_path = ''
0.01165
def synthesize_property(property_name, default = None, contract = None, read_only = False, private_member_name = None):
    """
    When applied to a class, this decorator adds a property to it and overrides the constructor in order to set
    the default value of the property.

    :IMPORTANT: In order for this to work on python 2, you must use new-style classes, that is to say the class must inherit from ``object``.

    By default, the private attribute containing the property's value will be named ``property_name`` with '_' prepended to it.
    Naming convention can be overridden with a custom one using :meth:`naming_convention <naming_convention>` decorator.

    :param property_name: Name of the property to synthesize.
    :type property_name: str
    :param default: Property's default value.
    :type default: *
    :param contract: Type constraint. See `PyContracts <http://andreacensi.github.com/contracts/>`_
    :type contract: *
    :param read_only: If set to ``True``, the property will not have a setter.
    :type read_only: bool
    :param private_member_name: Custom name for the private attribute that contains the property's value.
    :type private_member_name: str|None
    :raises: :class:`DuplicateMemberNameError` when two synthetic members have the same name.
    :raises: :class:`InvalidPropertyOverrideError` when there's already a member with that name and which is not a property.
    """
    return SyntheticDecoratorFactory().syntheticMemberDecorator(memberName = property_name,
                                                                defaultValue = default,
                                                                contract = contract,
                                                                readOnly = read_only,
                                                                privateMemberName = private_member_name,
                                                                memberDelegate = PropertyDelegate())
0.018217
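A hypothetical usage sketch; the import path and package name are assumptions, since only the decorator itself is shown above::

    # Assumed import; the actual module exposing synthesize_property may differ.
    from synthetic import synthesize_property

    @synthesize_property('speed', default=0)
    class Car(object):
        pass

    car = Car()
    print(car.speed)  # 0, the synthesized default
    car.speed = 120   # a setter exists because read_only defaults to False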
def c2u(name): """Convert camelCase (used in PHP) to Python-standard snake_case. Src: https://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-snake-case Parameters ---------- name: A function or variable name in camelCase Returns ------- str: The name in snake_case """ s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name) s1 = re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() return s1
0.00211
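A runnable standalone version of the same conversion, with a couple of illustrative inputs::

    import re

    def c2u(name):
        # Two-pass regex conversion, identical to the function above.
        s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
        return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()

    print(c2u("getHTTPResponseCode"))  # get_http_response_code
    print(c2u("camelCaseName"))        # camel_case_name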
def restricted_brands(self): """ | Comment: ids of all brands that this ticket form is restricted to """ if self.api and self.restricted_brand_ids: return self.api._get_restricted_brands(self.restricted_brand_ids)
0.007752
def return_params(islitlet, csu_bar_slit_center, params, parmodel): """Return individual model parameters from object of type Parameters. Parameters ---------- islitlet : int Number of slitlet. csu_bar_slit_center : float CSU bar slit center, in mm. params : :class:`~lmfit.parameter.Parameters` Parameters to be employed in the prediction of the distorted boundaries. parmodel : str Model to be assumed. Allowed values are 'longslit' and 'multislit'. Returns ------- c2 : float Coefficient corresponding to the term r**2 in distortion equation. c4 : float Coefficient corresponding to the term r**4 in distortion equation. ff : float Scaling factor to be applied to the Y axis. slit_gap : float Slit gap. slit_height : float Slit height. theta0 : float Additional rotation angle (radians). x0 : float X coordinate of reference pixel. y0 : float Y coordinate of reference pixel. y_baseline : float Y coordinate employed as baseline. """ if parmodel == "longslit": # set each variable in EXPECTED_PARAMETER_LIST to the value # transferred through 'params' c2 = params['c2'].value c4 = params['c4'].value ff = params['ff'].value slit_gap = params['slit_gap'].value slit_height = params['slit_height'].value theta0_origin = params['theta0_origin'].value theta0_slope = params['theta0_slope'].value x0 = params['x0'].value y0 = params['y0'].value y_baseline = params['y_baseline'].value else: # set each variable in EXPECTED_PARAMETER_LIST_EXTENDED to the value # transferred through 'params' c2_a0s = params['c2_a0s'].value c2_a1s = params['c2_a1s'].value / 1E3 c2_a2s = params['c2_a2s'].value / 1E6 c2 = c2_a0s + \ c2_a1s * csu_bar_slit_center + \ c2_a2s * csu_bar_slit_center ** 2 # --- c4_a0s = params['c4_a0s'].value c4_a1s = params['c4_a1s'].value / 1E3 c4_a2s = params['c4_a2s'].value / 1E6 c4 = c4_a0s + \ c4_a1s * csu_bar_slit_center + \ c4_a2s * csu_bar_slit_center ** 2 # --- ff_a0s = params['ff_a0s'].value ff_a1s = params['ff_a1s'].value / 1E3 ff_a2s = params['ff_a2s'].value / 1E6 ff = ff_a0s + \ ff_a1s * csu_bar_slit_center + \ ff_a2s * csu_bar_slit_center ** 2 # --- slit_gap_a0s = params['slit_gap_a0s'].value slit_gap_a1s = params['slit_gap_a1s'].value / 1E3 slit_gap_a2s = params['slit_gap_a2s'].value / 1E6 slit_gap = slit_gap_a0s + \ slit_gap_a1s * csu_bar_slit_center + \ slit_gap_a2s * csu_bar_slit_center ** 2 # --- slit_height_a0s = params['slit_height_a0s'].value slit_height_a1s = params['slit_height_a1s'].value / 1E3 slit_height_a2s = params['slit_height_a2s'].value / 1E6 slit_height = slit_height_a0s + \ slit_height_a1s * csu_bar_slit_center + \ slit_height_a2s * csu_bar_slit_center ** 2 # --- theta0_origin_a0s = params['theta0_origin_a0s'].value theta0_origin_a1s = params['theta0_origin_a1s'].value / 1E3 theta0_origin_a2s = params['theta0_origin_a2s'].value / 1E6 theta0_origin = theta0_origin_a0s + \ theta0_origin_a1s * csu_bar_slit_center + \ theta0_origin_a2s * csu_bar_slit_center ** 2 # --- theta0_slope_a0s = params['theta0_slope_a0s'].value theta0_slope_a1s = params['theta0_slope_a1s'].value / 1E3 theta0_slope_a2s = params['theta0_slope_a2s'].value / 1E6 theta0_slope = theta0_slope_a0s + \ theta0_slope_a1s * csu_bar_slit_center + \ theta0_slope_a2s * csu_bar_slit_center ** 2 # --- x0_a0s = params['x0_a0s'].value x0_a1s = params['x0_a1s'].value / 1E3 x0_a2s = params['x0_a2s'].value / 1E6 x0 = x0_a0s + \ x0_a1s * csu_bar_slit_center + \ x0_a2s * csu_bar_slit_center ** 2 # --- y0_a0s = params['y0_a0s'].value y0_a1s = params['y0_a1s'].value / 1E3 y0_a2s = params['y0_a2s'].value / 1E6 y0 = y0_a0s + \ 
y0_a1s * csu_bar_slit_center + \ y0_a2s * csu_bar_slit_center ** 2 # --- y_baseline_a0s = params['y_baseline_a0s'].value y_baseline_a1s = params['y_baseline_a1s'].value / 1E3 y_baseline_a2s = params['y_baseline_a2s'].value / 1E6 y_baseline = y_baseline_a0s + \ y_baseline_a1s * csu_bar_slit_center + \ y_baseline_a2s * csu_bar_slit_center ** 2 theta0 = theta0_origin / 1E3 + theta0_slope / 1E4 * islitlet return c2, c4, ff, slit_gap, slit_height, theta0, x0, y0, y_baseline
0.00411
def keyPressEvent(self, ev):
    """Stop editing if Enter is pressed; cancel editing on Escape"""
    if ev.key() in (Qt.Key_Enter, Qt.Key_Return):
        self._startOrStopEditing()
    elif ev.key() == Qt.Key_Escape:
        self._cancelEditing()
    else:
        Kittens.widgets.ClickableTreeWidget.keyPressEvent(self, ev)
0.006098
def safe_unicode(self, buf):
    """ Safely return a unicode-encoded string """
    tmp = ""
    buf = "".join(b for b in buf)
    for character in buf:
        tmp += character
    return tmp
0.008584
def _parse_header(cls, header_proto, resource): """Deserializes a resource's base64 encoded Protobuf header. """ header = header_proto() try: header_bytes = base64.b64decode(resource['header']) header.ParseFromString(header_bytes) except (KeyError, TypeError, ValueError, DecodeError): header = resource.get('header', None) LOGGER.error( 'The validator sent a resource with %s %s', 'a missing header' if header is None else 'an invalid header:', header or '') raise errors.ResourceHeaderInvalid() resource['header'] = cls._message_to_dict(header) return resource
0.002751
def htmlNewDoc(URI, ExternalID):
    """Creates a new HTML document """
    ret = libxml2mod.htmlNewDoc(URI, ExternalID)
    if ret is None:
        raise treeError('htmlNewDoc() failed')
    return xmlDoc(_obj=ret)
0.014563
def add_task(self, keywords, context, rule): """Map a function to a list of keywords Parameters ---------- keywords : iterable of str sequence of strings which should trigger the given function context : Context A Context object created using desired function rule : tuple A tuple of integers, which act as relative indices using which data is extracted to be passed to the function passed via context. """ for keyword in keywords: self._tasks[keyword] = {'context': context, 'rule': rule}
0.003257
def plot_origin(self): # TODO add attribute option to color vectors """ Plot vectors of positional transition of LISA values starting from the same origin. """ import matplotlib.cm as cm import matplotlib.pyplot as plt ax = plt.subplot(111) xlim = [self._dx.min(), self._dx.max()] ylim = [self._dy.min(), self._dy.max()] for x, y in zip(self._dx, self._dy): xs = [0, x] ys = [0, y] plt.plot(xs, ys, '-b') # TODO change this to scale with attribute plt.axis('equal') plt.xlim(xlim) plt.ylim(ylim)
0.00314
def remove_prefix(self, prefix): """Remove network prefix. """ self._req('prefix remove %s' % prefix) time.sleep(1) self._req('netdataregister')
0.01087
def put_versioning(Bucket, Status, MFADelete=None, MFA=None, region=None, key=None, keyid=None, profile=None): ''' Given a valid config, update the versioning configuration for a bucket. Returns {updated: true} if versioning configuration was updated and returns {updated: False} if versioning configuration was not updated. CLI Example: .. code-block:: bash salt myminion boto_s3_bucket.put_versioning my_bucket Enabled ''' try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) VersioningConfiguration = {'Status': Status} if MFADelete is not None: VersioningConfiguration['MFADelete'] = MFADelete kwargs = {} if MFA is not None: kwargs['MFA'] = MFA conn.put_bucket_versioning(Bucket=Bucket, VersioningConfiguration=VersioningConfiguration, **kwargs) return {'updated': True, 'name': Bucket} except ClientError as e: return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
0.003676
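For reference, the underlying boto3 call that the wrapper above assembles can be sketched directly; the bucket name is a placeholder and a configured AWS environment is assumed::

    import boto3

    # Assumes credentials are available via the usual boto3 mechanisms.
    s3 = boto3.client('s3', region_name='us-east-1')
    s3.put_bucket_versioning(
        Bucket='my-example-bucket',                    # placeholder bucket name
        VersioningConfiguration={'Status': 'Enabled'}  # or 'Suspended'
    )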
def make_tarball(base_name, base_dir, compress='gzip', verbose=False, dry_run=False): """Create a tar file from all the files under 'base_dir'. This file may be compressed. :param compress: Compression algorithms. Supported algorithms are: 'gzip': (the default) 'compress' 'bzip2' None For 'gzip' and 'bzip2' the internal tarfile module will be used. For 'compress' the .tar will be created using tarfile, and then we will spawn 'compress' afterwards. The output tar file will be named 'base_name' + ".tar", possibly plus the appropriate compression extension (".gz", ".bz2" or ".Z"). Return the output filename. """ # XXX GNU tar 1.13 has a nifty option to add a prefix directory. # It's pretty new, though, so we certainly can't require it -- # but it would be nice to take advantage of it to skip the # "create a tree of hardlinks" step! (Would also be nice to # detect GNU tar to use its 'z' option and save a step.) compress_ext = { 'gzip': ".gz", 'bzip2': '.bz2', 'compress': ".Z" } # flags for compression program, each element of list will be an argument tarfile_compress_flag = {'gzip':'gz', 'bzip2':'bz2'} compress_flags = {'compress': ["-f"]} if compress is not None and compress not in compress_ext.keys(): raise ValueError("bad value for 'compress': must be None, 'gzip'," "'bzip2' or 'compress'") archive_name = base_name + ".tar" if compress and compress in tarfile_compress_flag: archive_name += compress_ext[compress] mode = 'w:' + tarfile_compress_flag.get(compress, '') mkpath(os.path.dirname(archive_name), dry_run=dry_run) log.info('Creating tar file %s with mode %s' % (archive_name, mode)) if not dry_run: tar = tarfile.open(archive_name, mode=mode) # This recursively adds everything underneath base_dir tar.add(base_dir) tar.close() if compress and compress not in tarfile_compress_flag: spawn([compress] + compress_flags[compress] + [archive_name], dry_run=dry_run) return archive_name + compress_ext[compress] else: return archive_name
0.002622
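A minimal standalone equivalent of the gzip branch above, using only the tarfile module on a throwaway directory::

    import os
    import tarfile
    import tempfile

    base_dir = tempfile.mkdtemp()
    with open(os.path.join(base_dir, "hello.txt"), "w") as fh:
        fh.write("hi")

    archive_name = base_dir + ".tar.gz"
    with tarfile.open(archive_name, mode="w:gz") as tar:
        # Recursively adds everything underneath base_dir, like the code above.
        tar.add(base_dir, arcname=os.path.basename(base_dir))
    print(archive_name)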
def f_get_all(self, name, max_depth=None, shortcuts=True):
    """ Searches for all occurrences of `name` under this node.

    Links are NOT considered since nodes are searched bottom up in the tree.

    :param name:
        Name of what to look for, can be separated by colons, i.e.
        ``'mygroupA.mygroupB.myparam'``.

    :param max_depth:
        Maximum search depth relative to start node.
        `None` for no limit.

    :param shortcuts:
        If shortcuts are allowed; otherwise the stated name defines a consecutive name.
        For instance, ``'mygroupA.mygroupB.myparam'`` would also find
        ``mygroupA.mygroupX.mygroupB.mygroupY.myparam`` if shortcuts are allowed,
        otherwise not.

    :return:
        List of nodes that match the name, empty list if nothing was found.

    """
    return self._nn_interface._get_all(self, name, max_depth=max_depth, shortcuts=shortcuts)
0.005865
def visit_arg(self, node, parent): """visit an arg node by returning a fresh AssName instance""" return self.visit_assignname(node, parent, node.arg)
0.012121
def _reduce_input(self, inputs, reducer, final=NotImplemented): """ Iterate over input, collect values with the same key, and call the reducer for each unique key. """ for key, values in groupby(inputs, key=lambda x: self.internal_serialize(x[0])): for output in reducer(self.deserialize(key), (v[1] for v in values)): yield output if final != NotImplemented: for output in final(): yield output self._flush_batch_incr_counter()
0.009398
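The groupby-based reduce pattern above can be shown standalone; the word-count style reducer and the pre-sorted input are made up for illustration::

    from itertools import groupby

    inputs = [("a", 1), ("a", 2), ("b", 5)]  # must already be sorted by key

    def reducer(key, values):
        yield key, sum(values)

    for key, group in groupby(inputs, key=lambda kv: kv[0]):
        for output in reducer(key, (v for _, v in group)):
            print(output)  # ('a', 3) then ('b', 5)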
def vmstats(): ''' .. versionchanged:: 2016.3.2 Return the virtual memory stats for this minion .. versionchanged:: 2016.11.4 Added support for AIX CLI Example: .. code-block:: bash salt '*' status.vmstats ''' def linux_vmstats(): ''' linux specific implementation of vmstats ''' ret = {} try: with salt.utils.files.fopen('/proc/vmstat', 'r') as fp_: stats = salt.utils.stringutils.to_unicode(fp_.read()) except IOError: pass else: for line in stats.splitlines(): if not line: continue comps = line.split() ret[comps[0]] = _number(comps[1]) return ret def generic_vmstats(): ''' generic implementation of vmstats note: works on FreeBSD, SunOS and OpenBSD (possibly others) ''' ret = {} for line in __salt__['cmd.run']('vmstat -s').splitlines(): comps = line.split() if comps[0].isdigit(): ret[' '.join(comps[1:])] = _number(comps[0].strip()) return ret # dict that returns a function that does the right thing per platform get_version = { 'Linux': linux_vmstats, 'FreeBSD': generic_vmstats, 'OpenBSD': generic_vmstats, 'SunOS': generic_vmstats, 'AIX': generic_vmstats, } errmsg = 'This method is unsupported on the current operating system!' return get_version.get(__grains__['kernel'], lambda: errmsg)()
0.000622
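The linux_vmstats branch boils down to parsing /proc/vmstat; a standalone sketch (Linux only) looks like this::

    # Works only on Linux hosts that expose /proc/vmstat.
    stats = {}
    with open('/proc/vmstat') as fp:
        for line in fp:
            parts = line.split()
            if len(parts) == 2 and parts[1].isdigit():
                stats[parts[0]] = int(parts[1])
    print(stats.get('nr_free_pages'))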
def note_delete(self, note_id): """delete a specific note (Requires login) (UNTESTED). Parameters: note_id (int): Where note_id is the note id. """ return self._get('notes/{0}.json'.format(note_id), method='DELETE', auth=True)
0.006757
def filter(self, **kwargs):  # @TODO refactor with models as dicts
    """filter results of dataset
    eg. Query('Posts').filter(post_type='post')
    """
    f_field = next(iter(kwargs))  # keys() is not indexable on Python 3
    f_value = kwargs[f_field]
    _newset = []
    for m in self._dataset:
        if hasattr(m, f_field):
            if getattr(m, f_field) == f_value:
                _newset.append(m)
    self._dataset = _newset
    return self
0.011933
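A minimal stand-in for the attribute filter above; the Post class and the sample data are invented for the example::

    class Post(object):
        def __init__(self, post_type):
            self.post_type = post_type

    dataset = [Post("post"), Post("page"), Post("post")]
    f_field, f_value = "post_type", "post"

    # Same rule as filter(): keep items whose attribute equals the value.
    matched = [m for m in dataset if getattr(m, f_field, None) == f_value]
    print(len(matched))  # 2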
def vagrant(self, name=''): """ Run the following tasks on a vagrant box. First, you need to import this task in your ``fabfile.py``:: from fabric.api import * from burlap.vagrant import vagrant @task def some_task(): run('echo hello') Then you can easily run tasks on your current Vagrant box:: $ fab vagrant some_task """ r = self.local_renderer config = self.ssh_config(name) extra_args = self._settings_dict(config) r.genv.update(extra_args)
0.003333
def send_article_message(self, user_id, articles, kf_account=None):
    """
    Send an article (news) message::

        articles = [
            {
                "title":"Happy Day",
                "description":"Is Really A Happy Day",
                "url":"URL",
                "picurl":"PIC_URL"
            },
            {
                "title":"Happy Day",
                "description":"Is Really A Happy Day",
                "url":"URL",
                "picurl":"PIC_URL"
            }
        ]
        client.send_article_message("user_id", articles)

    :param user_id: User ID, i.e. the ``source`` of the `Message` you received
    :param articles: A list of at most 8 article dicts or `Article` objects
    :param kf_account: Customer service account to send the message from;
                       defaults to None, meaning no specific account
    :return: The JSON data returned by the API
    """
    if isinstance(articles[0], Article):
        formatted_articles = []
        for article in articles:
            result = article.args
            result["picurl"] = result.pop("img")
            formatted_articles.append(result)
    else:
        formatted_articles = articles
    data = {
        "touser": user_id,
        "msgtype": "news",
        "news": {
            "articles": formatted_articles
        }
    }
    if kf_account is not None:
        data['customservice'] = {'kf_account': kf_account}
    return self.post(
        url="https://api.weixin.qq.com/cgi-bin/message/custom/send",
        data=data
    )
0.001292
def add_attachment(self, issue, attachment, filename=None): """Attach an attachment to an issue and returns a Resource for it. The client will *not* attempt to open or validate the attachment; it expects a file-like object to be ready for its use. The user is still responsible for tidying up (e.g., closing the file, killing the socket, etc.) :param issue: the issue to attach the attachment to :type issue: str :param attachment: file-like object to attach to the issue, also works if it is a string with the filename. :type attachment: BufferedReader :param filename: optional name for the attached file. If omitted, the file object's ``name`` attribute is used. If you acquired the file-like object by any other method than ``open()``, make sure that a name is specified in one way or the other. :type filename: str :rtype: Attachment """ if isinstance(attachment, string_types): attachment = open(attachment, "rb") if hasattr(attachment, 'read') and hasattr(attachment, 'mode') and attachment.mode != 'rb': logging.warning( "%s was not opened in 'rb' mode, attaching file may fail." % attachment.name) url = self._get_url('issue/' + str(issue) + '/attachments') fname = filename if not fname: fname = os.path.basename(attachment.name) if 'MultipartEncoder' not in globals(): method = 'old' r = self._session.post( url, files={ 'file': (fname, attachment, 'application/octet-stream')}, headers=CaseInsensitiveDict({'content-type': None, 'X-Atlassian-Token': 'nocheck'})) else: method = 'MultipartEncoder' def file_stream(): """Returns files stream of attachment. :rtype: MultipartEncoder """ return MultipartEncoder( fields={ 'file': (fname, attachment, 'application/octet-stream')}) m = file_stream() r = self._session.post( url, data=m, headers=CaseInsensitiveDict({'content-type': m.content_type, 'X-Atlassian-Token': 'nocheck'}), retry_data=file_stream) js = json_loads(r) if not js or not isinstance(js, collections.Iterable): raise JIRAError("Unable to parse JSON: %s" % js) attachment = Attachment(self._options, self._session, js[0]) if attachment.size == 0: raise JIRAError("Added empty attachment via %s method?!: r: %s\nattachment: %s" % (method, r, attachment)) return attachment
0.004726
def diff(self, other):
    """
    Diff function for Incar. Compares two Incars and indicates which
    parameters are the same and which are not. Useful for checking whether
    two runs were done using the same parameters.

    Args:
        other (Incar): The other Incar object to compare to.

    Returns:
        Dict of the following format:
        {"Same" : parameters_that_are_the_same,
        "Different": parameters_that_are_different}
        Note that the parameters are returned as full dictionaries of values.
        E.g. {"ISIF":3}
    """
    similar_param = {}
    different_param = {}
    for k1, v1 in self.items():
        if k1 not in other:
            different_param[k1] = {"INCAR1": v1, "INCAR2": None}
        elif v1 != other[k1]:
            different_param[k1] = {"INCAR1": v1, "INCAR2": other[k1]}
        else:
            similar_param[k1] = v1
    for k2, v2 in other.items():
        if k2 not in similar_param and k2 not in different_param:
            if k2 not in self:
                different_param[k2] = {"INCAR1": None, "INCAR2": v2}
    return {"Same": similar_param, "Different": different_param}
0.001599
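The same diff logic, sketched on plain dicts so it can be run without pymatgen's Incar class::

    incar1 = {"ISIF": 3, "ENCUT": 520}
    incar2 = {"ISIF": 3, "NSW": 99}

    same = {k: v for k, v in incar1.items() if k in incar2 and incar2[k] == v}
    different = {}
    for k in set(incar1) | set(incar2):
        if incar1.get(k) != incar2.get(k):
            different[k] = {"INCAR1": incar1.get(k), "INCAR2": incar2.get(k)}

    print({"Same": same, "Different": different})
    # (key order may vary)
    # {'Same': {'ISIF': 3},
    #  'Different': {'ENCUT': {'INCAR1': 520, 'INCAR2': None},
    #                'NSW': {'INCAR1': None, 'INCAR2': 99}}}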