Dataset columns:
    text : string (lengths 78 to 104k)
    score : float64 (range 0 to 0.18)
def main():
    """Main entry point for Glances.

    Select the mode (standalone, client or server)
    Run it...
    """
    # Catch the CTRL-C signal
    signal.signal(signal.SIGINT, __signal_handler)

    # Log Glances and psutil version
    logger.info('Start Glances {}'.format(__version__))
    logger.info('{} {} and psutil {} detected'.format(
        platform.python_implementation(),
        platform.python_version(),
        psutil_version))

    # Share global var
    global core

    # Create the Glances main instance
    core = GlancesMain()
    config = core.get_config()
    args = core.get_args()

    # Glances can be run in standalone, client or server mode
    start(config=config, args=args)
0.001397
def connect_to(self, joint, other_body, offset=(0, 0, 0),
               other_offset=(0, 0, 0), **kwargs):
    '''Move another body next to this one and join them together.

    This method will move the ``other_body`` so that the anchor points for
    the joint coincide. It then creates a joint to fasten the two bodies
    together. See :func:`World.move_next_to` and :func:`World.join`.

    Parameters
    ----------
    joint : str
        The type of joint to use when connecting these bodies.
    other_body : :class:`Body` or str
        The other body to join with this one.
    offset : 3-tuple of float, optional
        The body-relative offset where the anchor for the joint should be
        placed. Defaults to (0, 0, 0). See :func:`World.move_next_to` for a
        description of how offsets are specified.
    other_offset : 3-tuple of float, optional
        The offset on the second body where the joint anchor should be
        placed. Defaults to (0, 0, 0). Like ``offset``, this is given as an
        offset relative to the size and shape of ``other_body``.
    '''
    anchor = self.world.move_next_to(self, other_body, offset, other_offset)
    self.world.join(joint, self, other_body, anchor=anchor, **kwargs)
0.003771
def _make_tuple(x):
    """TF has an obnoxious habit of being lenient with single vs tuple."""
    if isinstance(x, prettytensor.PrettyTensor):
        if x.is_sequence():
            return tuple(x.sequence)
        else:
            return (x.tensor,)
    elif isinstance(x, tuple):
        return x
    elif (isinstance(x, collections.Sequence) and
          not isinstance(x, six.string_types)):
        return tuple(x)
    else:
        return (x,)
0.01956
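A brief sketch of the normalization above, with plain Python values (the prettytensor branch is not exercised here):

    _make_tuple((1, 2))   # -> (1, 2): tuples pass through unchanged
    _make_tuple([1, 2])   # -> (1, 2): non-string sequences are converted
    _make_tuple('ab')     # -> ('ab',): strings are wrapped, not iterated
    _make_tuple(3)        # -> (3,): scalars are wrapped in a 1-tuple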
def marquee(txt='', width=78, mark='*'):
    """Return the input string centered in a 'marquee'.

    :Examples:

        In [16]: marquee('A test',40)
        Out[16]: '**************** A test ****************'

        In [17]: marquee('A test',40,'-')
        Out[17]: '---------------- A test ----------------'

        In [18]: marquee('A test',40,' ')
        Out[18]: '                 A test                 '
    """
    if not txt:
        return (mark * width)[:width]
    nmark = (width - len(txt) - 2) // len(mark) // 2
    if nmark < 0:
        nmark = 0
    marks = mark * nmark
    return '%s %s %s' % (marks, txt, marks)
0.011419
def _send_long(self, value):
    """
    Convert a numerical value into an integer, then to a bytes object. Check
    bounds for signed long.
    """
    # Coerce to int. This will throw a ValueError if the value can't
    # actually be converted.
    if type(value) != int:
        new_value = int(value)
        if self.give_warnings:
            w = "Coercing {} into int ({})".format(value, new_value)
            warnings.warn(w, Warning)
        value = new_value

    # Range check
    if value > self.board.long_max or value < self.board.long_min:
        err = "Value {} exceeds the size of the board's long.".format(value)
        raise OverflowError(err)

    return struct.pack(self.board.long_type, value)
0.013547
def build_job_configs(self, args):
    """Hook to build job configurations
    """
    job_configs = {}

    ttype = args['ttype']
    (targets_yaml, sim) = NAME_FACTORY.resolve_targetfile(
        args, require_sim_name=True)
    if targets_yaml is None:
        return job_configs

    write_full = args['write_full']

    targets = load_yaml(targets_yaml)

    base_config = dict(config=args['config'],
                       nsims=args['nsims'],
                       seed=args['seed'])

    first = args['seed']
    last = first + args['nsims'] - 1

    for target_name, profile_list in targets.items():
        for profile in profile_list:
            full_key = "%s:%s:%s" % (target_name, profile, sim)
            name_keys = dict(target_type=ttype,
                             target_name=target_name,
                             sim_name=sim,
                             profile=profile,
                             fullpath=True)
            sed_file = NAME_FACTORY.sim_sedfile(**name_keys)
            outfile = sed_file.replace(
                '_SEED.fits', '_collected_%06i_%06i.fits' % (first, last))
            logfile = make_nfs_path(outfile.replace('.fits', '.log'))
            if not write_full:
                outfile = None
            summaryfile = sed_file.replace(
                '_SEED.fits', '_summary_%06i_%06i.fits' % (first, last))
            job_config = base_config.copy()
            job_config.update(dict(sed_file=sed_file,
                                   outfile=outfile,
                                   summaryfile=summaryfile,
                                   logfile=logfile))
            job_configs[full_key] = job_config

    return job_configs
0.001078
def prepare_to_run(self, clock, period_count):
    """
    Prepare the activity for execution.

    :param clock: The clock containing the execution start time and
      execution period information.
    :param period_count: The total amount of periods this activity will be
      requested to be run for.
    """
    if self.start_period_ix == -1 and self.start_datetime != datetime.min:
        # Set the start period index
        for i in range(0, period_count):
            if clock.get_datetime_at_period_ix(i) > self.start_datetime:
                self.start_period_ix = i
                break
    if self.start_period_ix == -1:
        self.start_period_ix = 0
    if self.period_count == -1 and self.end_datetime != datetime.max:
        # Set the period count from the end datetime
        for i in range(0, period_count):
            if clock.get_datetime_at_period_ix(i) > self.end_datetime:
                self.period_count = i - self.start_period_ix
                break
    if self.period_count != -1:
        self.end_period_ix = self.start_period_ix + self.period_count
    else:
        self.end_period_ix = self.start_period_ix + period_count
0.00161
def list_servers(self, nicknames=None):
    """
    Iterate through the servers of the server group with the specified
    nicknames, or the single server with the specified nickname, and yield
    a `ServerDefinition` object for each server.

    nicknames may be: None, string defining a nickname or list of nicknames
    """
    if not nicknames:
        return self.list_all_servers()
    if isinstance(nicknames, six.string_types):
        nicknames = [nicknames]
    sd_list = []
    sd_nick_list = []
    for nickname in nicknames:
        if nickname in self._servers:
            sd_list.append(self.get_server(nickname))
        elif nickname in self._server_groups:
            for item_nick in self._server_groups[nickname]:
                for sd in self.list_servers(item_nick):
                    if sd.nickname not in sd_nick_list:
                        sd_nick_list.append(sd.nickname)
                        sd_list.append(sd)
        else:
            raise ValueError(
                "Server group or server nickname {0!r} not found in WBEM "
                "server definition file {1!r}".
                format(nickname, self._filepath))
    return sd_list
0.001546
def _gen_sample(self):
    """Generate a random captcha image sample

    Returns
    -------
    (numpy.ndarray, str)
        Tuple of image (numpy ndarray) and character string of digits used
        to generate the image
    """
    num_str = self.get_rand(self.num_digit_min, self.num_digit_max)
    return self.captcha.image(num_str), num_str
0.008108
def initialize_major_angle(self):
    """
    Computes the major angle: 2pi radians / number of groups.
    """
    num_groups = len(self.nodes.keys())
    self.major_angle = 2 * np.pi / num_groups
0.009217
def triangle_iteration(start, dim, stop=None):
    """
    >>> list(triangle_iteration(1, 1, stop=3))
    [(1,), (2,), (3,)]
    >>> list(triangle_iteration(1, 2, stop=3))
    [(1, 1), (1, 2), (2, 1), (1, 3), (3, 1), (2, 2)]
    >>> list(triangle_iteration(1, 0, stop=3))
    []
    """
    if not dim:
        return

    # Initialization
    point = (start, ) * dim
    frontier = set([point])
    yield point

    c = 1
    while True:
        frontier = set(
            increase_coord(p, i)
            for p in list(frontier)
            for i, _ in enumerate(p)
        )
        for p in frontier:
            yield p

        c += 1
        if stop is not None and c >= stop:
            return
0.001418
async def load_json_or_yaml_from_url(context, url, path, overwrite=True):
    """Retry a json/yaml file download, load it, then return its data.

    Args:
        context (scriptworker.context.Context): the scriptworker context.
        url (str): the url to download
        path (str): the path to download to
        overwrite (bool, optional): if False and path exists, don't
            download. Defaults to True.

    Returns:
        dict: the url data.

    Raises:
        Exception: as specified, on failure
    """
    if path.endswith("json"):
        file_type = 'json'
    else:
        file_type = 'yaml'

    if not overwrite or not os.path.exists(path):
        await retry_async(
            download_file, args=(context, url, path),
            retry_exceptions=(DownloadError, aiohttp.ClientError),
        )

    return load_json_or_yaml(path, is_path=True, file_type=file_type)
0.001111
def _determine_notification_info(notification_arn,
                                 notification_arn_from_pillar,
                                 notification_types,
                                 notification_types_from_pillar):
    '''
    helper method for present. ensure that notification_configs are set
    '''
    pillar_arn_list = copy.deepcopy(
        __salt__['config.option'](notification_arn_from_pillar, {})
    )
    pillar_arn = None
    if pillar_arn_list:
        pillar_arn = pillar_arn_list[0]
    pillar_notification_types = copy.deepcopy(
        __salt__['config.option'](notification_types_from_pillar, {})
    )
    arn = notification_arn if notification_arn else pillar_arn
    types = notification_types if notification_types else pillar_notification_types
    return (arn, types)
0.002463
def text_index(self):
    '''Returns the one-based index of the current text node.'''
    # This is the number of text nodes we've seen so far.
    # If we are currently in a text node, great; if not then add
    # one for the text node that's about to begin.
    i = self.tags.get(TextElement, 0)
    if self.last_tag is not TextElement:
        i += 1
    return i
0.005025
def _read_newick_from_string(nw, root_node, matcher, formatcode):
    """ Reads a newick string in the New Hampshire format. """
    if nw.count('(') != nw.count(')'):
        raise NewickError('Parentheses do not match. Broken tree structure?')

    # white spaces and separators are removed
    nw = re.sub("[\n\r\t]+", "", nw)

    current_parent = None
    # Each chunk represents the content of a parent node, and it could
    # contain leaves and closing parentheses.
    # We may find:
    # leaf, ..., leaf,
    # leaf, ..., leaf))),
    # leaf)), leaf, leaf))
    # leaf))
    # ) only if formatcode == 100
    for chunk in nw.split("(")[1:]:
        # If no node has been created so far, this is the root, so use the node.
        current_parent = root_node if current_parent is None else current_parent.add_child()
        subchunks = [ch.strip() for ch in chunk.split(",")]
        # We should expect that the chunk finished with a comma (if next chunk
        # is an internal sister node) or a subchunk containing closing
        # parenthesis until the end of the tree.
        # [leaf, leaf, '']
        # [leaf, leaf, ')))', leaf, leaf, '']
        # [leaf, leaf, ')))', leaf, leaf, '']
        # [leaf, leaf, ')))', leaf), leaf, 'leaf);']
        if subchunks[-1] != '' and not subchunks[-1].endswith(';'):
            raise NewickError('Broken newick structure at: %s' % chunk)
        # lets process the subchunks. Every closing parenthesis will close a
        # node and go up one level.
        for i, leaf in enumerate(subchunks):
            if leaf.strip() == '' and i == len(subchunks) - 1:
                continue  # "blah blah ,( blah blah"
            closing_nodes = leaf.split(")")
            # first part after splitting by ) always contain leaf info
            _read_node_data(closing_nodes[0], current_parent, "leaf",
                            matcher, formatcode)
            # next contain closing nodes and data about the internal nodes.
            if len(closing_nodes) > 1:
                for closing_internal in closing_nodes[1:]:
                    closing_internal = closing_internal.rstrip(";")
                    # read internal node data and go up one level
                    _read_node_data(closing_internal, current_parent,
                                    "internal", matcher, formatcode)
                    current_parent = current_parent.up
    return root_node
0.005955
def created(self, data, schema=None, envelope=None):
    """
    Gets a 201 response with the specified data.

    :param data: The content value.
    :param schema: The schema to serialize the data.
    :param envelope: The key used to envelope the data.
    :return: A Flask response object.
    """
    data = marshal(data, schema, envelope)
    return self.__make_response((data, 201))
0.004695
def get_calendars(self, calendar_id=None, body=None, params=None):
    """
    `<>`_

    :arg calendar_id: The ID of the calendar to fetch
    :arg body: The from and size parameters optionally sent in the body
    :arg from_: skips a number of calendars
    :arg size: specifies a max number of calendars to get
    """
    return self.transport.perform_request(
        "GET", _make_path("_ml", "calendars", calendar_id), params=params,
        body=body
    )
0.006061
def defrise_ellipses(ndim, nellipses=8, alternating=False):
    """Ellipses for the standard Defrise phantom in 2 or 3 dimensions.

    Parameters
    ----------
    ndim : {2, 3}
        Dimension of the space for the ellipses/ellipsoids.
    nellipses : int, optional
        Number of ellipses. If more ellipses are used, each ellipse becomes
        thinner.
    alternating : bool, optional
        True if the ellipses should have alternating densities (+1, -1),
        otherwise all ellipses have value +1.

    See Also
    --------
    odl.phantom.geometric.ellipsoid_phantom :
        Function for creating arbitrary ellipsoids phantoms
    odl.phantom.transmission.shepp_logan_ellipsoids
    """
    ellipses = []
    if ndim == 2:
        for i in range(nellipses):
            if alternating:
                value = (-1.0 + 2.0 * (i % 2))
            else:
                value = 1.0

            axis_1 = 0.5
            axis_2 = 0.5 / (nellipses + 1)
            center_x = 0.0
            center_y = -1 + 2.0 / (nellipses + 1.0) * (i + 1)
            rotation = 0

            ellipses.append(
                [value, axis_1, axis_2, center_x, center_y, rotation])
    elif ndim == 3:
        for i in range(nellipses):
            if alternating:
                value = (-1.0 + 2.0 * (i % 2))
            else:
                value = 1.0

            axis_1 = axis_2 = 0.5
            axis_3 = 0.5 / (nellipses + 1)
            center_x = center_y = 0.0
            center_z = -1 + 2.0 / (nellipses + 1.0) * (i + 1)
            rotation_phi = rotation_theta = rotation_psi = 0

            ellipses.append(
                [value, axis_1, axis_2, axis_3,
                 center_x, center_y, center_z,
                 rotation_phi, rotation_theta, rotation_psi])

    return ellipses
0.000554
def set_privacy(self, state):
    """
    :param state: True or False
    :return: nothing
    """
    values = {"desired_state": {"private": state}}
    response = self.api_interface.set_device_state(self, values)
    self._update_state_from_response(response)
0.00692
def serial_udb_extra_f15_send(self, sue_ID_VEHICLE_MODEL_NAME, sue_ID_VEHICLE_REGISTRATION, force_mavlink1=False):
    '''
    Backwards compatible version of SERIAL_UDB_EXTRA F15 and F16: format

    sue_ID_VEHICLE_MODEL_NAME   : Serial UDB Extra Model Name Of Vehicle (uint8_t)
    sue_ID_VEHICLE_REGISTRATION : Serial UDB Extra Registration Number of Vehicle (uint8_t)
    '''
    return self.send(self.serial_udb_extra_f15_encode(sue_ID_VEHICLE_MODEL_NAME, sue_ID_VEHICLE_REGISTRATION), force_mavlink1=force_mavlink1)
0.01157
def get_table(table_name):
    """
    Get a registered table.

    Decorated functions will be converted to `DataFrameWrapper`.

    Parameters
    ----------
    table_name : str

    Returns
    -------
    table : `DataFrameWrapper`
    """
    table = get_raw_table(table_name)
    if isinstance(table, TableFuncWrapper):
        table = table()
    return table
0.002717
def _make_fits(self):
    """Generates the data fits for any variables set for fitting in the
    shell."""
    a = self.tests[self.active]
    args = self.curargs
    # We need to generate a fit for the data if there are any fits specified.
    if len(args["fits"]) > 0:
        for fit in list(args["fits"].keys()):
            a.fit(args["independent"], fit, args["fits"][fit],
                  args["threshold"], args["functions"])
0.013605
def minute(self, value=None):
    """Corresponds to IDD Field `minute`

    Args:
        value (int): value for IDD Field `minute`
            value >= 0
            value <= 60
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = int(value)
        except ValueError:
            raise ValueError('value {} needs to be of type int '
                             'for field `minute`'.format(value))
        if value < 0:
            raise ValueError('value needs to be greater than or equal to 0 '
                             'for field `minute`')
        if value > 60:
            raise ValueError('value needs to be smaller than 60 '
                             'for field `minute`')
    self._minute = value
0.002
def verify_psd_options_multi_ifo(opt, parser, ifos):
    """Parses the CLI options and verifies that they are consistent and
    reasonable.

    Parameters
    ----------
    opt : object
        Result of parsing the CLI with OptionParser, or any object with the
        required attributes (psd_model, psd_file, asd_file, psd_estimation,
        psd_segment_length, psd_segment_stride, psd_inverse_length,
        psd_output).
    parser : object
        OptionParser instance.
    """
    for ifo in ifos:
        for opt_group in ensure_one_opt_groups:
            ensure_one_opt_multi_ifo(opt, parser, ifo, opt_group)

        if opt.psd_estimation[ifo]:
            required_opts_multi_ifo(
                opt, parser, ifo,
                ['--psd-segment-stride', '--psd-segment-length'],
                required_by="--psd-estimation")
0.005967
def fetch_yeast_locus_sequence(locus_name, flanking_size=0):
    '''Acquire a sequence from SGD http://www.yeastgenome.org.

    :param locus_name: Common name or systematic name for the locus (e.g.
                       ACT1 or YFL039C).
    :type locus_name: str
    :param flanking_size: The length of flanking DNA (on each side) to
                          return
    :type flanking_size: int
    '''
    from intermine.webservice import Service

    service = Service('http://yeastmine.yeastgenome.org/yeastmine/service')

    # Get a new query on the class (table) you will be querying:
    query = service.new_query('Gene')

    if flanking_size > 0:
        # The view specifies the output columns
        # secondaryIdentifier: the systematic name (e.g. YFL039C)
        # symbol: short name (e.g. ACT1)
        # length: sequence length
        # flankingRegions.direction: Upstream or downstream (or both) of locus
        # flankingRegions.sequence.length: length of the flanking regions
        # flankingRegions.sequence.residues: sequence of the flanking regions
        query.add_view('secondaryIdentifier', 'symbol', 'length',
                       'flankingRegions.direction',
                       'flankingRegions.sequence.length',
                       'flankingRegions.sequence.residues')

        # You can edit the constraint values below
        query.add_constraint('flankingRegions.direction', '=', 'both',
                             code='A')
        query.add_constraint('Gene', 'LOOKUP', locus_name, 'S. cerevisiae',
                             code='B')
        query.add_constraint('flankingRegions.distance', '=',
                             '{:.1f}kb'.format(flanking_size / 1000.),
                             code='C')
        # Uncomment and edit the code below to specify your own custom logic:
        query.set_logic('A and B and C')

        # TODO: What to do when there's more than one result?
        first_result = next(query.rows())
        # FIXME: Use logger module instead
        # print(first_result['secondaryIdentifier'])
        # print(first_result['symbol'], row['length'])
        # print(first_result['flankingRegions.direction'])
        # print(first_result['flankingRegions.sequence.length'])
        # print(first_result['flankingRegions.sequence.residues'])

        seq = coral.DNA(first_result['flankingRegions.sequence.residues'])
        # TODO: add more metadata
    elif flanking_size == 0:
        # The view specifies the output columns
        query.add_view('primaryIdentifier', 'secondaryIdentifier', 'symbol',
                       'name', 'sgdAlias', 'organism.shortName',
                       'sequence.length', 'sequence.residues', 'description',
                       'qualifier')

        query.add_constraint('status', 'IS NULL', code='D')
        query.add_constraint('status', '=', 'Active', code='C')
        query.add_constraint('qualifier', 'IS NULL', code='B')
        query.add_constraint('qualifier', '!=', 'Dubious', code='A')
        query.add_constraint('Gene', 'LOOKUP', locus_name, 'S. cerevisiae',
                             code='E')

        # Your custom constraint logic is specified with the code below:
        query.set_logic('(A or B) and (C or D) and E')

        first_result = next(query.rows())
        seq = coral.DNA(first_result['sequence.residues'])
    else:
        print('Problem with the flanking region size....')
        seq = coral.DNA('')

    return seq
0.000289
def compile(self, prog, features=Features.ALL):
    """Currently this compiler simply returns an interpreter instead of
    compiling.

    TODO: Write this compiler to increase LPProg run speed and to prevent
    exceeding maximum recursion depth

    Args:
        prog (str): A string containing the program.
        features (FeatureSet): The set of features to enable during
            compilation.

    Returns:
        LPProg
    """
    return LPProg(Parser(Tokenizer(prog, features), features).program(),
                  features)
0.011152
def enable(name, **kwargs):
    '''
    Enable the named service to start at boot.

    flags : None
        Set optional flags to run the service with.

    service.flags can be used to change the default flags.

    CLI Example:

    .. code-block:: bash

        salt '*' service.enable <service name>
        salt '*' service.enable <service name> flags=<flags>
    '''
    stat_cmd = '{0} set {1} status on'.format(_cmd(), name)
    stat_retcode = __salt__['cmd.retcode'](stat_cmd)

    flag_retcode = None
    # only (re)set flags for services that have an rc.d(8) script
    if os.path.exists('/etc/rc.d/{0}'.format(name)):
        flags = _get_flags(**kwargs)
        flag_cmd = '{0} set {1} flags {2}'.format(_cmd(), name, flags)
        flag_retcode = __salt__['cmd.retcode'](flag_cmd)

    return not any([stat_retcode, flag_retcode])
0.001183
def ratio_and_percentage(current, total, time_remaining):
    """Returns the progress ratio and percentage."""
    return "{} / {} ({}% completed)".format(
        current, total, int(current / total * 100))
0.010101
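A minimal usage sketch (note that time_remaining is accepted but unused by this formatter):

    ratio_and_percentage(3, 12, None)  # -> '3 / 12 (25% completed)'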
def _subprocessor(self, disabled_qubits):
    """Create a subprocessor by deleting a set of qubits.

    We assume this removes all evil edges, and return an
    :class:`eden_processor` instance.
    """
    edgelist = [(p, q) for p, q in self._edgelist
                if p not in disabled_qubits and q not in disabled_qubits]
    return eden_processor(edgelist, self.M, self.N, self.L,
                          random_bundles=self._random_bundles)
0.006356
async def _send_plain_text(self, request: Request, stack: Stack):
    """
    Sends plain text using `_send_text()`.
    """
    await self._send_text(request, stack, None)
0.010582
def depth(self, local: bool = True) -> int:
    """Return the circuit depth.

    Args:
        local: If True include local one-qubit gates in depth
            calculation. Else return the multi-qubit gate depth.
    """
    G = self.graph
    if not local:
        def remove_local(dagc: DAGCircuit) \
                -> Generator[Operation, None, None]:
            for elem in dagc:
                if dagc.graph.degree[elem] > 2:
                    yield elem
        G = DAGCircuit(remove_local(self)).graph

    return nx.dag_longest_path_length(G) - 1
0.003236
def _diff_lists(old, new, comparator):
    '''
    Compare lists to extract the changes

    :param old: old list
    :param new: new list
    :return: a dictionary with ``unchanged``, ``new``, ``deleted`` and
             ``sorted`` keys

    The sorted list is the union of unchanged and new lists, but keeping the
    original order from the new list.
    '''
    def _remove_indent(node):
        '''
        Remove the XML indentation to compare XML trees more easily
        '''
        node_copy = copy.deepcopy(node)
        node_copy.text = None
        for item in node_copy.iter():
            item.tail = None
        return node_copy

    diff = {'unchanged': [], 'new': [], 'deleted': [], 'sorted': []}
    # We don't want to alter old since it may be used later by caller
    old_devices = copy.deepcopy(old)
    for new_item in new:
        found = [item for item in old_devices
                 if comparator(_remove_indent(item),
                               _remove_indent(new_item))]
        if found:
            old_devices.remove(found[0])
            diff['unchanged'].append(found[0])
            diff['sorted'].append(found[0])
        else:
            diff['new'].append(new_item)
            diff['sorted'].append(new_item)
    diff['deleted'] = old_devices
    return diff
0.003218
def delete_record(self, name, recordid, username, password):
    '''
    Delete record
    '''
    # headers = {'key': username, 'secret': password}
    req = requests.delete(
        self.api_server + '/api/' + name + '/' + str(recordid),
        auth=(username, password))
    return req
0.009709
def get_vlan_brief_output_last_vlan_id(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_vlan_brief = ET.Element("get_vlan_brief")
    config = get_vlan_brief
    output = ET.SubElement(get_vlan_brief, "output")
    last_vlan_id = ET.SubElement(output, "last-vlan-id")
    last_vlan_id.text = kwargs.pop('last_vlan_id')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
0.004115
def register(self, portstr, username, password, pfactory):
    """
    Register a perspective factory PFACTORY to be executed when a PB
    connection arrives on PORTSTR with USERNAME/PASSWORD. Returns a
    Registration object which can be used to unregister later.
    """
    # do some basic normalization of portstrs
    if isinstance(portstr, type(0)) or ':' not in portstr:
        portstr = "tcp:%s" % portstr

    reg = Registration(self, portstr, username)

    if portstr not in self.dispatchers:
        disp = self.dispatchers[portstr] = Dispatcher(portstr)
        disp.setServiceParent(self)
    else:
        disp = self.dispatchers[portstr]

    disp.register(username, password, pfactory)

    return reg
0.002545
def tz_to_utc(dt, tz, ignoretz=True):
    """Converts a datetime object from the specified timezone to a UTC
    datetime.

    :param tz: the timezone the datetime is currently in. tz can be passed
        as a string or as a timezone object. (i.e. 'US/Central' or
        pytz.timezone('US/Central'), etc)
    :param ignoretz: will ignore the timezone portion of the datetime object
        and tzinfo will be None.
    :return: the datetime object in UTC time.

    Examples:

    >>> tz_to_utc(datetime(2011, 11, 25, 9), 'US/Central')
    2011-11-25 15:00:00
    >>> tz_to_utc(datetime(2011, 11, 25, 9), pytz.timezone('US/Central'))
    2011-11-25 15:00:00
    >>> tz_to_utc(datetime(2011, 11, 25, 9), 'US/Central', False)
    2011-11-25 15:00:00+00:00
    """
    if isinstance(tz, string_types):
        tz = pytz.timezone(tz)
    dt = tz.localize(dt)
    dt = datetime.astimezone(dt, pytz.timezone('UTC'))
    if ignoretz:
        return dt.replace(tzinfo=None)
    return dt
0.003046
def addrecords(X, new):
    """
    Append one or more records to the end of a numpy recarray or ndarray.

    Can take a single record, void or tuple, or a list of records, voids or
    tuples.

    Implemented by the tabarray method
    :func:`tabular.tab.tabarray.addrecords`.

    **Parameters**

        **X** : numpy ndarray with structured dtype or recarray
            The array to add records to.

        **new** : record, void or tuple, or list of them
            Record(s) to add to `X`.

    **Returns**

        **out** : numpy ndarray with structured dtype
            New numpy array made up of `X` plus the new records.

    **See also:** :func:`tabular.spreadsheet.rowstack`
    """
    if isinstance(new, np.record) or isinstance(new, np.void) or \
       isinstance(new, tuple):
        new = [new]
    return np.append(X, utils.fromrecords(new, type=np.ndarray,
                                          dtype=X.dtype), axis=0)
0.004766
def obj_to_mark_down(self, file_path=None, title_columns=False,
                     quote_numbers=True, quote_empty_str=False):
    """
    This will return a str of a mark down table.

    :param title_columns: bool if True will title all headers
    :param file_path: str of the path to the file to write to
    :param quote_numbers: bool if True will quote numbers that are strings
    :param quote_empty_str: bool if True will quote empty strings
    :return: str
    """
    md, column_widths = self.get_data_and_shared_column_widths(
        data_kwargs=dict(quote_numbers=quote_numbers,
                         quote_empty_str=quote_empty_str,
                         title_columns=title_columns),
        width_kwargs=dict(padding=1, pad_last_column=True))
    md.insert(1, [u":" + u'-' * (width - 1) for width in column_widths])
    md = [u'| '.join([row[c].ljust(column_widths[c])
                      for c in range(len(row))]) for row in md]
    ret = u'| ' + u' |\n| '.join(md) + u' |'
    self._save_file(file_path, ret)
    return ret
0.00524
def timed(name, tags=None):
    """Function decorator for tracking timing information
    on a function's invocation.

    >>> from statsdecor.decorators import timed
    >>> @timed('my.metric')
    >>> def my_func():
    >>>     pass
    """
    def wrap(f):
        @wraps(f)
        def decorator(*args, **kwargs):
            stats = client()
            with stats.timer(name, tags=tags):
                return f(*args, **kwargs)
        return decorator
    return wrap
0.002101
def get_filename(self, instance):
    """Get the filename
    """
    filename = self.field.getFilename(instance)
    if filename:
        return filename

    fieldname = self.get_field_name()
    content_type = self.get_content_type(instance)
    extension = mimetypes.guess_extension(content_type)

    return fieldname + extension
0.005405
def getstruct(self, msgid, as_json=False, stream=sys.stdout):
    """Get and print the whole message.

    as_json indicates whether to print the part list as JSON or not.
    """
    parts = [part.get_content_type() for hdr, part in self._get(msgid)]
    if as_json:
        print(json.dumps(parts), file=stream)
    else:
        for c in parts:
            print(c, file=stream)
0.004796
def parsedate(data):
    """Convert a time string to a time tuple."""
    t = parsedate_tz(data)
    if isinstance(t, tuple):
        return t[:9]
    else:
        return t
0.00578
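A hedged sketch, assuming parsedate_tz behaves like email.utils.parsedate_tz (the t[:9] slice drops the trailing timezone offset):

    parsedate('Fri, 25 Nov 2011 09:00:00 -0600')
    # -> a 9-element time tuple such as (2011, 11, 25, 9, 0, 0, 0, 1, -1)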
def compute_metrics_cv(self, X, y, **kwargs):
    '''Compute cross-validated metrics.

    Trains this model on data X with labels y.

    Returns a list of dict with keys name, scoring_name, value.

    Args:
        X (Union[np.array, pd.DataFrame]): data
        y (Union[np.array, pd.DataFrame, pd.Series]): labels
    '''
    # compute scores
    results = self.cv_score_mean(X, y)
    return results
0.004577
def _insertions(self, result, dimension, dimension_index):
    """Return list of (idx, sum) pairs representing subtotals.

    *idx* is the int offset at which to insert the ndarray subtotal
    in *sum*.
    """
    def iter_insertions():
        for anchor_idx, addend_idxs in dimension.hs_indices:
            insertion_idx = (
                -1 if anchor_idx == "top"
                else result.shape[dimension_index] - 1
                if anchor_idx == "bottom"
                else anchor_idx
            )
            addend_fancy_idx = tuple(
                [slice(None) for _ in range(dimension_index)]
                + [np.array(addend_idxs)]
            )
            yield (
                insertion_idx,
                np.sum(result[addend_fancy_idx], axis=dimension_index),
            )

    return [insertion for insertion in iter_insertions()]
0.002058
def __validate_arguments(self):
    """!
    @brief Check input arguments of BANG algorithm and if one of them is not
           correct then appropriate exception is thrown.
    """
    if self.__levels <= 0:
        raise ValueError("Incorrect amount of levels '%d'. Level value "
                         "should be greater than 0." % self.__levels)

    if len(self.__data) == 0:
        raise ValueError("Empty input data. Data should contain at least "
                         "one point.")

    if self.__density_threshold < 0:
        raise ValueError("Incorrect density threshold '%f'. Density "
                         "threshold should not be negative."
                         % self.__density_threshold)
0.009023
def header(self, array):
    """Specify the header of the table
    """
    self._check_row_size(array)
    self._header = map(str, array)
0.012903
def envget(var, default=None):
    """Get value of IRAF or OS environment variable."""
    if 'pyraf' in sys.modules:
        # ONLY if pyraf is already loaded, import iraf into the namespace
        from pyraf import iraf
    else:
        # else set iraf to None so it knows to not use iraf's environment
        iraf = None
    try:
        if iraf:
            return iraf.envget(var)
        else:
            raise KeyError
    except KeyError:
        try:
            return _varDict[var]
        except KeyError:
            try:
                return os.environ[var]
            except KeyError:
                if default is not None:
                    return default
                elif var == 'TERM':
                    # Return a default value for TERM
                    # TERM gets caught as it is found in the default
                    # login.cl file setup by IRAF.
                    print("Using default TERM value for session.")
                    return 'xterm'
                else:
                    raise KeyError(
                        "Undefined environment variable `%s'" % var)
0.001825
def query_form_data(self):
    """
    Get the formdata stored in the database for existing slice.

    params: slice_id: integer
    """
    form_data = {}
    slice_id = request.args.get('slice_id')
    if slice_id:
        slc = db.session.query(models.Slice).filter_by(
            id=slice_id).one_or_none()
        if slc:
            form_data = slc.form_data.copy()

    update_time_range(form_data)

    return json.dumps(form_data)
0.006316
def concatenate(arrays, axis=0, always_copy=True):
    """DEPRECATED, use ``concat`` instead

    Parameters
    ----------
    arrays : list of `NDArray`
        Arrays to be concatenate. They must have identical shape except
        the first dimension. They also must have the same data type.
    axis : int
        The axis along which to concatenate.
    always_copy : bool
        Default `True`. When not `True`, if the arrays only contain one
        `NDArray`, that element will be returned directly, avoid copying.

    Returns
    -------
    NDArray
        An `NDArray` that lives on the same context as `arrays[0].context`.
    """
    assert isinstance(arrays, list)
    assert len(arrays) > 0
    assert isinstance(arrays[0], NDArray)

    if not always_copy and len(arrays) == 1:
        return arrays[0]

    shape_axis = arrays[0].shape[axis]
    shape_rest1 = arrays[0].shape[0:axis]
    shape_rest2 = arrays[0].shape[axis+1:]
    dtype = arrays[0].dtype
    for arr in arrays[1:]:
        shape_axis += arr.shape[axis]
        assert shape_rest1 == arr.shape[0:axis]
        assert shape_rest2 == arr.shape[axis+1:]
        assert dtype == arr.dtype
    ret_shape = shape_rest1 + (shape_axis,) + shape_rest2
    ret = empty(ret_shape, ctx=arrays[0].context, dtype=dtype)

    idx = 0
    begin = [0 for _ in ret_shape]
    end = list(ret_shape)
    for arr in arrays:
        if axis == 0:
            ret[idx:idx+arr.shape[0]] = arr
        else:
            begin[axis] = idx
            end[axis] = idx + arr.shape[axis]
            # pylint: disable=no-member,protected-access
            _internal._crop_assign(ret, arr, out=ret,
                                   begin=tuple(begin),
                                   end=tuple(end))
            # pylint: enable=no-member,protected-access
        idx += arr.shape[axis]

    return ret
0.000537
def fault_zone(self, zone, simulate_wire_problem=False):
    """
    Faults a zone if we are emulating a zone expander.

    :param zone: zone to fault
    :type zone: int
    :param simulate_wire_problem: Whether or not to simulate a wire fault
    :type simulate_wire_problem: bool
    """
    # Allow ourselves to also be passed an address/channel combination
    # for zone expanders.
    #
    # Format (expander index, channel)
    if isinstance(zone, tuple):
        expander_idx, channel = zone
        zone = self._zonetracker.expander_to_zone(expander_idx, channel)

    status = 2 if simulate_wire_problem else 1

    self.send("L{0:02}{1}\r".format(zone, status))
0.002699
def upstream(self):
    """Get the remote name to use for upstream branches

    Uses "upstream" if it exists, "origin" otherwise
    """
    cmd = ["git", "remote", "get-url", "upstream"]
    try:
        subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
    except subprocess.CalledProcessError:
        return "origin"
    return "upstream"
0.005208
def resolve_const_spec(self, name, lineno):
    """Finds and links the ConstSpec with the given name."""
    if name in self.const_specs:
        return self.const_specs[name].link(self)

    if '.' in name:
        include_name, component = name.split('.', 1)
        if include_name in self.included_scopes:
            return self.included_scopes[include_name].resolve_const_spec(
                component, lineno
            )

    raise ThriftCompilerError(
        'Unknown constant "%s" referenced at line %d%s' % (
            name, lineno, self.__in_path()
        )
    )
0.00313
def get_anonymization_salt(ts):
    """Get the anonymization salt based on the event timestamp's day."""
    salt_key = 'stats:salt:{}'.format(ts.date().isoformat())
    salt = current_cache.get(salt_key)
    if not salt:
        salt_bytes = os.urandom(32)
        salt = b64encode(salt_bytes).decode('utf-8')
        current_cache.set(salt_key, salt, timeout=60 * 60 * 24)
    return salt
0.002564
def predict_type(self):
    """
    Predict the type of the inst_ref, and make sure the property being
    referenced is allowed
    """
    inst_type = self.inst_ref.predict_type()
    if self.prop_ref_type.allowed_inst_type != inst_type:
        self.msg.fatal(
            "'%s' is not a valid property of instance"
            % self.prop_ref_type.get_name(),
            self.src_ref
        )
    return self.prop_ref_type
0.006494
def data2rst(table, spans=[[[0, 0]]], use_headers=True,
             center_cells=False, center_headers=False):
    """
    Convert a list of lists of str into a reStructuredText Grid Table

    Parameters
    ----------
    table : list of lists of str
    spans : list of lists of lists of int, optional
        These are [row, column] pairs of cells that are merged in the
        table. Rows and columns start in the top left of the table. For
        example::

            +--------+--------+
            | [0, 0] | [0, 1] |
            +--------+--------+
            | [1, 0] | [1, 1] |
            +--------+--------+

    use_headers : bool, optional
        Whether or not the first row of table data will become headers.
    center_cells : bool, optional
        Whether or not cells will be centered
    center_headers: bool, optional
        Whether or not headers will be centered

    Returns
    -------
    str
        The grid table string

    Example
    -------
    >>> spans = [
    ...     [ [3, 1], [4, 1] ],
    ...     [ [3, 2], [4, 2] ],
    ...     [ [2, 1], [2, 2] ],
    ... ]
    >>> table = [
    ...     ["Header 1", "Header 2", "Header 3"],
    ...     ["body row 1", "column 2", "column 3"],
    ...     ["body row 2", "Cells may span columns", ""],
    ...     ["body row 3", "Cells may span rows.", "- Cells\\n-contain\\n-blocks"],
    ...     ["body row 4", "", ""],
    ... ]
    >>> print(dashtable.data2rst(table, spans))
    +------------+------------+-----------+
    | Header 1   | Header 2   | Header 3  |
    +============+============+===========+
    | body row 1 | column 2   | column 3  |
    +------------+------------+-----------+
    | body row 2 | Cells may span columns.|
    +------------+------------+-----------+
    | body row 3 | Cells may  | - Cells   |
    +------------+ span rows. | - contain |
    | body row 4 |            | - blocks. |
    +------------+------------+-----------+
    """
    table = copy.deepcopy(table)

    table_ok = check_table(table)
    if not table_ok == "":
        return "ERROR: " + table_ok

    if not spans == [[[0, 0]]]:
        for span in spans:
            span_ok = check_span(span, table)
            if not span_ok == "":
                return "ERROR: " + span_ok

    table = ensure_table_strings(table)
    table = add_cushions(table)

    spans = table_cells_2_spans(table, spans)

    widths = get_output_column_widths(table, spans)
    heights = get_output_row_heights(table, spans)

    cells = []
    for span in spans:
        cell = make_cell(table, span, widths, heights, use_headers)
        cells.append(cell)
    cells = list(sorted(cells))

    if center_cells:
        for cell in cells:
            if not cell.is_header:
                center_cell_text(cell)
                v_center_cell_text(cell)

    if center_headers:
        for cell in cells:
            if cell.is_header:
                center_cell_text(cell)
                v_center_cell_text(cell)

    grid_table = merge_all_cells(cells)

    return grid_table
0.000656
def set_kg(self, kg):
    """Sets the Kg for the BMC to use

    In RAKP, Kg is a BMC-specific integrity key that can be set. If not
    set, Kuid is used for the integrity key
    """
    try:
        self.kg = kg.encode('utf-8')
    except AttributeError:
        self.kg = kg
0.006431
def factorize_and_solve(self, A, b):
    """
    Factorizes A and solves Ax=b.

    Parameters
    ----------
    A : matrix
    b : ndarray

    Returns
    -------
    x : ndarray
    """
    A = coo_matrix(A)
    x = b.copy()
    self.mumps.set_centralized_assembled_values(A.data)
    self.mumps.set_rhs(x)
    self.mumps.run(job=5)
    return x
0.009662
def ae_latent_softmax(latents_pred, latents_discrete_hot, vocab_size, hparams):
    """Latent prediction and loss.

    Args:
        latents_pred: Tensor of shape [..., depth].
        latents_discrete_hot: Tensor of shape [..., vocab_size].
        vocab_size: an int representing the vocab size.
        hparams: HParams.

    Returns:
        sample: Tensor of shape [...], a sample from a multinomial
            distribution.
        loss: Tensor of shape [...], the softmax cross-entropy.
    """
    with tf.variable_scope("latent_logits"):
        latents_logits = tf.layers.dense(latents_pred, vocab_size,
                                         name="logits_dense")
        if hparams.logit_normalization:
            latents_logits *= tf.rsqrt(
                1e-8 + tf.reduce_mean(tf.square(latents_logits)))
        loss = tf.nn.softmax_cross_entropy_with_logits_v2(
            labels=latents_discrete_hot, logits=latents_logits)

        # TODO(trandustin): tease this out from ae_latent_softmax.
        # we use just the loss portion to anchor prior / encoder on text.
        sample = multinomial_sample(latents_logits, vocab_size,
                                    hparams.sampling_method,
                                    hparams.sampling_temp)
        return sample, loss
0.003195
def _build_params(self):
    '''Build the dictionary of parameters that will be used in the
    HTTP POST request to PagSeguro.

    Returns:
        A dictionary with the parameters defined on the Payment object.
    '''
    params = {}
    params['email'] = self.email
    params['token'] = self.token
    params['currency'] = self.currency

    # Optional attributes
    if self.receiver_email:
        params['receiver_email'] = self.receiver_email
    if self.reference:
        params['reference'] = self.reference
    if self.extra_amount:
        params['extra_amount'] = self.extra_amount
    if self.redirect_url:
        params['redirect_url'] = self.redirect_url
    if self.notification_url:
        params['notification_url'] = self.notification_url
    if self.max_uses:
        params['max_uses'] = self.max_uses
    if self.max_age:
        params['max_age'] = self.max_age
    # TODO: Include metadata here

    # Items
    for index, item in enumerate(self.items, start=1):
        params['itemId%d' % index] = item['item_id']
        params['itemDescription%d' % index] = item['description']
        params['itemAmount%d' % index] = '%.2f' % item['amount']
        params['itemQuantity%s' % index] = item['quantity']
        if item.get('shipping_cost'):
            params['itemShippingCost%d' % index] = item['shipping_cost']
        if item.get('weight'):
            params['itemWeight%d' % index] = item['weight']

    # Sender
    if self.client.get('email'):
        params['senderEmail'] = self.client.get('email')
    if self.client.get('name'):
        params['senderName'] = ' '.join(self.client.get('name').split())
    if self.client.get('phone_area_code'):
        params['senderAreaCode'] = self.client.get('phone_area_code')
    if self.client.get('phone_number'):
        params['senderPhone'] = self.client.get('phone_number')
    if self.client.get('cpf'):
        params['senderCPF'] = self.client.get('cpf')
    if self.client.get('sender_born_date'):
        params['senderBornDate'] = self.client.get('sender_born_date')

    # Shipping
    if self.shipping.get('type'):
        params['shippingType'] = self.shipping.get('type')
    if self.shipping.get('cost'):
        params['shippingCost'] = '%.2f' % self.shipping.get('cost')
    if self.shipping.get('country'):
        params['shippingAddressCountry'] = self.shipping.get('country')
    if self.shipping.get('state'):
        params['shippingAddressState'] = self.shipping.get('state')
    if self.shipping.get('city'):
        params['shippingAddressCity'] = self.shipping.get('city')
    if self.shipping.get('postal_code'):
        params['shippingAddressPostalCode'] = self.shipping.get('postal_code')
    if self.shipping.get('district'):
        params['shippingAddressDistrict'] = self.shipping.get('district')
    if self.shipping.get('street'):
        params['shippingAddressStreet'] = self.shipping.get('street')
    if self.shipping.get('number'):
        params['shippingAddressNumber'] = self.shipping.get('number')
    if self.shipping.get('complement'):
        params['shippingAddressComplement'] = self.shipping.get('complement')

    return params
0.001731
def lazy_tag(tag, *args, **kwargs):
    """
    Lazily loads a template tag after the page has loaded. Requires jQuery
    (for now).

    Usage:
        {% load lazy_tags %}

        {% lazy_tag 'tag_lib.tag_name' arg1 arg2 kw1='test' kw2='hello' %}

    Args:
        tag (str): the tag library and tag name separated by a period. For a
            template tag named `do_thing` in a tag library named `thing_tags`
            the `tag` argument would be `'thing_tags.do_thing'`.
        *args: arguments to be passed to the template tag.
        **kwargs: keyword arguments to be passed to the template tag.
    """
    tag_id = get_tag_id()
    set_lazy_tag_data(tag_id, tag, args, kwargs)

    return render_to_string('lazy_tags/lazy_tag.html', {
        'tag_id': tag_id,
        'STATIC_URL': settings.STATIC_URL,
    })
0.001206
def set_user_access(uid, channel=14, callback=True, link_auth=True,
                    ipmi_msg=True, privilege_level='administrator', **kwargs):
    '''
    Set user access

    :param uid: user number [1:16]
    :param channel: number [1:7]

    :param callback: User Restricted to Callback

        - False = User Privilege Limit is determined by the User Privilege
          Limit parameter, below, for both callback and non-callback
          connections.
        - True = User Privilege Limit is determined by the User Privilege
          Limit parameter for callback connections, but is restricted to
          Callback level for non-callback connections. Thus, a user can only
          initiate a Callback when they 'call in' to the BMC, but once the
          callback connection has been made, the user could potentially
          establish a session as an Operator.

    :param link_auth: User Link authentication enable/disable (used to
        enable whether this user's name and password information will be
        used for link authentication, e.g. PPP CHAP) for the given channel.
        Link authentication itself is a global setting for the channel and
        is enabled/disabled via the serial/modem configuration parameters.

    :param ipmi_msg: User IPMI Messaging: (used to enable/disable whether
        this user's name and password information will be used for IPMI
        Messaging. In this case, 'IPMI Messaging' refers to the ability to
        execute generic IPMI commands that are not associated with a
        particular payload type. For example, if IPMI Messaging is disabled
        for a user, but that user is enabled for activating the SOL payload
        type, then IPMI commands associated with SOL and session management,
        such as Get SOL Configuration Parameters and Close Session are
        available, but generic IPMI commands such as Get SEL Time are
        unavailable.)

    :param privilege_level: User Privilege Limit. (Determines the maximum
        privilege level that the user is allowed to switch to on the
        specified channel.)

        - callback
        - user
        - operator
        - administrator
        - proprietary
        - no_access

    :param kwargs:
        - api_host=127.0.0.1
        - api_user=admin
        - api_pass=example
        - api_port=623
        - api_kg=None

    CLI Examples:

    .. code-block:: bash

        salt-call ipmi.set_user_access uid=2 privilege_level='operator'
    '''
    with _IpmiCommand(**kwargs) as s:
        return s.set_user_access(uid, channel, callback, link_auth, ipmi_msg,
                                 privilege_level)
0.001902
def transform(self, X, y=None):
    """
    Locally blur an image by applying a gradient anisotropic
    diffusion filter.

    Arguments
    ---------
    X : ANTsImage
        image to transform

    y : ANTsImage (optional)
        another image to transform.

    Example
    -------
    >>> import ants
    >>> blur = ants.contrib.LocallyBlurIntensity(1,5)
    >>> img2d = ants.image_read(ants.get_data('r16'))
    >>> img2d_b = blur.transform(img2d)
    >>> ants.plot(img2d)
    >>> ants.plot(img2d_b)
    >>> img3d = ants.image_read(ants.get_data('mni'))
    >>> img3d_b = blur.transform(img3d)
    >>> ants.plot(img3d)
    >>> ants.plot(img3d_b)
    """
    # if X.pixeltype != 'float':
    #     raise ValueError('image.pixeltype must be float ... use TypeCast '
    #                      'transform or clone to float')
    insuffix = X._libsuffix
    cast_fn = utils.get_lib_fn('locallyBlurAntsImage%s' % (insuffix))
    casted_ptr = cast_fn(X.pointer, self.iters, self.conductance)
    return iio.ANTsImage(pixeltype=X.pixeltype,
                         dimension=X.dimension,
                         components=X.components,
                         pointer=casted_ptr)
0.004132
def update(self, data):
    """Update the object with new data."""
    fields = [
        'id', 'status', 'type', 'persistence', 'date_start', 'date_finish',
        'date_created', 'date_modified', 'checksum', 'processor_name',
        'input', 'input_schema', 'output', 'output_schema', 'static',
        'static_schema', 'var', 'var_template',
    ]

    self.annotation = {}
    for f in fields:
        setattr(self, f, data[f])

    self.name = data['static']['name'] if 'name' in data['static'] else ''

    self.annotation.update(
        self._flatten_field(data['input'], data['input_schema'], 'input'))
    self.annotation.update(
        self._flatten_field(data['output'], data['output_schema'], 'output'))
    self.annotation.update(
        self._flatten_field(data['static'], data['static_schema'], 'static'))
    self.annotation.update(
        self._flatten_field(data['var'], data['var_template'], 'var'))
0.005376
def request(self, batch, attempt=0):
    """Attempt to upload the batch and retry before raising an error
    """
    try:
        q = self.api.new_queue()
        for msg in batch:
            q.add(msg['event'], msg['value'], source=msg['source'])
        q.submit()
    except:
        if attempt > self.retries:
            raise
        self.request(batch, attempt + 1)
0.007353
def number_of_states(dtrajs):
    r"""Determine the number of states from a set of discrete trajectories

    Parameters
    ----------
    dtrajs : list of int-arrays
        discrete trajectories
    """
    # determine number of states n
    nmax = 0
    for dtraj in dtrajs:
        nmax = max(nmax, np.max(dtraj))
    # return number of states
    return nmax + 1
0.002681
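A short sketch of the state count (states are assumed to be 0-indexed, hence the +1 over the largest label seen):

    import numpy as np
    number_of_states([np.array([0, 1, 2]), np.array([0, 3])])  # -> 4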
def get(self):
    """
    this translates to 'get all token addresses we have channels open for'
    """
    return self.rest_api.get_tokens_list(
        self.rest_api.raiden_api.raiden.default_registry.address,
    )
0.00823
def wait(aws):
    """Waits for all of the awaitable objects (e.g. coroutines) in aws to
    finish.

    All the awaitable objects are waited for, even if one of them raises an
    exception. When one or more awaitable raises an exception, the exception
    from the awaitable with the lowest index in the aws list will be
    reraised.

    Args:
        aws: a single awaitable, or list of awaitables.

    Returns:
        If aws is a single awaitable, its result.
        If aws is a list of awaitables, a list containing the result of each
        awaitable in the list.

    Raises:
        Exception: if any of the awaitables raises.
    """
    aws_list = aws if isinstance(aws, list) else [aws]
    results = asyncio.get_event_loop().run_until_complete(asyncio.gather(
        *aws_list, return_exceptions=True))
    # If any of the cmds failed, re-raise the error.
    for result in results:
        if isinstance(result, Exception):
            raise result
    return results if isinstance(aws, list) else results[0]
0.008386
def set_position_target_global_int_encode(self, time_boot_ms, target_system, target_component, coordinate_frame, type_mask, lat_int, lon_int, alt, vx, vy, vz, afx, afy, afz, yaw, yaw_rate):
    '''
    Sets a desired vehicle position, velocity, and/or acceleration in a
    global coordinate system (WGS84). Used by an external controller to
    command the vehicle (manual controller or other system).

    time_boot_ms      : Timestamp in milliseconds since system boot. The rationale for the timestamp in the setpoint is to allow the system to compensate for the transport delay of the setpoint. This allows the system to compensate processing latency. (uint32_t)
    target_system     : System ID (uint8_t)
    target_component  : Component ID (uint8_t)
    coordinate_frame  : Valid options are: MAV_FRAME_GLOBAL_INT = 5, MAV_FRAME_GLOBAL_RELATIVE_ALT_INT = 6, MAV_FRAME_GLOBAL_TERRAIN_ALT_INT = 11 (uint8_t)
    type_mask         : Bitmask to indicate which dimensions should be ignored by the vehicle: a value of 0b0000000000000000 or 0b0000001000000000 indicates that none of the setpoint dimensions should be ignored. If bit 10 is set the floats afx afy afz should be interpreted as force instead of acceleration. Mapping: bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy, bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az, bit 10: is force setpoint, bit 11: yaw, bit 12: yaw rate (uint16_t)
    lat_int           : X Position in WGS84 frame in 1e7 * meters (int32_t)
    lon_int           : Y Position in WGS84 frame in 1e7 * meters (int32_t)
    alt               : Altitude in meters in AMSL altitude, not WGS84 if absolute or relative, above terrain if GLOBAL_TERRAIN_ALT_INT (float)
    vx                : X velocity in NED frame in meter / s (float)
    vy                : Y velocity in NED frame in meter / s (float)
    vz                : Z velocity in NED frame in meter / s (float)
    afx               : X acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
    afy               : Y acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
    afz               : Z acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
    yaw               : yaw setpoint in rad (float)
    yaw_rate          : yaw rate setpoint in rad/s (float)
    '''
    return MAVLink_set_position_target_global_int_message(time_boot_ms, target_system, target_component, coordinate_frame, type_mask, lat_int, lon_int, alt, vx, vy, vz, afx, afy, afz, yaw, yaw_rate)
0.005689
def restore(cls, data_dict):
    """ Restore from previously simplified data.
    Data is supposed to be valid, no checks are performed!
    """
    obj = cls.__new__(cls)  # Avoid calling constructor
    object.__setattr__(obj, '_simplified', data_dict)
    object.__setattr__(obj, '_storage', dict())
    return obj
0.005666
async def run(*cmd):
    """Run the given subprocess command in a coroutine.

    Args:
        *cmd: the command to run and its arguments.

    Returns:
        The output that the command wrote to stdout as a list of strings,
        one line per element (stderr output is piped to stdout).

    Raises:
        RuntimeError: if the command returns a non-zero result.
    """
    stdout = await checked_run(*cmd)

    log_path = os.path.join(FLAGS.base_dir, get_cmd_name(cmd) + '.log')
    with gfile.Open(log_path, 'a') as f:
        f.write(expand_cmd_str(cmd))
        f.write('\n')
        f.write(stdout)
        f.write('\n')

    # Split stdout into lines.
    return stdout.split('\n')
0.01087
def lpad(s, N, char=b'\0'):
    """pads a string to the left with null-bytes or any other given
    character.

    .. note:: This is used by the :py:func:`xor` function.

    :param s: the string
    :param N: an integer of how much padding should be done
    :returns: the padded bytes
    """
    # The default must be a bytes object: str.rjust would reject a bytes
    # fill character, and the assert below requires bytes.
    assert isinstance(char, bytes) and len(char) == 1, \
        'char should be a byte string with length 1'
    return s.rjust(N, char)
0.004831
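With the bytes default fixed as above, a minimal sketch:

    lpad(b'ab', 5)             # -> b'\x00\x00\x00ab'
    lpad(b'ab', 5, char=b'*')  # -> b'***ab'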
def scopes(self, **kwargs):
    """Scopes associated to the team."""
    return self._client.scopes(team=self.id, **kwargs)
0.015267
def _tzinfome(tzinfo):
    """Gets a tzinfo object from a string.

    Args:
        tzinfo: A string (or string like) object, or a datetime.tzinfo
            object.

    Returns:
        An datetime.tzinfo object.

    Raises:
        UnknownTimeZoneError: If the timezone given can't be decoded.
    """
    if not isinstance(tzinfo, datetime.tzinfo):
        try:
            tzinfo = pytz.timezone(tzinfo)
            assert tzinfo.zone in pytz.all_timezones
        except AttributeError:
            raise pytz.UnknownTimeZoneError("Unknown timezone! %s" % tzinfo)
    return tzinfo
0.013308
def visible(self):
    """
    Read/write. |True| if axis is visible, |False| otherwise.
    """
    delete = self._element.delete_
    if delete is None:
        return False
    return False if delete.val else True
0.008197
def create_with_virtualenv(self, interpreter, virtualenv_options):
    """Create a virtualenv using the virtualenv lib."""
    args = ['virtualenv', '--python', interpreter, self.env_path]
    args.extend(virtualenv_options)
    if not self.pip_installed:
        args.insert(3, '--no-pip')
    try:
        helpers.logged_exec(args)
        self.env_bin_path = os.path.join(self.env_path, 'bin')
    except FileNotFoundError as error:
        logger.error('Virtualenv is not installed. It is needed to create '
                     'a virtualenv with a different python version than '
                     'fades (got {})'.format(error))
        raise FadesError('virtualenv not found')
    except helpers.ExecutionError as error:
        error.dump_to_log(logger)
        raise FadesError('virtualenv could not be run')
    except Exception as error:
        logger.exception("Error creating virtualenv: %s", error)
        raise FadesError('General error while running virtualenv')
0.003872
def setOutputHandler(self, outputhandler):
    """
    Sets a new output handler.

    Args:
        outputhandler: The function handling the AMPL output derived from
        interpreting user commands.
    """
    class OutputHandlerInternal(amplpython.OutputHandler):
        def output(self, kind, msg):
            outputhandler.output(kind, msg)

    self._outputhandler = outputhandler
    self._outputhandler_internal = OutputHandlerInternal()
    lock_and_call(
        lambda: self._impl.setOutputHandler(
            self._outputhandler_internal
        ),
        self._lock
    )
0.003035
def get(img, light=False):
    """Get colorscheme."""
    cols = gen_colors(img)

    if len(cols) < 6:
        logging.error("colorz failed to generate enough colors.")
        logging.error("Try another backend or another image. (wal --backend)")
        sys.exit(1)

    return adjust(cols, light)
0.003333
def chem_shifts_by_residue(self, amino_acids=None, atoms=None,
                           amino_acids_and_atoms=None, nmrstar_version="3"):
    """Organize chemical shifts by amino acid residue.

    :param list amino_acids: List of amino acids three-letter codes.
    :param list atoms: List of BMRB atom type codes.
    :param dict amino_acids_and_atoms: Amino acid and its atoms key-value
        pairs.
    :param str nmrstar_version: Version of NMR-STAR format to use for look
        up chemical shifts loop.
    :return: List of OrderedDict per each chain
    :rtype: :py:class:`list` of :py:class:`collections.OrderedDict`
    """
    if (amino_acids_and_atoms and amino_acids) or (amino_acids_and_atoms and atoms):
        raise ValueError('"amino_acids_and_atoms" parameter cannot be used simultaneously with '
                         '"amino_acids" and "atoms" parameters, one or another must be provided.')

    chemshifts_loop = NMRSTAR_CONSTANTS[nmrstar_version]["chemshifts_loop"]
    aminoacid_seq_id = NMRSTAR_CONSTANTS[nmrstar_version]["aminoacid_seq_id"]
    aminoacid_code = NMRSTAR_CONSTANTS[nmrstar_version]["aminoacid_code"]
    atom_code = NMRSTAR_CONSTANTS[nmrstar_version]["atom_code"]
    chemshift_value = NMRSTAR_CONSTANTS[nmrstar_version]["chemshift_value"]

    chains = []
    for saveframe in self:
        if saveframe == u"data" or saveframe.startswith(u"comment"):
            continue
        else:
            for ind in self[saveframe].keys():
                if ind.startswith(u"loop_"):
                    if list(self[saveframe][ind][0]) == chemshifts_loop:
                        chem_shifts_dict = OrderedDict()
                        for entry in self[saveframe][ind][1]:
                            residue_id = entry[aminoacid_seq_id]
                            chem_shifts_dict.setdefault(residue_id, OrderedDict())
                            chem_shifts_dict[residue_id][u"AA3Code"] = entry[aminoacid_code]
                            chem_shifts_dict[residue_id][u"Seq_ID"] = residue_id
                            chem_shifts_dict[residue_id][entry[atom_code]] = entry[chemshift_value]
                        chains.append(chem_shifts_dict)

    if amino_acids_and_atoms:
        for chem_shifts_dict in chains:
            for aa_dict in list(chem_shifts_dict.values()):
                if aa_dict[u"AA3Code"].upper() not in list(amino_acids_and_atoms.keys()):
                    chem_shifts_dict.pop(aa_dict[u"Seq_ID"])
                else:
                    for resonance in list(aa_dict.keys()):
                        if resonance in (u"AA3Code", u"Seq_ID") or resonance.upper() in amino_acids_and_atoms[aa_dict[u"AA3Code"]]:
                            continue
                        else:
                            aa_dict.pop(resonance)
    else:
        if amino_acids:
            for chem_shifts_dict in chains:
                for aa_dict in list(chem_shifts_dict.values()):
                    if aa_dict[u"AA3Code"].upper() not in amino_acids:
                        chem_shifts_dict.pop(aa_dict[u"Seq_ID"])

        if atoms:
            for chem_shifts_dict in chains:
                for aa_dict in chem_shifts_dict.values():
                    for resonance in list(aa_dict.keys()):
                        if resonance in (u"AA3Code", u"Seq_ID") or resonance.upper() in atoms:
                            continue
                        else:
                            aa_dict.pop(resonance)
    return chains
0.004607
def centerOnDateTime(self, dtime):
    """
    Centers the view on a given datetime for the gantt widget.

    :param dtime | <QDateTime>
    """
    view = self.uiGanttVIEW
    scene = view.scene()
    point = view.mapToScene(0, 0)
    x = scene.datetimeXPos(dtime)
    y = point.y()
    view.centerOn(x, y)
0.008065
def dP_wedge_meter(D, H, P1, P2):
    r'''Calculates the non-recoverable pressure drop of a wedge meter
    based on the measured pressures before and at the wedge meter, and the
    geometry of the wedge meter according to [1]_.

    .. math::
        \Delta \bar \omega = (1.09 - 0.79\beta)\Delta P

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    H : float
        Portion of the diameter of the clear segment of the pipe up to the
        wedge blocking flow; the height of the pipe up to the wedge, [m]
    P1 : float
        Static pressure of fluid upstream of wedge meter at the
        cross-section of the pressure tap, [Pa]
    P2 : float
        Static pressure of fluid at the end of the wedge meter pressure
        tap, [Pa]

    Returns
    -------
    dP : float
        Non-recoverable pressure drop of the wedge meter, [Pa]

    Notes
    -----
    The recoverable pressure drop should be recovered by 5 pipe diameters
    downstream of the wedge meter.

    Examples
    --------
    >>> dP_wedge_meter(1, .7, 1E6, 9.5E5)
    20344.849697483587

    References
    ----------
    .. [1] ISO/DIS 5167-6 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits
       Running Full -- Part 6: Wedge Meters.
    '''
    dP = P1 - P2
    beta = diameter_ratio_wedge_meter(D, H)
    return (1.09 - 0.79*beta)*dP
0.007529
def add_missing(C): """Add arrays with zeros for missing Wilson coefficient keys""" C_out = C.copy() for k in (set(WC_keys) - set(C.keys())): C_out[k] = np.zeros(C_keys_shape[k]) return C_out
0.004651
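# Minimal self-contained check of add_missing, assuming illustrative
# module-level WC_keys and C_keys_shape tables (the real ones enumerate the
# Wilson coefficient names and their array shapes).
import numpy as np

WC_keys = ["C1", "C2"]
C_keys_shape = {"C1": (3, 3), "C2": (3, 3)}

C = {"C1": np.eye(3)}
C_full = add_missing(C)
assert set(C_full) == {"C1", "C2"}
assert not C_full["C2"].any()  # the missing key was zero-filled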
def store(self, obj):
        """Store an object into the MongoDB storage for caching.
        
        Args:
            obj (AtlasServiceBinding.Binding or AtlasServiceInstance.Instance): instance or binding
        
        Returns:
            ObjectId: MongoDB _id
        
        Raises:
            ErrStorageMongoConnection: Error during MongoDB communication.
            ErrStorageTypeUnsupported: Type unsupported.
            ErrStorageStore: Failed to store the binding or instance.
        """
        
        # query
        if type(obj) is AtlasServiceInstance.Instance:
            query = { "instance_id" : obj.instance_id, "database" : obj.get_dbname(), "cluster": obj.get_cluster(), "parameters" : obj.parameters }
        elif type(obj) is AtlasServiceBinding.Binding:
            query = { "binding_id" : obj.binding_id, "parameters" : obj.parameters, "instance_id": obj.instance.instance_id }
        else:
            raise ErrStorageTypeUnsupported(type(obj))
        
        # insert
        try:
            result = self.broker.insert_one(query)
        except Exception:
            raise ErrStorageMongoConnection("Store Instance or Binding")
        
        if result is not None:
            # Flag the object as provisioned
            obj.provisioned = True
            return result.inserted_id
        
        raise ErrStorageStore()
0.017254
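# Hedged sketch: `storage` is a hypothetical backend instance exposing the
# store() method above and `instance` an AtlasServiceInstance.Instance; a
# successful insert returns the MongoDB _id and flips the provisioned flag.
object_id = storage.store(instance)
assert instance.provisioned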
def superclass(self, klass): """True if the Class is a superclass of the given one.""" return bool(lib.EnvSuperclassP(self._env, self._cls, klass._cls))
0.011905
def module_getmtime(filename):
    """
    Get the mtime associated with a module.  If this is a .pyc or .pyo file and
    a corresponding .py file exists, the time of the .py file is returned.

    :param filename: filename of the module.
    :returns: mtime or None if the file doesn't exist.
    """
    if os.path.splitext(filename)[1].lower() in (".pyc", ".pyo") and os.path.exists(filename[:-1]):
        return os.path.getmtime(filename[:-1])
    if os.path.exists(filename):
        return os.path.getmtime(filename)
    return None
0.003704
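# Runnable check of module_getmtime against the standard library's os module;
# for a .pyc path it would instead fall back to the neighbouring .py file.
import os

print(module_getmtime(os.__file__))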
def alg2keytype(alg): """ Go from algorithm name to key type. :param alg: The algorithm name :return: The key type """ if not alg or alg.lower() == "none": return "none" elif alg.startswith("RS") or alg.startswith("PS"): return "RSA" elif alg.startswith("HS") or alg.startswith("A"): return "oct" elif alg.startswith("ES") or alg.startswith("ECDH-ES"): return "EC" else: return None
0.00216
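# A few spot checks of the algorithm-to-key-type mapping defined above.
assert alg2keytype("RS256") == "RSA"
assert alg2keytype("PS384") == "RSA"
assert alg2keytype("HS256") == "oct"
assert alg2keytype("ES256") == "EC"
assert alg2keytype("none") == "none"
assert alg2keytype("unknown") is None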
def build_url_field(self, field_name, model_class):
        """
        This is needed because DRF's model serializer uses the queryset to build the url name

        # TODO: Move this to own serializer mixin or fix problem elsewhere?
        """
        field, kwargs = super().build_url_field(field_name, model_class)
        view = self.root.context["view"]
        kwargs["view_name"] = view.get_url_name("detail")

        return field, kwargs
0.006757
def zfill(x, width): """zfill(x, width) -> string Pad a numeric string x with zeros on the left, to fill a field of the specified width. The string x is never truncated. """ if not isinstance(x, basestring): x = repr(x) return x.zfill(width)
0.003623
def scan_devices(fn, lfilter, iface=None):
    """Sniff packets

    :param fn: callback invoked for each packet
    :param lfilter: filter packets
    :return: None; blocks and loops until interrupted
    """
    try:
        sniff(prn=fn,
              store=0,
              # filter="udp",
              filter="arp or (udp and src port 68 and dst port 67 and src host 0.0.0.0)",
              lfilter=lfilter,
              iface=iface)
    except PermissionError:
        raise SocketPermissionError
0.004662
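# Hedged usage sketch for scan_devices: assumes scapy is installed and the
# process has raw-socket privileges; the interface name is a placeholder and
# the callback just prints a one-line summary of each ARP packet seen.
from scapy.all import ARP

def on_packet(pkt):
    print(pkt.summary())

scan_devices(on_packet, lfilter=lambda pkt: ARP in pkt, iface="eth0")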
def get_valid_statements_for_modeling(sts: List[Influence]) -> List[Influence]: """ Select INDRA statements that can be used to construct a Delphi model from a given list of statements. """ return [ s for s in sts if is_grounded_statement(s) and (s.subj_delta["polarity"] is not None) and (s.obj_delta["polarity"] is not None) ]
0.002597
def write_ds9(regions, filename, coordsys='fk5', fmt='.6f', radunit='deg'): """ Converts a `list` of `~regions.Region` to DS9 string and write to file. Parameters ---------- regions : `list` List of `regions.Region` objects filename : `str` Filename in which the string is to be written. coordsys : `str`, optional #TODO Coordinate system that overrides the coordinate frames of all regions. Default is 'fk5'. fmt : `str`, optional A python string format defining the output precision. Default is .6f, which is accurate to 0.0036 arcseconds. radunit : `str`, optional This denotes the unit of the radius. Default is deg (degrees) Examples -------- >>> from astropy import units as u >>> from astropy.coordinates import SkyCoord >>> from regions import CircleSkyRegion, write_ds9 >>> reg_sky = CircleSkyRegion(SkyCoord(1 * u.deg, 2 * u.deg), 5 * u.deg) >>> write_ds9([reg_sky], 'test_write.reg') >>> with open('test_write.reg') as f: ... print(f.read()) # Region file format: DS9 astropy/regions fk5 circle(1.000007,2.000002,5.000000) """ output = ds9_objects_to_string(regions, coordsys, fmt, radunit) with open(filename, 'w') as fh: fh.write(output)
0.000759
def _initialize_buffers(self, view_size): """ Create the buffers to cache tile drawing :param view_size: (int, int): size of the draw area :return: None """ import math from pygame import Rect tw, th = self.data.tile_size mw, mh = self.data.map_size buffer_tile_width = int(math.ceil(view_size[0] / tw) + 2) * 2 buffer_tile_height = int(math.ceil(view_size[1] / th) + 2) * 2 buffer_pixel_size = buffer_tile_width * tw, buffer_tile_height * th self.map_rect = Rect(0, 0, mw * tw, mh * th) self.view_rect.size = view_size self._tile_view = Rect(0, 0, buffer_tile_width, buffer_tile_height) self._redraw_cutoff = 1 # TODO: optimize this value self._create_buffers(view_size, buffer_pixel_size) self._half_width = view_size[0] // 2 self._half_height = view_size[1] // 2 self._x_offset = 0 self._y_offset = 0 self.redraw_tiles()
0.002008
def _retrieve_problem(self, id_):
        """Resume polling for a problem previously submitted.

        Args:
            id_: Identification of the query.

        Returns:
            :obj:`Future`
        """
        future = Future(self, id_, self.return_matrix, None)
        self.client._poll(future)
        return future
0.006061
def fake_shell(self, func, stdout=False): """ Execute a function and decorate its return value in the style of _low_level_execute_command(). This produces a return value that looks like some shell command was run, when really func() was implemented entirely in Python. If the function raises :py:class:`mitogen.core.CallError`, this will be translated into a failed shell command with a non-zero exit status. :param func: Function invoked as `func()`. :returns: See :py:attr:`COMMAND_RESULT`. """ dct = self.COMMAND_RESULT.copy() try: rc = func() if stdout: dct['stdout'] = repr(rc) except mitogen.core.CallError: LOG.exception('While emulating a shell command') dct['rc'] = 1 dct['stderr'] = traceback.format_exc() return dct
0.002125
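# Hedged sketch: inside a connection/action class that provides fake_shell
# above, wrap a pure-Python check (`path_exists` is hypothetical) so its
# result is reported in the same shape as a real shell command; a rc of 0 on
# success is assumed from the COMMAND_RESULT defaults.
result = self.fake_shell(lambda: path_exists('/etc/hosts'), stdout=True)
if result['rc'] != 0:
    LOG.warning('emulated command failed: %s', result['stderr'])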
def OnButtonCell(self, event): """Button cell event handler""" # The button text text = event.text with undo.group(_("Button")): self.grid.actions.set_attr("button_cell", text) self.grid.ForceRefresh() self.grid.update_attribute_toolbar() event.Skip()
0.006173
def wait_for(self, new_state):
        """
        Wait for an exact state `new_state` to be reached by the state
        machine.

        If the state is skipped, that is, if a state greater than `new_state`
        is written to :attr:`state`, the coroutine raises
        :class:`OrderedStateSkipped`, since it is no longer possible for the
        wait to complete successfully (see :attr:`state`).
        """
        if self._state == new_state:
            return

        if self._state > new_state:
            raise OrderedStateSkipped(new_state)

        fut = asyncio.Future(loop=self.loop)
        self._exact_waiters.append((new_state, fut))
        yield from fut
0.002899
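# Hedged sketch of waiting on the ordered state machine above; `machine` and
# the State enum are hypothetical, and the old generator-coroutine style is
# kept to match the implementation (requires a Python where asyncio.coroutine
# still exists).
import asyncio

@asyncio.coroutine
def run(machine):
    try:
        yield from machine.wait_for(State.CONNECTED)
    except OrderedStateSkipped:
        pass  # CONNECTED was jumped over; the wait can never succeed now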
def output_sizes(self): """Returns a tuple of all output sizes of all the layers.""" return tuple([l() if callable(l) else l for l in self._output_sizes])
0.012346
def create(self, name, sources=None, destinations=None, services=None, action='allow', log_options=None, authentication_options=None, connection_tracking=None, is_disabled=False, vpn_policy=None, mobile_vpn=False, add_pos=None, after=None, before=None, sub_policy=None, comment=None, **kw):
        """ 
        Create a layer 3 firewall rule
            
        :param str name: name of rule
        :param sources: source/s for rule
        :type sources: list[str, Element]
        :param destinations: destination/s for rule
        :type destinations: list[str, Element]
        :param services: service/s for rule
        :type services: list[str, Element]
        :param action: allow,continue,discard,refuse,enforce_vpn,
            apply_vpn,forward_vpn, blacklist (default: allow)
        :type action: Action or str
        :param LogOptions log_options: LogOptions object
        :param ConnectionTracking connection_tracking: custom connection tracking settings
        :param AuthenticationOptions authentication_options: options for auth if any
        :param PolicyVPN,str vpn_policy: policy element or str href; required for
            enforce_vpn, forward_vpn and apply_vpn actions
        :param bool mobile_vpn: if using a vpn action, you can set mobile_vpn to True and
            omit the vpn_policy setting if you want this VPN to apply to any mobile VPN based
            on the policy VPN associated with the engine
        :param str,Element sub_policy: sub policy required when rule has an action of 'jump'.
            Can be the FirewallSubPolicy element or href.
        :param int add_pos: position to insert the rule, starting with position 1. If
            the position value is greater than the number of rules, the rule is inserted at
            the bottom. If add_pos is not provided, rule is inserted in position 1. Mutually
            exclusive with ``after`` and ``before`` params.
        :param str after: Rule tag to add this rule after. Mutually exclusive with ``add_pos``
            and ``before`` params.
        :param str before: Rule tag to add this rule before. Mutually exclusive with ``add_pos``
            and ``after`` params.
        :param str comment: optional comment for this rule
        :raises MissingRequiredInput: when options are specified that need additional
            settings, i.e. a VPN action requires a vpn policy to be specified.
        :raises CreateRuleFailed: rule creation failure
        :return: the created ipv4 rule
        :rtype: IPv4Rule
        """
        rule_values = self.update_targets(sources, destinations, services)
        rule_values.update(name=name, comment=comment)

        if isinstance(action, Action):
            rule_action = action
        else:
            rule_action = Action()
            rule_action.action = action

        if rule_action.action not in self._actions:
            raise CreateRuleFailed('Action specified is not valid for this '
                'rule type; action: {}'.format(rule_action.action))

        if rule_action.action in ('apply_vpn', 'enforce_vpn', 'forward_vpn'):
            if vpn_policy is None and not mobile_vpn:
                raise MissingRequiredInput('You must either specify a vpn_policy or set '
                    'mobile_vpn when using a rule with a VPN action')
            if mobile_vpn:
                rule_action.mobile_vpn = True
            else:
                try:
                    vpn = element_resolver(vpn_policy) # VPNPolicy
                    rule_action.vpn = vpn
                except ElementNotFound:
                    raise MissingRequiredInput('Cannot find VPN policy specified: {}'
                        .format(vpn_policy))

        elif rule_action.action == 'jump':
            try:
                rule_action.sub_policy = element_resolver(sub_policy)
            except ElementNotFound:
                raise MissingRequiredInput('Cannot find sub policy specified: {}'
                    .format(sub_policy))

        log_options = LogOptions() if not log_options else log_options
        
        if connection_tracking is not None:
            rule_action.connection_tracking_options.update(**connection_tracking)    
        
        auth_options = AuthenticationOptions() if not authentication_options \
            else authentication_options
        
        rule_values.update(
            action=rule_action.data,
            options=log_options.data,
            authentication_options=auth_options.data,
            is_disabled=is_disabled)
        
        params = None
        href = self.href
        if add_pos is not None:
            href = self.add_at_position(add_pos)
        elif before or after:
            params = self.add_before_after(before, after)
            
        return ElementCreator(
            self.__class__,
            exception=CreateRuleFailed,
            href=href,
            params=params,
            json=rule_values)
0.007295
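# Hedged usage sketch: assumes an smc-python style policy object whose IPv4
# rule collection exposes create() above; the element classes and names
# (Network, Host, TCPService, 'allow_web') are placeholders for illustration.
rule = policy.fw_ipv4_access_rules.create(
    name='allow_web',
    sources=[Network('internal-net')],
    destinations=[Host('web-server')],
    services=[TCPService('HTTP')],
    action='allow',
    comment='permit web traffic')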
def trim1(l, proportiontocut, tail='right'):
    """
Slices off the passed proportion of items from ONE end of the passed
list (i.e., if proportiontocut=0.1, slices off 'leftmost' or 'rightmost'
10% of scores).  Slices off LESS if proportion results in a non-integer
slice index (i.e., conservatively slices off proportiontocut).

Usage:   trim1 (l,proportiontocut,tail='right')  or set tail='left'
Returns: trimmed version of list l
"""
    if tail == 'right':
        lowercut = 0
        uppercut = len(l) - int(proportiontocut * len(l))
    elif tail == 'left':
        lowercut = int(proportiontocut * len(l))
        uppercut = len(l)
    else:
        raise ValueError("tail must be 'right' or 'left'")
    return l[lowercut:uppercut]
0.002972
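# Quick runnable check of trim1: cutting 20% from ten items drops the two
# values at the chosen end, and conservative rounding keeps the rest.
data = list(range(10))
assert trim1(data, 0.2) == [0, 1, 2, 3, 4, 5, 6, 7]
assert trim1(data, 0.2, tail='left') == [2, 3, 4, 5, 6, 7, 8, 9]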
def CopyFromDateTimeString(self, time_string): """Copies a POSIX timestamp from a date and time string. Args: time_string (str): date and time value formatted as: YYYY-MM-DD hh:mm:ss.######[+-]##:## Where # are numeric digits ranging from 0 to 9 and the seconds fraction can be either 3 or 6 digits. The time of day, seconds fraction and time zone offset are optional. The default time zone is UTC. """ date_time_values = self._CopyDateTimeFromString(time_string) year = date_time_values.get('year', 0) month = date_time_values.get('month', 0) day_of_month = date_time_values.get('day_of_month', 0) hours = date_time_values.get('hours', 0) minutes = date_time_values.get('minutes', 0) seconds = date_time_values.get('seconds', 0) self._timestamp = self._GetNumberOfSecondsFromElements( year, month, day_of_month, hours, minutes, seconds) self.is_local_time = False
0.001019
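# Hedged sketch: assumes this method lives on a dfdatetime-style PosixTime
# object with a no-argument constructor; the timestamp string is illustrative
# and the time zone defaults to UTC as described in the docstring.
from dfdatetime import posix_time

pt = posix_time.PosixTime()
pt.CopyFromDateTimeString('2024-01-15 12:30:45')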
def inference(self, state_arr, limit=1000):
        '''
        Inference.

        Args:
            state_arr:    `np.ndarray` of state.
            limit:        Maximum number of inference steps.
        
        Returns:
            `list` of `np.ndarray` describing an optimal route.
        '''
        self.__inferencing_flag = True

        agent_x, agent_y = np.where(state_arr[0] == 1)
        agent_x, agent_y = agent_x[0], agent_y[0]
        self.__create_enemy(self.__map_arr)
        result_list = [(agent_x, agent_y, 0.0)]
        result_val_list = [agent_x, agent_y]
        for e in range(self.__enemy_num):
            result_val_list.append(self.__enemy_pos_list[e][0])
            result_val_list.append(self.__enemy_pos_list[e][1])
        result_val_list.append(0.0)
        result_list.append(tuple(result_val_list))

        self.t = 0
        while self.t < limit:
            next_action_arr = self.extract_possible_actions(state_arr)
            next_q_arr = self.function_approximator.inference_q(next_action_arr)
            action_arr, q = self.select_action(next_action_arr, next_q_arr)
            self.__move_enemy(action_arr)

            agent_x, agent_y = np.where(action_arr[0] == 1)
            agent_x, agent_y = agent_x[0], agent_y[0]
            result_val_list = [agent_x, agent_y]
            for e in range(self.__enemy_num):
                result_val_list.append(self.__enemy_pos_list[e][0])
                result_val_list.append(self.__enemy_pos_list[e][1])
            try:
                result_val_list.append(q[0])
            except IndexError:
                result_val_list.append(q)
            result_list.append(tuple(result_val_list))

            # Update state.
            state_arr = self.update_state(state_arr, action_arr)
            # Episode.
            self.t += 1
            # Check.
            end_flag = self.check_the_end_flag(state_arr)
            if end_flag is True:
                break

        return result_list
0.002996