text: string (lengths 78 to 104k)
score: float64 (range 0 to 0.18)
def category_label(arg, labels, nulls=None):
    """
    Format a known number of categories as strings

    Parameters
    ----------
    labels : list of string
    nulls : string, optional
        How to label any null values among the categories

    Returns
    -------
    string_categories : string value expression
    """
    op = CategoryLabel(arg, labels, nulls)
    return op.to_expr()
0.002532
def math_dataset_init(alphabet_size=26, digits=None, functions=None):
  """Initializes required objects to generate symbolic math datasets.

  Produces token set, ExprOp instances, solve_op dictionary, encoders, and
  decoders needed to generate the algebra inverse dataset.

  Args:
    alphabet_size: How many possible variables there are. Max 52.
    digits: How many numerical digits to encode as tokens, "0" through
        str(digits-1), or None to encode no digits.
    functions: Defines special functions. A dict mapping human readable string
        names, like "log", "exp", "sin", "cos", etc., to single chars. Each
        function gets a unique token, like "L" for "log".
        WARNING, Make sure these tokens do not conflict with the list of
        possible variable names.

  Returns:
    AlgebraConfig instance holding all the objects listed above.

  Raises:
    ValueError: If `alphabet_size` is not in range [2, 52].
  """
  ops_list = ["+", "-", "*", "/"]
  ops = {
      "+": ExprOp("+", 0, True),
      "-": ExprOp("-", 0, False),
      "*": ExprOp("*", 1, True),
      "/": ExprOp("/", 1, False)
  }
  solve_ops = {
      "+l": lambda l, r, to: (l, ExprNode(to, r, ops["-"])),
      "+r": lambda l, r, to: (r, ExprNode(to, l, ops["-"])),
      "-l": lambda l, r, to: (l, ExprNode(to, r, ops["+"])),
      "-r": lambda l, r, to: (r, ExprNode(l, to, ops["-"])),
      "*l": lambda l, r, to: (l, ExprNode(to, r, ops["/"])),
      "*r": lambda l, r, to: (r, ExprNode(to, l, ops["/"])),
      "/l": lambda l, r, to: (l, ExprNode(to, r, ops["*"])),
      "/r": lambda l, r, to: (r, ExprNode(l, to, ops["/"])),
  }
  alphabet = (
      [six.int2byte(ord("a") + c).decode("utf-8") for c in range(26)] +
      [six.int2byte(ord("A") + c).decode("utf-8") for c in range(26)])
  if alphabet_size > 52:
    raise ValueError(
        "alphabet_size cannot be greater than 52. Got %s." % alphabet_size)
  if alphabet_size < 2:
    raise ValueError(
        "alphabet_size cannot be less than 2. Got %s." % alphabet_size)
  if digits is not None and not 1 <= digits <= 10:
    raise ValueError("digits must be between 1 and 10. Got %s." % digits)
  vlist = alphabet[:alphabet_size]
  if digits is not None:
    dlist = [str(d) for d in range(digits)]
  else:
    dlist = []
  if functions is None:
    functions = {}
  flist = sorted(functions.values())
  pad = "_"
  tokens = [pad] + [":", "(", ")", "="] + ops_list + vlist + dlist + flist
  if len(tokens) != len(set(tokens)):
    raise ValueError("Duplicate token. Tokens: %s" % tokens)
  token_map = dict([(t, i) for i, t in enumerate(tokens)])

  def int_encoder(sequence):
    return [token_map[s] for s in sequence]

  def int_decoder(tensor_1d):
    return "".join([tokens[i] for i in tensor_1d])

  return AlgebraConfig(
      vlist=vlist,
      dlist=dlist,
      flist=flist,
      functions=functions,
      ops=ops,
      solve_ops=solve_ops,
      int_encoder=int_encoder,
      int_decoder=int_decoder)
0.007368
def unlock(name, zk_hosts=None, # in case you need to unlock without having run lock (failed execution for example) identifier=None, max_concurrency=1, ephemeral_lease=False, profile=None, scheme=None, username=None, password=None, default_acl=None): ''' Remove lease from semaphore. ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} conn_kwargs = {'profile': profile, 'scheme': scheme, 'username': username, 'password': password, 'default_acl': default_acl} if __opts__['test']: ret['result'] = None ret['comment'] = 'Released lock if it is here' return ret if identifier is None: identifier = __grains__['id'] unlocked = __salt__['zk_concurrency.unlock'](name, zk_hosts=zk_hosts, identifier=identifier, max_concurrency=max_concurrency, ephemeral_lease=ephemeral_lease, **conn_kwargs) if unlocked: ret['result'] = True else: ret['comment'] = 'Unable to find lease for path {0}'.format(name) return ret
0.003526
def _handle_start_relation(self, attrs):
    """
    Handle opening relation element

    :param attrs: Attributes of the element
    :type attrs: Dict
    """
    self._curr = {
        'attributes': dict(attrs),
        'members': [],
        'rel_id': None,
        'tags': {}
    }
    if attrs.get('id', None) is not None:
        self._curr['rel_id'] = int(attrs['id'])
        del self._curr['attributes']['id']
0.004219
def train_on_audio(self, fn: str): """Run through a single audio file""" save_test = random() > 0.8 audio = load_audio(fn) num_chunks = len(audio) // self.args.chunk_size self.listener.clear() for i, chunk in enumerate(chunk_audio(audio, self.args.chunk_size)): print('\r' + str(i * 100. / num_chunks) + '%', end='', flush=True) self.audio_buffer = np.concatenate((self.audio_buffer[len(chunk):], chunk)) conf = self.listener.update(chunk) if conf > self.args.threshold: self.samples_since_train += 1 name = splitext(basename(fn))[0] + '-' + str(i) + '.wav' name = join(self.args.folder, 'test' if save_test else '', 'not-wake-word', 'generated', name) save_audio(name, self.audio_buffer) print() print('Saved to:', name) if not save_test and self.samples_since_train >= self.args.delay_samples and \ self.args.epochs > 0: self.samples_since_train = 0 self.retrain()
0.004337
def total_surface_energy(self):
    """
    Total surface energy of the Wulff shape.

    Returns:
        (float) sum(surface_energy_hkl * area_hkl)
    """
    tot_surface_energy = 0
    for hkl in self.miller_energy_dict.keys():
        tot_surface_energy += self.miller_energy_dict[hkl] * \
            self.miller_area_dict[hkl]
    return tot_surface_energy
0.004751
def get_logfile_path(working_dir):
    """
    Get the logfile path for our service endpoint.
    """
    logfile_filename = virtualchain_hooks.get_virtual_chain_name() + ".log"
    return os.path.join(working_dir, logfile_filename)
0.026201
def add_event(self, key, event): """Add an event and its corresponding key to the store.""" assert isinstance(key, str) assert isinstance(event, bytes) if all([char.isalnum() or char == '-' for char in key]): safe_key = key else: raise ValueError("Key must be alphanumeric or a dash (-):" " {0}".format(key)) safe_event = base64.encodestring(event).decode().strip() data = "{0}\t{1}\n".format(safe_key, safe_event) # Important to make a single atomic write here self._hasher.update(data.encode()) self.f.write(data)
0.003082
def token_generator(self, texts, **kwargs):
    """Yields tokens from texts as `(text_idx, character)`
    """
    for text_idx, text in enumerate(texts):
        if self.lower:
            text = text.lower()
        for char in text:
            yield text_idx, char
0.006757
def datetime_entry(self, prompt, message=None, formats=['%x %X'], show_example=False, rofi_args=None, **kwargs): """Prompt the user to enter a date and time. Parameters ---------- prompt: string Prompt to display to the user. message: string, optional Message to display under the entry line. formats: list of strings, optional The formats that the user can enter the date and time in. These should be format strings as accepted by the datetime.datetime.strptime() function from the standard library. They are tried in order, and the first that returns a datetime object without error is selected. Note that the '%x %X' in the default list is the current locale's date and time representation. show_example: Boolean If True, the current date and time in the first format given is appended to the message. Returns ------- datetime.datetime, or None if the dialog is cancelled. """ def datetime_validator(text): # Try them in order. for format in formats: try: dt = datetime.strptime(text, format) except ValueError: continue else: # This one worked; good enough for us. return (dt, None) # None of the formats worked. return (None, 'Please enter a valid date and time.') # Add an example to the message? if show_example: message = message or "" message += "Current date and time in the correct format: " + datetime.now().strftime(formats[0]) return self.generic_entry(prompt, datetime_validator, message, rofi_args, **kwargs)
0.003717
def logfile_generator(self): """Yield each line of the file, or the next line if several files.""" if not self.args['exclude']: # ask all filters for a start_limit and fast-forward to the maximum start_limits = [f.start_limit for f in self.filters if hasattr(f, 'start_limit')] if start_limits: for logfile in self.args['logfile']: logfile.fast_forward(max(start_limits)) if len(self.args['logfile']) > 1: # merge log files by time for logevent in self._merge_logfiles(): yield logevent else: # only one file for logevent in self.args['logfile'][0]: if self.args['timezone'][0] != 0 and logevent.datetime: logevent._datetime = (logevent.datetime + timedelta(hours=self .args['timezone'][0])) yield logevent
0.001905
def profil_hebdo(df, func='mean'):
    """
    Compute the profile per day of the week

    Parameters:
    df: DataFrame of data whose index is a time series
        (see the xair module for example)
    func: function used for the computation. Either the name of a numpy
        function ('mean', 'max', ...) or the function itself
        (np.mean, np.max, ...)

    Returns:
    A DataFrame of per-day values over the week
    """
    func = _get_funky(func)
    res = df.groupby(lambda x: x.weekday).aggregate(func)
    # Replace the day numbers with day names in the index
    res.index = [cal.day_name[i] for i in range(0, 7)]
    return res
0.004651
def tsp(points, start=0): """ Find an ordering of points where each is visited and the next point is the closest in euclidean distance, and if there are multiple points with equal distance go to an arbitrary one. Assumes every point is visitable from every other point, i.e. the travelling salesman problem on a fully connected graph. It is not a MINIMUM traversal; rather it is a "not totally goofy traversal, quickly." On random points this traversal is often ~20x shorter than random ordering. Parameters --------------- points : (n, dimension) float ND points in space start : int The index of points we should start at Returns --------------- traversal : (n,) int Ordered traversal visiting every point distances : (n - 1,) float The euclidean distance between points in traversal """ # points should be float points = np.asanyarray(points, dtype=np.float64) if len(points.shape) != 2: raise ValueError('points must be (n, dimension)!') # start should be an index start = int(start) # a mask of unvisited points by index unvisited = np.ones(len(points), dtype=np.bool) unvisited[start] = False # traversal of points by index traversal = np.zeros(len(points), dtype=np.int64) - 1 traversal[0] = start # list of distances distances = np.zeros(len(points) - 1, dtype=np.float64) # a mask of indexes in order index_mask = np.arange(len(points), dtype=np.int64) # in the loop we want to call distances.sum(axis=1) # a lot and it's actually kind of slow for "reasons" # dot products with ones is equivalent and ~2x faster sum_ones = np.ones(points.shape[1]) # loop through all points for i in range(len(points) - 1): # which point are we currently on current = points[traversal[i]] # do NlogN distance query # use dot instead of .sum(axis=1) or np.linalg.norm # as it is faster, also don't square root here dist = np.dot((points[unvisited] - current) ** 2, sum_ones) # minimum distance index min_index = dist.argmin() # successor is closest unvisited point successor = index_mask[unvisited][min_index] # update the mask unvisited[successor] = False # store the index to the traversal traversal[i + 1] = successor # store the distance distances[i] = dist[min_index] # we were comparing distance^2 so take square root distances **= 0.5 return traversal, distances
0.000381
def _parse_networks(networks): ''' Common logic for parsing the networks ''' networks = salt.utils.args.split_input(networks or []) if not networks: networks = {} else: # We don't want to recurse the repack, as the values of the kwargs # being passed when connecting to the network will not be dictlists. networks = salt.utils.data.repack_dictlist(networks) if not networks: raise CommandExecutionError( 'Invalid network configuration (see documentation)' ) for net_name, net_conf in six.iteritems(networks): if net_conf is None: networks[net_name] = {} else: networks[net_name] = salt.utils.data.repack_dictlist(net_conf) if not networks[net_name]: raise CommandExecutionError( 'Invalid configuration for network \'{0}\' ' '(see documentation)'.format(net_name) ) for key in ('links', 'aliases'): try: networks[net_name][key] = salt.utils.args.split_input( networks[net_name][key] ) except KeyError: continue # Iterate over the networks again now, looking for # incorrectly-formatted arguments errors = [] for net_name, net_conf in six.iteritems(networks): if net_conf is not None: for key, val in six.iteritems(net_conf): if val is None: errors.append( 'Config option \'{0}\' for network \'{1}\' is ' 'missing a value'.format(key, net_name) ) if errors: raise CommandExecutionError( 'Invalid network configuration', info=errors) if networks: try: all_networks = [ x['Name'] for x in __salt__['docker.networks']() if 'Name' in x ] except CommandExecutionError as exc: raise CommandExecutionError( 'Failed to get list of existing networks: {0}.'.format(exc) ) else: missing_networks = [ x for x in sorted(networks) if x not in all_networks] if missing_networks: raise CommandExecutionError( 'The following networks are not present: {0}'.format( ', '.join(missing_networks) ) ) return networks
0.000369
def name(self):
    """The identifier of the machine."""
    name = self.__class__.__name__
    for i, character in enumerate(name):
        if character.isdigit():
            return name[:i] + "-" + name[i:]
    return name
0.008032
def get_params(self, params, name_request): ''' Prepare and add for further render parameters. :param params: --dictionary with parameters :type params: dict :param name_request: --type of the parameters :type name_request: str, unicode :return: ''' self.write('') for elem in params: request_type = elem['type'] if elem.get('type', None) else 'schema' name = elem['name'] if elem.get('required', None): name += '(required)' schema = elem.get('schema', None) name = ':{} {} {}:'.format(name_request, request_type, name) if schema: definition = schema['$ref'].split('/')[-1] self.write(name + ' :ref:`{}`'.format(definition), 1) self.write('') else: desc = elem.get('description', '') self.write(name) self.write('{}'.format(desc), self.indent_depth + 1) self.write('')
0.001901
def read_legacy_cfg_files(self, cfg_files, alignak_env_files=None): # pylint: disable=too-many-nested-blocks,too-many-statements # pylint: disable=too-many-branches, too-many-locals """Read and parse the Nagios legacy configuration files and store their content into a StringIO object which content will be returned as the function result :param cfg_files: list of file to read :type cfg_files: list :param alignak_env_files: name of the alignak environment file :type alignak_env_files: list :return: a buffer containing all files :rtype: str """ cfg_buffer = '' if not cfg_files: return cfg_buffer # Update configuration with the first legacy configuration file name and path # This will update macro properties self.alignak_env = 'n/a' if alignak_env_files is not None: self.alignak_env = alignak_env_files if not isinstance(alignak_env_files, list): self.alignak_env = [os.path.abspath(alignak_env_files)] else: self.alignak_env = [os.path.abspath(f) for f in alignak_env_files] self.main_config_file = os.path.abspath(cfg_files[0]) self.config_base_dir = os.path.dirname(self.main_config_file) # Universal newline mode (all new lines are managed internally) res = StringIO(u"# Configuration cfg_files buffer", newline=None) if not self.read_config_silent and cfg_files: logger.info("Reading the configuration cfg_files...") # A first pass to get all the configuration cfg_files in a buffer for cfg_file in cfg_files: # Make sure the configuration cfg_files are not repeated... if os.path.abspath(cfg_file) in self.my_cfg_files: logger.warning("- ignoring repeated file: %s", os.path.abspath(cfg_file)) continue self.my_cfg_files.append(os.path.abspath(cfg_file)) # File header res.write(u"\n") res.write(u"# imported_from=%s" % cfg_file) res.write(u"\n") if not self.read_config_silent: logger.info("- opening '%s' configuration file", cfg_file) try: # Open in Universal way for Windows, Mac, Linux-based systems file_d = open(cfg_file, 'r') buf = file_d.readlines() file_d.close() except IOError as exp: self.add_error("cannot open main file '%s' for reading: %s" % (cfg_file, exp)) continue for line in buf: try: line = line.decode('utf8', 'replace') except AttributeError: # Python 3 will raise an exception because the line is still unicode pass line = line.strip() res.write(line) res.write(u"\n") if (re.search("^cfg_file", line) or re.search("^resource_file", line)) \ and '=' in line: elts = line.split('=', 1) if os.path.isabs(elts[1]): cfg_file_name = elts[1] else: cfg_file_name = os.path.join(self.config_base_dir, elts[1]) cfg_file_name = cfg_file_name.strip() cfg_file_name = os.path.abspath(cfg_file_name) # Make sure the configuration cfg_files are not repeated... 
if cfg_file_name in self.my_cfg_files: logger.warning("- ignoring repeated file: %s", cfg_file_name) else: self.my_cfg_files.append(cfg_file_name) if not self.read_config_silent: logger.info(" reading: %s", cfg_file_name) try: # Read the file content to the buffer file_d = open(cfg_file_name, 'r') # File header res.write(u"\n") res.write(u"# imported_from=%s" % cfg_file_name) res.write(u"\n") content = file_d.read() try: content = content.decode('utf8', 'replace') except AttributeError: # Python 3 will raise an exception pass res.write(content) res.write(u"\n") file_d.close() except IOError as exp: self.add_error(u"cannot open file '%s' for reading: %s" % (cfg_file_name, exp)) elif re.search("^cfg_dir", line) and '=' in line: elts = line.split('=', 1) if os.path.isabs(elts[1]): cfg_dir_name = elts[1] else: cfg_dir_name = os.path.join(self.config_base_dir, elts[1]) # Ok, look if it's really a directory if not os.path.isdir(cfg_dir_name): self.add_error(u"cannot open directory '%s' for reading" % cfg_dir_name) continue # Now walk for it. for root, _, walk_files in os.walk(cfg_dir_name, followlinks=True): for found_file in walk_files: if not re.search(r"\.cfg$", found_file): continue cfg_file_name = os.path.join(root, found_file) # Make sure the configuration cfg_files are not repeated... if os.path.abspath(cfg_file_name) in self.my_cfg_files: logger.warning("- ignoring repeated file: %s", cfg_file_name) else: self.my_cfg_files.append(cfg_file_name) if not self.read_config_silent: logger.info(" reading: %s", cfg_file_name) try: # Read the file content to the buffer file_d = open(cfg_file_name, 'r') # File header res.write(u"\n") res.write(u"# imported_from=%s" % cfg_file_name) res.write(u"\n") content = file_d.read() try: content = content.decode('utf8', 'replace') except AttributeError: # Python 3 will raise an exception pass res.write(content) res.write(u"\n") file_d.close() except IOError as exp: self.add_error(u"cannot open file '%s' for reading: %s" % (cfg_file_name, exp)) cfg_buffer = res.getvalue() res.close() return cfg_buffer
0.00286
def make_archive(name, repo, ref, destdir): """Makes an archive of a repository in the given destdir. :param text name: Name to give the archive. For instance foo. The file that is created will be called foo.tar.gz. :param text repo: Repository to clone. :param text ref: Tag/SHA/branch to check out. :param text destdir: Directory to place archives in. """ output_path = os.path.join(destdir, name + '.tar.gz') with tmpdir() as tempdir: # Clone the repository to the temporary directory cmd_output('git', 'clone', repo, tempdir) cmd_output('git', 'checkout', ref, cwd=tempdir) # We don't want the '.git' directory # It adds a bunch of size to the archive and we don't use it at # runtime rmtree(os.path.join(tempdir, '.git')) with tarfile.open(output_path, 'w|gz') as tf: tf.add(tempdir, name) return output_path
0.00107
def signal_to_exception(signum, frame):
    """
    Called by the timeout alarm during the collector run time
    """
    if signum == signal.SIGALRM:
        raise SIGALRMException()
    if signum == signal.SIGHUP:
        raise SIGHUPException()
    if signum == signal.SIGUSR1:
        raise SIGUSR1Exception()
    if signum == signal.SIGUSR2:
        raise SIGUSR2Exception()
    raise SignalException(signum)
0.002421
def to_task(self): """Return a task object representing this MessageProcessor job.""" task_args = self.get_task_args() # check for name in task args name = task_args.get('name', MESSAGE_PROCESSOR_NAME) # if the countdown isn't in the task_args set it to the frequency if not 'countdown' in task_args: task_args['countdown'] = self.frequency task_args['name'] = "%s-%s-%s-%s" % ( name, self.tag, self.current_batch, self.time_throttle) self.update_options(task_args=task_args) return super(MessageProcessor, self).to_task()
0.004815
def get_tiles_list(element):
    """ Returns the list of all tile names from Product_Organisation element
    in metadata.xml """
    tiles = {}
    for el in element:
        g = (el.findall('.//Granules') or el.findall('.//Granule'))[0]
        name = g.attrib['granuleIdentifier']
        name_parts = name.split('_')
        mgs = name_parts[-2]
        tiles[mgs] = name
    return tiles
0.002475
def _copy(self):
    """
    Called during a PUT request where the action specifies
    a copy operation. Returns resource URI of the new file.
    """
    copypath = self.action['copypath']
    try:
        self.fs.copy(self.fp, copypath)
    except OSError:
        raise tornado.web.HTTPError(400)
    return copypath
0.008357
def transform_record(self, pid, record, links_factory=None, **kwargs):
    """Transform record into an intermediate representation."""
    context = kwargs.get('marshmallow_context', {})
    context.setdefault('pid', pid)
    return self.dump(self.preprocess_record(pid, record,
                                            links_factory=links_factory,
                                            **kwargs), context)
0.005435
def cmd_gimbal_roi(self, args): '''control roi position''' latlon = None try: latlon = self.module('map').click_position except Exception: print("No map available") return if latlon is None: print("No map click position available") return self.master.mav.mount_control_send(self.target_system, self.target_component, latlon[0]*1e7, latlon[1]*1e7, 0, # altitude zero for now 0)
0.004292
def persist(self): ''' Persist the modified schedule into <<configdir>>/<<default_include>>/_schedule.conf ''' config_dir = self.opts.get('conf_dir', None) if config_dir is None and 'conf_file' in self.opts: config_dir = os.path.dirname(self.opts['conf_file']) if config_dir is None: config_dir = salt.syspaths.CONFIG_DIR minion_d_dir = os.path.join( config_dir, os.path.dirname(self.opts.get('default_include', salt.config.DEFAULT_MINION_OPTS['default_include']))) if not os.path.isdir(minion_d_dir): os.makedirs(minion_d_dir) schedule_conf = os.path.join(minion_d_dir, '_schedule.conf') log.debug('Persisting schedule') schedule_data = self._get_schedule(include_pillar=False, remove_hidden=True) try: with salt.utils.files.fopen(schedule_conf, 'wb+') as fp_: fp_.write( salt.utils.stringutils.to_bytes( salt.utils.yaml.safe_dump( {'schedule': schedule_data} ) ) ) except (IOError, OSError): log.error('Failed to persist the updated schedule', exc_info_on_loglevel=logging.DEBUG)
0.002803
def key_present(name, save_private=None, upload_public=None, region=None, key=None, keyid=None, profile=None): ''' Ensure key pair is present. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } exists = __salt__['boto_ec2.get_key'](name, region, key, keyid, profile) log.debug('exists is %s', exists) if upload_public is not None and 'salt://' in upload_public: try: upload_public = __salt__['cp.get_file_str'](upload_public) except IOError as e: log.debug(e) ret['comment'] = 'File {0} not found.'.format(upload_public) ret['result'] = False return ret if not exists: if __opts__['test']: ret['comment'] = 'The key {0} is set to be created.'.format(name) ret['result'] = None return ret if save_private and not upload_public: created = __salt__['boto_ec2.create_key']( name, save_private, region, key, keyid, profile ) if created: ret['result'] = True ret['comment'] = 'The key {0} is created.'.format(name) ret['changes']['new'] = created else: ret['result'] = False ret['comment'] = 'Could not create key {0} '.format(name) elif not save_private and upload_public: imported = __salt__['boto_ec2.import_key'](name, upload_public, region, key, keyid, profile) if imported: ret['result'] = True ret['comment'] = 'The key {0} is created.'.format(name) ret['changes']['old'] = None ret['changes']['new'] = imported else: ret['result'] = False ret['comment'] = 'Could not create key {0} '.format(name) else: ret['result'] = False ret['comment'] = 'You can either upload or download a private key ' else: ret['result'] = True ret['comment'] = 'The key name {0} already exists'.format(name) return ret
0.000437
async def patch_register(self, register: Dict, request: 'Request'): """ Store all options in the "choices" sub-register. We store both the text and the potential intent, in order to match both regular quick reply clicks but also the user typing stuff on his keyboard that matches more or less the content of quick replies. """ register['choices'] = { o.slug: { 'intent': o.intent.key if o.intent else None, 'text': await render(o.text, request), } for o in self.options if isinstance(o, QuickRepliesList.TextOption) } return register
0.002963
def get(self, thing_id='0'): """ Handle a GET request, including websocket requests. thing_id -- ID of the thing this request is for """ self.thing = self.get_thing(thing_id) if self.thing is None: self.set_status(404) self.finish() return if self.request.headers.get('Upgrade', '').lower() == 'websocket': yield tornado.websocket.WebSocketHandler.get(self) return self.set_header('Content-Type', 'application/json') ws_href = '{}://{}'.format( 'wss' if self.request.protocol == 'https' else 'ws', self.request.headers.get('Host', '') ) description = self.thing.as_thing_description() description['links'].append({ 'rel': 'alternate', 'href': '{}{}'.format(ws_href, self.thing.get_href()), }) self.write(json.dumps(description)) self.finish()
0.002051
def legend(self, txt=None):
    """Set/get ``Actor`` legend text.

    :param str txt: legend text.

    Size and positions can be modified by setting attributes
    ``Plotter.legendSize``, ``Plotter.legendBC`` and ``Plotter.legendPos``.

    .. hint:: |fillholes.py|_
    """
    if txt:
        self._legend = txt
    else:
        return self._legend
    return self
0.004843
def set_sampling_strategies(self, filter, strategy_and_parms): """Set a strategy for all sensors matching the filter, including unseen sensors The strategy should persist across sensor disconnect/reconnect. filter : str Filter for sensor names strategy_and_params : seq of str or str As tuple contains (<strat_name>, [<strat_parm1>, ...]) where the strategy names and parameters are as defined by the KATCP spec. As str contains the same elements in space-separated form. Returns ------- done : tornado Future Resolves when done """ sensor_list = yield self.list_sensors(filter=filter) sensor_dict = {} for sens in sensor_list: # Set the strategy on each sensor try: sensor_name = sens.object.normalised_name yield self.set_sampling_strategy(sensor_name, strategy_and_parms) sensor_dict[sensor_name] = strategy_and_parms except Exception as exc: self._logger.exception( 'Unhandled exception trying to set sensor strategies {!r} for {} ({})' .format(strategy_and_parms, sens, exc)) sensor_dict[sensor_name] = None # Otherwise, depend on self._add_sensors() to handle it from the cache when the sensor appears\ raise tornado.gen.Return(sensor_dict)
0.00545
def well_images(self, well_row, well_column):
    """Get list of paths to images in specified well.

    Parameters
    ----------
    well_row : int
        Starts at 0. Same as --V in files.
    well_column : int
        Starts at 0. Same as --U in files.

    Returns
    -------
    list of strings
        Paths to images or empty list if no images are found.
    """
    return list(i for i in self.images
                if attribute(i, 'u') == well_column and
                attribute(i, 'v') == well_row)
0.00519
def sam2fastq(line):
    """
    print fastq from sam
    """
    fastq = []
    fastq.append('@%s' % line[0])
    fastq.append(line[9])
    fastq.append('+%s' % line[0])
    fastq.append(line[10])
    return fastq
0.004673
def downgrade():
    """alexm: i believe this method is never called"""
    with op.batch_alter_table(t2_name) as batch_op:
        batch_op.drop_column('do_not_use')

    with op.batch_alter_table(t1_name) as batch_op:
        batch_op.drop_column('enabled')
0.003861
def _normalize(self, string):
    '''
    Returns a sanitized string.
    '''
    string = super(VerbixDe, self)._normalize(string)
    string = string.replace('sie; Sie', 'sie')
    string = string.strip()
    return string
0.033816
def get(cls, tag_id_or_URI, label=None):
    '''Return the tag with the given id or URI, or None.

    :param tag_id_or_URI: the id or URI of the tag to return
    :type tag_id_or_URI: string

    :returns: the tag object with the given id or URI, or None if there is
        no tag with that id or URI
    :rtype: ckan.model.tag.Tag

    '''
    # First try to get the tag by ID.
    semantictag = SemanticTag.by_id(tag_id_or_URI)
    if semantictag:
        return semantictag
    else:
        semantictag = SemanticTag.by_URI(tag_id_or_URI)
        return semantictag
0.029795
def Network_setCookie(self, name, value, **kwargs): """ Function path: Network.setCookie Domain: Network Method name: setCookie WARNING: This function is marked 'Experimental'! Parameters: Required arguments: 'name' (type: string) -> Cookie name. 'value' (type: string) -> Cookie value. Optional arguments: 'url' (type: string) -> The request-URI to associate with the setting of the cookie. This value can affect the default domain and path values of the created cookie. 'domain' (type: string) -> Cookie domain. 'path' (type: string) -> Cookie path. 'secure' (type: boolean) -> True if cookie is secure. 'httpOnly' (type: boolean) -> True if cookie is http-only. 'sameSite' (type: CookieSameSite) -> Cookie SameSite type. 'expires' (type: TimeSinceEpoch) -> Cookie expiration date, session cookie if not set Returns: 'success' (type: boolean) -> True if successfully set cookie. Description: Sets a cookie with the given cookie data; may overwrite equivalent cookies if they exist. """ assert isinstance(name, (str,) ), "Argument 'name' must be of type '['str']'. Received type: '%s'" % type( name) assert isinstance(value, (str,) ), "Argument 'value' must be of type '['str']'. Received type: '%s'" % type( value) if 'url' in kwargs: assert isinstance(kwargs['url'], (str,) ), "Optional argument 'url' must be of type '['str']'. Received type: '%s'" % type( kwargs['url']) if 'domain' in kwargs: assert isinstance(kwargs['domain'], (str,) ), "Optional argument 'domain' must be of type '['str']'. Received type: '%s'" % type( kwargs['domain']) if 'path' in kwargs: assert isinstance(kwargs['path'], (str,) ), "Optional argument 'path' must be of type '['str']'. Received type: '%s'" % type( kwargs['path']) if 'secure' in kwargs: assert isinstance(kwargs['secure'], (bool,) ), "Optional argument 'secure' must be of type '['bool']'. Received type: '%s'" % type( kwargs['secure']) if 'httpOnly' in kwargs: assert isinstance(kwargs['httpOnly'], (bool,) ), "Optional argument 'httpOnly' must be of type '['bool']'. Received type: '%s'" % type( kwargs['httpOnly']) expected = ['url', 'domain', 'path', 'secure', 'httpOnly', 'sameSite', 'expires'] passed_keys = list(kwargs.keys()) assert all([(key in expected) for key in passed_keys] ), "Allowed kwargs are ['url', 'domain', 'path', 'secure', 'httpOnly', 'sameSite', 'expires']. Passed kwargs: %s" % passed_keys subdom_funcs = self.synchronous_command('Network.setCookie', name=name, value=value, **kwargs) return subdom_funcs
0.037173
def getaddrlist(self, name): """Get a list of addresses from a header. Retrieves a list of addresses from a header, where each address is a tuple as returned by getaddr(). Scans all named headers, so it works properly with multiple To: or Cc: headers for example. """ raw = [] for h in self.getallmatchingheaders(name): if h[0] in ' \t': raw.append(h) else: if raw: raw.append(', ') i = h.find(':') if i > 0: addr = h[i+1:] raw.append(addr) alladdrs = ''.join(raw) a = AddressList(alladdrs) return a.addresslist
0.002714
def set_system_conf(self, key=None, value=None, d=None): """ Sets a java system property as a ('key', 'value') pair of using a dictionary {'key': 'value', ...} :param key: string :param value: string :param d: dictionary :return: None """ if isinstance(d, dict): self._system.update(d) elif isinstance(key, str) and isinstance(value, str): self._system[key] = value else: raise TypeError("key, value must be strings")
0.005618
def getElementsByAttr(self, attrName, attrValue, root='root'): ''' getElementsByAttr - Searches the full tree for elements with a given attribute name and value combination. This is always a full scan. @param attrName <lowercase str> - A lowercase attribute name @param attrValue <str> - Expected value of attribute @param root <AdvancedTag/'root'> - Search starting at a specific node, if provided. if string 'root', the root of the parsed tree will be used. ''' (root, isFromRoot) = self._handleRootArg(root) elements = [] if isFromRoot is True and root.getAttribute(attrName) == attrValue: elements.append(root) getElementsByAttr = self.getElementsByAttr for child in root.children: if child.getAttribute(attrName) == attrValue: elements.append(child) elements += getElementsByAttr(attrName, attrValue, child) return TagCollection(elements)
0.004803
def perform_experiment(self, engine_list): """ Performs nearest neighbour experiments with custom vector data for all engines in the specified list. Returns self.result contains list of (distance_ratio, search_time) tuple. All are the averaged values over all request vectors. search_time is the average retrieval/search time compared to the average exact search time. """ # We will fill this array with measures for all the engines. result = [] # For each engine, first index vectors and then retrieve neighbours for engine in engine_list: print('Engine %d / %d' % (engine_list.index(engine), len(engine_list))) # Clean storage engine.clean_all_buckets() # Use this to compute average distance_ratio avg_distance_ratio = 0.0 # Use this to compute average result set size avg_result_size = 0.0 # Use this to compute average search time avg_search_time = 0.0 # Index all vectors and store them for index in range(self.vectors.shape[1]): engine.store_vector(self.vectors[:, index], 'data_%d' % index) # Look for N nearest neighbours for query vectors for index in self.query_indices: # We have to time the search search_time_start = time.time() # Get nearest N according to engine nearest = engine.neighbours(self.vectors[:, index]) # Get search time search_time = time.time() - search_time_start # Get average distance ratio (with respect to radius # of real N closest neighbours) distance_ratio = 0.0 for n in nearest: # If the vector is outside the real neighbour radius if n[2] > self.nearest_radius[index]: # Compute distance to real neighbour radius d = (n[2] - self.nearest_radius[index]) # And normalize it. 1.0 means: distance to # real neighbour radius is identical to radius d /= self.nearest_radius[index] # If all neighbours are in the radius, the # distance ratio is 0.0 distance_ratio += d # Normalize distance ratio over all neighbours distance_ratio /= len(nearest) # Add to accumulator avg_distance_ratio += distance_ratio # Add to accumulator avg_result_size += len(nearest) # Add to accumulator avg_search_time += search_time # Normalize distance ratio over query set avg_distance_ratio /= float(len(self.query_indices)) # Normalize avg result size avg_result_size /= float(len(self.query_indices)) # Normalize search time over query set avg_search_time = avg_search_time / float(len(self.query_indices)) # Normalize search time with respect to exact search avg_search_time /= self.exact_search_time_per_vector print(' distance_ratio=%f, result_size=%f, time=%f' % (avg_distance_ratio, avg_result_size, avg_search_time)) result.append((avg_distance_ratio, avg_result_size, avg_search_time)) return result
0.001593
def percentile(values, percent):
    """
    PERCENTILE WITH INTERPOLATION
    RETURN VALUE AT, OR ABOVE, percentile OF THE VALUES

    snagged from http://code.activestate.com/recipes/511478-finding-the-percentile-of-the-values/
    """
    N = sorted(values)
    if not N:
        return None
    k = (len(N) - 1) * percent
    f = int(math.floor(k))
    c = int(math.ceil(k))
    if f == c:
        return N[int(k)]
    d0 = N[f] * (c - k)
    d1 = N[c] * (k - f)
    return d0 + d1
0.004124
def find_argument_target(xmrs, nodeid, rargname): """ Return the target of an argument (rather than just the variable). Note: If the argument value is an intrinsic variable whose target is an EP that has a quantifier, the non-quantifier EP's nodeid will be returned. With this nodeid, one can then use :meth:`Xmrs.nodeid() <delphin.mrs.xmrs.Xmrs.nodeid>` to get its quantifier's nodeid. Args: xmrs (:class:`~delphin.mrs.xmrs.Xmrs`): semantic structure to use nodeid: nodeid of the argument. rargname: role name of the argument. Returns: The object that is the target of the argument. Possible values include: ================== ===== ============================= Argument value e.g. Target ------------------ ----- ----------------------------- intrinsic variable x4 nodeid; of the EP with the IV hole variable h0 nodeid; HCONS's labelset head label h1 nodeid; label's labelset head unbound variable i3 the variable itself constant "IBM" the constant itself ================== ===== ============================= """ tgt = xmrs.args(nodeid)[rargname] if tgt in xmrs.variables(): try: return xmrs.nodeid(tgt) except KeyError: pass try: tgt = xmrs.hcon(tgt).lo return next(iter(xmrs.labelset_heads(tgt)), None) except KeyError: pass try: return next(iter(xmrs.labelset_heads(tgt))) except (KeyError, StopIteration): pass return tgt
0.000578
def pinyin_to_zhuyin(s):
    """Convert all Pinyin syllables in *s* to Zhuyin.

    Spaces are added between connected syllables and
    syllable-separating apostrophes are removed.
    """
    return _convert(s, zhon.pinyin.syllable, pinyin_syllable_to_zhuyin,
                    remove_apostrophes=True, separate_syllables=True)
0.003012
def normalize_list_of_dicts(value, default_key, default_value=UNDEFINED): """ Converts given value to a list of dictionaries as follows: * ``[{...}]`` → ``[{...}]`` * ``{...}`` → ``[{...}]`` * ``'xyz'`` → ``[{default_key: 'xyz'}]`` * ``None`` → ``[{default_key: default_value}]`` (if specified) * ``None`` → ``[]`` :param default_value: only Unicode, i.e. `str` in Python 3.x and **only** `unicode` in Python 2.x """ if value is None: if default_value is UNDEFINED: return [] value = default_value if isinstance(value, dict): return [value] if isinstance(value, text_type): return [{default_key: value}] if isinstance(value, list): if not all(isinstance(x, dict) for x in value): def _fix(x): return {default_key: x} if isinstance(x, text_type) else x return list(map(_fix, value)) return value
0.002066
def get_by_addr(self, address): """ Lookup a set of notifications by address Args: address (UInt160 or str): hash of address for notifications Returns: list: a list of notifications """ addr = address if isinstance(address, str) and len(address) == 34: addr = Helper.AddrStrToScriptHash(address) if not isinstance(addr, UInt160): raise Exception("Incorrect address format") addrlist_snapshot = self.db.prefixed_db(NotificationPrefix.PREFIX_ADDR).snapshot() results = [] for val in addrlist_snapshot.iterator(prefix=bytes(addr.Data), include_key=False): if len(val) > 4: try: event = SmartContractEvent.FromByteArray(val) results.append(event) except Exception as e: logger.error("could not parse event: %s %s" % (e, val)) return results
0.004049
def resume_writing(self, exc=None):
    '''Resume writing.

    Successive calls to this method will fail unless
    :meth:`pause_writing` is called first.
    '''
    assert self._paused
    self._paused = False
    waiter = self._waiter
    if waiter is not None:
        self._waiter = None
        if not waiter.done():
            if exc is None:
                waiter.set_result(None)
            else:
                waiter.set_exception(exc)
    self.transport.resume_reading()
    self._write_from_buffer()
0.003413
def list_user(context, id, sort, limit, where, verbose): """list_user(context, id, sort, limit, where, verbose) List users attached to a remoteci. >>> dcictl remoteci-list-user [OPTIONS] :param string id: ID of the remoteci to list the user from [required] :param string sort: Field to apply sort :param integer limit: Max number of rows to return :param string where: An optional filter criteria :param boolean verbose: Display verbose output """ result = remoteci.list_users(context, id=id, sort=sort, limit=limit, where=where) utils.format_output(result, context.format, verbose=verbose)
0.001445
def dataRestoreRecords(mimeData): """ Extracts the records from the inputed drag & drop mime data information. This will lookup the models based on their primary key information and generate the element class. :param mimeData | <QMimeData> :return [<orb.Table>, ..] """ if not mimeData.hasFormat('application/x-orb-records'): return [] from orb import Orb repros = nativestring(mimeData.data('application/x-orb-records')) repros = repros.split(';') output =[] for repro in repros: cls, pkey = re.match('^(\w+)\((.*)\)$', repro).groups() pkey = eval(pkey) model = Orb.instance().model(cls) if not model: continue record = model(pkey) if record.isRecord(): output.append(record) return output
0.014451
def _loadData(self, data): """ Load attribute values from Plex XML response. """ self._data = data self.codec = data.attrib.get('codec') self.codecID = data.attrib.get('codecID') self.id = cast(int, data.attrib.get('id')) self.index = cast(int, data.attrib.get('index', '-1')) self.language = data.attrib.get('language') self.languageCode = data.attrib.get('languageCode') self.selected = cast(bool, data.attrib.get('selected', '0')) self.streamType = cast(int, data.attrib.get('streamType')) self.type = cast(int, data.attrib.get('streamType'))
0.00316
def list2html(lst):
    """ convert a list to html using table formatting """
    txt = '<TABLE width=100% border=0>'
    for l in lst:
        txt += '<TR>\n'
        if type(l) is str:
            txt += '<TD>' + l + '</TD>\n'
        elif type(l) is list:
            txt += '<TD>'
            for i in l:
                txt += i + ', '
            txt += '</TD>'
        else:
            txt += '<TD>' + str(l) + '</TD>\n'
        txt += '</TR>\n'
    txt += '</TABLE><BR>\n'
    return txt
0.015936
def asjsonld(self):
    """Create JSON-LD with the original source data."""
    source = {}
    if self.__source__:
        source.update(self.__source__)
    source.update(asjsonld(self))
    return source
0.008696
def set_const(const, val): '''Convenience wrapper to reliably set the value of a constant from outside of package scope''' try: cur = getattr(_c, const) except AttributeError: raise FSQEnvError(errno.ENOENT, u'no such constant:'\ u' {0}'.format(const)) except TypeError: raise TypeError(errno.EINVAL, u'const name must be a string or'\ u' unicode object, not:'\ u' {0}'.format(const.__class__.__name__)) should_be = cur.__class__ try: if not isinstance(val, should_be): if should_be is unicode or cur is None: val = coerce_unicode(val, _c.FSQ_CHARSET) elif should_be is int and const.endswith('MODE'): val = int(val, 8) elif isinstance(cur, numbers.Integral): val = int(val) else: should_be(val) except (TypeError, ValueError, ): raise FSQEnvError(errno.EINVAL, u'invalid type for constant {0},'\ u' should be {1}, not:'\ u' {2}'.format(const, should_be.__name__, val.__class__.__name__)) setattr(_c, const, val) return val
0.004669
def smart_content_encoding(self):
    """Smart content encoding."""
    encoding = self.content_encoding
    if not encoding:
        base_list = self.basename.split('.')
        while (not encoding) and len(base_list) > 1:
            _, encoding = mimetypes.guess_type('.'.join(base_list))
            base_list.pop()
    return encoding
0.005376
def get_items(self, query_params=None):
    '''
    Get all the items for this label. Returns a list of dictionaries.
    Each dictionary has the values for an item.
    '''
    return self.fetch_json(
        uri_path=self.base_uri + '/checkItems',
        query_params=query_params or {}
    )
0.006116
def _pop_params(cls, kwargs): """ Pop entries from the `kwargs` passed to cls.__new__ based on the values in `cls.params`. Parameters ---------- kwargs : dict The kwargs passed to cls.__new__. Returns ------- params : list[(str, object)] A list of string, value pairs containing the entries in cls.params. Raises ------ TypeError Raised if any parameter values are not passed or not hashable. """ params = cls.params if not isinstance(params, Mapping): params = {k: NotSpecified for k in params} param_values = [] for key, default_value in params.items(): try: value = kwargs.pop(key, default_value) if value is NotSpecified: raise KeyError(key) # Check here that the value is hashable so that we fail here # instead of trying to hash the param values tuple later. hash(value) except KeyError: raise TypeError( "{typename} expected a keyword parameter {name!r}.".format( typename=cls.__name__, name=key ) ) except TypeError: # Value wasn't hashable. raise TypeError( "{typename} expected a hashable value for parameter " "{name!r}, but got {value!r} instead.".format( typename=cls.__name__, name=key, value=value, ) ) param_values.append((key, value)) return tuple(param_values)
0.001097
async def resolve( self, host, port=80, family=None, qtype='A', logging=True ): """Return resolving IP address(es) from host name.""" if self.host_is_ip(host): return host _host = self._cached_hosts.get(host) if _host: return _host resp = await self._resolve(host, qtype) if resp: hosts = [ { 'hostname': host, 'host': r.host, 'port': port, 'family': family, 'proto': socket.IPPROTO_IP, 'flags': socket.AI_NUMERICHOST, } for r in resp ] if family: self._cached_hosts[host] = hosts else: self._cached_hosts[host] = hosts[0]['host'] if logging: log.debug( '%s: Host resolved: %s' % (host, self._cached_hosts[host]) ) else: if logging: log.warning('%s: Could not resolve host' % host) return self._cached_hosts.get(host)
0.002586
def pluralize(count, item_type):
    """Pluralizes the item_type if the count does not equal one.

    For example `pluralize(1, 'apple')` returns '1 apple', while
    `pluralize(0, 'apple')` returns '0 apples'.

    :return The count and inflected item_type together as a string
    :rtype string
    """
    def pluralize_string(x):
        if x.endswith('s'):
            return x + 'es'
        else:
            return x + 's'

    text = '{} {}'.format(count, item_type if count == 1
                          else pluralize_string(item_type))
    return text
0.016
def _get_limits_spot(self): """ Return a dict of limits for spot requests only. This method should only be used internally by :py:meth:~.get_limits`. :rtype: dict """ limits = {} limits['Max spot instance requests per region'] = AwsLimit( 'Max spot instance requests per region', self, 20, self.warning_threshold, self.critical_threshold, limit_type='Spot instance requests' ) limits['Max active spot fleets per region'] = AwsLimit( 'Max active spot fleets per region', self, 1000, self.warning_threshold, self.critical_threshold, ) limits['Max launch specifications per spot fleet'] = AwsLimit( 'Max launch specifications per spot fleet', self, 50, self.warning_threshold, self.critical_threshold, ) limits['Max target capacity per spot fleet'] = AwsLimit( 'Max target capacity per spot fleet', self, 3000, self.warning_threshold, self.critical_threshold ) limits['Max target capacity for all spot fleets in region'] = AwsLimit( 'Max target capacity for all spot fleets in region', self, 5000, self.warning_threshold, self.critical_threshold ) return limits
0.001317
def info(self, text): """ Posts an info message adding a timestamp and logging level to it for both file and console handlers. Logger uses a redraw rate because of console flickering. That means it will not draw new messages or progress at the very time they are being logged but their timestamp will be captured at the right time. Logger will redraw at a given time period AND when new messages or progress are logged. If you still want to force redraw immediately (may produce flickering) then call 'flush' method. :param text: The text to log into file and console. """ self.queue.put(dill.dumps(LogMessageCommand(text=text, level=logging.INFO)))
0.009709
def get_all_knoreq_user_objects(self, include_machine = False): """ Fetches all user objects with useraccountcontrol DONT_REQ_PREAUTH flag set from the AD, and returns MSADUser object. """ logger.debug('Polling AD for all user objects, machine accounts included: %s'% include_machine) if include_machine == True: ldap_filter = r'(userAccountControl:1.2.840.113556.1.4.803:=4194304)' else: ldap_filter = r'(&(userAccountControl:1.2.840.113556.1.4.803:=4194304)(!(sAMAccountName = *$)))' attributes = MSADUser.ATTRS for entry in self.pagedsearch(ldap_filter, attributes): # TODO: return ldapuser object yield MSADUser.from_ldap(entry, self._ldapinfo) logger.debug('Finished polling for entries!')
0.032967
def Weibull(lamda, k, tag=None):
    """
    A Weibull random variate

    Parameters
    ----------
    lamda : scalar
        The scale parameter
    k : scalar
        The shape parameter
    """
    assert (
        lamda > 0 and k > 0
    ), 'Weibull "lamda" and "k" parameters must be greater than zero'
    return uv(ss.exponweib(lamda, k), tag=tag)
0.005556
def branchScale(self):
    """See docs for `Model` abstract base class."""
    bs = -(self.Phi_x * scipy.diagonal(self.Pxy[0])).sum() * self.mu
    assert bs > 0
    return bs
0.010471
def human_readable_number(number, suffix=""):
    """
    Format the given number into a human-readable string.

    Code adapted from http://stackoverflow.com/a/1094933

    :param variant number: the number (int or float)
    :param string suffix: the unit of the number
    :rtype: string
    """
    for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
        if abs(number) < 1024.0:
            return "%3.1f%s%s" % (number, unit, suffix)
        number /= 1024.0
    return "%.1f%s%s" % (number, "Y", suffix)
0.001938
def load_url(self, url, force=False, reload_seconds=0, callback_function=None): """ Starts loading a URL with an optional reload time in seconds. Setting force to True may load pages which block iframe embedding, but will prevent reload from working and will cause calls to load_url() to reload the app. """ def launch_callback(): """Loads requested URL after app launched.""" should_reload = not force and reload_seconds not in (0, None) reload_milliseconds = (0 if not should_reload else reload_seconds * 1000) msg = { "url": url, "force": force, "reload": should_reload, "reload_time": reload_milliseconds } self.send_message(msg, inc_session_id=True, callback_function=callback_function) self.launch(callback_function=launch_callback)
0.002887
def contains_extractor(document):
    """A basic document feature extractor that returns a dict of words that
    the document contains."""
    tokens = _get_document_tokens(document)
    features = dict((u'contains({0})'.format(w), True) for w in tokens)
    return features
0.003636
def sync_user(self, url, token, encoding_aes_key, media_id, to_invite=True):
    """
    Incrementally update members

    https://work.weixin.qq.com/api/doc#90000/90135/90980

    :param url: Protocol and address at which the enterprise application
        receives WeChat Work push requests; http and https are supported
    :param token: Used to generate the signature
    :param encoding_aes_key: Used to encrypt the message body; the
        Base64-encoded AES key
    :param media_id: media_id of the uploaded csv file
    :param to_invite: Whether to invite newly added members to use WeChat Work
        (the invitation is sent via WeChat service notification, SMS or email,
        automatically once per day for at most 3 working days). Defaults to True.
    :return: The returned JSON data packet
    """
    return self._post(
        'batch/syncuser',
        data={
            'media_id': media_id,
            'to_invite': to_invite,
            'callback': {
                'url': url,
                'token': token,
                'encodingaeskey': encoding_aes_key
            }
        }
    )
0.003584
def get_diff_idxs(array, rtol, atol): """ Given an array with (C, N, L) values, being the first the reference value, compute the relative differences and discard the one below the tolerance. :returns: indices where there are sensible differences. """ C, N, L = array.shape diff_idxs = set() # indices of the sites with differences for c in range(1, C): for n in range(N): if not numpy.allclose(array[c, n], array[0, n], rtol, atol): diff_idxs.add(n) return numpy.fromiter(diff_idxs, int)
0.001786
def create(self, store_id, data): """ Add a new customer to a store. :param store_id: The store id. :type store_id: :py:class:`str` :param data: The request body parameters :type data: :py:class:`dict` data = { "id": string*, "email_address": string*, "opt_in_status": boolean* } """ self.store_id = store_id if 'id' not in data: raise KeyError('The store customer must have an id') if 'email_address' not in data: raise KeyError('The store customer must have an email_address') check_email(data['email_address']) if 'opt_in_status' not in data: raise KeyError('The store customer must have an opt_in_status') if data['opt_in_status'] not in [True, False]: raise TypeError('The opt_in_status must be True or False') response = self._mc_client._post(url=self._build_path(store_id, 'customers'), data=data) if response is not None: self.customer_id = response['id'] else: self.customer_id = None return response
0.002564
def pipe_to_process(self, payload):
    """Send something to stdin of a specific process."""
    message = payload['input']
    key = payload['key']

    if not self.process_handler.is_running(key):
        return {'message': 'No running process for this key',
                'status': 'error'}

    self.process_handler.send_to_process(message, key)
    return {'message': 'Message sent',
            'status': 'success'}
0.004376
def StartRun(self, wait_for_start_event, signal_event, wait_for_write_event): """Starts a new run for the given cron job.""" # Signal that the cron thread has started. This way the cron scheduler # will know that the task is not sitting in a threadpool queue, but is # actually executing. wait_for_start_event.set() # Wait until the cron scheduler acknowledges the run. If it doesn't # acknowledge, just return (it means that the cron scheduler considers # this task as "not started" and has returned the lease so that another # worker can pick it up). if not signal_event.wait(TASK_STARTUP_WAIT): return try: logging.info("Processing cron job: %s", self.job.cron_job_id) self.run_state.started_at = rdfvalue.RDFDatetime.Now() self.run_state.status = "RUNNING" data_store.REL_DB.WriteCronJobRun(self.run_state) data_store.REL_DB.UpdateCronJob( self.job.cron_job_id, last_run_time=rdfvalue.RDFDatetime.Now(), current_run_id=self.run_state.run_id, forced_run_requested=False) finally: # Notify the cron scheduler that all the DB updates are done. At this # point the cron scheduler can safely return this job's lease. wait_for_write_event.set() try: self.Run() self.run_state.status = "FINISHED" except LifetimeExceededError: self.run_state.status = "LIFETIME_EXCEEDED" stats_collector_instance.Get().IncrementCounter( "cron_job_failure", fields=[self.job.cron_job_id]) except Exception as e: # pylint: disable=broad-except logging.exception("Cronjob %s failed with an error: %s", self.job.cron_job_id, e) stats_collector_instance.Get().IncrementCounter( "cron_job_failure", fields=[self.job.cron_job_id]) self.run_state.status = "ERROR" self.run_state.backtrace = str(e) finally: self.run_state.finished_at = rdfvalue.RDFDatetime.Now() elapsed = self.run_state.finished_at - self.run_state.started_at stats_collector_instance.Get().RecordEvent( "cron_job_latency", elapsed.seconds, fields=[self.job.cron_job_id]) if self.job.lifetime: expiration_time = self.run_state.started_at + self.job.lifetime if self.run_state.finished_at > expiration_time: self.run_state.status = "LIFETIME_EXCEEDED" stats_collector_instance.Get().IncrementCounter( "cron_job_timeout", fields=[self.job.cron_job_id]) data_store.REL_DB.WriteCronJobRun(self.run_state) current_job = data_store.REL_DB.ReadCronJob(self.job.cron_job_id) # If no other job was started while we were running, update last status # information. if current_job.current_run_id == self.run_state.run_id: data_store.REL_DB.UpdateCronJob( self.job.cron_job_id, current_run_id=None, last_run_status=self.run_state.status)
0.008778
def get_hostfirmware(self,callb=None): """Convenience method to request the device firmware info from the device This method will check whether the value has already been retrieved from the device, if so, it will simply return it. If no, it will request the information from the device and request that callb be executed when a response is received. The default callback will simply cache the value. :param callb: Callable to be used when the response is received. If not set, self.resp_set_label will be used. :type callb: callable :returns: The cached value :rtype: str """ if self.host_firmware_version is None: mypartial=partial(self.resp_set_hostfirmware) if callb: mycallb=lambda x,y:(mypartial(y),callb(x,y)) else: mycallb=lambda x,y:mypartial(y) response = self.req_with_resp(GetHostFirmware, StateHostFirmware,mycallb ) return (self.host_firmware_version,self.host_firmware_build_timestamp)
0.020928
def _recursive_dict_update(dict_, other, **kwargs):
    """Deep/recursive version of ``dict.update``.

    If a key is present in both dictionaries, and points to
    "child" dictionaries, those will be appropriately merged.

    :param overwrite: Whether to overwrite existing dictionary values
    """
    overwrite = kwargs['overwrite']

    for key, other_value in iteritems(other):
        if key in dict_:
            value = dict_[key]
            if is_mapping(value) and is_mapping(other_value):
                _recursive_dict_update(value, other_value,
                                       overwrite=overwrite)
                continue
        if overwrite:
            dict_[key] = other_value
        else:
            dict_.setdefault(key, other_value)
0.00137
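A minimal usage sketch for the recursive update above; it assumes `_recursive_dict_update` and its `is_mapping`/`iteritems` helpers are importable from the surrounding module, and the dictionary contents are invented for illustration.

defaults = {"db": {"host": "localhost", "port": 5432}, "debug": False}
overrides = {"db": {"port": 6432}, "verbose": True}

# Nested "db" dicts are merged key by key instead of being replaced wholesale.
_recursive_dict_update(defaults, overrides, overwrite=True)
# defaults is now:
# {"db": {"host": "localhost", "port": 6432}, "debug": False, "verbose": True}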
def exists(self): """ Determine if any rows exist for the current query. :return: Whether the rows exist or not :rtype: bool """ limit = self.limit_ result = self.limit(1).count() > 0 self.limit(limit) return result
0.006873
def format_norm(kwargs, current=None): """Format a `~matplotlib.colors.Normalize` from a set of kwargs Returns ------- norm, kwargs the formatted `Normalize` instance, and the remaining keywords """ norm = kwargs.pop('norm', current) or 'linear' vmin = kwargs.pop('vmin', None) vmax = kwargs.pop('vmax', None) clim = kwargs.pop('clim', (vmin, vmax)) or (None, None) clip = kwargs.pop('clip', None) if norm == 'linear': norm = colors.Normalize() elif norm == 'log': norm = colors.LogNorm() elif not isinstance(norm, colors.Normalize): raise ValueError("unrecognised value for norm {!r}".format(norm)) for attr, value in (('vmin', clim[0]), ('vmax', clim[1]), ('clip', clip)): if value is not None: setattr(norm, attr, value) return norm, kwargs
0.001164
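A usage sketch for `format_norm`, assuming the record's `colors` name refers to `matplotlib.colors` and the function is importable; the keyword values are illustrative only.

from matplotlib import colors

kwargs = {"norm": "log", "vmin": 1e-3, "vmax": 10.0, "cmap": "viridis"}
norm, remaining = format_norm(kwargs)

assert isinstance(norm, colors.LogNorm)
assert (norm.vmin, norm.vmax) == (1e-3, 10.0)
assert remaining == {"cmap": "viridis"}  # unrelated keywords are handed back untouched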
def run_cutadapt(job, fastqs, univ_options, cutadapt_options): """ This module runs cutadapt on the input RNA fastq files and then calls the RNA aligners. ARGUMENTS 1. fastqs: Dict of list of input RNA-Seq fastqs fastqs +- 'tumor_rna': [<JSid for 1.fastq> , <JSid for 2.fastq>] 2. univ_options: Dict of universal arguments used by almost all tools univ_options +- 'dockerhub': <dockerhub to use> 3. cutadapt_options: Dict of parameters specific to cutadapt cutadapt_options |- 'a': <sequence of 3' adapter to trim from fwd read> +- 'A': <sequence of 3' adapter to trim from rev read> RETURN VALUES 1. output_files: Dict of cutadapted fastqs output_files |- 'rna_cutadapt_1.fastq': <JSid> +- 'rna_cutadapt_2.fastq': <JSid> This module corresponds to node 2 on the tree """ job.fileStore.logToMaster('Running cutadapt on %s' %univ_options['patient']) work_dir = job.fileStore.getLocalTempDir() fq_extn = '.gz' if fastqs['gzipped'] else '' input_files = { 'rna_1.fastq' + fq_extn: fastqs['tumor_rna'][0], 'rna_2.fastq' + fq_extn: fastqs['tumor_rna'][1]} input_files = get_files_from_filestore(job, input_files, work_dir, docker=True) parameters = ['-a', cutadapt_options['a'], # Fwd read 3' adapter '-A', cutadapt_options['A'], # Rev read 3' adapter '-m', '35', # Minimum size of read '-o', docker_path('rna_cutadapt_1.fastq'), # Output for R1 '-p', docker_path('rna_cutadapt_2.fastq'), # Output for R2 input_files['rna_1.fastq'], input_files['rna_2.fastq']] docker_call(tool='cutadapt', tool_parameters=parameters, work_dir=work_dir, dockerhub=univ_options['dockerhub']) output_files = defaultdict() for fastq_file in ['rna_cutadapt_1.fastq', 'rna_cutadapt_2.fastq']: output_files[fastq_file] = job.fileStore.writeGlobalFile('/'.join([work_dir, fastq_file])) return output_files
0.002821
def connect(self, inputs): '''Create Theano variables representing the outputs of this layer. Parameters ---------- inputs : dict of Theano expressions Symbolic inputs to this layer, given as a dictionary mapping string names to Theano expressions. Each string key should be of the form "{layer_name}:{output_name}" and refers to a specific output from a specific layer in the graph. Returns ------- outputs : dict A dictionary mapping names to Theano expressions for the outputs from this layer. updates : sequence of (parameter, expression) tuples Updates that should be performed by a Theano function that computes something using this layer. ''' outputs, updates = self.transform(inputs) # transform the outputs to be a list of ordered pairs if needed. if isinstance(outputs, dict): outputs = sorted(outputs.items()) if isinstance(outputs, (TT.TensorVariable, SS.SparseVariable)): outputs = [('out', outputs)] outs = {self.full_name(name): expr for name, expr in outputs} return outs, updates
0.001616
def load_lime(self, remote_path, listen_port, dump_format='lime'): """ Load LiME kernel module from remote filesystem :type remote_path: str :param remote_path: path to LiME kernel module on remote host :type listen_port: int :param listen_port: port LiME uses to listen to remote connections :type dump_format: str :param dump_format: LiME memory dump file format """ load_command = self.commands.load_lime.value.format(remote_path, listen_port, dump_format) self.shell.execute_async(load_command)
0.002829
def get_help(self, prefix='', include_special_flags=True): """Returns a help string for all known flags. Args: prefix: str, per-line output prefix. include_special_flags: bool, whether to include description of SPECIAL_FLAGS, i.e. --flagfile and --undefok. Returns: str, formatted help message. """ flags_by_module = self.flags_by_module_dict() if flags_by_module: modules = sorted(flags_by_module) # Print the help for the main module first, if possible. main_module = sys.argv[0] if main_module in modules: modules.remove(main_module) modules = [main_module] + modules return self._get_help_for_modules(modules, prefix, include_special_flags) else: output_lines = [] # Just print one long list of flags. values = six.itervalues(self._flags()) if include_special_flags: values = itertools.chain( values, six.itervalues(_helpers.SPECIAL_FLAGS._flags())) # pylint: disable=protected-access self._render_flag_list(values, output_lines, prefix) return '\n'.join(output_lines)
0.011494
def _read_output(path): """Read CmdStan output.csv. Parameters ---------- path : str Returns ------- List[DataFrame, DataFrame, List[str], List[str], List[str]] pandas.DataFrame Sample data pandas.DataFrame Sample stats List[str] Configuration information List[str] Adaptation information List[str] Timing info """ chains = [] configuration_info = [] adaptation_info = [] timing_info = [] i = 0 # Read (first) configuration and adaption with open(path, "r") as f_obj: column_names = False for i, line in enumerate(f_obj): line = line.strip() if line.startswith("#"): if column_names: adaptation_info.append(line.strip()) else: configuration_info.append(line.strip()) elif not column_names: column_names = True pconf = _process_configuration(configuration_info) if pconf["save_warmup"]: warmup_range = range(pconf["num_warmup"] // pconf["thin"]) for _, _ in zip(warmup_range, f_obj): continue else: break # Read data with open(path, "r") as f_obj: df = pd.read_csv(f_obj, comment="#") # split dataframe if header found multiple times if df.iloc[:, 0].dtype.kind == "O": first_col = df.columns[0] col_locations = first_col == df.loc[:, first_col] col_locations = list(col_locations.loc[col_locations].index) dfs = [] for idx, last_idx in zip(col_locations, [-1] + list(col_locations[:-1])): df_ = deepcopy(df.loc[last_idx + 1 : idx - 1, :]) for col in df_.columns: df_.loc[:, col] = pd.to_numeric(df_.loc[:, col]) if len(df_): dfs.append(df_.reset_index(drop=True)) df = df.loc[idx + 1 :, :] for col in df.columns: df.loc[:, col] = pd.to_numeric(df.loc[:, col]) dfs.append(df) else: dfs = [df] for j, df in enumerate(dfs): if j == 0: # Read timing info (first) from the end of the file line_num = i + df.shape[0] + 1 for k in range(5): line = linecache.getline(path, line_num + k).strip() if len(line): timing_info.append(line) configuration_info_len = len(configuration_info) adaptation_info_len = len(adaptation_info) timing_info_len = len(timing_info) num_of_samples = df.shape[0] header_count = 1 last_line_num = ( configuration_info_len + adaptation_info_len + timing_info_len + num_of_samples + header_count ) else: # header location found in the dataframe (not first) configuration_info = [] adaptation_info = [] timing_info = [] # line number for the next dataframe in csv line_num = last_line_num + 1 # row ranges config_start = line_num config_end = config_start + configuration_info_len # read configuration_info for reading_line in range(config_start, config_end): line = linecache.getline(path, reading_line) if line.startswith("#"): configuration_info.append(line) else: msg = ( "Invalid input file. " "Header information missing from combined csv. " "Configuration: {}".format(path) ) raise ValueError(msg) pconf = _process_configuration(configuration_info) warmup_rows = pconf["save_warmup"] * pconf["num_warmup"] // pconf["thin"] adaption_start = config_end + 1 + warmup_rows adaption_end = adaption_start + adaptation_info_len # read adaptation_info for reading_line in range(adaption_start, adaption_end): line = linecache.getline(path, reading_line) if line.startswith("#"): adaptation_info.append(line) else: msg = ( "Invalid input file. " "Header information missing from combined csv. 
" "Adaptation: {}".format(path) ) raise ValueError(msg) timing_start = adaption_end + len(df) - warmup_rows timing_end = timing_start + timing_info_len # read timing_info raise_timing_error = False for reading_line in range(timing_start, timing_end): line = linecache.getline(path, reading_line) if line.startswith("#"): timing_info.append(line) else: raise_timing_error = True break no_elapsed_time = not any("elapsed time" in row.lower() for row in timing_info) if raise_timing_error or no_elapsed_time: msg = ( "Invalid input file. " "Header information missing from combined csv. " "Timing: {}".format(path) ) raise ValueError(msg) last_line_num = reading_line # Remove warmup if pconf["save_warmup"]: saved_samples = pconf["num_samples"] // pconf["thin"] df = df.iloc[-saved_samples:, :] # Split data to sample_stats and sample sample_stats_columns = [col for col in df.columns if col.endswith("__")] sample_columns = [col for col in df.columns if col not in sample_stats_columns] sample_stats = df.loc[:, sample_stats_columns] sample_df = df.loc[:, sample_columns] chains.append((sample_df, sample_stats, configuration_info, adaptation_info, timing_info)) return chains
0.001432
def pointsToVoronoiGridShapefile(lat, lon, vor_shp_path, extent=None): """ Converts points to shapefile grid via voronoi """ voronoi_centroids = _get_voronoi_centroid_array(lat, lon, extent) # set-up output polygon shp log("Creating output polygon shp {0}" .format(os.path.basename(vor_shp_path))) if os.path.exists(vor_shp_path): os.remove(vor_shp_path) drv = ogr.GetDriverByName('ESRI Shapefile') outShp = drv.CreateDataSource(vor_shp_path) osr_geographic_proj = osr.SpatialReference() osr_geographic_proj.ImportFromEPSG(4326) layer = outShp.CreateLayer('', osr_geographic_proj, ogr.wkbPolygon) layer.CreateField(ogr.FieldDefn('GRID_LAT', ogr.OFTReal)) layer.CreateField(ogr.FieldDefn('GRID_LON', ogr.OFTReal)) layerDefn = layer.GetLayerDefn() # find nodes surrounding polygon centroid # sort nodes in counterclockwise order # create polygon perimeter through nodes log("Building Voronoi polygons...") # compute voronoi voronoi_manager = Voronoi(voronoi_centroids) voronoi_vertices = voronoi_manager.vertices voronoi_regions = voronoi_manager.regions for point_id, region_index in enumerate(voronoi_manager.point_region): vert_index_list = np.array(voronoi_regions[region_index]) voronoi_centroid = voronoi_centroids[point_id] voronoi_poly_points = _get_voronoi_poly_points(vert_index_list, voronoi_vertices, voronoi_centroid) if len(voronoi_poly_points) == 4: poly = ogr.Geometry(ogr.wkbPolygon) ring = ogr.Geometry(ogr.wkbLinearRing) for node in voronoi_poly_points: ring.AddPoint(node[0], node[1]) # grab first node to close ring ring.AddPoint(voronoi_poly_points[0][0], voronoi_poly_points[0][1]) poly.AddGeometry(ring) feat = ogr.Feature(layerDefn) feat.SetField('GRID_LON', float(voronoi_centroid[0])) feat.SetField('GRID_LAT', float(voronoi_centroid[1])) feat.SetGeometry(poly) layer.CreateFeature(feat)
0.000451
def reportMemory(k, options, field=None, isBytes=False): """ Given k kilobytes, report back the correct format as string. """ if options.pretty: return prettyMemory(int(k), field=field, isBytes=isBytes) else: if isBytes: k /= 1024. if field is not None: return "%*dK" % (field - 1, k) # -1 for the "K" else: return "%dK" % int(k)
0.002415
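A small sketch of the non-pretty code path of `reportMemory`; the `Namespace` stand-in for the real options object and the sample values are assumptions, and the `prettyMemory` branch is not exercised.

from argparse import Namespace

opts = Namespace(pretty=False)                        # stand-in for the real options object

print(reportMemory(2048, opts))                       # "2048K"
print(reportMemory(2048 * 1024, opts, isBytes=True))  # bytes converted to KiB: "2048K"
print(reportMemory(2048, opts, field=8))              # right-aligned in an 8-wide field: "   2048K"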
def build_catalog_info(self, catalog_info): """ Build a CatalogInfo object """ cat = SourceFactory.build_catalog(**catalog_info) catalog_info['catalog'] = cat # catalog_info['catalog_table'] = # Table.read(catalog_info['catalog_file']) catalog_info['catalog_table'] = cat.table catalog_info['roi_model'] =\ SourceFactory.make_fermipy_roi_model_from_catalogs([cat]) catalog_info['srcmdl_name'] =\ self._name_factory.srcmdl_xml(sourcekey=catalog_info['catalog_name']) return CatalogInfo(**catalog_info)
0.005008
def request( self, method, url, data=None, headers=None, withhold_token=False, client_id=None, client_secret=None, **kwargs ): """Intercept all requests and add the OAuth 2 token if present.""" if not is_secure_transport(url): raise InsecureTransportError() if self.token and not withhold_token: log.debug( "Invoking %d protected resource request hooks.", len(self.compliance_hook["protected_request"]), ) for hook in self.compliance_hook["protected_request"]: log.debug("Invoking hook %s.", hook) url, headers, data = hook(url, headers, data) log.debug("Adding token %s to request.", self.token) try: url, headers, data = self._client.add_token( url, http_method=method, body=data, headers=headers ) # Attempt to retrieve and save new access token if expired except TokenExpiredError: if self.auto_refresh_url: log.debug( "Auto refresh is set, attempting to refresh at %s.", self.auto_refresh_url, ) # We mustn't pass auth twice. auth = kwargs.pop("auth", None) if client_id and client_secret and (auth is None): log.debug( 'Encoding client_id "%s" with client_secret as Basic auth credentials.', client_id, ) auth = requests.auth.HTTPBasicAuth(client_id, client_secret) token = self.refresh_token( self.auto_refresh_url, auth=auth, **kwargs ) if self.token_updater: log.debug( "Updating token to %s using %s.", token, self.token_updater ) self.token_updater(token) url, headers, data = self._client.add_token( url, http_method=method, body=data, headers=headers ) else: raise TokenUpdated(token) else: raise log.debug("Requesting url %s using method %s.", url, method) log.debug("Supplying headers %s and data %s", headers, data) log.debug("Passing through key word arguments %s.", kwargs) return super(OAuth2Session, self).request( method, url, headers=headers, data=data, **kwargs )
0.002154
def dependency_lines(self): """The formatted dependencies=[...] lines for this target. If there are no dependencies, this returns an empty list. """ deps = sorted(self._dependencies_by_address.values(), key=lambda d: d.spec) def dep_lines(): yield ' dependencies = [' for dep in deps: for line in dep.lines(): yield line yield ' ],' return list(dep_lines()) if deps else []
0.013761
def from_string(cls, epstr, name, distro=None): """Parse an entry point from the syntax in entry_points.txt :param str epstr: The entry point string (not including 'name =') :param str name: The name of this entry point :param Distribution distro: The distribution in which the entry point was found :rtype: EntryPoint :raises BadEntryPoint: if *epstr* can't be parsed as an entry point. """ m = entry_point_pattern.match(epstr) if m: mod, obj, extras = m.group('modulename', 'objectname', 'extras') if extras is not None: extras = re.split(r',\s*', extras) return cls(name, mod, obj, extras, distro) else: raise BadEntryPoint(epstr)
0.003856
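An illustrative call to `EntryPoint.from_string`, assuming the class and its `entry_point_pattern`/`BadEntryPoint` companions are importable; the entry-point string and the expected fields are hypothetical.

# "name = module:object [extras]" syntax as found in entry_points.txt
ep = EntryPoint.from_string("mypkg.cli:main [colors,tls]", name="mycmd")
# ep should describe module "mypkg.cli", object "main", extras ["colors", "tls"]

try:
    EntryPoint.from_string("this is not an entry point", name="broken")
except BadEntryPoint:
    pass  # strings that do not match the pattern raise instead of returning a partial object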
def computeMultipleExpectations(self, A_in, u_n, compute_uncertainty=True, compute_covariance=False, uncertainty_method=None, warning_cutoff=1.0e-10, return_theta=False): """Compute the expectations of multiple observables of phase space functions. Compute the expectations of multiple observables of phase space functions [A_0(x),A_1(x),...,A_i(x)] at a single state, along with the error in the estimates and the uncertainty in the estimates. The state is specified by the choice of u_n, which is the energy of the n samples evaluated at a the chosen state. Parameters: ------------- A_in : np.ndarray, float, shape=(I, k, N) A_in[i,n] = A_i(x_n), the value of phase observable i for configuration n at state of interest u_n : np.ndarray, float, shape=(N) u_n[n] is the reduced potential of configuration n at the state of interest compute_uncertainty : bool, optional, default=True If True, calculate the uncertainty compute_covariance : bool, optional, default=False If True, calculate the covariance uncertainty_method : string, optional Choice of method used to compute asymptotic covariance method, or None to use default See help for computeAsymptoticCovarianceMatrix() for more information on various methods. (default: None) warning_cutoff : float, optional Warn if squared-uncertainty is negative and larger in magnitude than this number (default: 1.0e-10) Returns ------------- result_vals : dictionary Possible keys in the result_vals dictionary: 'mu' : np.ndarray, float, shape=(I) result_vals['mu'] is the estimate for the expectation of A_i(x) at the state specified by u_kn 'sigma' : np.ndarray, float, shape = (I) result_vals['sigma'] is the uncertainty in the expectation of A_state_map[i](x) at the state specified by u_n[state_map[i],:] or None if compute_uncertainty is False 'covariances' : np.ndarray, float, shape=(I, I) result_vals['covariances'] is the COVARIANCE in the estimates of A_i[i] and A_i[j]: we can't actually take a square root or None if compute_covariance is False 'Theta': np.ndarray, float, shape=(I, I), covariances of the log weights, useful for some additional calculations. Examples -------- >>> from pymbar import testsystems >>> (x_n, u_kn, N_k, s_n) = testsystems.HarmonicOscillatorsTestCase().sample(mode='u_kn') >>> mbar = MBAR(u_kn, N_k) >>> A_in = np.array([x_n,x_n**2,x_n**3]) >>> u_n = u_kn[0,:] >>> results = mbar.computeMultipleExpectations(A_in, u_kn) """ # Retrieve N and K for convenience. I = A_in.shape[0] # number of observables K = self.K N = self.N # N is total number of samples if len(np.shape(A_in)) == 3: A_in_old = A_in.copy() # convert to k by n format A_in = np.zeros([I, N], np.float64) for i in range(I): A_in[i,:] = kn_to_n(A_in_old[i, :, :], N_k=self.N_k) if len(np.shape(u_n)) == 2: u_n = kn_to_n(u_n, N_k = self.N_k) state_map = np.zeros([2,I],int) state_map[1,:] = np.arange(I) # same (first) state for all variables. 
inner_results = self.computeExpectationsInner(A_in,u_n,state_map, return_theta=(compute_uncertainty or compute_covariance), uncertainty_method=uncertainty_method, warning_cutoff=warning_cutoff) result_vals = dict() expectations, uncertainties, covariances = None, None, None result_vals['mu'] = inner_results['observables'] if compute_uncertainty or compute_covariance or return_theta: Adiag = np.zeros([2*I,2*I],dtype=np.float64) diag = np.ones(2*I,dtype=np.float64) diag[0:I] = diag[I:2*I] = inner_results['observables']-inner_results['Amin'] np.fill_diagonal(Adiag,diag) Theta = Adiag*inner_results['Theta']*Adiag if return_theta: result_vals['Theta'] = Theta if compute_uncertainty: covA_ij = np.array(Theta[0:I,0:I]+Theta[I:2*I,I:2*I]-Theta[0:I,I:2*I]-Theta[I:2*I,0:I]) result_vals['sigma'] = np.sqrt(covA_ij[0:I,0:I].diagonal()) if compute_covariance: # compute estimate of statistical covariance of the observables result_vals['covariances'] = inner_results['Theta'][0:I,0:I] return result_vals
0.0084
def yiq_to_rgb(y, i=None, q=None):
    """Convert the color from YIQ coordinates to RGB.

    Parameters:
        :y:
            The Y component value [0...1]
        :i:
            The I component value [0...1]
        :q:
            The Q component value [0...1]

    Returns:
        The color as an (r, g, b) tuple in the range:
        r[0...1],
        g[0...1],
        b[0...1]

    >>> '({}, {}, {})'.format(*[round(v, 6) for v in yiq_to_rgb(0.592263, 0.458874, -0.0499818)])
    '(1.0, 0.5, 1e-06)'

    """
    if type(y) in [list, tuple]:
        y, i, q = y
    r = y + (i * 0.9562) + (q * 0.6210)
    g = y - (i * 0.2717) - (q * 0.6485)
    b = y - (i * 1.1053) + (q * 1.7020)
    return (r, g, b)
0.014041
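Two quick calls to `yiq_to_rgb`, mirroring the doctest in the record above; no extra dependencies are assumed.

r, g, b = yiq_to_rgb(0.592263, 0.458874, -0.0499818)
print(round(r, 6), round(g, 6), round(b, 6))  # approximately 1.0 0.5 0.0

# The first argument may also be a single (y, i, q) sequence.
print(yiq_to_rgb((0.0, 0.0, 0.0)))            # (0.0, 0.0, 0.0)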
def _self_pipe(self): """ This sets up a self-pipe so we can hand back an fd to the caller allowing the object to manage event triggers. The ends of the pipe are set non-blocking so it doesn't really matter if a bunch of events fill the pipe buffer. """ import fcntl self._poll_fd, self._poll_send = os.pipe() for fd in [self._poll_fd, self._poll_send]: fl = fcntl.fcntl(fd, fcntl.F_GETFL) fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
0.003745
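A self-contained sketch of the same self-pipe setup shown in `_self_pipe`, written as a standalone helper; it assumes a POSIX platform where `fcntl` is available.

import os
import fcntl

def make_nonblocking_pipe():
    """Create a pipe whose ends are both set to O_NONBLOCK."""
    read_fd, write_fd = os.pipe()
    for fd in (read_fd, write_fd):
        flags = fcntl.fcntl(fd, fcntl.F_GETFL)
        fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
    return read_fd, write_fd

read_fd, write_fd = make_nonblocking_pipe()
os.write(write_fd, b"x")    # signal an event without blocking the writer
print(os.read(read_fd, 1))  # b'x' -- the polling side consumes the wake-up byte
os.close(read_fd)
os.close(write_fd)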
def mk_token(opts, tdata):
    '''
    Mint a new token using the config option hash_type and store tdata with 'token' attribute set
    to the token.
    This module uses the hash of random 512 bytes as a token.

    :param opts: Salt master config options
    :param tdata: Token data to be stored with 'token' attribute of this dict set to the token.
    :returns: tdata with token if successful. Empty dict if failed.
    '''
    redis_client = _redis_client(opts)
    if not redis_client:
        return {}
    hash_type = getattr(hashlib, opts.get('hash_type', 'md5'))
    tok = six.text_type(hash_type(os.urandom(512)).hexdigest())
    try:
        while redis_client.get(tok) is not None:
            tok = six.text_type(hash_type(os.urandom(512)).hexdigest())
    except Exception as err:
        log.warning(
            'Authentication failure: cannot get token %s from redis: %s',
            tok, err
        )
        return {}
    tdata['token'] = tok
    serial = salt.payload.Serial(opts)
    try:
        redis_client.set(tok, serial.dumps(tdata))
    except Exception as err:
        log.warning(
            'Authentication failure: cannot save token %s to redis: %s',
            tok, err
        )
        return {}
    return tdata
0.002392
def good_sequences_to_track(flow, motion_threshold=1.0):
    """Get list of good frames to do tracking in.

    Looking at the optical flow, this function chooses a span of frames
    that fulfill certain criteria.
    These include
    * not being too short or too long
    * not too low or too high mean flow magnitude
    * a low max value (avoids motion blur)

    Currently, the cost function for a sequence is hard coded. Sorry about that.

    Parameters
    -------------
    flow : ndarray
            The optical flow magnitude

    motion_threshold : float
            The maximum amount of motion to consider for sequence endpoints.

    Returns
    ------------
    sequences : list
            Sorted list of (a, b, score) elements (highest score first) of sequences
            where a sequence is frames with frame indices in the span [a, b].
    """
    endpoints = []
    in_low = False
    for i, val in enumerate(flow):
        if val < motion_threshold:
            if not in_low:
                endpoints.append(i)
                in_low = True
        else:
            if in_low:
                endpoints.append(i-1)  # Previous was last in a low spot
                in_low = False

    def mean_score_func(m):
        mu = 15
        sigma = 8
        top_val = normpdf(mu, mu, sigma)
        return normpdf(m, mu, sigma) / top_val

    def max_score_func(m):
        mu = 40
        sigma = 8
        if m <= mu:
            return 1.
        else:
            top_val = normpdf(mu, mu, sigma)
            return normpdf(m, mu, sigma) / top_val

    def length_score_func(l):
        mu = 30
        sigma = 10
        top_val = normpdf(mu, mu, sigma)
        return normpdf(l, mu, sigma) / top_val

    min_length = 5  # frames
    sequences = []
    for k, i in enumerate(endpoints[:-1]):
        for j in endpoints[k+1:]:
            length = j - i
            if length < min_length:
                continue
            seq = flow[i:j+1]
            m_score = mean_score_func(np.mean(seq))
            mx_score = max_score_func(np.max(seq))
            l_score = length_score_func(length)
            logger.debug("%d, %d scores: (mean=%.5f, max=%.5f, length=%.5f)" % (i, j, m_score, mx_score, l_score))
            if min(m_score, mx_score, l_score) < 0.2:
                continue
            score = m_score + mx_score + l_score
            sequences.append((i, j, score))

    return sorted(sequences, key=lambda x: x[2], reverse=True)
0.006743
def iso_to_datetime(date): """ Convert ISO 8601 time format to datetime format This function converts a date in ISO format, e.g. ``2017-09-14`` to a `datetime` instance, e.g. ``datetime.datetime(2017,9,14,0,0)`` :param date: date in ISO 8601 format :type date: str :return: datetime instance :rtype: datetime """ chunks = list(map(int, date.split('T')[0].split('-'))) return datetime.datetime(chunks[0], chunks[1], chunks[2])
0.004283
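Example calls to `iso_to_datetime`; the only assumption is that the surrounding module imports `datetime`, as the record implies.

print(iso_to_datetime("2017-09-14"))           # 2017-09-14 00:00:00
print(iso_to_datetime("2017-09-14T13:45:00"))  # time portion is ignored: 2017-09-14 00:00:00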
def flatten_phases_and_groups(phases_or_groups): """Recursively flatten nested lists for the list of phases or groups.""" if isinstance(phases_or_groups, PhaseGroup): phases_or_groups = [phases_or_groups] ret = [] for phase in phases_or_groups: if isinstance(phase, PhaseGroup): ret.append(phase.flatten()) elif isinstance(phase, collections.Iterable): ret.extend(flatten_phases_and_groups(phase)) else: ret.append(phase_descriptor.PhaseDescriptor.wrap_or_copy(phase)) return ret
0.017208
def _patch_stats_request(request): '''If the request has no filter config, add one that should do what is expected (include all items) see: PE-11813 ''' filt = request.get('filter', {}) if not filt.get('config', None): request['filter'] = filters.date_range('acquired', gt='1970-01-01T00:00:00Z') return request
0.002538
def fire(self, name, operation, args=None, **kwargs): """Send a message without waiting for a reply @param name: name of destination service queue @param operation: name of service operation to invoke @param args: dictionary of keyword args to pass to operation. Use this OR kwargs. @param kwargs: additional args to pass to operation """ if args: if kwargs: raise TypeError("specify args dict or keyword arguments, not both") else: args = kwargs d = dict(op=operation, args=args) headers = {'sender': self.add_sysname(self.name)} dest = self.add_sysname(name) def _fire(channel): with Producer(channel) as producer: producer.publish(d, routing_key=dest, headers=headers, serializer=self._serializer, exchange=self._exchange, declare=[self._exchange]) log.debug("sending message to %s", dest) with connections[self._pool_conn].acquire(block=True) as conn: _, channel = self.ensure(conn, _fire) conn.maybe_close_channel(channel)
0.004174
def _bind(self): """Bind events to handlers""" main_window = self.main_window handlers = self.handlers c_handlers = self.cell_handlers # Non wx.Grid events self.Bind(wx.EVT_MOUSEWHEEL, handlers.OnMouseWheel) self.Bind(wx.EVT_KEY_DOWN, handlers.OnKey) # Grid events self.GetGridWindow().Bind(wx.EVT_MOTION, handlers.OnMouseMotion) self.Bind(wx.grid.EVT_GRID_RANGE_SELECT, handlers.OnRangeSelected) # Context menu self.Bind(wx.grid.EVT_GRID_CELL_RIGHT_CLICK, handlers.OnContextMenu) # Cell code events main_window.Bind(self.EVT_CMD_CODE_ENTRY, c_handlers.OnCellText) main_window.Bind(self.EVT_CMD_INSERT_BMP, c_handlers.OnInsertBitmap) main_window.Bind(self.EVT_CMD_LINK_BMP, c_handlers.OnLinkBitmap) main_window.Bind(self.EVT_CMD_VIDEO_CELL, c_handlers.OnLinkVLCVideo) main_window.Bind(self.EVT_CMD_INSERT_CHART, c_handlers.OnInsertChartDialog) # Cell attribute events main_window.Bind(self.EVT_CMD_COPY_FORMAT, c_handlers.OnCopyFormat) main_window.Bind(self.EVT_CMD_PASTE_FORMAT, c_handlers.OnPasteFormat) main_window.Bind(self.EVT_CMD_FONT, c_handlers.OnCellFont) main_window.Bind(self.EVT_CMD_FONTSIZE, c_handlers.OnCellFontSize) main_window.Bind(self.EVT_CMD_FONTBOLD, c_handlers.OnCellFontBold) main_window.Bind(self.EVT_CMD_FONTITALICS, c_handlers.OnCellFontItalics) main_window.Bind(self.EVT_CMD_FONTUNDERLINE, c_handlers.OnCellFontUnderline) main_window.Bind(self.EVT_CMD_FONTSTRIKETHROUGH, c_handlers.OnCellFontStrikethrough) main_window.Bind(self.EVT_CMD_FROZEN, c_handlers.OnCellFrozen) main_window.Bind(self.EVT_CMD_LOCK, c_handlers.OnCellLocked) main_window.Bind(self.EVT_CMD_BUTTON_CELL, c_handlers.OnButtonCell) main_window.Bind(self.EVT_CMD_MARKUP, c_handlers.OnCellMarkup) main_window.Bind(self.EVT_CMD_MERGE, c_handlers.OnMerge) main_window.Bind(self.EVT_CMD_JUSTIFICATION, c_handlers.OnCellJustification) main_window.Bind(self.EVT_CMD_ALIGNMENT, c_handlers.OnCellAlignment) main_window.Bind(self.EVT_CMD_BORDERWIDTH, c_handlers.OnCellBorderWidth) main_window.Bind(self.EVT_CMD_BORDERCOLOR, c_handlers.OnCellBorderColor) main_window.Bind(self.EVT_CMD_BACKGROUNDCOLOR, c_handlers.OnCellBackgroundColor) main_window.Bind(self.EVT_CMD_TEXTCOLOR, c_handlers.OnCellTextColor) main_window.Bind(self.EVT_CMD_ROTATION0, c_handlers.OnTextRotation0) main_window.Bind(self.EVT_CMD_ROTATION90, c_handlers.OnTextRotation90) main_window.Bind(self.EVT_CMD_ROTATION180, c_handlers.OnTextRotation180) main_window.Bind(self.EVT_CMD_ROTATION270, c_handlers.OnTextRotation270) main_window.Bind(self.EVT_CMD_TEXTROTATATION, c_handlers.OnCellTextRotation) # Cell selection events self.Bind(wx.grid.EVT_GRID_CMD_SELECT_CELL, c_handlers.OnCellSelected) # Grid edit mode events main_window.Bind(self.EVT_CMD_ENTER_SELECTION_MODE, handlers.OnEnterSelectionMode) main_window.Bind(self.EVT_CMD_EXIT_SELECTION_MODE, handlers.OnExitSelectionMode) # Grid view events main_window.Bind(self.EVT_CMD_VIEW_FROZEN, handlers.OnViewFrozen) main_window.Bind(self.EVT_CMD_REFRESH_SELECTION, handlers.OnRefreshSelectedCells) main_window.Bind(self.EVT_CMD_TIMER_TOGGLE, handlers.OnTimerToggle) self.Bind(wx.EVT_TIMER, handlers.OnTimer) main_window.Bind(self.EVT_CMD_DISPLAY_GOTO_CELL_DIALOG, handlers.OnDisplayGoToCellDialog) main_window.Bind(self.EVT_CMD_GOTO_CELL, handlers.OnGoToCell) main_window.Bind(self.EVT_CMD_ZOOM_IN, handlers.OnZoomIn) main_window.Bind(self.EVT_CMD_ZOOM_OUT, handlers.OnZoomOut) main_window.Bind(self.EVT_CMD_ZOOM_STANDARD, handlers.OnZoomStandard) main_window.Bind(self.EVT_CMD_ZOOM_FIT, handlers.OnZoomFit) # Find events 
main_window.Bind(self.EVT_CMD_FIND, handlers.OnFind) main_window.Bind(self.EVT_CMD_REPLACE, handlers.OnShowFindReplace) main_window.Bind(wx.EVT_FIND, handlers.OnReplaceFind) main_window.Bind(wx.EVT_FIND_NEXT, handlers.OnReplaceFind) main_window.Bind(wx.EVT_FIND_REPLACE, handlers.OnReplace) main_window.Bind(wx.EVT_FIND_REPLACE_ALL, handlers.OnReplaceAll) main_window.Bind(wx.EVT_FIND_CLOSE, handlers.OnCloseFindReplace) # Grid change events main_window.Bind(self.EVT_CMD_INSERT_ROWS, handlers.OnInsertRows) main_window.Bind(self.EVT_CMD_INSERT_COLS, handlers.OnInsertCols) main_window.Bind(self.EVT_CMD_INSERT_TABS, handlers.OnInsertTabs) main_window.Bind(self.EVT_CMD_DELETE_ROWS, handlers.OnDeleteRows) main_window.Bind(self.EVT_CMD_DELETE_COLS, handlers.OnDeleteCols) main_window.Bind(self.EVT_CMD_DELETE_TABS, handlers.OnDeleteTabs) main_window.Bind(self.EVT_CMD_SHOW_RESIZE_GRID_DIALOG, handlers.OnResizeGridDialog) main_window.Bind(self.EVT_CMD_QUOTE, handlers.OnQuote) main_window.Bind(wx.grid.EVT_GRID_ROW_SIZE, handlers.OnRowSize) main_window.Bind(wx.grid.EVT_GRID_COL_SIZE, handlers.OnColSize) main_window.Bind(self.EVT_CMD_SORT_ASCENDING, handlers.OnSortAscending) main_window.Bind(self.EVT_CMD_SORT_DESCENDING, handlers.OnSortDescending) # Undo/Redo events main_window.Bind(self.EVT_CMD_UNDO, handlers.OnUndo) main_window.Bind(self.EVT_CMD_REDO, handlers.OnRedo)
0.000328
def _inning_actions(self, soup, inning_number, inning_id): """ Inning Actions. :param soup: Beautifulsoup object :param inning_number: Inning Number :param inning_id: Inning Id(0:home, 1:away) """ # at bat(batter box data) & pitching data for act in soup.find_all('action'): self.actions.append(InningAction.action(act, self.game, self.players.rosters, inning_number, inning_id))
0.006593
def zpopmin(self, name, count=None): """ Remove and return up to ``count`` members with the lowest scores from the sorted set ``name``. """ args = (count is not None) and [count] or [] options = { 'withscores': True } return self.execute_command('ZPOPMIN', name, *args, **options)
0.005618
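A hedged usage sketch for `zpopmin`, assuming `r` is a redis-py style client exposing the method above and a `zadd` that accepts a mapping; key names, scores, and the exact reply shapes are illustrative.

r.zadd("jobs", {"low": 1, "mid": 5, "high": 9})

print(r.zpopmin("jobs"))     # lowest-scored member, e.g. [(b"low", 1.0)]
print(r.zpopmin("jobs", 2))  # next two members, e.g. [(b"mid", 5.0), (b"high", 9.0)]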
def get_ar(self): """Create a temporary AR to fetch the fields from """ if not self.tmp_ar: logger.info("*** CREATING TEMPORARY AR ***") self.tmp_ar = self.context.restrictedTraverse( "portal_factory/AnalysisRequest/Request new analyses") return self.tmp_ar
0.006079
def manage_submissions(self):
    """
    If there are no submissions left, or only one, fetch new submissions.

    This function manages URL creation and the specifics for front page
    or subreddit mode.
    """
    if not hasattr(self, 'submissions') or len(self.submissions) == 1:
        self.submissions = []
        if self.options['mode'] == 'front':
            # If there are no login details, the standard front
            # page will be displayed.
            if self.options['password'] and self.options['username']:
                self.login()
            url = 'http://reddit.com/.json?sort={0}'.format(self.options['sort'])
            self.submissions = self.get_submissions(url)
        elif self.options['mode'] == 'subreddit':
            for subreddit in self.options['subreddits']:
                url = 'http://reddit.com/r/{0}/.json?sort={1}'.format(
                    subreddit, self.options['sort'])
                self.submissions += self.get_submissions(url)
        else:
            return
0.00273
def get_node_type(type_str): """Returns the NodeType given a name of a JSON function object.""" if type_str == "container": return NodeType.CONTAINER elif type_str == "loop_plate": return NodeType.LOOP elif type_str == "assign": return NodeType.ASSIGN elif type_str == "condition": return NodeType.CONDITION elif type_str == "decision": return NodeType.DECISION else: raise ValueError("Unrecognized type string: ", type_str)
0.002
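A short illustration of `get_node_type`, assuming the `NodeType` enum from the surrounding module is importable.

print(get_node_type("container"))   # NodeType.CONTAINER
print(get_node_type("loop_plate"))  # NodeType.LOOP

try:
    get_node_type("unknown")
except ValueError as err:
    print(err)  # ('Unrecognized type string: ', 'unknown')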