text — string (length 78 to 104k characters)
score — float64 (range 0 to 0.18)
def clone_repo(self):
    """Clone a repository containing the dotfiles source."""
    tempdir_path = tempfile.mkdtemp()
    if self.args.git:
        self.log.debug('Cloning git source repository from %s to %s',
                       self.source, tempdir_path)
        self.sh('git clone', self.source, tempdir_path)
    else:
        raise NotImplementedError('Unknown repo type')
    self.source = tempdir_path
0.004415
def update_payload(self, fields=None):
    """Rename ``system_ids`` to ``system_uuids``."""
    payload = super(HostCollection, self).update_payload(fields)
    if 'system_ids' in payload:
        payload['system_uuids'] = payload.pop('system_ids')
    return payload
0.006969
def setNodeCount(self, nodeType, numNodes, preemptable=False, force=False): """ Attempt to grow or shrink the number of preemptable or non-preemptable worker nodes in the cluster to the given value, or as close a value as possible, and, after performing the necessary additions or removals of worker nodes, return the resulting number of preemptable or non-preemptable nodes currently in the cluster. :param str nodeType: The node type to add or remove. :param int numNodes: Desired size of the cluster :param bool preemptable: whether the added nodes will be preemptable, i.e. whether they may be removed spontaneously by the underlying platform at any time. :param bool force: If False, the provisioner is allowed to deviate from the given number of nodes. For example, when downsizing a cluster, a provisioner might leave nodes running if they have active jobs running on them. :rtype: int :return: the number of worker nodes in the cluster after making the necessary adjustments. This value should be, but is not guaranteed to be, close or equal to the `numNodes` argument. It represents the closest possible approximation of the actual cluster size at the time this method returns. """ for attempt in retry(predicate=self.provisioner.retryPredicate): with attempt: workerInstances = self.getNodes(preemptable=preemptable) logger.debug("Cluster contains %i instances" % len(workerInstances)) # Reduce to nodes of the correct type workerInstances = {node:workerInstances[node] for node in workerInstances if node.nodeType == nodeType} ignoredNodes = [node for node in workerInstances if node.privateIP in self.ignoredNodes] numIgnoredNodes = len(ignoredNodes) numCurrentNodes = len(workerInstances) logger.debug("Cluster contains %i instances of type %s (%i ignored and draining jobs until " "they can be safely terminated)" % (numCurrentNodes, nodeType, numIgnoredNodes)) if not force: delta = numNodes - (numCurrentNodes - numIgnoredNodes) else: delta = numNodes - numCurrentNodes if delta > 0 and numIgnoredNodes > 0: # We can un-ignore a few nodes to compensate for the additional nodes we want. numNodesToUnignore = min(delta, numIgnoredNodes) logger.debug('Unignoring %i nodes because we want to scale back up again.' % numNodesToUnignore) delta -= numNodesToUnignore for node in ignoredNodes[:numNodesToUnignore]: self.ignoredNodes.remove(node.privateIP) self.leader.batchSystem.unignoreNode(node.privateIP) if delta > 0: logger.info('Adding %i %s nodes to get to desired cluster size of %i.', delta, 'preemptable' if preemptable else 'non-preemptable', numNodes) numNodes = numCurrentNodes + self._addNodes(nodeType, numNodes=delta, preemptable=preemptable) elif delta < 0: logger.info('Removing %i %s nodes to get to desired cluster size of %i.', -delta, 'preemptable' if preemptable else 'non-preemptable', numNodes) numNodes = numCurrentNodes - self._removeNodes(workerInstances, nodeType = nodeType, numNodes=-delta, preemptable=preemptable, force=force) else: if not force: logger.debug('Cluster (minus ignored nodes) already at desired size of %i. Nothing to do.', numNodes) else: logger.debug('Cluster already at desired size of %i. Nothing to do.', numNodes) return numNodes
0.008276
def wait_for_transfer_job(self, job, expected_statuses=(GcpTransferOperationStatus.SUCCESS,), timeout=60):
    """
    Waits until the job reaches the expected state.

    :param job: Transfer job
        See: https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs#TransferJob
    :type job: dict
    :param expected_statuses: State that is expected
        See: https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferOperations#Status
    :type expected_statuses: set[str]
    :param timeout: Time in seconds in which the operation must end.
    :type timeout: int
    :rtype: None
    """
    while timeout > 0:
        operations = self.list_transfer_operations(
            filter={FILTER_PROJECT_ID: job[PROJECT_ID], FILTER_JOB_NAMES: [job[NAME]]}
        )
        if GCPTransferServiceHook.operations_contain_expected_statuses(operations, expected_statuses):
            return
        time.sleep(TIME_TO_SLEEP_IN_SECONDS)
        timeout -= TIME_TO_SLEEP_IN_SECONDS
    raise AirflowException("Timeout. The operation could not be completed within the allotted time.")
0.004918
def decode_base64(data: str) -> bytes:
    """Decode base64, padding being optional.

    :param data: Base64 data as an ASCII string.
    :returns: The decoded byte string.
    """
    missing_padding = len(data) % 4
    if missing_padding != 0:
        data += "=" * (4 - missing_padding)
    return base64.decodebytes(data.encode("utf-8"))
0.00289
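A minimal usage sketch for the decode_base64 helper above, exercising its optional-padding behaviour (the calls assume the function is imported from the module that defines it):

    decode_base64("aGVsbG8=")   # -> b'hello', input already padded
    decode_base64("aGVsbG8")    # -> b'hello', padding restored before decoding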
def get_job(db, job_id, username=None):
    """
    If job_id is negative, return the last calculation of the current user;
    otherwise return the job with the given ID.

    :param db: a :class:`openquake.server.dbapi.Db` instance
    :param job_id: a job ID (can be negative and can be nonexisting)
    :param username: a user name (if None, ignore it)
    :returns: a valid job or None if the original job ID was invalid
    """
    job_id = int(job_id)
    if job_id > 0:
        dic = dict(id=job_id)
        if username:
            dic['user_name'] = username
        try:
            return db('SELECT * FROM job WHERE ?A', dic, one=True)
        except NotFound:
            return

    # else negative job_id
    if username:
        joblist = db('SELECT * FROM job WHERE user_name=?x '
                     'ORDER BY id DESC LIMIT ?x', username, -job_id)
    else:
        joblist = db('SELECT * FROM job ORDER BY id DESC LIMIT ?x', -job_id)
    if not joblist:  # no jobs
        return
    else:
        return joblist[-1]
0.000966
def save(self):
    """
    Reset the user's password if the provided information is valid.
    """
    token = models.PasswordResetToken.objects.get(
        key=self.validated_data["key"]
    )
    token.email.user.set_password(self.validated_data["password"])
    token.email.user.save()
    logger.info("Reset password for %s", token.email.user)
    token.delete()
0.004866
def _clean_block(response_dict):
    '''Pythonize a blockcypher API response.'''
    response_dict['received_time'] = parser.parse(response_dict['received_time'])
    response_dict['time'] = parser.parse(response_dict['time'])
    return response_dict
0.007905
def login(email, password):
    """Login to Todoist.

    :param email: A Todoist user's email address.
    :type email: str
    :param password: A Todoist user's password.
    :type password: str
    :return: The Todoist user.
    :rtype: :class:`pytodoist.todoist.User`

    >>> from pytodoist import todoist
    >>> user = todoist.login('[email protected]', 'password')
    >>> print(user.full_name)
    John Doe
    """
    user = _login(API.login, email, password)
    user.password = password
    return user
0.001942
def ssh_to_task(task) -> paramiko.SSHClient:
    """Create ssh connection to task's machine.

    Returns Paramiko SSH client connected to host.
    """
    username = task.ssh_username
    hostname = task.public_ip
    ssh_key_fn = get_keypair_fn()
    print(f"ssh -i {ssh_key_fn} {username}@{hostname}")
    pkey = paramiko.RSAKey.from_private_key_file(ssh_key_fn)

    ssh_client = paramiko.SSHClient()
    ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    assert ssh_client

    counter = 1
    while True:
        try:
            ssh_client.connect(hostname=hostname, username=username, pkey=pkey)
            if counter % 11 == 0:  # occasionally re-obtain public ip, machine could've gotten restarted
                hostname = task.public_ip
            break
        except Exception as e:
            print(f'{task.name}: Exception connecting to {hostname} via ssh (could be a timeout): {e}')
            time.sleep(RETRY_INTERVAL_SEC)

    return ssh_client
0.021645
def measure_int_put(self, measure, value):
    """Associates the measure of type Int with the given value."""
    if value < 0:
        # Should be an error in a later release.
        logger.warning("Cannot record negative values")
    self._measurement_map[measure] = value
0.006803
def MoveTo(x: int, y: int, moveSpeed: float = 1, waitTime: float = OPERATION_WAIT_TIME) -> None: """ Simulate mouse move to point x, y from current cursor. x: int. y: int. moveSpeed: float, 1 normal speed, < 1 move slower, > 1 move faster. waitTime: float. """ if moveSpeed <= 0: moveTime = 0 else: moveTime = MAX_MOVE_SECOND / moveSpeed curX, curY = GetCursorPos() xCount = abs(x - curX) yCount = abs(y - curY) maxPoint = max(xCount, yCount) screenWidth, screenHeight = GetScreenSize() maxSide = max(screenWidth, screenHeight) minSide = min(screenWidth, screenHeight) if maxPoint > minSide: maxPoint = minSide if maxPoint < maxSide: maxPoint = 100 + int((maxSide - 100) / maxSide * maxPoint) moveTime = moveTime * maxPoint * 1.0 / maxSide stepCount = maxPoint // 20 if stepCount > 1: xStep = (x - curX) * 1.0 / stepCount yStep = (y - curY) * 1.0 / stepCount interval = moveTime / stepCount for i in range(stepCount): cx = curX + int(xStep * i) cy = curY + int(yStep * i) # upper-left(0,0), lower-right(65536,65536) # mouse_event(MouseEventFlag.Move | MouseEventFlag.Absolute, cx*65536//screenWidth, cy*65536//screenHeight, 0, 0) SetCursorPos(cx, cy) time.sleep(interval) SetCursorPos(x, y) time.sleep(waitTime)
0.002079
def tag_pos_volume(line): """Tag POS volume number POS is journal that has special volume numbers e.g. PoS LAT2007 (2007) 369 """ def tagger(match): groups = match.groupdict() try: year = match.group('year') except IndexError: # Extract year from volume name # which should always include the year g = re.search(re_pos_year_num, match.group( 'volume_num'), re.UNICODE) year = g.group(0) if year: groups[ 'year'] = ' <cds.YR>(%s)</cds.YR>' % year.strip().strip('()') else: groups['year'] = '' return '<cds.JOURNAL>PoS</cds.JOURNAL>' \ ' <cds.VOL>%(volume_name)s%(volume_num)s</cds.VOL>' \ '%(year)s' \ ' <cds.PG>%(page)s</cds.PG>' % groups for p in re_pos: line = p.sub(tagger, line) return line
0.00107
def tokenize_number(val, line):
    """Parse val correctly into int or float."""
    try:
        num = int(val)
        typ = TokenType.int
    except ValueError:
        num = float(val)
        typ = TokenType.float
    return {'type': typ, 'value': num, 'line': line}
0.003676
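A short usage sketch for tokenize_number above; TokenType is assumed to be the token-kind enum defined alongside it in its module:

    tokenize_number("42", line=3)    # -> {'type': TokenType.int, 'value': 42, 'line': 3}
    tokenize_number("3.14", line=3)  # -> {'type': TokenType.float, 'value': 3.14, 'line': 3}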
def _get_env(self, config):
    """Read environment variables based on the settings defined in the
    defaults. These are expected to be upper-case versions of the actual
    setting names, prefixed by ``SCRAPEKIT_``.
    """
    for option, value in config.items():
        env_name = 'SCRAPEKIT_%s' % option.upper()
        value = os.environ.get(env_name, value)
        config[option] = value
    return config
0.004505
def get_vnetwork_portgroups_output_vnetwork_pgs_vs_nn(self, **kwargs):
    """Auto Generated Code"""
    config = ET.Element("config")
    get_vnetwork_portgroups = ET.Element("get_vnetwork_portgroups")
    config = get_vnetwork_portgroups
    output = ET.SubElement(get_vnetwork_portgroups, "output")
    vnetwork_pgs = ET.SubElement(output, "vnetwork-pgs")
    vs_nn = ET.SubElement(vnetwork_pgs, "vs-nn")
    vs_nn.text = kwargs.pop('vs_nn')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
0.003472
def sparql_query(self, query, flush=None, limit=None):
    """
    Run a Sparql query.

    :param query: sparql query string
    :rtype: list of dictionary
    """
    return self.find_statements(query, language='sparql', type='tuples',
                                flush=flush, limit=limit)
0.010033
def writeGenerator(self, gen):
    """
    Iterates over a generator object and encodes all that is returned.
    """
    n = getattr(gen, 'next')
    while True:
        try:
            self.writeElement(n())
        except StopIteration:
            break
0.00678
def in_period(period, dt=None): """ Determines if a datetime is within a certain time period. If the time is omitted the current time will be used. in_period return True is the datetime is within the time period, False if not. If the expression is malformed a TimePeriod.InvalidFormat exception will be raised. (Note that this differs from Time::Period, which returns -1 if the expression is invalid). The format for the time period is like Perl's Time::Period module, which is documented in some detail here: http://search.cpan.org/~pryan/Period-1.20/Period.pm Here's the quick and dirty version. Each period is composed of one or more sub-period seperated by a comma. A datetime must match at least one of the sub periods to be considered in that time period. Each sub-period is composed of one or more tests, like so: scale {value} scale {a-b} scale {a b c} The datetime must pass each test for a sub-period for the sub-period to be considered true. For example: Match Mondays wd {mon} Match Monday mornings wd {mon} hr {9-16} Match Monday morning or Friday afternoon wd {mon} hr {0-12}, wd {fri} hr {0-12} Valid scales are: year month week yday mday wday hour minute second Those can be substituted with their corresponding code: yd mo wk yd md wd hr min sec """ if dt is None: dt = datetime.now() # transform whatever crazy format we're given and turn it into # something like this: # # md{1}|hr{midnight-noon},md{2}|hr{noon-midnight} period = re.sub(r"^\s*|\s*$", '', period) period = re.sub(r"\s*(?={|$)", '', period) period = re.sub(r",\s*", ',', period) period = re.sub(r"\s*-\s*", '-', period) period = re.sub(r"{\s*", '{', period) period = re.sub(r"\s*}\s*", '}', period) period = re.sub(r"}(?=[^,])", '}|', period) period = period.lower() if period == '': return True sub_periods = re.split(',', period) # go through each sub-period until one matches (OR logic) for sp in sub_periods: if _is_in_sub_period(sp, dt): return True return False
0.00084
def _RawData(self, data):
    """Convert data to common format.

    Configuration options are normally grouped by the functional component
    which defines them (e.g. Logging.path is the path parameter for the
    logging subsystem). However, sometimes it is more intuitive to write the
    config as a flat string (e.g. Logging.path). In this case we group all
    the flat strings in their respective sections and create the sections
    automatically.

    Args:
      data: A dict of raw data.

    Returns:
      a dict in common format. Any keys in the raw data which have a "." in
      them are separated into their own sections. This allows the config to
      be written explicitly in dot notation instead of using a section.
    """
    if not isinstance(data, dict):
        return data

    result = collections.OrderedDict()
    for k, v in iteritems(data):
        result[k] = self._RawData(v)

    return result
0.006536
def by_name(name):
    """Return a device by name.

    Args:
        name (str): The name of the device to return.

    Returns:
        :class:`~.SoCo`: The first device encountered among all zones with
        the given player name. If none are found `None` is returned.
    """
    devices = discover(all_households=True)
    for device in (devices or []):
        if device.player_name == name:
            return device
    return None
0.002252
def WalkTree(top, getChildren: Callable = None, getFirstChild: Callable = None, getNextSibling: Callable = None, yieldCondition: Callable = None, includeTop: bool = False, maxDepth: int = 0xFFFFFFFF): """ Walk a tree not using recursive algorithm. top: a tree node. getChildren: function(treeNode) -> list. getNextSibling: function(treeNode) -> treeNode. getNextSibling: function(treeNode) -> treeNode. yieldCondition: function(treeNode, depth) -> bool. includeTop: bool, if True yield top first. maxDepth: int, enum depth. If getChildren is valid, ignore getFirstChild and getNextSibling, yield 3 items tuple: (treeNode, depth, remain children count in current depth). If getChildren is not valid, using getFirstChild and getNextSibling, yield 2 items tuple: (treeNode, depth). If yieldCondition is not None, only yield tree nodes that yieldCondition(treeNode, depth)->bool returns True. For example: def GetDirChildren(dir_): if os.path.isdir(dir_): return [os.path.join(dir_, it) for it in os.listdir(dir_)] for it, depth, leftCount in WalkTree('D:\\', getChildren= GetDirChildren): print(it, depth, leftCount) """ if maxDepth <= 0: return depth = 0 if getChildren: if includeTop: if not yieldCondition or yieldCondition(top, 0): yield top, 0, 0 children = getChildren(top) childList = [children] while depth >= 0: #or while childList: lastItems = childList[-1] if lastItems: if not yieldCondition or yieldCondition(lastItems[0], depth + 1): yield lastItems[0], depth + 1, len(lastItems) - 1 if depth + 1 < maxDepth: children = getChildren(lastItems[0]) if children: depth += 1 childList.append(children) del lastItems[0] else: del childList[depth] depth -= 1 elif getFirstChild and getNextSibling: if includeTop: if not yieldCondition or yieldCondition(top, 0): yield top, 0 child = getFirstChild(top) childList = [child] while depth >= 0: #or while childList: lastItem = childList[-1] if lastItem: if not yieldCondition or yieldCondition(lastItem, depth + 1): yield lastItem, depth + 1 child = getNextSibling(lastItem) childList[depth] = child if depth + 1 < maxDepth: child = getFirstChild(lastItem) if child: depth += 1 childList.append(child) else: del childList[depth] depth -= 1
0.002406
def get_upload_form(self):
    """Construct form for accepting file upload."""
    return self.form_class(self.request.POST, self.request.FILES)
0.013158
def tf_summary_to_dict(tf_summary_str_or_pb, namespace=""): """Convert a Tensorboard Summary to a dictionary Accepts either a tensorflow.summary.Summary or one encoded as a string. """ values = {} if isinstance(tf_summary_str_or_pb, Summary): summary_pb = tf_summary_str_or_pb elif isinstance(tf_summary_str_or_pb, Event): summary_pb = tf_summary_str_or_pb.summary values["global_step"] = tf_summary_str_or_pb.step values["_timestamp"] = tf_summary_str_or_pb.wall_time else: summary_pb = Summary() summary_pb.ParseFromString(tf_summary_str_or_pb) for value in summary_pb.value: kind = value.WhichOneof("value") if kind == "simple_value": values[namespaced_tag(value.tag, namespace)] = value.simple_value elif kind == "image": from PIL import Image image = wandb.Image(Image.open( six.BytesIO(value.image.encoded_image_string))) tag_idx = value.tag.rsplit('/', 1) if len(tag_idx) > 1 and tag_idx[1].isdigit(): tag, idx = tag_idx values.setdefault(history_image_key(tag), []).append(image) else: values[history_image_key(value.tag)] = image # Coming soon... # elif kind == "audio": # audio = wandb.Audio(six.BytesIO(value.audio.encoded_audio_string), # sample_rate=value.audio.sample_rate, content_type=value.audio.content_type) elif kind == "histo": first = value.histo.bucket_limit[0] + \ value.histo.bucket_limit[0] - value.histo.bucket_limit[1] last = value.histo.bucket_limit[-2] + \ value.histo.bucket_limit[-2] - value.histo.bucket_limit[-3] np_histogram = (list(value.histo.bucket), [ first] + value.histo.bucket_limit[:-1] + [last]) values[namespaced_tag(value.tag)] = wandb.Histogram( np_histogram=np_histogram) return values
0.00097
def ifelse(arg, true_expr, false_expr):
    """
    Shorthand for implementing ternary expressions

    bool_expr.ifelse(0, 1)
    e.g., in SQL: CASE WHEN bool_expr THEN 0 else 1 END
    """
    # Result will be the result of promotion of true/false exprs. These
    # might be conflicting types; same type resolution as case expressions
    # must be used.
    case = ops.SearchedCaseBuilder()
    return case.when(arg, true_expr).else_(false_expr).end()
0.002198
def read(self, size=None):
    """Read `size` bytes."""
    if size is None:
        return self.buf.read() + self.open_file.read()
    contents = self.buf.read(size)
    if len(contents) < size:
        contents += self.open_file.read(size - len(contents))
    return contents
0.01083
def get_component_info(self, comp):
    """Return the information about a sub-component specific to a
    particular data selection.

    Parameters
    ----------
    comp : `binning.Component` object
        Specifies the sub-component

    Returns
    -------
    `ModelComponentInfo` object
    """
    if self.components is None:
        raise ValueError(
            'Model component %s does not have sub-components' % self.sourcekey)
    if self.moving:
        comp_key = "zmax%i" % (comp.zmax)
    elif self.selection_dependent:
        comp_key = comp.make_key('{ebin_name}_{evtype_name}')
    else:
        raise ValueError(
            'Model component %s is not moving or selection dependent' % self.sourcekey)
    return self.components[comp_key]
0.006173
def main():
    """
    1. Parse CLI arguments.
    2. Patch the environment with Python and libraries paths.
       This relaunches Python!
    3. Launch IPython if requested
    4. Load Chimera. IPython can import it now!
    5. Run any additional CLI arguments (-m, -c, -f), if needed
    """
    patch_sys_version()
    args, more_args = parse_cli_options()
    if args.path:
        print(guess_chimera_path()[0])
        return
    patch_environ(nogui=args.nogui)
    if not args.nogui:
        sys.argv.remove('--gui')
    if args.command != 'notebook':
        enable_chimera(verbose=args.verbose, nogui=args.nogui)
    if args.nogui:
        run_cli_options(args)
0.003008
def filter(self): """ Get a filtered list of file imports :return: A list of file imports, with only the id set (you need to refresh them if you want all the attributes to be filled in) :rtype: list of :class:`carto.file_import.FileImportJob` :raise: CartoException """ try: response = self.send(self.get_collection_endpoint(), "get") if self.json_collection_attribute is not None: resource_ids = self.client.get_response_data( response, self.Meta.parse_json)[self.json_collection_attribute] else: resource_ids = self.client.get_response_data( response, self.Meta.parse_json) except Exception as e: raise CartoException(e) resources = [] for resource_id in resource_ids: try: resource = self.resource_class(self.client) except (ValueError, TypeError): continue else: setattr(resource, resource.Meta.id_field, resource_id) resources.append(resource) return resources
0.001647
def float_to_fix(signed, n_bits, n_frac): """**DEPRECATED** Return a function to convert a floating point value to a fixed point value. .. warning:: This function is deprecated in favour of :py:meth:`~.float_to_fp`. For example, a function to convert a float to a signed fractional representation with 8 bits overall and 4 fractional bits (S3.4) can be constructed and used with:: >>> s34 = float_to_fix(signed=True, n_bits=8, n_frac=4) >>> hex(s34(0.5)) '0x8' The fixed point conversion is saturating:: >>> q34 = float_to_fix(False, 8, 4) # Unsigned 4.4 >>> hex(q34(-0.5)) '0x0' >>> hex(q34(15.0)) '0xf0' >>> hex(q34(16.0)) '0xff' Parameters ---------- signed : bool Whether the values that are to be converted should be signed, or clipped at zero. >>> hex(float_to_fix(True, 8, 4)(-0.5)) # Signed '0xf8' >>> hex(float_to_fix(False, 8, 4)(-0.5)) # Unsigned '0x0' .. note:: Regardless of the value of the `signed` parameter the returned value is always an unsigned integer suitable for packing with the struct packing chars `B`, `H`, `I` etc. n_bits : int Total number of bits in the fixed-point representation (including sign bit and fractional bits). n_frac : int Number of fractional bits in the fixed-point representation. Raises ------ ValueError If the number of bits specified is not possible. For example, requiring more fractional bits than there are bits overall will result in a `ValueError`:: >>> fix_to_float(False, 8, 9) Traceback (most recent call last): ValueError: n_frac: 9: Must be less than 8 (and positive). """ warnings.warn("float_to_fix() is deprecated, see float_to_fp", DeprecationWarning) mask = int(2**n_bits - 1) min_v, max_v = validate_fp_params(signed, n_bits, n_frac) # Saturate values def bitsk(value): """Convert a floating point value to a fixed point value. Parameters ---------- value : float The value to convert. """ value = np.clip(value, min_v, max_v) if value < 0: fp_val = (1 << n_bits) + int(value * 2**n_frac) else: fp_val = int(value * 2**n_frac) assert 0 <= fp_val < 1 << (n_bits + 1) return fp_val & mask return bitsk
0.000386
def job_conf(self, job_id):
    """
    A job configuration resource contains information about the job
    configuration for this job.

    :param str job_id: The job id
    :returns: API response object with JSON data
    :rtype: :py:class:`yarn_api_client.base.Response`
    """
    path = '/ws/v1/history/mapreduce/jobs/{jobid}/conf'.format(jobid=job_id)
    return self.request(path)
0.007059
def switch(request, tenant_id, redirect_field_name=auth.REDIRECT_FIELD_NAME): """Switches an authenticated user from one project to another.""" LOG.debug('Switching to tenant %s for user "%s".', tenant_id, request.user.username) endpoint, __ = utils.fix_auth_url_version_prefix(request.user.endpoint) session = utils.get_session() # Keystone can be configured to prevent exchanging a scoped token for # another token. Always use the unscoped token for requesting a # scoped token. unscoped_token = request.user.unscoped_token auth = utils.get_token_auth_plugin(auth_url=endpoint, token=unscoped_token, project_id=tenant_id) try: auth_ref = auth.get_access(session) msg = 'Project switch successful for user "%(username)s".' % \ {'username': request.user.username} LOG.info(msg) except keystone_exceptions.ClientException: msg = ( _('Project switch failed for user "%(username)s".') % {'username': request.user.username}) messages.error(request, msg) auth_ref = None LOG.exception('An error occurred while switching sessions.') # Ensure the user-originating redirection url is safe. # Taken from django.contrib.auth.views.login() redirect_to = request.GET.get(redirect_field_name, '') if not is_safe_url(url=redirect_to, host=request.get_host()): redirect_to = settings.LOGIN_REDIRECT_URL if auth_ref: user = auth_user.create_user_from_token( request, auth_user.Token(auth_ref, unscoped_token=unscoped_token), endpoint) auth_user.set_session_from_user(request, user) message = ( _('Switch to project "%(project_name)s" successful.') % {'project_name': request.user.project_name}) messages.success(request, message) response = shortcuts.redirect(redirect_to) utils.set_response_cookie(response, 'recent_project', request.user.project_id) return response
0.000466
def _parse_stsd(self, atom, fileobj): """Sets channels, bits_per_sample, sample_rate and optionally bitrate. Can raise MP4StreamInfoError. """ assert atom.name == b"stsd" ok, data = atom.read(fileobj) if not ok: raise MP4StreamInfoError("Invalid stsd") try: version, flags, data = parse_full_atom(data) except ValueError as e: raise MP4StreamInfoError(e) if version != 0: raise MP4StreamInfoError("Unsupported stsd version") try: num_entries, offset = cdata.uint32_be_from(data, 0) except cdata.error as e: raise MP4StreamInfoError(e) if num_entries == 0: return # look at the first entry if there is one entry_fileobj = cBytesIO(data[offset:]) try: entry_atom = Atom(entry_fileobj) except AtomError as e: raise MP4StreamInfoError(e) try: entry = AudioSampleEntry(entry_atom, entry_fileobj) except ASEntryError as e: raise MP4StreamInfoError(e) else: self.channels = entry.channels self.bits_per_sample = entry.sample_size self.sample_rate = entry.sample_rate self.bitrate = entry.bitrate self.codec = entry.codec self.codec_description = entry.codec_description
0.001402
def results(context, history_log):
    """Process provided history log and results files."""
    if context.obj is None:
        context.obj = {}
    context.obj['history_log'] = history_log
    if context.invoked_subcommand is None:
        context.invoke(show, item=1)
0.00369
def _update_deferred(self, event): """ This does the actual work of updating channel metadata. This is called by the update(), and runs this method in another thread. """ if isinstance(event, ChannelCreated): i = event.channel[u'id'] event.channel[u'is_archived'] = event.channel[u'is_member'] = False self.channels[i] = event.channel elif isinstance(event, ChannelArchive): self.channels[event.channel][u'is_archived'] = True elif isinstance(event, GroupArchive): self.groups[event.channel][u'is_archived'] = True elif isinstance(event, ChannelDeleted): # FIXME: Handle delete events properly. # Channels don't really get deleted, they're more just archived. self.channels[event.channel][u'is_archived'] = True self.channels[event.channel][u'is_open'] = False elif isinstance(event, GroupClose): # When you close a group, it isn't open to you anymore, but it might # still exist. Treat it like ChannelDeleted self.groups[event.channel][u'is_archived'] = True self.groups[event.channel][u'is_open'] = False elif isinstance(event, ChannelJoined): cid = event.channel[u'id'] self.channels[cid] = event.channel elif isinstance(event, GroupJoined): gid = event.channel[u'id'] self.groups[gid] = event.channel elif isinstance(event, ChannelLeft): self.channels[event.channel][u'is_member'] = False elif isinstance(event, GroupLeft): self.groups[event.channel][u'is_member'] = False elif isinstance(event, ChannelMarked): # TODO: implement datetime handler properly self.channels[event.channel][u'last_read'] = event._b[u'ts'] elif isinstance(event, GroupMarked): self.groups[event.channel][u'last_read'] = event._b[u'ts'] elif isinstance(event, ChannelRename): self.channels[event.channel[u'id']][u'name'] = event.channel[u'name'] elif isinstance(event, GroupRename): self.groups[event.channel[u'id']][u'name'] = event.channel[u'name'] elif isinstance(event, ChannelUnarchive): self.channels[event.channel][u'is_archived'] = False elif isinstance(event, GroupUnarchive): self.groups[event.channel][u'is_archived'] = False elif isinstance(event, ImClose): self.ims[event.channel][u'is_open'] = False elif isinstance(event, ImCreated): i = event.channel[u'id'] event.channel[u'user'] = event.user self.ims[i] = event.channel elif isinstance(event, ImMarked): # TODO: implement datetime handler properly self.ims[event.channel][u'last_read'] = event._b[u'ts'] elif isinstance(event, ImOpen): self.ims[event.channel][u'is_open'] = True elif isinstance(event, PresenceChange): self.users[event.user][u'presence'] = event.presence elif isinstance(event, UserChange): # Everything but the status is provided # Copy this out of the existing object uid = event.user[u'id'] if event.user.get(u'status') is None and u'presence' in self.users[uid]: event.user[u'status'] = self.users[uid][u'presence'] self.users[uid] = event.user elif isinstance(event, TeamPrefChange): self.team[u'prefs'][event.name] = event.value elif isinstance(event, TeamJoin): uid = event.user[u'id'] self.users[uid] = event.user elif isinstance(event, BotAdded) or isinstance(event, BotChanged): bid = event.bot[u'id'] self.bots[bid] = event.bot
0.02272
def makeBaudRatePacket(ID, rate):
    """
    Set baud rate of servo.
    in: rate - 0: 9600, 1: 57600, 2: 115200, 3: 1Mbps
    out: write packet
    """
    if rate not in [0, 1, 2, 3]:
        raise Exception('Packet.makeBaudRatePacket: wrong rate {}'.format(rate))
    pkt = makeWritePacket(ID, xl320.XL320_BAUD_RATE, [rate])
    return pkt
0.032154
def detect_duplicate_repos(repos1, repos2): """Return duplicate repos dict if repo_dir same and vcs different. :param repos1: list of repo expanded dicts :type repos1: list of :py:dict :param repos2: list of repo expanded dicts :type repos2: list of :py:dict :rtype: list of dicts or None :returns: Duplicate lists """ dupes = [] path_dupe_repos = [] curpaths = [r['repo_dir'] for r in repos1] newpaths = [r['repo_dir'] for r in repos2] path_duplicates = list(set(curpaths).intersection(newpaths)) if not path_duplicates: return None path_dupe_repos.extend( [r for r in repos2 if any(r['repo_dir'] == p for p in path_duplicates)] ) if not path_dupe_repos: return None for n in path_dupe_repos: currepo = next((r for r in repos1 if r['repo_dir'] == n['repo_dir']), None) if n['url'] != currepo['url']: dupes += (n, currepo) return dupes
0.002064
def remove_file(file_path):
    """Remove a file from the filesystem."""
    if path.exists(file_path):
        try:
            rmtree(file_path)
        except Exception:
            print('Unable to remove temporary workdir {}'.format(file_path))
0.004016
def make_mashes(fastas, mash_file, threads, kmer = 21, force = False): """ Create mash files for multiple fasta files Input: fastas <list[str]> -- paths to fasta files mash_file <str> -- path to output mash file threads <int> -- # threads for parallelization kmer <int> -- kmer size for mash sketching force <boolean> -- force overwrite of all mash files """ mash_processes = set() sketches = [fasta + '.msh' for fasta in fastas] devnull = open(os.devnull, 'w') # Perform the sketching for fasta, sketch in zip(fastas, sketches): if os.path.isfile(sketch): continue mash_cmd = ['/opt/bin/bio/mash', 'sketch', '-o', fasta, '-k', str(kmer), fasta] mash_processes.add(subprocess.Popen(mash_cmd, stderr=devnull)) if len(mash_processes) >= threads: os.wait() mash_processes.difference_update([mp for mp in mash_processes if mp.poll() is not None]) # Collect stragglers for mp in mash_processes: if mp.poll() is None: mp.wait() # Paste sketches into single mash paste_mashes(sketches, mash_file, force = force) return
0.007383
def _disallow_catching_UnicodeDecodeError(f):
    """
    Patches a template module to prevent catching UnicodeDecodeError.

    Note that this has the effect of also making Template raise a
    UnicodeDecodeError instead of a TemplateEncodingError if the template
    string is not UTF-8 or unicode.
    """
    patch_base = patch.object(VariableNode, 'render', variable_node_render)
    patch_all = patch_base
    if django.VERSION < (1, 9):
        from django.template.debug import DebugVariableNode
        patch_debug = patch.object(
            DebugVariableNode, 'render', debug_variable_node_render)
        patch_all = _compose(patch_all, patch_debug)
    return patch_all(f)
0.001458
def get_preview_url(self, data_type='L1C'):
    """Returns url location of full resolution L1C preview.

    :return:
    """
    if self.data_source is DataSource.SENTINEL2_L1C or self.safe_type is EsaSafeType.OLD_TYPE:
        return self.get_url(AwsConstants.PREVIEW_JP2)
    return self.get_qi_url('{}_PVI.jp2'.format(data_type))
0.008451
def regular_grid_1d_from_shape_pixel_scales_and_origin(shape, pixel_scales, origin=(0.0, 0.0)): """Compute the (y,x) arc second coordinates at the centre of every pixel of an array of shape (rows, columns). Coordinates are defined from the top-left corner, such that the first pixel at location [0, 0] has negative x \ and y values in arc seconds. The regular grid is returned on an array of shape (total_pixels**2, 2) where the 2D dimension of the original 2D \ array are reduced to one dimension. y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index. Parameters ---------- shape : (int, int) The (y,x) shape of the 2D array the regular grid of coordinates is computed for. pixel_scales : (float, float) The (y,x) arc-second to pixel scales of the 2D array. origin : (float, flloat) The (y,x) origin of the 2D array, which the regular grid is shifted around. Returns -------- ndarray A regular grid of (y,x) arc-second coordinates at the centre of every pixel on a 2D array. The regular grid array has dimensions (total_pixels**2, 2). Examples -------- regular_grid_1d = regular_grid_1d_from_shape_pixel_scales_and_origin(shape=(5,5), pixel_scales=(0.5, 0.5), \ origin=(0.0, 0.0)) """ regular_grid_1d = np.zeros((shape[0]*shape[1], 2)) centres_arcsec = centres_from_shape_pixel_scales_and_origin(shape=shape, pixel_scales=pixel_scales, origin=origin) i=0 for y in range(shape[0]): for x in range(shape[1]): regular_grid_1d[i, 0] = -(y - centres_arcsec[0]) * pixel_scales[0] regular_grid_1d[i, 1] = (x - centres_arcsec[1]) * pixel_scales[1] i += 1 return regular_grid_1d
0.006959
def import_submodules(package, name=None, recursive=True): """Import all submodules of ``package``. Parameters ---------- package : `module` or string Package whose submodules to import. name : string, optional Override the package name with this value in the full submodule names. By default, ``package`` is used. recursive : bool, optional If ``True``, recursively import all submodules of ``package``. Otherwise, import only the modules at the top level. Returns ------- pkg_dict : dict Dictionary where keys are the full submodule names and values are the corresponding module objects. """ if isinstance(package, str): package = importlib.import_module(package) if name is None: name = package.__name__ submodules = [m[0] for m in inspect.getmembers(package, inspect.ismodule) if m[1].__name__.startswith('odl')] results = {} for pkgname in submodules: full_name = name + '.' + pkgname try: results[full_name] = importlib.import_module(full_name) except ImportError: pass else: if recursive: results.update(import_submodules(full_name, full_name)) return results
0.000764
def encode(self, *values):
    """Builds a hash from the passed `values`.

    :param values The values to transform into a hashid

    >>> hashids = Hashids('arbitrary salt', 16, 'abcdefghijkl0123456')
    >>> hashids.encode(1, 23, 456)
    '1d6216i30h53elk3'
    """
    if not (values and all(_is_uint(x) for x in values)):
        return ''

    return _encode(values, self._salt, self._min_length, self._alphabet,
                   self._separators, self._guards)
0.003929
def tz_from_geom(connection, geometry): r"""Finding the timezone of a given point or polygon geometry, assuming that the polygon is not crossing a border of a timezone. For a given point or polygon geometry not located within the timezone dataset (e.g. sea) the nearest timezone based on the bounding boxes of the geometries is returned. Parameters ---------- connection : sqlalchemy connection object A valid connection to a postigs database containing the timezone table geometry : shapely geometry object A point or polygon object. The polygon should not cross a timezone. Returns ------- string Timezone using the naming of the IANA time zone database References ---------- http://postgis.net/docs/manual-2.2/geometry_distance_box.html """ # TODO@Günni if geometry.geom_type in ['Polygon', 'MultiPolygon']: coords = geometry.centroid else: coords = geometry sql = """ SELECT tzid FROM oemof_test.tz_world WHERE st_contains(geom, ST_PointFromText('{wkt}', 4326)); """.format(wkt=coords.wkt) if not connection.execute(sql).fetchone(): sql = """ SELECT tzid FROM oemof_test.tz_world ORDER BY ST_PointFromText('{wkt}', 4326) <#> geom LIMIT 1; """.format(wkt=coords.wkt) return connection.execute(sql).fetchone()[0]
0.000709
def calibrate_percentile_ranks(allele, predictor, peptides=None):
    """
    Private helper function.
    """
    global GLOBAL_DATA
    if peptides is None:
        peptides = GLOBAL_DATA["calibration_peptides"]
    predictor.calibrate_percentile_ranks(
        peptides=peptides,
        alleles=[allele])
    return {
        allele: predictor.allele_to_percent_rank_transform[allele],
    }
0.002532
def json_format(out, graph):
    """Outputs the graph in a machine-readable JSON format."""
    steps = {}
    for step, deps in each_step(graph):
        steps[step.name] = {}
        steps[step.name]["deps"] = [dep.name for dep in deps]
    json.dump({"steps": steps}, out, indent=4)
    out.write("\n")
0.003268
def get_instance(cls, state):
    """:rtype: UserStorageHandler"""
    if cls.instance is None:
        cls.instance = UserStorageHandler(state)
    return cls.instance
0.01087
def send( self, record, message, resource=None, labels=None, trace=None, span_id=None ): """Overrides Transport.send(). :type record: :class:`logging.LogRecord` :param record: Python log record that the handler was called with. :type message: str :param message: The message from the ``LogRecord`` after being formatted by the associated log formatters. :type resource: :class:`~google.cloud.logging.resource.Resource` :param resource: (Optional) Monitored resource of the entry. :type labels: dict :param labels: (Optional) Mapping of labels for the entry. :type trace: str :param trace: (optional) traceid to apply to the logging entry. :type span_id: str :param span_id: (optional) span_id within the trace for the log entry. Specify the trace parameter if span_id is set. """ self.worker.enqueue( record, message, resource=resource, labels=labels, trace=trace, span_id=span_id, )
0.003481
def parse_oxide(self): """ Determines if an oxide is a peroxide/superoxide/ozonide/normal oxide. Returns: oxide_type (str): Type of oxide ozonide/peroxide/superoxide/hydroxide/None. nbonds (int): Number of peroxide/superoxide/hydroxide bonds in structure. """ structure = self.structure relative_cutoff = self.relative_cutoff o_sites_frac_coords = [] h_sites_frac_coords = [] lattice = structure.lattice if isinstance(structure.composition.elements[0], Element): comp = structure.composition elif isinstance(structure.composition.elements[0], Specie): elmap = collections.defaultdict(float) for site in structure: for species, occu in site.species.items(): elmap[species.element] += occu comp = Composition(elmap) if Element("O") not in comp or comp.is_element: return "None", 0 for site in structure: syms = [sp.symbol for sp in site.species.keys()] if "O" in syms: o_sites_frac_coords.append(site.frac_coords) if "H" in syms: h_sites_frac_coords.append(site.frac_coords) if h_sites_frac_coords: dist_matrix = lattice.get_all_distances(o_sites_frac_coords, h_sites_frac_coords) if np.any(dist_matrix < relative_cutoff * 0.93): return "hydroxide", len( np.where(dist_matrix < relative_cutoff * 0.93)[0]) / 2.0 dist_matrix = lattice.get_all_distances(o_sites_frac_coords, o_sites_frac_coords) np.fill_diagonal(dist_matrix, 1000) is_superoxide = False is_peroxide = False is_ozonide = False if np.any(dist_matrix < relative_cutoff * 1.35): bond_atoms = np.where(dist_matrix < relative_cutoff * 1.35)[0] is_superoxide = True elif np.any(dist_matrix < relative_cutoff * 1.49): is_peroxide = True bond_atoms = np.where(dist_matrix < relative_cutoff * 1.49)[0] if is_superoxide: if len(bond_atoms) > len(set(bond_atoms)): is_superoxide = False is_ozonide = True try: nbonds = len(set(bond_atoms)) except UnboundLocalError: nbonds = 0.0 if is_ozonide: str_oxide = "ozonide" elif is_superoxide: str_oxide = "superoxide" elif is_peroxide: str_oxide = "peroxide" else: str_oxide = "oxide" if str_oxide == "oxide": nbonds = comp["O"] return str_oxide, nbonds
0.000704
def escape_LDAP(ldap_string):
    # type: (str) -> str
    # pylint: disable=C0103
    """
    Escape a string to let it go in an LDAP filter

    :param ldap_string: The string to escape
    :return: The protected string
    """
    if not ldap_string:
        # No content
        return ldap_string

    # Protect escape character previously in the string
    assert is_string(ldap_string)
    ldap_string = ldap_string.replace(
        ESCAPE_CHARACTER, ESCAPE_CHARACTER + ESCAPE_CHARACTER
    )

    # Leading space
    if ldap_string.startswith(" "):
        ldap_string = "\\ {0}".format(ldap_string[1:])

    # Trailing space
    if ldap_string.endswith(" "):
        ldap_string = "{0}\\ ".format(ldap_string[:-1])

    # Escape other characters
    for escaped in ESCAPED_CHARACTERS:
        ldap_string = ldap_string.replace(escaped, ESCAPE_CHARACTER + escaped)

    return ldap_string
0.001116
def _write_method(schema):
    """Add a write method for named schema to a class."""
    def method(
        self,
        filename=None,
        schema=schema,
        taxon_col='uid',
        taxon_annotations=[],
        node_col='uid',
        node_annotations=[],
        branch_lengths=True,
        **kwargs):
        # Use generic write class to write data.
        return _write(
            self._data,
            filename=filename,
            schema=schema,
            taxon_col=taxon_col,
            taxon_annotations=taxon_annotations,
            node_col=node_col,
            node_annotations=node_annotations,
            branch_lengths=branch_lengths,
            **kwargs
        )
    # Update docs
    method.__doc__ = _write_doc_template(schema)
    return method
0.002519
def profile_prior_model_dict(self):
    """
    Returns
    -------
    profile_prior_model_dict: {str: PriorModel}
        A dictionary mapping instance variable names to variable profiles.
    """
    return {key: value for key, value in
            filter(lambda t: isinstance(t[1], pm.PriorModel) and is_profile_class(t[1].cls),
                   self.__dict__.items())}
0.009569
def regions():
    """
    Get all available regions for the RDS service.

    :rtype: list
    :return: A list of :class:`boto.rds.regioninfo.RDSRegionInfo`
    """
    return [RDSRegionInfo(name='us-east-1',
                          endpoint='rds.us-east-1.amazonaws.com'),
            RDSRegionInfo(name='eu-west-1',
                          endpoint='rds.eu-west-1.amazonaws.com'),
            RDSRegionInfo(name='us-west-1',
                          endpoint='rds.us-west-1.amazonaws.com'),
            RDSRegionInfo(name='us-west-2',
                          endpoint='rds.us-west-2.amazonaws.com'),
            RDSRegionInfo(name='sa-east-1',
                          endpoint='rds.sa-east-1.amazonaws.com'),
            RDSRegionInfo(name='ap-northeast-1',
                          endpoint='rds.ap-northeast-1.amazonaws.com'),
            RDSRegionInfo(name='ap-southeast-1',
                          endpoint='rds.ap-southeast-1.amazonaws.com')
            ]
0.001026
def is_valid_filename(filename, return_ext=False):
    """Check whether the argument is a filename."""
    ext = Path(filename).suffixes
    if len(ext) > 2:
        logg.warn('Your filename has more than two extensions: {}.\n'
                  'Only considering the two last: {}.'.format(ext, ext[-2:]))
        ext = ext[-2:]
    # cases for gzipped/bzipped text files
    if len(ext) == 2 and ext[0][1:] in text_exts and ext[1][1:] in ('gz', 'bz2'):
        return ext[0][1:] if return_ext else True
    elif ext and ext[-1][1:] in avail_exts:
        return ext[-1][1:] if return_ext else True
    elif ''.join(ext) == '.soft.gz':
        return 'soft.gz' if return_ext else True
    elif ''.join(ext) == '.mtx.gz':
        return 'mtx.gz' if return_ext else True
    else:
        if return_ext:
            raise ValueError('"{}" does not end on a valid extension.\n'
                             'Please, provide one of the available extensions.\n{}\n'
                             'Text files with .gz and .bz2 extensions are also supported.'
                             .format(filename, avail_exts))
        else:
            return False
0.003472
def _list_records(self, rtype=None, name=None, content=None): """ list all records :param str rtype: type of record :param str name: name of record :param mixed content: value of record :return list: list of found records :raises Exception: on error """ opts = {'domain': self._domain} if rtype is not None: opts['type'] = rtype.upper() if name is not None: opts['name'] = self._full_name(name) if content is not None: opts['content'] = content opts.update(self._auth) response = self._api.nameserver.info(opts) self._validate_response( response=response, message='Failed to get records') records = [] if 'record' in response['resData']: for record in response['resData']['record']: processed_record = { 'type': record['type'], 'name': record['name'], 'ttl': record['ttl'], 'content': record['content'], 'id': record['id'] } records.append(processed_record) return records
0.001629
async def load_tuple(self, elem_type, params=None, elem=None, obj=None): """ Loads tuple of elements from the reader. Supports the tuple ref. Returns loaded tuple. :param elem_type: :param params: :param elem: :param obj: :return: """ if obj is None: return None elem_fields = params[0] if params else None if elem_fields is None: elem_fields = elem_type.f_specs() c_len = len(obj) if len(elem_fields) != c_len: raise ValueError('Size mismatch') res = elem if elem else [] for i in range(len(elem_fields)): try: self.tracker.push_index(i) fvalue = await self._load_field(params[1:] if params else None, x.eref(res, i) if elem else None, obj=obj[i]) self.tracker.pop() if not elem: res.append(fvalue) except Exception as e: raise helpers.ArchiveException(e, tracker=self.tracker) from e return res
0.00251
def constrain(value, lower=-np.Inf, upper=np.Inf, allow_equal=False):
    """
    Apply interval constraint on stochastic value.
    """
    ok = flib.constrain(value, lower, upper, allow_equal)
    if ok == 0:
        raise ZeroProbability
0.004149
def p_expression_subsetvar(self, p):
    ''' expression : expression SUBSET VAR
    '''
    _LOGGER.debug("expresion -> expresion SUBSET VAR")
    if p[3] not in self._VAR_VALUES:
        if self._autodefine_vars:
            self._VAR_VALUES[p[3]] = TypedList([])
        else:
            raise TypeError("lists expected for SUBSET operator")
    l = self._VAR_VALUES[p[3]]
    if l.type != TypedList.LIST:
        raise TypeError("lists expected for SUBSET operator")
    p[3] = l
    self.p_expression_subsetlist(p)
0.015203
def get_child(self):
    """
    Find file or folder at the remote_path
    :return: File|Folder
    """
    path_parts = self.remote_path.split(os.sep)
    return self._get_child_recurse(path_parts, self.node)
0.008547
def get_columns(self, *, top=None, skip=None):
    """
    Return the columns of this table

    :param int top: specify n columns to retrieve
    :param int skip: specify n columns to skip
    """
    url = self.build_url(self._endpoints.get('get_columns'))
    params = {}
    if top is not None:
        params['$top'] = top
    if skip is not None:
        params['$skip'] = skip
    params = None if not params else params
    response = self.session.get(url, params=params)

    if not response:
        return iter(())

    data = response.json()

    return (self.column_constructor(parent=self, **{self._cloud_data_key: column})
            for column in data.get('value', []))
0.003958
def populate_index( version, circleci_build, appveyor_build, coveralls_build, travis_build ): """Populates ``docs/index.rst`` with release-specific data. Args: version (str): The current version. circleci_build (Union[str, int]): The CircleCI build ID corresponding to the release. appveyor_build (str): The AppVeyor build ID corresponding to the release. coveralls_build (Union[str, int]): The Coveralls.io build ID corresponding to the release. travis_build (int): The Travis CI build ID corresponding to the release. """ with open(RELEASE_INDEX_FILE, "r") as file_obj: template = file_obj.read() contents = template.format( version=version, circleci_build=circleci_build, appveyor_build=appveyor_build, coveralls_build=coveralls_build, travis_build=travis_build, ) with open(INDEX_FILE, "w") as file_obj: file_obj.write(contents)
0.000991
def post_freeze_hook(self):
    """Post :meth:`dtoolcore.ProtoDataSet.freeze` cleanup actions.

    This method is called at the end of the
    :meth:`dtoolcore.ProtoDataSet.freeze` method.

    In the :class:`dtoolcore.storage_broker.DiskStorageBroker` it removes
    the temporary directory for storing item metadata fragment files.
    """
    if os.path.isdir(self._metadata_fragments_abspath):
        shutil.rmtree(self._metadata_fragments_abspath)
0.004115
def from_params(cls, params: List[Tuple[str, Params]] = None) -> "InitializerApplicator": """ Converts a Params object into an InitializerApplicator. The json should be formatted as follows:: [ ["parameter_regex_match1", { "type": "normal" "mean": 0.01 "std": 0.1 } ], ["parameter_regex_match2", "uniform"] ["prevent_init_regex", "prevent"] ] where the first item in each tuple is the regex that matches to parameters, and the second item is a set of parameters that will be passed to ``Initialzer.from_params()``. These values can either be strings, in which case they correspond to the names of initializers, or dictionaries, in which case they must contain the "type" key, corresponding to the name of an initializer. In addition, they may contain auxiliary named parameters which will be fed to the initializer itself. To determine valid auxiliary parameters, please refer to the torch.nn.init documentation. Only "prevent" is a special type which does not have corresponding initializer. Any parameter matching its corresponding regex will be overridden to NOT initialize. Returns ------- An InitializerApplicator containing the specified initializers. """ # pylint: disable=arguments-differ params = params or [] is_prevent = lambda item: item == "prevent" or item == {"type": "prevent"} prevent_regexes = [param[0] for param in params if is_prevent(param[1])] params = [param for param in params if param[1] if not is_prevent(param[1])] initializers = [(name, Initializer.from_params(init_params)) for name, init_params in params] return InitializerApplicator(initializers, prevent_regexes)
0.008089
def _block_stack_repr(self, block_stack):
    """Get a string version of `block_stack`, for debugging."""
    blocks = ", ".join(
        ["(%s, %r)" % (dis.opname[b[0]], b[1]) for b in block_stack]
    )
    return "[" + blocks + "]"
0.007874
def _import_module(self, module_path):
    """Dynamically import a module returning a handle to it.

    :param str module_path: The module path
    :rtype: module
    """
    LOGGER.debug('Importing %s', module_path)
    try:
        return __import__(module_path)
    except ImportError as error:
        LOGGER.critical('Could not import %s: %s', module_path, error)
        return None
0.004651
def __create_vector(self, *vec) -> Vector3: """ Converts a variety of vector types to a flatbuffer Vector3. Supports Flatbuffers Vector3, cTypes Vector3, list/tuple of numbers, or passing x,y,z (z optional) """ import numbers if len(vec) == 1: if hasattr(vec[0], "__getitem__"): # Support all subscriptable types. try: x = float(vec[0][0]) y = float(vec[0][1]) try: z = float(vec[0][2]) except (ValueError, IndexError): z = 0 except ValueError: raise ValueError(f"Unexpected type(s) for creating vector: {type(vec[0][0])}, {type(vec[0][1])}") except IndexError: raise IndexError(f"Unexpected IndexError when creating vector from type: {type(vec[0])}") elif isinstance(vec[0], Vector3.Vector3): x = vec[0].X() y = vec[0].Y() z = vec[0].Z() elif isinstance(vec[0], GameDataStructVector3): x = vec[0].x y = vec[0].y z = vec[0].z else: raise ValueError(f"Unexpected type for creating vector: {type(vec[0])}") elif len(vec) == 2 or len(vec) == 3: if isinstance(vec[0], numbers.Number) and isinstance(vec[1], numbers.Number): x = vec[0] y = vec[1] if len(vec) == 2: z = 0 else: if isinstance(vec[2], numbers.Number): z = vec[2] else: raise ValueError(f"Unexpected type for creating vector: {type(vec[0])}") else: raise ValueError(f"Unexpected type(s) for creating vector: {type(vec[0])}, {type(vec[1])}") else: raise ValueError("Unexpected number of arguments for creating vector") return Vector3.CreateVector3(self.builder, x, y, z)
0.005233
def error(message):
    """
    Throw an error with the given message and immediately quit.

    Args:
        message(str): The message to display.
    """
    fail = '\033[91m'
    end = '\033[0m'
    sys.exit(fail + "Error: {}".format(message) + end)
0.003953
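A minimal usage sketch for the error helper above; the message text is hypothetical:

    error("configuration file not found")
    # prints "Error: configuration file not found" in red and exits the process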
def SelectGlyph(aFont, message="Select a glyph:", title='FontParts'):
    """
    Select a glyph for a given font.

    Optionally a `message` and `title` can be provided.

    ::

        from fontParts.ui import SelectGlyph
        font = CurrentFont()
        glyph = SelectGlyph(font)
        print(glyph)
    """
    return dispatcher["SelectGlyph"](aFont=aFont, message=message, title=title)
0.002519
def from_file(cls, filename, sr=22050):
    """
    Loads an audiofile, uses sr=22050 by default.
    """
    y, sr = librosa.load(filename, sr=sr)
    return cls(y, sr)
0.011561
def is_git_directory_clean(path_to_repo: Path,
                           search_parent_dirs: bool = True,
                           check_untracked: bool = False) -> None:
    """
    Check that the git working directory is in a clean state and raise
    exceptions if not.

    :path_to_repo: The path of the git repo
    """
    repo = Repo(str(path_to_repo), search_parent_directories=search_parent_dirs)
    logger.debug("is_git_directory_clean check for repo in path={} from "
                 "cwd={} with search_parent_directories={}".format(
                     path_to_repo, os.getcwd(), search_parent_dirs))

    # If there are changes to already tracked files
    if repo.is_dirty():
        raise DirtyRepoException("Changes to the index or working tree. "
                                 "Commit them first.")
    if check_untracked:
        if repo.untracked_files:
            raise DirtyRepoException("Untracked files. Commit them first.")
0.004154
def assert_dict_equal( first, second, key_msg_fmt="{msg}", value_msg_fmt="{msg}" ): """Fail unless first dictionary equals second. The dictionaries are considered equal, if they both contain the same keys, and their respective values are also equal. >>> assert_dict_equal({"foo": 5}, {"foo": 5}) >>> assert_dict_equal({"foo": 5}, {}) Traceback (most recent call last): ... AssertionError: key 'foo' missing from right dict The following key_msg_fmt arguments are supported, if the keys do not match: * msg - the default error message * first - the first dict * second - the second dict * missing_keys - list of keys missing from right * extra_keys - list of keys missing from left The following value_msg_fmt arguments are supported, if a value does not match: * msg - the default error message * first - the first dict * second - the second dict * key - the key where the value does not match * first_value - the value in the first dict * second_value - the value in the second dict """ first_keys = set(first.keys()) second_keys = set(second.keys()) missing_keys = list(first_keys - second_keys) extra_keys = list(second_keys - first_keys) if missing_keys or extra_keys: if missing_keys: if len(missing_keys) == 1: msg = "key {!r} missing from right dict".format( missing_keys[0] ) else: keys = ", ".join(sorted(repr(k) for k in missing_keys)) msg = "keys {} missing from right dict".format(keys) else: if len(extra_keys) == 1: msg = "extra key {!r} in right dict".format(extra_keys[0]) else: keys = ", ".join(sorted(repr(k) for k in extra_keys)) msg = "extra keys {} in right dict".format(keys) if key_msg_fmt: msg = key_msg_fmt.format( msg=msg, first=first, second=second, missing_keys=missing_keys, extra_keys=extra_keys, ) raise AssertionError(msg) for key in first: first_value = first[key] second_value = second[key] msg = "key '{}' differs: {!r} != {!r}".format( key, first_value, second_value ) if value_msg_fmt: msg = value_msg_fmt.format( msg=msg, first=first, second=second, key=key, first_value=first_value, second_value=second_value, ) msg = msg.replace("{", "{{").replace("}", "}}") assert_equal(first_value, second_value, msg_fmt=msg)
0.000359
def operator(func=None, *, pipable=False): """Create a stream operator from an asynchronous generator (or any function returning an asynchronous iterable). Decorator usage:: @operator async def random(offset=0., width=1.): while True: yield offset + width * random.random() Decorator usage for pipable operators:: @operator(pipable=True) async def multiply(source, factor): async with streamcontext(source) as streamer: async for item in streamer: yield factor * item In the case of pipable operators, the first argument is expected to be the asynchronous iteratable used for piping. The return value is a dynamically created class. It has the same name, module and doc as the original function. A new stream is created by simply instanciating the operator:: xs = random() ys = multiply(xs, 2) The original function is called at instanciation to check that signature match. In the case of pipable operators, the source is also checked for asynchronous iteration. The operator also have a pipe class method that can be used along with the piping synthax:: xs = random() ys = xs | multiply.pipe(2) This is strictly equivalent to the previous example. Other methods are available: - `original`: the original function as a static method - `raw`: same as original but add extra checking The raw method is useful to create new operators from existing ones:: @operator(pipable=True) def double(source): return multiply.raw(source, 2) """ def decorator(func): """Inner decorator for stream operator.""" # Gather data bases = (Stream,) name = func.__name__ module = func.__module__ extra_doc = func.__doc__ doc = extra_doc or f'Regular {name} stream operator.' # Extract signature signature = inspect.signature(func) parameters = list(signature.parameters.values()) if parameters and parameters[0].name in ('self', 'cls'): raise ValueError( 'An operator cannot be created from a method, ' 'since the decorated function becomes an operator class') # Injected parameters self_parameter = inspect.Parameter( 'self', inspect.Parameter.POSITIONAL_OR_KEYWORD) cls_parameter = inspect.Parameter( 'cls', inspect.Parameter.POSITIONAL_OR_KEYWORD) # Wrapped static method original = func original.__qualname__ = name + '.original' # Raw static method raw = func raw.__qualname__ = name + '.raw' # Init method def init(self, *args, **kwargs): if pipable and args: assert_async_iterable(args[0]) factory = functools.partial(self.raw, *args, **kwargs) return Stream.__init__(self, factory) # Customize init signature new_parameters = [self_parameter] + parameters init.__signature__ = signature.replace(parameters=new_parameters) # Customize init method init.__qualname__ = name + '.__init__' init.__name__ = '__init__' init.__module__ = module init.__doc__ = f'Initialize the {name} stream.' 
if pipable: # Raw static method def raw(*args, **kwargs): if args: assert_async_iterable(args[0]) return func(*args, **kwargs) # Customize raw method raw.__signature__ = signature raw.__qualname__ = name + '.raw' raw.__module__ = module raw.__doc__ = doc # Pipe class method def pipe(cls, *args, **kwargs): return lambda source: cls(source, *args, **kwargs) # Customize pipe signature if parameters and parameters[0].kind in ( inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD): new_parameters = [cls_parameter] + parameters[1:] else: new_parameters = [cls_parameter] + parameters pipe.__signature__ = signature.replace(parameters=new_parameters) # Customize pipe method pipe.__qualname__ = name + '.pipe' pipe.__module__ = module pipe.__doc__ = f'Pipable "{name}" stream operator.' if extra_doc: pipe.__doc__ += "\n\n " + extra_doc # Gather attributes attrs = { '__init__': init, '__module__': module, '__doc__': doc, 'raw': staticmethod(raw), 'original': staticmethod(original), 'pipe': classmethod(pipe) if pipable else None} # Create operator class return type(name, bases, attrs) return decorator if func is None else decorator(func)
0.000197
def get_wsgi_requests(request): ''' For the given batch request, extract the individual requests and create WSGIRequest object for each. ''' valid_http_methods = ["get", "post", "put", "patch", "delete", "head", "options", "connect", "trace"] requests = json.loads(request.body) if type(requests) not in (list, tuple): raise BadBatchRequest("The body of batch request should always be list!") # Max limit check. no_requests = len(requests) if no_requests > _settings.MAX_LIMIT: raise BadBatchRequest("You can batch maximum of %d requests." % (_settings.MAX_LIMIT)) # We could mutate the current request with the respective parameters, but mutation is ghost in the dark, # so lets avoid. Construct the new WSGI request object for each request. def construct_wsgi_from_data(data): ''' Given the data in the format of url, method, body and headers, construct a new WSGIRequest object. ''' url = data.get("url", None) method = data.get("method", None) if url is None or method is None: raise BadBatchRequest("Request definition should have url, method defined.") if method.lower() not in valid_http_methods: raise BadBatchRequest("Invalid request method.") body = data.get("body", "") headers = data.get("headers", {}) return get_wsgi_request_object(request, method, url, headers, body) return [construct_wsgi_from_data(data) for data in requests]
0.00451
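For illustration only (not part of the source data), a sketch of the JSON body the batching helper above expects; the field names mirror the url/method/body/headers keys read in construct_wsgi_from_data, while the endpoints themselves are hypothetical.

import json

# Each entry must carry at least "url" and "method"; "body" and "headers"
# are optional and default to "" and {} respectively.
batch_payload = json.dumps([
    {"url": "/api/users/1/", "method": "get"},
    {
        "url": "/api/users/",
        "method": "post",
        "body": json.dumps({"name": "alice"}),
        "headers": {"Content-Type": "application/json"},
    },
])
# The incoming batch request's body would then be split by
# get_wsgi_requests(request) into one WSGIRequest per entry.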
def join_timeseries(base, overwrite, join_linear=None): """Join two sets of timeseries Parameters ---------- base : :obj:`MAGICCData`, :obj:`pd.DataFrame`, filepath Base timeseries to use. If a filepath, the data will first be loaded from disk. overwrite : :obj:`MAGICCData`, :obj:`pd.DataFrame`, filepath Timeseries to join onto base. Any points which are in both `base` and `overwrite` will be taken from `overwrite`. If a filepath, the data will first be loaded from disk. join_linear : tuple of len(2) A list/array which specifies the period over which the two timeseries should be joined. The first element is the start time of the join period, the second element is the end time of the join period. In the join period (excluding the start and end times), output data will be a linear interpolation between (the annually interpolated) `base` and `overwrite` data. If None, no linear join will be done and any points in (the annually interpolated) `overwrite` data will simply overwrite any points in `base`. Returns ------- :obj:`MAGICCData` The joint timeseries. The resulting data is linearly interpolated onto annual steps """ if join_linear is not None: if len(join_linear) != 2: raise ValueError("join_linear must have a length of 2") if isinstance(base, str): base = MAGICCData(base) elif isinstance(base, MAGICCData): base = deepcopy(base) if isinstance(overwrite, str): overwrite = MAGICCData(overwrite) elif isinstance(overwrite, MAGICCData): overwrite = deepcopy(overwrite) result = _join_timeseries_mdata(base, overwrite, join_linear) return MAGICCData(result)
0.005556
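A hedged usage sketch for the join helper above (not from the source): the file names are purely illustrative and the 2000-2010 join window is chosen only for the example.

# Minimal sketch, assuming MAGICCData can load the hypothetical files below.
joined = join_timeseries(
    base="HISTORICAL_EMISSIONS.IN",
    overwrite="RCP26_EMISSIONS.IN",
    join_linear=(2000, 2010),  # blend linearly between the two inputs here
)
# Outside the join window, points from `overwrite` simply replace points in `base`.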
def _add_gateway_node(self, gw_type, routing_node_gateway, network=None): """ Add a gateway node to existing routing tree. Gateways are only added if they do not already exist. If they do exist, check the destinations of the existing gateway and add destinations that are not already there. A current limitation is that if a gateway doesn't exist and the destinations specified do not have IP addresses that are valid, they are still added (i.e. IPv4 gateway with IPv6 destination is considered invalid). :param Routing self: the routing node, should be the interface routing node :param str gw_type: type of gateway, i.e. netlink, ospfv2_area, etc :param RoutingNodeGateway route_node_gateway: gateway element :param str network: network to bind to. If none, all networks :return: Whether a change was made or not :rtype: bool """ if self.level != 'interface': raise ModificationAborted('You must make this change from the ' 'interface routing level. Current node: {}'.format(self)) if self.related_element_type == 'tunnel_interface': return self._add_gateway_node_on_tunnel(routing_node_gateway) # Find any existing gateways routing_node = list(gateway_by_type(self, type=gw_type, on_network=network)) _networks = [netwk for netwk in self if netwk.ip == network] if network is \ not None else list(self) # Routing Node Gateway to add as Element gateway_element_type = routing_node_gateway.routing_node_element modified = False for network in _networks: # Short circuit for dynamic interfaces if getattr(network, 'dynamic_classid', None): network.data.setdefault('routing_node', []).append( routing_node_gateway) modified = True break # Used for comparison to this_network_node = network.routing_node_element if routing_node and any(netwk for _intf, netwk, gw in routing_node if netwk.routing_node_element == this_network_node and gateway_element_type == gw.routing_node_element): # A gateway exists on this network for gw in network: if gw.routing_node_element == gateway_element_type: existing_dests = [node.routing_node_element for node in gw] for destination in routing_node_gateway.destinations: is_valid_destination = False if destination not in existing_dests: dest_ipv4, dest_ipv6 = _which_ip_protocol(destination) if len(network.ip.split(':')) > 1: # IPv6 if dest_ipv6: is_valid_destination = True else: if dest_ipv4: is_valid_destination = True if is_valid_destination: gw.data.setdefault('routing_node', []).append( {'level': 'any', 'href': destination.href, 'name': destination.name}) modified = True else: # Gateway doesn't exist gw_ipv4, gw_ipv6 = _which_ip_protocol(gateway_element_type) # ipv4, ipv6 or both if len(network.ip.split(':')) > 1: if gw_ipv6: network.data.setdefault('routing_node', []).append( routing_node_gateway) modified = True else: # IPv4 if gw_ipv4: network.data.setdefault('routing_node', []).append( routing_node_gateway) modified = True if modified: self.update() return modified
0.007173
def antenna_pattern(self, right_ascension, declination, polarization, t_gps): """Return the detector response. Parameters ---------- right_ascension: float or numpy.ndarray The right ascension of the source declination: float or numpy.ndarray The declination of the source polarization: float or numpy.ndarray The polarization angle of the source t_gps: float or numpy.ndarray The GPS time (or times) at which to evaluate the detector response Returns ------- fplus: float or numpy.ndarray The plus polarization factor for this sky location / orientation fcross: float or numpy.ndarray The cross polarization factor for this sky location / orientation """ gha = self.gmst_estimate(t_gps) - right_ascension cosgha = cos(gha) singha = sin(gha) cosdec = cos(declination) sindec = sin(declination) cospsi = cos(polarization) sinpsi = sin(polarization) x0 = -cospsi * singha - sinpsi * cosgha * sindec x1 = -cospsi * cosgha + sinpsi * singha * sindec x2 = sinpsi * cosdec x = np.array([x0, x1, x2]) dx = self.response.dot(x) y0 = sinpsi * singha - cospsi * cosgha * sindec y1 = sinpsi * cosgha + cospsi * singha * sindec y2 = cospsi * cosdec y = np.array([y0, y1, y2]) dy = self.response.dot(y) if hasattr(dx, 'shape'): fplus = (x * dx - y * dy).sum(axis=0) fcross = (x * dy + y * dx).sum(axis=0) else: fplus = (x * dx - y * dy).sum() fcross = (x * dy + y * dx).sum() return fplus, fcross
0.003628
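An illustrative call of the response method above (not from the source). It assumes `det` is an instance of the detector class the method belongs to, carrying the gmst_estimate helper and the 3x3 response tensor used in the code; the sky-location values are arbitrary.

import numpy as np

# Hypothetical inputs: one sky location/polarization at a single GPS time.
fplus, fcross = det.antenna_pattern(
    right_ascension=1.7,   # radians
    declination=-1.2,      # radians
    polarization=0.3,      # radians
    t_gps=1126259462,      # GPS seconds
)

# Vectorized use: arrays of sky locations give arrays of responses,
# via the hasattr(dx, 'shape') branch in the method above.
ras = np.linspace(0, 2 * np.pi, 8)
fp, fc = det.antenna_pattern(ras, -1.2, 0.3, 1126259462)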
def _get_mark_if_any(self): """Parse a mark section.""" line = self.next_line() if line.startswith(b'mark :'): return line[len(b'mark :'):] else: self.push_line(line) return None
0.00813
def compile_to_code(definition, handlers={}): """ Generates validation code for validating JSON schema passed in ``definition``. Example: .. code-block:: python import fastjsonschema code = fastjsonschema.compile_to_code({'type': 'string'}) with open('your_file.py', 'w') as f: f.write(code) You can also use it as a script: .. code-block:: bash echo "{'type': 'string'}" | python3 -m fastjsonschema > your_file.py python3 -m fastjsonschema "{'type': 'string'}" > your_file.py Exception :any:`JsonSchemaDefinitionException` is raised when generating the code fails (bad definition). """ _, code_generator = _factory(definition, handlers) return ( 'VERSION = "' + VERSION + '"\n' + code_generator.global_state_code + '\n' + code_generator.func_code )
0.003409
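A follow-on sketch (not from the source) showing how the generated module might be used after writing it to disk; the file name is hypothetical, and treating validate() as the generated entry point is an assumption.

import fastjsonschema

code = fastjsonschema.compile_to_code({"type": "string"})
with open("schema_validator.py", "w") as f:
    f.write(code)

# Assumption: the generated module exposes a top-level validate() function.
from schema_validator import validate
validate("hello")       # passes
# validate(42) would raise a validation error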
def result(self): """ Get the result for a job. This will block if the job is incomplete. Returns: The result for the Job. Raises: An exception if the Job resulted in an exception. """ self.wait() if self._fatal_error: raise self._fatal_error return self._result
0.006431
def po_to_unicode(po_obj): """ Turns a polib :class:`polib.PoFile` or a :class:`polib.PoEntry` into a :class:`unicode` string. :param po_obj: Either a :class:`polib.PoFile` or :class:`polib.PoEntry`. :rtype: :class:`unicode` string. """ po_text = po_obj.__str__() if type(po_text) != types.UnicodeType: po_text = po_text.decode('utf-8') return po_text
0.012315
def add_parameter(self, name, value, meta=None): """Add a parameter to the parameter list. :param name: New parameter's name. :type name: str :param value: New parameter's value. :type value: float :param meta: New parameter's meta property. :type meta: dict """ parameter = Parameter(name, value) if meta: parameter.meta = meta self.parameters.append(parameter)
0.006623
def plot_spatial_firing_rate(self,label_x='x',label_y='y',bins=None,resolution=1.0,geometry=None,weight_function=None,normalize_time=True,normalize_n=False,start_units_with_0=True,**kwargs): """ Plots a two dimensional representation of the firing rates. Each spike is binned according to two labels (`label_x`='x' and `label_y`='y'), which give a 2d histogram with `bins` many bins in each direction. By default, the number of bins orients itself on the dimensions of the respective labels, sampling them from the minimum to the maximum with `resolution`, which defaults to 1. `bins` and `resolution` can be tuples corresponding to the x and y values. """ if bool(self): import matplotlib.pylab as plt if bins is None: if type(resolution) is tuple: bins = (self.linspace_bins(label_x,resolution=resolution[0]),self.linspace_bins(label_y,resolution=resolution[1])) else: bins = (self.linspace_bins(label_x,resolution=resolution),self.linspace_bins(label_y,resolution=resolution)) H,xed,yed = self.spatial_firing_rate(label_x=label_x,label_y=label_y,bins=bins,geometry=geometry,weight_function=weight_function,normalize_time=normalize_time,normalize_n=False,start_units_with_0=start_units_with_0) Y, X = np.meshgrid(yed, xed) kwargs['cmap'] = kwargs.get('cmap','gray') plt.pcolormesh(X, Y, H,**kwargs) plt.gca().set_aspect('equal')
0.025016
def spinner( spinner_name=None, start_text=None, handler_map=None, nospin=False, write_to_stdout=True, ): """Get a spinner object or a dummy spinner to wrap a context. :param str spinner_name: A spinner type e.g. "dots" or "bouncingBar" (default: {"bouncingBar"}) :param str start_text: Text to start off the spinner with (default: {None}) :param dict handler_map: Handler map for signals to be handled gracefully (default: {None}) :param bool nospin: If true, use the dummy spinner (default: {False}) :param bool write_to_stdout: Writes to stdout if true, otherwise writes to stderr (default: True) :return: A spinner object which can be manipulated while alive :rtype: :class:`~vistir.spin.VistirSpinner` Raises: RuntimeError -- Raised if the spinner extra is not installed """ from .spin import create_spinner has_yaspin = None try: import yaspin except ImportError: has_yaspin = False if not nospin: raise RuntimeError( "Failed to import spinner! Reinstall vistir with command:" " pip install --upgrade vistir[spinner]" ) else: spinner_name = "" else: has_yaspin = True spinner_name = "" use_yaspin = (has_yaspin is False) or (nospin is True) if has_yaspin is None or has_yaspin is True and not nospin: use_yaspin = True if start_text is None and use_yaspin is True: start_text = "Running..." with create_spinner( spinner_name=spinner_name, text=start_text, handler_map=handler_map, nospin=nospin, use_yaspin=use_yaspin, write_to_stdout=write_to_stdout, ) as _spinner: yield _spinner
0.002238
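An illustrative way to drive the helper above (not from the source), assuming the yield-based generator is wrapped as a context manager in its module, as its body suggests; the spinner name and messages are arbitrary, and sp.write() is assumed to exist on the yielded spinner object.

import time

# Minimal sketch; pass nospin=True to fall back to the dummy spinner.
with spinner(spinner_name="dots", start_text="Downloading...", nospin=False) as sp:
    time.sleep(1)              # stand-in for real work
    sp.write("chunk 1 done")   # assumption: the spinner exposes write()
    time.sleep(1)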
def manual(cls, node, tval, ns=None): """ Set the node's xsi:type attribute based on either I{value}'s or the node text's class. Then adds the referenced prefix(s) to the node's prefix mapping. @param node: XML node. @type node: L{sax.element.Element} @param tval: XSD schema type name. @type tval: str @param ns: I{tval} XML namespace. @type ns: (prefix, URI) @return: Specified node. @rtype: L{sax.element.Element} """ xta = ":".join((Namespace.xsins[0], "type")) node.addPrefix(Namespace.xsins[0], Namespace.xsins[1]) if ns is None: node.set(xta, tval) else: ns = cls.genprefix(node, ns) qname = ":".join((ns[0], tval)) node.set(xta, qname) node.addPrefix(ns[0], ns[1]) return node
0.002247
def _delay_call(self): """ Makes sure that web service calls are at least 0.2 seconds apart. """ now = time.time() time_since_last = now - self.last_call_time if time_since_last < DELAY_TIME: time.sleep(DELAY_TIME - time_since_last) self.last_call_time = now
0.006006
def add_template_network_events(self, columns, vectors): """ Add vectors of values, keyed by column name, as network events for the current template. """ # initialize with zeros - since vectors can be None, look for the # longest one that isn't new_events = None new_events = numpy.zeros( max([len(v) for v in vectors if v is not None]), dtype=self.network_event_dtype ) # they shouldn't all be None assert new_events is not None new_events['template_id'] = self.template_index for c, v in zip(columns, vectors): if v is not None: if isinstance(v, Array): new_events[c] = v.numpy() else: new_events[c] = v self.template_events = numpy.append(self.template_events, new_events)
0.002491
def add_source(self, source): """ Adds source to observation, keeping sorted order (in separation) """ if not type(source)==Source: raise TypeError('Can only add Source object.') if len(self.sources)==0: self.sources.append(source) else: ind = 0 for s in self.sources: # Keep sorted order of separation if source.separation < s.separation: break ind += 1 self.sources.insert(ind, source)
0.00708
def kn_to_n(kn, N_k = None, cleanup = False): """ Convert KxN_max array to N array Parameters ---------- kn : np.ndarray, float, shape=(K, N_max) N_k (optional) : np.array the N_k matrix from the previous formatting form cleanup (optional) : bool optional command to clean up, since kn can get very large Returns ------- n : np.ndarray, float, shape=(N) """ #print "warning: KxN arrays deprecated; converting into new preferred N shape" # rewrite into n shape [K, N_max] = np.shape(kn) if N_k is None: # We assume that all N_k are N_max. # Not really an easier way to do this without being given the answer. N_k = N_max*np.ones([K], dtype=np.int64) N = np.sum(N_k) n = np.zeros([N], dtype=np.float64) i = 0 for k in range(K): # loop through the old K; some might be zero for ik in range(N_k[k]): n[i] = kn[k, ik] i += 1 if cleanup: del(kn) # very big, let's explicitly delete return n
0.006452
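A tiny worked example (not from the source) of the flattening performed above, with invented numbers: only the first N_k[k] entries of each row are kept.

import numpy as np

kn = np.array([[1.0, 2.0, 3.0],
               [4.0, 5.0, 0.0]])          # second row has only 2 valid samples
n = kn_to_n(kn, N_k=np.array([3, 2]))
print(n)                                  # -> [1. 2. 3. 4. 5.]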
def remove(self, path, recursive=True): """ Remove file or directory at location ``path``. :param path: a path within the FileSystem to remove. :type path: str :param recursive: if the path is a directory, recursively remove the directory and all of its descendants. Defaults to ``True``. :type recursive: bool """ self._connect() if self.sftp: self._sftp_remove(path, recursive) else: self._ftp_remove(path, recursive) self._close()
0.005226
def extender(path=None, cache=None): """A context that temporarily extends sys.path and reverts it after the context is complete.""" old_path = sys.path[:] extend(path, cache=cache) try: yield finally: sys.path = old_path
0.003788
def load_command_table(self, args): # pylint: disable=unused-argument """ Load commands into the command table :param args: List of the arguments from the command line :type args: list :return: The ordered command table :rtype: collections.OrderedDict """ self.cli_ctx.raise_event(EVENT_CMDLOADER_LOAD_COMMAND_TABLE, cmd_tbl=self.command_table) return OrderedDict(self.command_table)
0.006682
def overview(game_id): """Gets the overview information for the game with matching id.""" output = {} # get data overview = mlbgame.data.get_overview(game_id) # parse data overview_root = etree.parse(overview).getroot() try: output = add_raw_box_score_attributes(output, game_id) except ValueError: pass # get overview attributes for x in overview_root.attrib: output[x] = overview_root.attrib[x] # Get probable starter attributes if they exist home_pitcher_tree = overview_root.find('home_probable_pitcher') if home_pitcher_tree is not None: output.update(build_namespaced_attributes( 'home_probable_pitcher', home_pitcher_tree)) else: output.update(build_probable_starter_defaults('home')) away_pitcher_tree = overview_root.find('away_probable_pitcher') if away_pitcher_tree is not None: output.update(build_namespaced_attributes( 'away_probable_pitcher', away_pitcher_tree)) else: output.update(build_probable_starter_defaults('away')) return output
0.000902
def render_aafigure(self, text, options): """ Render an ASCII art figure into the requested format output file. """ fname = get_basename(text, options) fname = '%s.%s' % (get_basename(text, options), options['format']) if True: #TODO: hasattr(self.builder, 'imgpath'): # HTML #TODO relfn = posixpath.join(self.builder.imgpath, fname) relfn = '_build/html/_images/' + fname #TODO: outfn = path.join(self.builder.outdir, '_images', fname) outfn = '/home/luca/repos/aafigure/documentation/_build/html/_images/' + fname else: # LaTeX relfn = fname outfn = path.join(self.builder.outdir, fname) metadata_fname = '%s.aafig' % outfn try: if path.isfile(outfn): extra = None if options['format'].lower() == 'svg': f = None try: try: f = open(metadata_fname, 'r') extra = f.read() except: raise AafigError() finally: if f is not None: f.close() return relfn, outfn, id, extra except AafigError: pass ensuredir(path.dirname(outfn)) try: (visitor, output) = aafigure.render(text, outfn, options) output.close() except aafigure.UnsupportedFormatError as e: raise AafigError(str(e)) extra = None if options['format'].lower() == 'svg': extra = visitor.get_size_attrs() f = open(metadata_fname, 'w') f.write(extra) f.close() return relfn, outfn, id, extra
0.005963
def fileobj_name(fileobj): """ Returns: text: A potential filename for a file object. Always a valid path type, but might be empty or non-existent. """ value = getattr(fileobj, "name", u"") if not isinstance(value, (text_type, bytes)): value = text_type(value) return value
0.003067
def mouseMoveEvent(self, event): """ Drags the selection view for this widget. :param event | <QMouseMoveEvent> """ w = event.pos().x() - self._region.x() h = event.pos().y() - self._region.y() self._region.setWidth(w) self._region.setHeight(h) self.repaint() super(XSnapshotWidget, self).mouseMoveEvent(event)
0.011494
def _get_md_files(self): """Get all markdown files.""" all_f = _all_files_matching_ext(os.getcwd(), "md") exclusions = [ "*.egg/*", "*.eggs/*", "*build/*" ] + self.exclusions return sorted([f for f in all_f if not _is_excluded(f, exclusions)])
0.00627
def on_to_state_edited(self, renderer, path, new_state_identifier): """Connects the outcome with a transition to the newly set state :param Gtk.CellRendererText renderer: The cell renderer that was edited :param str path: The path string of the renderer :param str new_state_identifier: An identifier for the new state that was selected """ def do_self_transition_check(t_id, new_state_identifier): # add self transition meta data if 'self' in new_state_identifier.split('.'): insert_self_transition_meta_data(self.model, t_id, 'outcomes_widget', combined_action=True) outcome_id = self.list_store[path][self.ID_STORAGE_ID] if outcome_id in self.dict_to_other_state or outcome_id in self.dict_to_other_outcome: transition_parent_state = self.model.parent.state if outcome_id in self.dict_to_other_state: t_id = self.dict_to_other_state[outcome_id][2] else: t_id = self.dict_to_other_outcome[outcome_id][2] if new_state_identifier is not None: to_state_id = new_state_identifier.split('.')[1] if not transition_parent_state.transitions[t_id].to_state == to_state_id: try: transition_parent_state.transitions[t_id].modify_target(to_state=to_state_id) do_self_transition_check(t_id, new_state_identifier) except ValueError as e: logger.warning("The target of transition couldn't be modified: {0}".format(e)) else: try: transition_parent_state.remove_transition(t_id) except AttributeError as e: logger.warning("The transition couldn't be removed: {0}".format(e)) else: # there is no transition till now if new_state_identifier is not None and not self.model.state.is_root_state: transition_parent_state = self.model.parent.state to_state_id = new_state_identifier.split('.')[1] try: t_id = transition_parent_state.add_transition(from_state_id=self.model.state.state_id, from_outcome=outcome_id, to_state_id=to_state_id, to_outcome=None, transition_id=None) do_self_transition_check(t_id, new_state_identifier) except (ValueError, TypeError) as e: logger.warning("The transition couldn't be added: {0}".format(e)) return else: logger.debug("outcome-editor got None in to_state-combo-change no transition is added")
0.005489
def component_acting_parent_tag(parent_tag, tag): """ Only intended for use in getting components, look for tag name of fig-group and if so, find the first fig tag inside it as the acting parent tag """ if parent_tag.name == "fig-group": if (len(tag.find_previous_siblings("fig")) > 0): acting_parent_tag = first(extract_nodes(parent_tag, "fig")) else: # Do not return the first fig as parent of itself return None else: acting_parent_tag = parent_tag return acting_parent_tag
0.001773
def permute(self, ordering: np.ndarray, axis: int) -> None: """ Permute the dataset along the indicated axis. Args: ordering (list of int): The desired order along the axis axis (int): The axis along which to permute Returns: Nothing. """ if self._file.__contains__("tiles"): del self._file['tiles'] ordering = list(np.array(ordering).flatten()) # Flatten the ordering, in case we got a column vector self.layers._permute(ordering, axis=axis) if axis == 0: self.row_attrs._permute(ordering) self.row_graphs._permute(ordering) if axis == 1: self.col_attrs._permute(ordering) self.col_graphs._permute(ordering)
0.031722
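A hedged usage sketch for the permute method above (not from the source): it assumes `ds` is an open connection of the class the method belongs to, that its column attributes support dict-style access, and that a "Clusters" column attribute exists; all of these are assumptions for illustration.

import numpy as np

# Hypothetical: group columns (cells) by their cluster assignment.
ordering = np.argsort(ds.col_attrs["Clusters"])
ds.permute(ordering, axis=1)   # column attributes and graphs move with the data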