Columns: text (string, lengths 78 to 104k); score (float64, range 0 to 0.18)
def _get_next_node(self, time_qualifier): """ Method goes to the top of the tree and traverses from there in search of the next suitable node for processing to the level defined by the given time_qualifier :param time_qualifier: defines target level of the tree :return: located node; type <TreeNode> """ hierarchy_entry = self.process_hierarchy.get_by_qualifier(time_qualifier) if hierarchy_entry.parent: parent_time_qualifier = hierarchy_entry.parent.process_entry.time_qualifier parent = self._get_next_node(parent_time_qualifier) else: parent = self.root return self._get_next_child_node(parent)
0.005525
def extract_ranges(row, ranges: ColRanges) -> List[Text]: """ Extracts a list of ranges from a row: - If the range is an int, just get the data at this index - If the range is a tuple of two ints, use them as indices in a slice - If the range is an int then a None, start the slice at the int and go up to the end of the row. """ out = [] for r in ranges: if isinstance(r, int): r = (r, r + 1) if r[1] is None: r = (r[0], len(row)) out.extend(row[r[0]:r[1]]) return [x for x in (y.strip() for y in out) if x]
0.001656
def init(self, conn): """ Create the version table and run the base script on an empty database. :param conn: a DB API 2 connection """ base = self.read_scripts()[0]['fname'] logging.info('Creating the initial schema from %s', base) apply_sql_script(conn, os.path.join(self.upgrade_dir, base)) self.install_versioning(conn)
0.005141
def download(url, filename, overwrite = False): ''' Downloads a file via HTTP. ''' from requests import get from os.path import exists debug('Downloading ' + unicode(url) + '...') data = get(url) if data.status_code == 200: if not exists(filename) or overwrite: f = open(filename, 'wb') f.write(data.content) f.close() return True return False
0.041096
def _pot_quat(self): """Returns the orientation of the pot.""" return T.convert_quat(self.sim.data.body_xquat[self.cube_body_id], to="xyzw")
0.019231
def check_password(self, username, password, properties): """Check the password validity. Used by plain-text authentication mechanisms. Default implementation: retrieve a "plain" password for the `username` and `realm` using `self.get_password` and compare it with the password provided. May be overridden e.g. to check the password against some external authentication mechanism (PAM, LDAP, etc.). :Parameters: - `username`: the username for which the password verification is requested. - `password`: the password to verify. - `properties`: mapping with authentication properties (those provided to the authenticator's ``start()`` method plus some already obtained via the mechanism). :Types: - `username`: `unicode` - `password`: `unicode` - `properties`: mapping :return: `True` if the password is valid. :returntype: `bool` """ logger.debug("check_password{0!r}".format( (username, password, properties))) pwd, pwd_format = self.get_password(username, (u"plain", u"md5:user:realm:password"), properties) if pwd_format == u"plain": logger.debug("got plain password: {0!r}".format(pwd)) return pwd is not None and password == pwd elif pwd_format in (u"md5:user:realm:password"): logger.debug("got md5:user:realm:password password: {0!r}" .format(pwd)) realm = properties.get("realm") if realm is None: realm = "" else: realm = realm.encode("utf-8") username = username.encode("utf-8") password = password.encode("utf-8") # pylint: disable-msg=E1101 urp_hash = hashlib.md5(b"%s:%s:%s").hexdigest() return urp_hash == pwd logger.debug("got password in unknown format: {0!r}".format(pwd_format)) return False
0.002315
def connect_with_username_and_password(cls, url=None, username=None, password=None): """ Returns an object that makes requests to the API, authenticated with a short-lived token retrieved from username and password. If username or password is not supplied, the method will prompt for a username and/or password to be entered interactively. See the connect method for more details about the `url` argument. PLEASE NOTE: This method is being provided as a temporary measure. We strongly encourage users of the Luminoso API to use a long-lived token instead, as explained in the V5_README file. """ from .v4_client import LuminosoClient as v4LC if username is None: username = input('Username: ') v4client = v4LC.connect(url=url, username=username, password=password) if url is None: url = '/' if url.startswith('http'): root_url = get_root_url(url) else: url = URL_BASE + '/' + url.lstrip('/') root_url = URL_BASE return cls(v4client.session, root_url)
0.002508
def add_listener_policy(self, json_data): """Attaches listener policies to an ELB Args: json_data (json): return data from ELB upsert """ env = boto3.session.Session(profile_name=self.env, region_name=self.region) elbclient = env.client('elb') # create stickiness policy if set in configs stickiness = {} elb_settings = self.properties['elb'] if elb_settings.get('ports'): ports = elb_settings['ports'] for listener in ports: if listener.get("stickiness"): stickiness = self.add_stickiness() LOG.info('Stickiness Found: %s', stickiness) break # Attach policies to created ELB for job in json.loads(json_data)['job']: for listener in job['listeners']: policies = [] ext_port = listener['externalPort'] if listener['listenerPolicies']: policies.extend(listener['listenerPolicies']) if stickiness.get(ext_port): policies.append(stickiness.get(ext_port)) if policies: LOG.info('Adding listener policies: %s', policies) elbclient.set_load_balancer_policies_of_listener( LoadBalancerName=self.app, LoadBalancerPort=ext_port, PolicyNames=policies)
0.002778
def make_importfrom_alias(queue, body, context, name): """ Make an ast.alias node for the names list of an ast.ImportFrom. Parameters ---------- queue : deque Instruction Queue body : list Current body. context : DecompilationContext name : str Expected name of the IMPORT_FROM node to be popped. Returns ------- alias : ast.alias Side Effects ------------ Consumes IMPORT_FROM and STORE_NAME instructions from queue. """ import_from, store = queue.popleft(), queue.popleft() expect(import_from, instrs.IMPORT_FROM, "after IMPORT_NAME") if not import_from.arg == name: raise DecompilationError( "IMPORT_FROM name mismatch. Expected %r, but got %s." % ( name, import_from, ) ) return ast.alias( name=name, asname=store.arg if store.arg != name else None, )
0.00107
def make_main_index(struct, selection='"Protein"', ndx='main.ndx', oldndx=None): """Make index file with the special groups. This routine adds the group __main__ and the group __environment__ to the end of the index file. __main__ contains what the user defines as the *central* and *most important* parts of the system. __environment__ is everything else. The template mdp file, for instance, uses these two groups for T-coupling. These groups are mainly useful if the default groups "Protein" and "Non-Protein" are not appropriate. By using symbolic names such as __main__ one can keep scripts more general. :Returns: *groups* is a list of dictionaries that describe the index groups. See :func:`gromacs.cbook.parse_ndxlist` for details. :Arguments: *struct* : filename structure (tpr, pdb, gro) *selection* : string is a ``make_ndx`` command such as ``"Protein"`` or ``r DRG`` which determines what is considered the main group for centering etc. It is passed directly to ``make_ndx``. *ndx* : string name of the final index file *oldndx* : string name of index file that should be used as a basis; if None then the ``make_ndx`` default groups are used. This routine is very dumb at the moment; maybe some heuristics will be added later as could be other symbolic groups such as __membrane__. """ logger.info("Building the main index file {ndx!r}...".format(**vars())) # pass 1: select # get a list of groups # need the first "" to get make_ndx to spit out the group list. _,out,_ = gromacs.make_ndx(f=struct, n=oldndx, o=ndx, stdout=False, input=("", "q")) groups = cbook.parse_ndxlist(out) # find the matching groups, # there is a nasty bug in GROMACS where make_ndx may have multiple # groups, which caused the previous approach to fail big time. # this is a work around the make_ndx bug. # striping the "" allows compatibility with existing make_ndx selection commands. selection = selection.strip("\"") selected_groups = [g for g in groups if g['name'].lower() == selection.lower()] if len(selected_groups) > 1: logging.warn("make_ndx created duplicated groups, performing work around") if len(selected_groups) <= 0: msg = "no groups found for selection {0}, available groups are {1}".format(selection, groups) logging.error(msg) raise ValueError(msg) # Found at least one matching group, we're OK # index of last group last = len(groups) - 1 assert last == groups[-1]['nr'] group = selected_groups[0] # pass 2: # 1) last group is __main__ # 2) __environment__ is everything else (eg SOL, ions, ...) _,out,_ = gromacs.make_ndx(f=struct, n=ndx, o=ndx, stdout=False, # make copy selected group, this now has index last + 1 input=("{0}".format(group['nr']), # rename this to __main__ "name {0} __main__".format(last+1), # make a complement to this group, it get index last + 2 "! \"__main__\"", # rename this to __environment__ "name {0} __environment__".format(last+2), # list the groups "", # quit "q")) return cbook.parse_ndxlist(out)
0.004935
def duplicate(self, name): """ .. versionadded:: 0.5.8 Requires SMC version >= 6.3.2 Duplicate this element. This is a shortcut method that will make a direct copy of the element under the new name and type. :param str name: name for the duplicated element :raises ActionCommandFailed: failed to duplicate the element :return: the newly created element :rtype: Element """ dup = self.make_request( method='update', raw_result=True, resource='duplicate', params={'name': name}) return type(self)(name=name, href=dup.href, type=type(self).typeof)
0.005634
def persist(self, container: Container, image: str) -> None: """ Persists the state of a given container to a BugZoo image on this server. Parameters: container: the container to persist. image: the name of the Docker image that should be created. Raises: ImageAlreadyExists: if the image name is already in use by another Docker image on this server. """ logger_c = logger.getChild(container.uid) logger_c.debug("Persisting container as a Docker image: %s", image) try: docker_container = self.__dockerc[container.uid] except KeyError: logger_c.exception("Failed to persist container: container no longer exists.") # noqa: pycodestyle raise try: _ = self.__client_docker.images.get(image) logger_c.error("Failed to persist container: image, '%s', already exists.", # noqa: pycodestyle image) raise ImageAlreadyExists(image) except docker.errors.ImageNotFound: pass cmd = "docker commit {} {}" cmd = cmd.format(docker_container.id, image) try: subprocess.check_output(cmd, shell=True) except subprocess.CalledProcessError: logger.exception("Failed to persist container (%s) to image (%s).", # noqa: pycodestyle container.uid, image) raise logger_c.debug("Persisted container as a Docker image: %s", image)
0.001271
def configure(screen_name=None, config_file=None, app=None, **kwargs): """ Set up a config dictionary using a bots.yaml config file and optional keyword args. Args: screen_name (str): screen_name of user to search for in config file config_file (str): Path to read for the config file app (str): Name of the app to look for in the config file. Defaults to the one set in users.{screen_name}. default_directories (str): Directories to read for the bots.yaml/json file. Defaults to CONFIG_DIRS. default_bases (str): File names to look for in the directories. Defaults to CONFIG_BASES. """ # Use passed config file, or look for it in the default path. # Super-optionally, accept a different place to look for the file dirs = kwargs.pop('default_directories', None) bases = kwargs.pop('default_bases', None) file_config = {} if config_file is not False: config_file = find_file(config_file, dirs, bases) file_config = parse(config_file) # config and keys dicts # Pull non-authentication settings from the file. # Kwargs, user, app, and general settings are included, in that order of preference # Exclude apps and users sections from config config = {k: v for k, v in file_config.items() if k not in ('apps', 'users')} user_conf = file_config.get('users', {}).get(screen_name, {}) app = app or user_conf.get('app') app_conf = file_config.get('apps', {}).get(app, {}) # Pull user and app data from the file config.update(app_conf) config.update(user_conf) # kwargs take precendence over config file config.update({k: v for k, v in kwargs.items() if v is not None}) return config
0.004044
def get_context_data(self, **kwargs): """This adds into the context of breeding_type and sets it to Active.""" context = super(BreedingList, self).get_context_data(**kwargs) context['breeding_type'] = "Active" return context
0.018797
def HWProcess(cls, proc: HWProcess, ctx: ResourceContext) -> None: """ Gues resource usage by HWProcess """ seen = ctx.seen for stm in proc.statements: encl = stm._enclosed_for full_ev_dep = stm._is_completly_event_dependent now_ev_dep = stm._now_is_event_dependent ev_dep = full_ev_dep or now_ev_dep out_mux_dim = count_mux_inputs_for_outputs(stm) for o in stm._outputs: if o in seen: continue i = out_mux_dim[o] if isinstance(o._dtype, HArray): assert i == 1, (o, i, " only one ram port per HWProcess") for a in walk_assignments(stm, o): assert len(a.indexes) == 1, "one address per RAM port" addr = a.indexes[0] ctx.registerRAM_write_port(o, addr, ev_dep) elif ev_dep: ctx.registerFF(o) if i > 1: ctx.registerMUX(stm, o, i) elif o not in encl: ctx.registerLatch(o) if i > 1: ctx.registerMUX(stm, o, i) elif i > 1: ctx.registerMUX(stm, o, i) else: # just a connection continue if isinstance(stm, SwitchContainer): caseEqs = set([stm.switchOn._eq(c[0]) for c in stm.cases]) inputs = chain( [sig for sig in stm._inputs if sig not in caseEqs], [stm.switchOn]) else: inputs = stm._inputs for i in inputs: # discover only internal signals in this statements for # operators if not i.hidden or i in seen: continue cls.HWProcess_operators(i, ctx, ev_dep)
0.001514
def get_vault_ids_by_authorization(self, authorization_id): """Gets the list of ``Vault`` ``Ids`` mapped to an ``Authorization``. arg: authorization_id (osid.id.Id): ``Id`` of an ``Authorization`` return: (osid.id.IdList) - list of vault ``Ids`` raise: NotFound - ``authorization_id`` is not found raise: NullArgument - ``authorization_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceBinSession.get_bin_ids_by_resource mgr = self._get_provider_manager('AUTHORIZATION', local=True) lookup_session = mgr.get_authorization_lookup_session(proxy=self._proxy) lookup_session.use_federated_vault_view() authorization = lookup_session.get_authorization(authorization_id) id_list = [] for idstr in authorization._my_map['assignedVaultIds']: id_list.append(Id(idstr)) return IdList(id_list)
0.002606
def set_description(self, id, **kwargs): # noqa: E501 """Set description associated with a specific source # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.set_description(id, async_req=True) >>> result = thread.get() :param async_req bool :param str id: (required) :param str body: :return: ResponseContainer If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.set_description_with_http_info(id, **kwargs) # noqa: E501 else: (data) = self.set_description_with_http_info(id, **kwargs) # noqa: E501 return data
0.002186
def match_url(self, url, options=None): """ Return if this rule matches the URL. What to do if rule is matched is up to developer. Most likely ``.is_exception`` attribute should be taken in account. """ options = options or {} for optname in self.options: if optname == 'match-case': # TODO continue if optname not in options: raise ValueError("Rule requires option %s" % optname) if optname == 'domain': if not self._domain_matches(options['domain']): return False continue if options[optname] != self.options[optname]: return False return self._url_matches(url)
0.002561
def close_positions_order(self): """Orders to close open positions Raises: RuntimeError -- if ACCOUNT.RUNNING_ENVIRONMENT is NOT TZERO Returns: list -- list with order """ order_list = [] time = '{} 15:00:00'.format(self.date) if self.running_environment == RUNNING_ENVIRONMENT.TZERO: for code, amount in self.hold_available.iteritems(): order = False if amount < 0: # position opened by selling first: buy to close order = self.send_order( code=code, price=0, amount=abs(amount), time=time, towards=ORDER_DIRECTION.BUY, order_model=ORDER_MODEL.CLOSE, amount_model=AMOUNT_MODEL.BY_AMOUNT, ) elif amount > 0: # position opened by buying first: sell to close order = self.send_order( code=code, price=0, amount=abs(amount), time=time, towards=ORDER_DIRECTION.SELL, order_model=ORDER_MODEL.CLOSE, amount_model=AMOUNT_MODEL.BY_AMOUNT ) if order: order_list.append(order) return order_list else: raise RuntimeError( 'QAACCOUNT with {} environments cannot use this methods'.format( self.running_environment ) )
0.001835
def pick(options, title=None, indicator='*', default_index=0, multi_select=False, min_selection_count=0, options_map_func=None): """Construct and start a :class:`Picker <Picker>`. Usage:: >>> from pick import pick >>> title = 'Please choose an option: ' >>> options = ['option1', 'option2', 'option3'] >>> option, index = pick(options, title) """ picker = Picker(options, title, indicator, default_index, multi_select, min_selection_count, options_map_func) return picker.start()
0.005714
def _git_config(cwd, user, password, output_encoding=None): ''' Helper to retrieve git config options ''' contextkey = 'git.config.' + cwd if contextkey not in __context__: git_dir = rev_parse(cwd, opts=['--git-dir'], user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) if not os.path.isabs(git_dir): paths = (cwd, git_dir, 'config') else: paths = (git_dir, 'config') __context__[contextkey] = os.path.join(*paths) return __context__[contextkey]
0.001435
def encoder(nef, z_dim, batch_size, no_bias=True, fix_gamma=True, eps=1e-5 + 1e-12): '''The encoder is a CNN which takes 32x32 image as input generates the 100 dimensional shape embedding as a sample from normal distribution using predicted meand and variance ''' BatchNorm = mx.sym.BatchNorm data = mx.sym.Variable('data') e1 = mx.sym.Convolution(data, name='enc1', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=nef, no_bias=no_bias) ebn1 = BatchNorm(e1, name='encbn1', fix_gamma=fix_gamma, eps=eps) eact1 = mx.sym.LeakyReLU(ebn1, name='encact1', act_type='leaky', slope=0.2) e2 = mx.sym.Convolution(eact1, name='enc2', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=nef*2, no_bias=no_bias) ebn2 = BatchNorm(e2, name='encbn2', fix_gamma=fix_gamma, eps=eps) eact2 = mx.sym.LeakyReLU(ebn2, name='encact2', act_type='leaky', slope=0.2) e3 = mx.sym.Convolution(eact2, name='enc3', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=nef*4, no_bias=no_bias) ebn3 = BatchNorm(e3, name='encbn3', fix_gamma=fix_gamma, eps=eps) eact3 = mx.sym.LeakyReLU(ebn3, name='encact3', act_type='leaky', slope=0.2) e4 = mx.sym.Convolution(eact3, name='enc4', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=nef*8, no_bias=no_bias) ebn4 = BatchNorm(e4, name='encbn4', fix_gamma=fix_gamma, eps=eps) eact4 = mx.sym.LeakyReLU(ebn4, name='encact4', act_type='leaky', slope=0.2) eact4 = mx.sym.Flatten(eact4) z_mu = mx.sym.FullyConnected(eact4, num_hidden=z_dim, name="enc_mu") z_lv = mx.sym.FullyConnected(eact4, num_hidden=z_dim, name="enc_lv") z = z_mu + mx.symbol.broadcast_mul(mx.symbol.exp(0.5*z_lv),mx.symbol.random_normal(loc=0, scale=1,shape=(batch_size,z_dim))) return z_mu, z_lv, z
0.012958
def _new_mem_buf(buffer=None): """ Allocate a new OpenSSL memory BIO. Arrange for the garbage collector to clean it up automatically. :param buffer: None or some bytes to use to put into the BIO so that they can be read out. """ if buffer is None: bio = _lib.BIO_new(_lib.BIO_s_mem()) free = _lib.BIO_free else: data = _ffi.new("char[]", buffer) bio = _lib.BIO_new_mem_buf(data, len(buffer)) # Keep the memory alive as long as the bio is alive! def free(bio, ref=data): return _lib.BIO_free(bio) _openssl_assert(bio != _ffi.NULL) bio = _ffi.gc(bio, free) return bio
0.001475
def Decode(self, attribute, value): """Decode the value to the required type.""" required_type = self._attribute_types.get(attribute, "bytes") if required_type == "integer": return rdf_structs.SignedVarintReader(value, 0)[0] elif required_type == "unsigned_integer": return rdf_structs.VarintReader(value, 0)[0] elif required_type == "string": if isinstance(value, bytes): return value.decode("utf-8") else: return utils.SmartUnicode(value) else: return value
0.011342
def zdivide(x, y): """ Return `x`/`y`, with 0 instead of NaN where `y` is 0. Parameters ---------- x : array_like Numerator y : array_like Denominator Returns ------- z : ndarray Quotient `x`/`y` """ # See https://stackoverflow.com/a/37977222 return np.divide(x, y, out=np.zeros_like(x), where=(y != 0))
0.002688
def validate(self, value): """ Always returns a Python boolean. """ value = super(Boolean, self).validate(value) if value is not None: value = bool(value) return value
0.00939
def Response( cls, body, method=None, uri=None, adding_headers=None, forcing_headers=None, status=200, streaming=False, **kw): """ shortcut to create an :py:class:`~httpretty.core.Entry` that takes the body as first positional argument .. seealso:: the parameters of this function match those of the :py:class:`~httpretty.core.Entry` constructor :param body: :param method: one of ``httpretty.GET``, ``httpretty.PUT``, ``httpretty.POST``, ``httpretty.DELETE``, ``httpretty.HEAD``, ``httpretty.PATCH``, ``httpretty.OPTIONS``, ``httpretty.CONNECT`` :param uri: :param adding_headers: :param forcing_headers: :param status: defaults to **200** :param streaming: defaults to **False** :param kw: keyword-arguments passed onto the :py:class:`~httpretty.core.Entry` :returns: an :py:class:`~httpretty.core.Entry` """ kw['body'] = body kw['adding_headers'] = adding_headers kw['forcing_headers'] = forcing_headers kw['status'] = int(status) kw['streaming'] = streaming return Entry(method, uri, **kw)
0.004831
def dispos(dra0, decd0, dra, decd): """Compute distance and position angle solving a spherical triangle (no approximations). Source/credit: Skycat Author: A.P. Martinez Parameters ---------- dra0 : float Center RA in decimal degrees. decd0 : float Center DEC in decimal degrees. dra : float Point RA in decimal degrees. decd : float Point DEC in decimal degrees. Returns ------- phi : float Phi in degrees (East of North). dist : float Distance in arcmin. """ radian = 180.0 / math.pi # coord transformed in radians alf = dra / radian alf0 = dra0 / radian del_ = decd / radian del0 = decd0 / radian sd0 = math.sin(del0) sd = math.sin(del_) cd0 = math.cos(del0) cd = math.cos(del_) cosda = math.cos(alf - alf0) cosd = sd0 * sd + cd0 * cd * cosda dist = math.acos(cosd) phi = 0.0 if dist > 0.0000004: sind = math.sin(dist) cospa = (sd * cd0 - cd * sd0 * cosda) / sind #if cospa > 1.0: # cospa=1.0 if math.fabs(cospa) > 1.0: # 2005-06-02: fix from [email protected] cospa = cospa / math.fabs(cospa) sinpa = cd * math.sin(alf - alf0) / sind phi = math.acos(cospa) * radian if sinpa < 0.0: phi = 360.0 - phi dist *= radian dist *= 60.0 if decd0 == 90.0: phi = 180.0 if decd0 == -90.0: phi = 0.0 return (phi, dist)
0.001312
def show_banner(ctx, param, value): """Shows dynaconf awesome banner""" if not value or ctx.resilient_parsing: return set_settings() click.echo(settings.dynaconf_banner) click.echo("Learn more at: http://github.com/rochacbruno/dynaconf") ctx.exit()
0.003571
def get_video_modes(monitor): """ Returns the available video modes for the specified monitor. Wrapper for: const GLFWvidmode* glfwGetVideoModes(GLFWmonitor* monitor, int* count); """ count_value = ctypes.c_int(0) count = ctypes.pointer(count_value) result = _glfw.glfwGetVideoModes(monitor, count) videomodes = [result[i].unwrap() for i in range(count_value.value)] return videomodes
0.002331
def selection_range(self): """ Returns the selected lines boundaries (start line, end line) :return: tuple(int, int) """ editor = self._editor doc = editor.document() start = doc.findBlock( editor.textCursor().selectionStart()).blockNumber() end = doc.findBlock( editor.textCursor().selectionEnd()).blockNumber() text_cursor = QTextCursor(editor.textCursor()) text_cursor.setPosition(editor.textCursor().selectionEnd()) if text_cursor.columnNumber() == 0 and start != end: end -= 1 return start, end
0.003165
def paxos_instance(self): """ Returns instance of PaxosInstance (protocol implementation). """ # Construct instance with the constant attributes. instance = PaxosInstance(self.network_uid, self.quorum_size) # Set the variable attributes from the aggregate. for name in self.paxos_variables: value = getattr(self, name, None) if value is not None: if isinstance(value, (set, list, dict, tuple)): value = deepcopy(value) setattr(instance, name, value) # Return the instance. return instance
0.003135
def variants(self, case_id, skip=0, count=1000, filters=None): """Fetch variants for a case.""" filters = filters or {} logger.debug("Fetching case with case_id: {0}".format(case_id)) case_obj = self.case(case_id) plugin, case_id = self.select_plugin(case_obj) self.filters = plugin.filters gene_lists = (self.gene_list(list_id) for list_id in filters.get('gene_lists', [])) nested_geneids = (gene_list.gene_ids for gene_list in gene_lists) gene_ids = set(itertools.chain.from_iterable(nested_geneids)) if filters.get('gene_ids'): filters['gene_ids'].extend(gene_ids) else: filters['gene_ids'] = gene_ids variants = plugin.variants(case_id, skip, count, filters) return variants
0.00241
def merge_true_table(): """Merge all true table into single excel file. """ writer = pd.ExcelWriter("True Table.xlsx") for p in Path(__file__).parent.select_by_ext(".csv"): df = pd.read_csv(p.abspath, index_col=0) df.to_excel(writer, p.fname, index=True) writer.save()
0.003289
def convert_predict_response(pred, serving_bundle): """Converts a PredictResponse to ClassificationResponse or RegressionResponse. Args: pred: PredictResponse to convert. serving_bundle: A `ServingBundle` object that contains the information about the serving request that the response was generated by. Returns: A ClassificationResponse or RegressionResponse. """ output = pred.outputs[serving_bundle.predict_output_tensor] raw_output = output.float_val if serving_bundle.model_type == 'classification': values = [] for example_index in range(output.tensor_shape.dim[0].size): start = example_index * output.tensor_shape.dim[1].size values.append(raw_output[start:start + output.tensor_shape.dim[1].size]) else: values = raw_output return convert_prediction_values(values, serving_bundle, pred.model_spec)
0.012673
def _add_new_labels(self, sentences): ''' Adds new sentences to the internal indexing of the model. Args: sentences (list): LabeledSentences for each doc to be added Returns: int: number of sentences added to the model ''' sentence_no = -1 total_words = 0 vocab = self.model.vocab model_sentence_n = len([l for l in vocab if l.startswith("DOC_")]) n_sentences = 0 for sentence_no, sentence in enumerate(sentences): sentence_length = len(sentence.words) for label in sentence.labels: total_words += 1 if label in vocab: vocab[label].count += sentence_length else: vocab[label] = gensim.models.word2vec.Vocab( count=sentence_length) vocab[label].index = len(self.model.vocab) - 1 vocab[label].code = [0] vocab[label].sample_probability = 1. self.model.index2word.append(label) n_sentences += 1 return n_sentences
0.004195
def find_templates(): """ Load python modules from templates directory and get templates list :return: list of tuples (pairs): [(compiled regex, lambda regex_match: return message_data)] """ templates = [] templates_directory = (inspect.getsourcefile(lambda: 0).rstrip('__init__.py') + 'templates') template_files = os.listdir(templates_directory) for template_file in template_files: if template_file.startswith('.') or not template_file.endswith('.py'): continue # Hack for dev development and disutils try: template_module = importlib.import_module('templates.{}'.format( template_file.rstrip('.py') )) except ImportError: template_module = importlib.import_module('ross.templates.{}'.format( template_file.rstrip('.py') )) # Iterate throw items in template. # If there are variable ends with 'templates', # extend templates list with it. for (name, content) in template_module.__dict__.items(): if name.endswith('templates'): for (regex_text, data_func) in content: templates.append((re.compile(regex_text, re.IGNORECASE), data_func)) return templates
0.002999
def fetch_raw_data(sql, connection, geometry): """ Fetch the coastdat2 from the database, adapt it to the local time zone and create a time index. """ tmp_dc = {} weather_df = pd.DataFrame( connection.execute(sql).fetchall(), columns=[ 'gid', 'geom_point', 'geom_polygon', 'data_id', 'time_series', 'dat_id', 'type_id', 'type', 'height', 'year', 'leap_year']).drop( 'dat_id', 1) # Get the timezone of the geometry tz = tools.tz_from_geom(connection, geometry) for ix in weather_df.index: # Convert the point of the weather location to a shapely object weather_df.loc[ix, 'geom_point'] = wkt_loads( weather_df['geom_point'][ix]) # Roll the dataset forward according to the timezone, because the # dataset is based on utc (Berlin +1, Kiev +2, London +0) utc = timezone('utc') offset = int(utc.localize(datetime(2002, 1, 1)).astimezone( timezone(tz)).strftime("%z")[:-2]) # Get the year and the length of the data array db_year = weather_df.loc[ix, 'year'] db_len = len(weather_df['time_series'][ix]) # Set absolute time index for the data sets to avoid errors. tmp_dc[ix] = pd.Series( np.roll(np.array(weather_df['time_series'][ix]), offset), index=pd.date_range(pd.datetime(db_year, 1, 1, 0), periods=db_len, freq='H', tz=tz)) weather_df['time_series'] = pd.Series(tmp_dc) return weather_df
0.000647
def init_app(application): """ Initialise an application Set up whitenoise to handle static files. """ config = {k: v for k, v in application.config.items() if k in SCHEMA} kwargs = {'autorefresh': application.debug} kwargs.update((k[11:].lower(), v) for k, v in config.items()) instance = whitenoise.WhiteNoise(application.wsgi_app, **kwargs) instance.add_files(application.static_folder, application.static_url_path) if not hasattr(application, 'extensions'): application.extensions = {} application.extensions['whitenoise'] = instance application.wsgi_app = instance
0.00158
def get_url_args(url): """ Returns a dictionary from a URL params """ url_data = urllib.parse.urlparse(url) arg_dict = urllib.parse.parse_qs(url_data.query) return arg_dict
0.005319
def assignees(self): """ Gets the task assignees """ if not self.can_update(): self._tcex.handle_error(910, [self.type]) for a in self.tc_requests.assignees(self.api_type, self.api_sub_type, self.unique_id): yield a
0.010714
def migration(resource, version, previous_version=''): """Register a migration function""" def decorator(func): @wraps(func) def wrapper(*args, **kwargs): migrated = func(*args, **kwargs) return migrated m = Migration(wrapper, resource, version, previous_version) m.register() return m return decorator
0.002618
def move_dirty_lock_file(dirty_lock_file, sm_path): """ Move the dirty_lock file to the sm_path so that it is no longer found by the automatic backup recovery """ if dirty_lock_file is not None \ and not dirty_lock_file == os.path.join(sm_path, dirty_lock_file.split(os.sep)[-1]): logger.debug("Move dirty lock from root tmp folder {0} to state machine folder {1}" "".format(dirty_lock_file, os.path.join(sm_path, dirty_lock_file.split(os.sep)[-1]))) os.rename(dirty_lock_file, os.path.join(sm_path, dirty_lock_file.split(os.sep)[-1]))
0.010204
def _validate_xtext(xtext): """If input token contains ASCII non-printables, register a defect.""" non_printables = _non_printable_finder(xtext) if non_printables: xtext.defects.append(errors.NonPrintableDefect(non_printables)) if utils._has_surrogates(xtext): xtext.defects.append(errors.UndecodableBytesDefect( "Non-ASCII characters found in header token"))
0.002475
def get_center_of_mass(self): """ Get a tuple (x,y) that is the center of mass. The center of mass is not necessarily the same as the center of the bounding box. Imagine a black square and a single dot wide outside of the square. """ xsum, ysum, counter = 0., 0., 0 for stroke in self.get_pointlist(): for point in stroke: xsum += point['x'] ysum += point['y'] counter += 1 return (xsum / counter, ysum / counter)
0.003724
def define_help_flags(): """Registers help flags. Idempotent.""" # Use a global to ensure idempotence. global _define_help_flags_called if not _define_help_flags_called: flags.DEFINE_flag(HelpFlag()) flags.DEFINE_flag(HelpshortFlag()) # alias for --help flags.DEFINE_flag(HelpfullFlag()) flags.DEFINE_flag(HelpXMLFlag()) _define_help_flags_called = True
0.013055
def format_all(format_string, env): """ Format the input string using each possible combination of lists in the provided environment. Returns a list of formated strings. """ prepared_env = parse_pattern(format_string, env, lambda x, y: [FormatWrapper(x, z) for z in y]) # Generate each possible combination, format the string with it and yield # the resulting string: for field_values in product(*prepared_env.itervalues()): format_env = dict(izip(prepared_env.iterkeys(), field_values)) yield format_string.format(**format_env)
0.003466
def start(self, container, **kwargs): """ Identical to :meth:`docker.api.container.ContainerApiMixin.start` with additional logging. """ self.push_log("Starting container '{0}'.".format(container)) super(DockerFabricClient, self).start(container, **kwargs)
0.010135
def dcdict2rdfpy(dc_dict): """Convert a DC dictionary into an RDF Python object.""" ark_prefix = 'ark: ark:' uri = URIRef('') # Create the RDF Python object. rdf_py = ConjunctiveGraph() # Set DC namespace definition. DC = Namespace('http://purl.org/dc/elements/1.1/') # Get the ark for the subject URI from the ark identifier. for element_value in dc_dict['identifier']: if element_value['content'].startswith(ark_prefix): uri = URIRef( element_value['content'].replace( ark_prefix, 'info:ark' ) ) # Bind the prefix/namespace pair. rdf_py.bind('dc', DC) # Get the values for each element in the ordered DC elements. for element_name in DC_ORDER: element_value_list = dc_dict.get(element_name, []) # Add the values to the RDF object. for element_value in element_value_list: # Handle URL values differently. if ('http' in element_value['content'] and ' ' not in element_value['content']): rdf_py.add(( uri, DC[element_name], URIRef(element_value['content']) )) else: rdf_py.add(( uri, DC[element_name], Literal(element_value['content']) )) return rdf_py
0.001393
def tick(self): """Return the time cost string as expect.""" string = self.passed if self.rounding: string = round(string) if self.readable: string = self.readable(string) return string
0.008032
def side_by_side(left, right): r"""Put two boxes next to each other. Assumes that all lines in the boxes are the same width. Example: >>> left = 'A \nC ' >>> right = 'B\nD' >>> print(side_by_side(left, right)) A B C D <BLANKLINE> """ left_lines = list(left.split('\n')) right_lines = list(right.split('\n')) # Pad the shorter column with whitespace diff = abs(len(left_lines) - len(right_lines)) if len(left_lines) > len(right_lines): fill = ' ' * len(right_lines[0]) right_lines += [fill] * diff elif len(right_lines) > len(left_lines): fill = ' ' * len(left_lines[0]) left_lines += [fill] * diff return '\n'.join(a + b for a, b in zip(left_lines, right_lines)) + '\n'
0.001255
def _load_cytoBand(filename): """ Load UCSC cytoBand table. Parameters ---------- filename : str path to cytoBand file Returns ------- df : pandas.DataFrame cytoBand table if loading was successful, else None References ---------- ..[1] Ryan Dale, GitHub Gist, https://gist.github.com/daler/c98fc410282d7570efc3#file-ideograms-py """ try: # adapted from chromosome plotting code (see [1]_) df = pd.read_table( filename, names=["chrom", "start", "end", "name", "gie_stain"] ) df["chrom"] = df["chrom"].str[3:] return df except Exception as err: print(err) return None
0.002478
def sent_folder(self): """ Shortcut to get SentItems Folder instance :rtype: mailbox.Folder """ return self.folder_constructor(parent=self, name='SentItems', folder_id=OutlookWellKnowFolderNames .SENT.value)
0.006289
def _count_words(self, to_lower=True, delimiters=["\r", "\v", "\n", "\f", "\t", " "]): """ This returns an SArray with, for each input string, a dict from the unique, delimited substrings to their number of occurrences within the original string. The SArray must be of type string. ..WARNING:: This function is deprecated, and will be removed in future versions of Turi Create. Please use the `text_analytics.count_words` function instead. Parameters ---------- to_lower : bool, optional "to_lower" indicates whether to map the input strings to lower case before counts delimiters: list[string], optional "delimiters" is a list of which characters to delimit on to find tokens Returns ------- out : SArray for each input string, a dict from the unique, delimited substrings to their number of occurrences within the original string. Examples -------- >>> sa = turicreate.SArray(["The quick brown fox jumps.", "Word word WORD, word!!!word"]) >>> sa._count_words() dtype: dict Rows: 2 [{'quick': 1, 'brown': 1, 'jumps': 1, 'fox': 1, 'the': 1}, {'word': 2, 'word,': 1, 'word!!!word': 1}] """ if (self.dtype != str): raise TypeError("Only SArray of string type is supported for counting bag of words") if (not all([len(delim) == 1 for delim in delimiters])): raise ValueError("Delimiters must be single-character strings") # construct options, will extend over time options = dict() options["to_lower"] = to_lower == True # defaults to std::isspace whitespace delimiters if no others passed in options["delimiters"] = delimiters with cython_context(): return SArray(_proxy=self.__proxy__.count_bag_of_words(options))
0.003974
def resolve_available_slots(self, worksheet_template, type='a'): """Returns the available slots from the current worksheet that fits with the layout defined in the worksheet_template and type of analysis passed in. Allowed type of analyses are: 'a' (routine analysis) 'b' (blank analysis) 'c' (control) 'd' (duplicate) :param worksheet_template: the worksheet template to match against :param type: type of analyses to restrict that suit with the slots :return: a list of slots positions """ if not worksheet_template or type not in ALLOWED_ANALYSES_TYPES: return list() ws_slots = self.get_slot_positions(type) layout = worksheet_template.getLayout() slots = list() for row in layout: # skip rows that do not match with the given type if row['type'] != type: continue slot = to_int(row['pos']) if slot in ws_slots: # We only want those that are empty continue slots.append(slot) return slots
0.001695
def _add_form_fields(obj, lines): """Improve the documentation of a Django Form class. This highlights the available fields in the form. """ lines.append("**Form fields:**") lines.append("") for name, field in obj.base_fields.items(): field_type = "{}.{}".format(field.__class__.__module__, field.__class__.__name__) tpl = "* ``{name}``: {label} (:class:`~{field_type}`)" lines.append(tpl.format( name=name, field=field, label=field.label or name.replace('_', ' ').title(), field_type=field_type ))
0.003317
def dateof(tag_name, tags): """Given a list of tags, returns the datetime of the tag with the given name; Otherwise None.""" for tag in tags: if tag['name'] == tag_name: commit = read_url(tag['commit']['url']) return parse_timestamp(commit['commit']['committer']['date']) return None
0.006116
def parse_instant_time(slot): """ Parse a slot into an InstantTime object. Sample response: { "entity": "snips/datetime", "range": { "end": 36, "start": 28 }, "rawValue": "tomorrow", "slotName": "weatherForecastStartDatetime", "value": { "grain": "Day", "kind": "InstantTime", "precision": "Exact", "value": "2017-09-15 00:00:00 +00:00" } } :param slot: a intent slot. :return: a parsed InstantTime object, or None. """ date = IntentParser.get_dict_value(slot, ['value', 'value']) if not date: return None date = parse(date) if not date: return None grain = InstantTime.parse_grain( IntentParser.get_dict_value(slot, ['value', 'grain'])) return InstantTime(date, grain)
0.002012
def shorten(string, max_length=80, trailing_chars=3): ''' trims the 'string' argument down to 'max_length' to make previews to long string values ''' assert type(string).__name__ in {'str', 'unicode'}, 'shorten needs string to be a string, not {}'.format(type(string)) assert type(max_length) == int, 'shorten needs max_length to be an int, not {}'.format(type(max_length)) assert type(trailing_chars) == int, 'shorten needs trailing_chars to be an int, not {}'.format(type(trailing_chars)) assert max_length > 0, 'shorten needs max_length to be positive, not {}'.format(max_length) assert trailing_chars >= 0, 'shorten needs trailing_chars to be greater than or equal to 0, not {}'.format(trailing_chars) return ( string ) if len(string) <= max_length else ( '{before:}...{after:}'.format( before=string[:max_length-(trailing_chars+3)], after=string[-trailing_chars:] if trailing_chars>0 else '' ) )
0.008122
def set_baudrate(self, baudrate): '''set baudrate''' try: self.port.setBaudrate(baudrate) except Exception: # for pySerial 3.0, which doesn't have setBaudrate() self.port.baudrate = baudrate
0.008
def get_energies(rootdir, reanalyze, verbose, detailed, sort, fmt): """ Doc string. """ if verbose: logformat = "%(relativeCreated)d msecs : %(message)s" logging.basicConfig(level=logging.INFO, format=logformat) if not detailed: drone = SimpleVaspToComputedEntryDrone(inc_structure=True) else: drone = VaspToComputedEntryDrone(inc_structure=True, data=["filename", "initial_structure"]) ncpus = multiprocessing.cpu_count() logging.info("Detected {} cpus".format(ncpus)) queen = BorgQueen(drone, number_of_drones=ncpus) if os.path.exists(SAVE_FILE) and not reanalyze: msg = "Using previously assimilated data from {}.".format(SAVE_FILE) \ + " Use -r to force re-analysis." queen.load_data(SAVE_FILE) else: if ncpus > 1: queen.parallel_assimilate(rootdir) else: queen.serial_assimilate(rootdir) msg = "Analysis results saved to {} for faster ".format(SAVE_FILE) + \ "subsequent loading." queen.save_data(SAVE_FILE) entries = queen.get_data() if sort == "energy_per_atom": entries = sorted(entries, key=lambda x: x.energy_per_atom) elif sort == "filename": entries = sorted(entries, key=lambda x: x.data["filename"]) all_data = [] for e in entries: if not detailed: delta_vol = "{:.2f}".format(e.data["delta_volume"] * 100) else: delta_vol = e.structure.volume / \ e.data["initial_structure"].volume - 1 delta_vol = "{:.2f}".format(delta_vol * 100) all_data.append((e.data["filename"].replace("./", ""), re.sub(r"\s+", "", e.composition.formula), "{:.5f}".format(e.energy), "{:.5f}".format(e.energy_per_atom), delta_vol)) if len(all_data) > 0: headers = ("Directory", "Formula", "Energy", "E/Atom", "% vol chg") print(tabulate(all_data, headers=headers, tablefmt=fmt)) print("") print(msg) else: print("No valid vasp run found.") os.unlink(SAVE_FILE)
0.000438
def record_set(self, train, labels=None, channel="train"): """Build a :class:`~RecordSet` from a numpy :class:`~ndarray` matrix and label vector. For the 2D ``ndarray`` ``train``, each row is converted to a :class:`~Record` object. The vector is stored in the "values" entry of the ``features`` property of each Record. If ``labels`` is not None, each corresponding label is assigned to the "values" entry of the ``labels`` property of each Record. The collection of ``Record`` objects are protobuf serialized and uploaded to new S3 locations. A manifest file is generated containing the list of objects created and also stored in S3. The number of S3 objects created is controlled by the ``train_instance_count`` property on this Estimator. One S3 object is created per training instance. Args: train (numpy.ndarray): A 2D numpy array of training data. labels (numpy.ndarray): A 1D numpy array of labels. Its length must be equal to the number of rows in ``train``. channel (str): The SageMaker TrainingJob channel this RecordSet should be assigned to. Returns: RecordSet: A RecordSet referencing the encoded, uploading training and label data. """ s3 = self.sagemaker_session.boto_session.resource('s3') parsed_s3_url = urlparse(self.data_location) bucket, key_prefix = parsed_s3_url.netloc, parsed_s3_url.path key_prefix = key_prefix + '{}-{}/'.format(type(self).__name__, sagemaker_timestamp()) key_prefix = key_prefix.lstrip('/') logger.debug('Uploading to bucket {} and key_prefix {}'.format(bucket, key_prefix)) manifest_s3_file = upload_numpy_to_s3_shards(self.train_instance_count, s3, bucket, key_prefix, train, labels) logger.debug("Created manifest file {}".format(manifest_s3_file)) return RecordSet(manifest_s3_file, num_records=train.shape[0], feature_dim=train.shape[1], channel=channel)
0.00782
def create(self, name, plugin_name, plugin_version, cluster_template_id=None, default_image_id=None, is_transient=None, description=None, cluster_configs=None, node_groups=None, user_keypair_id=None, anti_affinity=None, net_id=None, count=None, use_autoconfig=None, shares=None, is_public=None, is_protected=None): """Launch a Cluster.""" data = { 'name': name, 'plugin_name': plugin_name, 'plugin_version': plugin_version, } return self._do_create(data, cluster_template_id, default_image_id, is_transient, description, cluster_configs, node_groups, user_keypair_id, anti_affinity, net_id, count, use_autoconfig, shares, is_public, is_protected, api_ver=2)
0.008502
def kill_all(): """When polysh quits, we kill all the remote shells we started""" for i in dispatchers.all_instances(): try: os.kill(-i.pid, signal.SIGKILL) except OSError: # The process was already dead, no problem pass
0.003571
def PopupGetFolder(message, title=None, default_path='', no_window=False, size=(None, None), button_color=None, background_color=None, text_color=None, icon=DEFAULT_WINDOW_ICON, font=None, no_titlebar=False, grab_anywhere=False, keep_on_top=False, location=(None, None), initial_folder=None): """ Display popup with text entry field and browse button. Browse for folder :param message: :param default_path: :param no_window: :param size: :param button_color: :param background_color: :param text_color: :param icon: :param font: :param no_titlebar: :param grab_anywhere: :param keep_on_top: :param location: :return: Contents of text field. None if closed using X or cancelled """ if no_window: if Window.QTApplication is None: Window.QTApplication = QApplication(sys.argv) folder_name = QFileDialog.getExistingDirectory(dir=initial_folder) return folder_name layout = [[Text(message, auto_size_text=True, text_color=text_color, background_color=background_color)], [InputText(default_text=default_path, size=size), FolderBrowse(initial_folder=initial_folder)], [CloseButton('Ok', size=(60, 20), bind_return_key=True), CloseButton('Cancel', size=(60, 20))]] _title = title if title is not None else message window = Window(title=_title, icon=icon, auto_size_text=True, button_color=button_color, background_color=background_color, font=font, no_titlebar=no_titlebar, grab_anywhere=grab_anywhere, keep_on_top=keep_on_top, location=location) (button, input_values) = window.Layout(layout).Read() if button != 'Ok': return None else: path = input_values[0] return path
0.005394
def getLocationRepresentation(self): """ Get the full population representation of the location layer. """ activeCells = np.array([], dtype="uint32") totalPrevCells = 0 for module in self.L6aModules: activeCells = np.append(activeCells, module.getActiveCells() + totalPrevCells) totalPrevCells += module.numberOfCells() return activeCells
0.007317
def get(self, sid): """ Constructs a InstalledAddOnExtensionContext :param sid: The unique Extension Sid :returns: twilio.rest.preview.marketplace.installed_add_on.installed_add_on_extension.InstalledAddOnExtensionContext :rtype: twilio.rest.preview.marketplace.installed_add_on.installed_add_on_extension.InstalledAddOnExtensionContext """ return InstalledAddOnExtensionContext( self._version, installed_add_on_sid=self._solution['installed_add_on_sid'], sid=sid, )
0.007042
def _get_min_distance_numpy(element): """ NumPy based implementation of get_min_distance """ xys = element.array([0, 1]) with warnings.catch_warnings(): warnings.filterwarnings('ignore', r'invalid value encountered in') xys = xys.astype('float32').view(np.complex64) distances = np.abs(xys.T-xys) np.fill_diagonal(distances, np.inf) distances = distances[distances>0] if len(distances): return distances.min() return 0
0.003984
def convert(html): """converts an html string to markdown while preserving unsupported markup.""" bs = BeautifulSoup(html, 'html.parser') _markdownify(bs) ret = unicode(bs).replace(u'\xa0', '&nbsp;') ret = re.sub(r'\n{3,}', r'\n\n', ret) # ! FIXME: hack ret = re.sub(r'&lt;&lt;&lt;FLOATING LINK: (.+)&gt;&gt;&gt;', r'<\1>', ret) # ! FIXME: hack sp = re.split(r'(&lt;&lt;&lt;BLOCKQUOTE: .*?&gt;&gt;&gt;)', ret, flags=re.DOTALL) for i,e in enumerate(sp): if e[:len('&lt;&lt;&lt;BLOCKQUOTE:')] == '&lt;&lt;&lt;BLOCKQUOTE:': sp[i] = '> ' + e[len('&lt;&lt;&lt;BLOCKQUOTE:') : -len('&gt;&gt;&gt;')] sp[i] = sp[i].replace('\n', '\n> ') ret = ''.join(sp) return ret.strip('\n')
0.027576
def next_frame_ae_tiny(): """Conv autoencoder, tiny set for testing.""" hparams = next_frame_tiny() hparams.bottom["inputs"] = modalities.video_bitwise_bottom hparams.top["inputs"] = modalities.video_top hparams.batch_size = 8 hparams.dropout = 0.4 return hparams
0.028881
def is_redundant_multiplicon(self, value): """ Returns True if the passed multiplicon ID is redundant, False otherwise. - value, (int) multiplicon ID """ if not hasattr(self, '_redundant_multiplicon_cache'): sql = '''SELECT id FROM multiplicons WHERE is_redundant="-1"''' cur = self._dbconn.cursor() cur.execute(sql, {'id': str(value)}) result = [int(r[0]) for r in cur.fetchall()] self._redundant_multiplicon_cache = set(result) if value in self._redundant_multiplicon_cache: return True else: return False
0.003049
def get_meta(catid, sig): ''' Get metadata of dataset via ID. ''' meta_base = './static/dataset_list' if os.path.exists(meta_base): pass else: return False pp_data = {'logo': '', 'kind': '9'} for wroot, wdirs, wfiles in os.walk(meta_base): for wdir in wdirs: if wdir.lower().endswith(sig): # Got the dataset of certain ID. ds_base = pathlib.Path(os.path.join(wroot, wdir)) for uu in ds_base.iterdir(): if uu.name.endswith('.xlsx'): meta_dic = chuli_meta('u' + sig[2:], uu) pp_data['title'] = meta_dic['title'] pp_data['cnt_md'] = meta_dic['anytext'] pp_data['user_name'] = 'admin' pp_data['def_cat_uid'] = catid pp_data['gcat0'] = catid pp_data['def_cat_pid'] = catid[:2] + '00' pp_data['extinfo'] = {} elif uu.name.startswith('thumbnail_'): pp_data['logo'] = os.path.join(wroot, wdir, uu.name).strip('.') return pp_data
0.001667
def doubleclick(self, window_name, object_name): """ Double click on the object @param window_name: Window name to look for, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to look for, either full name, LDTP's name convention, or a Unix glob. Or menu hierarchy @type object_name: string @return: 1 on success. @rtype: integer """ object_handle = self._get_object_handle(window_name, object_name) if not object_handle.AXEnabled: raise LdtpServerException(u"Object %s state disabled" % object_name) self._grabfocus(object_handle) x, y, width, height = self._getobjectsize(object_handle) window = self._get_front_most_window() # Mouse double click on the object # object_handle.doubleClick() window.doubleClickMouse((x + width / 2, y + height / 2)) return 1
0.00398
def get_self_host(request_data): """ Returns the current host. :param request_data: The request as a dict :type: dict :return: The current host :rtype: string """ if 'http_host' in request_data: current_host = request_data['http_host'] elif 'server_name' in request_data: current_host = request_data['server_name'] else: raise Exception('No hostname defined') if ':' in current_host: current_host_data = current_host.split(':') possible_port = current_host_data[-1] try: possible_port = float(possible_port) current_host = current_host_data[0] except ValueError: current_host = ':'.join(current_host_data) return current_host
0.002334
def render_bash_options(self): """Rendering Bash options.""" options = '' if self.config.debug: options += "set -x\n" if self.config.strict: options += "set -euo pipefail\n" return options
0.007937
def iter_subclasses(class_): """Iterate over all the subclasses (and subclasses thereof, etc.) of given class. :param class_: Class to yield the subclasses of :return: Iterable of subclasses, sub-subclasses, etc. of ``class_`` """ ensure_class(class_) classes = set() def descend(class_): subclasses = set(class_.__subclasses__()) - classes classes.update(subclasses) return subclasses result = breadth_first(class_, descend) next(result) # omit ``class_`` itself return result
0.001818
def date_asn_block(self, ip, announce_date=None): """ Get the ASN and the IP Block announcing the IP at a specific date. :param ip: IP address to search for :param announce_date: Date of the announcement :rtype: tuple .. code-block:: python (announce_date, asn, block) .. note:: the returned announce_date might be different of the one given in parameter because some raw files are missing and we don't have the information. In this case, the nearest known date will be chosen, """ assignations, announce_date, keys = self.run(ip, announce_date) pos = next((i for i, j in enumerate(assignations) if j is not None), None) if pos is not None: block = keys[pos] if block != '0.0.0.0/0': return announce_date, assignations[pos], block return None
0.003012
def load_sources(self, sources): """Delete all sources in the ROI and load the input source list.""" self.clear() for s in sources: if isinstance(s, dict): s = Model.create_from_dict(s) self.load_source(s, build_index=False) self._build_src_index()
0.006192
def get_client(self, destination_params, job_id, **kwargs): """Build a client given specific destination parameters and job_id.""" destination_params = _parse_destination_params(destination_params) destination_params.update(**kwargs) job_manager_interface_class = self.job_manager_interface_class job_manager_interface_args = dict(destination_params=destination_params, **self.job_manager_interface_args) job_manager_interface = job_manager_interface_class(**job_manager_interface_args) return self.client_class(destination_params, job_id, job_manager_interface, **self.extra_client_kwds)
0.007764
def _compute_asset_lifetimes(self, country_codes): """ Compute and cache a recarray of asset lifetimes. """ equities_cols = self.equities.c if country_codes: buf = np.array( tuple( sa.select(( equities_cols.sid, equities_cols.start_date, equities_cols.end_date, )).where( (self.exchanges.c.exchange == equities_cols.exchange) & (self.exchanges.c.country_code.in_(country_codes)) ).execute(), ), dtype='f8', # use doubles so we get NaNs ) else: buf = np.array([], dtype='f8') lifetimes = np.recarray( buf=buf, shape=(len(buf),), dtype=[ ('sid', 'f8'), ('start', 'f8'), ('end', 'f8') ], ) start = lifetimes.start end = lifetimes.end start[np.isnan(start)] = 0 # convert missing starts to 0 end[np.isnan(end)] = np.iinfo(int).max # convert missing end to INTMAX # Cast the results back down to int. return lifetimes.astype([ ('sid', 'i8'), ('start', 'i8'), ('end', 'i8'), ])
0.001438
def _tostream(self, stream, skipprepack= False): ''' Convert the struct into a bytes stream. This is the standard way to convert a NamedStruct to bytes. :param stream: a list of bytes to get the result :param skipprepack: if True, the prepack stage is skipped. For parser internal use. :returns: total appended size ''' if not skipprepack: self._prepack() datasize = self._packto(stream) paddingSize = self._parser.paddingsize2(datasize) if paddingSize > datasize: stream.write(b'\x00' * (paddingSize - datasize)) return paddingSize
0.011923
def translate_basic(usercode): """ Translate a basic color name to color with explanation. """ codenum = get_code_num(codes['fore'][usercode]) colorcode = codeformat(codenum) msg = 'Name: {:>10}, Number: {:>3}, EscapeCode: {!r}'.format( usercode, codenum, colorcode ) if disabled(): return msg return str(C(msg, fore=usercode))
0.002584
def detect(self): """Detect the IP address.""" if self.opts_family == AF_INET6: kind = IPV6_PUBLIC else: # 'INET': kind = IPV4 theip = None try: theip = detect_ip(kind) except GetIpException: LOG.exception("socket detector raised an exception:") self.set_current_value(theip) return theip
0.004988
def save_to_tomodir(self, directory): """Save the tomodir instance to a directory structure. Note ---- Test cases: * modeling only * inversion only * modeling and inversion """ self.create_tomodir(directory) self.grid.save_elem_file( directory + os.sep + 'grid/elem.dat' ) self.grid.save_elec_file( directory + os.sep + 'grid/elec.dat' ) # modeling if self.configs.configs is not None: self.configs.write_crmod_config( directory + os.sep + 'config/config.dat' ) if self.assignments['forward_model'] is not None: self.parman.save_to_rho_file( directory + os.sep + 'rho/rho.dat', self.assignments['forward_model'][0], self.assignments['forward_model'][1], ) self.crmod_cfg.write_to_file( directory + os.sep + 'exe/crmod.cfg' ) if self.assignments['measurements'] is not None: self.configs.write_crmod_volt( directory + os.sep + 'mod/volt.dat', self.assignments['measurements'] ) if self.assignments['sensitivities'] is not None: self._save_sensitivities( directory + os.sep + 'mod/sens', ) if self.assignments['potentials'] is not None: self._save_potentials( directory + os.sep + 'mod/pot', ) # inversion self.crtomo_cfg.write_to_file( directory + os.sep + 'exe/crtomo.cfg' ) if self.noise_model is not None: self.noise_model.write_crt_noisemod( directory + os.sep + 'exe/crt.noisemod' ) if not os.path.isdir(directory + os.sep + 'inv'): os.makedirs(directory + os.sep + 'inv')
0.001021
def close_room(self, room, namespace): """Remove all participants from a room.""" try: for sid in self.get_participants(namespace, room): self.leave_room(sid, namespace, room) except KeyError: pass
0.007663
def pull(self, remote="origin", ref=None): """Do a git pull of `ref` from `remote`.""" return git_pull(self.repo_dir, remote=remote, ref=ref)
0.012739
def table2matrix(table):
    """convert a table to a list of lists - a 2D matrix"""

    if not is_simpletable(table):
        raise NotSimpleTable("Not able to read a cell in the table as a string")
    rows = []
    for tr in table('tr'):
        row = []
        for td in tr('td'):
            td = tdbr2EOL(td)       # convert any '<br>' in the td to a line ending
            try:
                row.append(td.contents[0])
            except IndexError:
                row.append('')
        rows.append(row)
    return rows
0.003839
def compute_indel_length(fs_df):
    """Computes the indel length accounting for whether it is an insertion or deletion.

    Parameters
    ----------
    fs_df : pd.DataFrame
        mutation input as dataframe only containing indel mutations

    Returns
    -------
    indel_len : pd.Series
        length of indels
    """
    indel_len = pd.Series(index=fs_df.index)
    indel_len[fs_df['Reference_Allele']=='-'] = fs_df['Tumor_Allele'][fs_df['Reference_Allele']=='-'].str.len()
    indel_len[fs_df['Tumor_Allele']=='-'] = fs_df['Reference_Allele'][fs_df['Tumor_Allele']=='-'].str.len()
    indel_len = indel_len.fillna(0).astype(int)
    return indel_len
0.010526
def has_succeed(self):
        """ Check if the connection has succeeded

            Returns:
                True if the connection has succeeded, False otherwise.
        """

        status_code = self._response.status_code

        if status_code in [HTTP_CODE_ZERO, HTTP_CODE_SUCCESS, HTTP_CODE_CREATED, HTTP_CODE_EMPTY, HTTP_CODE_MULTIPLE_CHOICES]:
            return True

        if status_code in [HTTP_CODE_BAD_REQUEST, HTTP_CODE_UNAUTHORIZED, HTTP_CODE_PERMISSION_DENIED, HTTP_CODE_NOT_FOUND, HTTP_CODE_METHOD_NOT_ALLOWED, HTTP_CODE_CONNECTION_TIMEOUT, HTTP_CODE_CONFLICT, HTTP_CODE_PRECONDITION_FAILED, HTTP_CODE_INTERNAL_SERVER_ERROR, HTTP_CODE_SERVICE_UNAVAILABLE]:
            return False

        raise Exception('Unknown status code %s.' % status_code)
0.005089
def register_options(cls, register):
    """Register an option to make capturing snapshots optional.

    This class is intended to be extended by JVM resolvers (coursier and ivy),
    and the option name should reflect that.
    """
    super(JvmResolverBase, cls).register_options(register)
    # TODO This flag should be defaulted to True when we are doing hermetic execution,
    # and should probably go away as we move forward into that direction.
    register('--capture-snapshots', type=bool, default=False,
             help='Enable capturing snapshots to add directory digests to dependency jars. '
                  'Note that this is necessary when hermetic execution is enabled.')
0.008915
def create_mapping(record, keys): """Create a field mapping for use in API updates and creates. Args: record (BaseModel): Record that should be mapped. keys (list[str]): Fields that should be mapped as keys. Returns: dict: Dictionary with keys: * ``field_mappings``: Field mappings as required by API. * ``data``: Ordered data dictionary for input record. """ ordered = OrderedDict() field_mappings = [] for key, value in record.items(): ordered[key] = value field_mappings.append({ 'columnNumber': len(ordered), # Five9 is not zero indexed. 'fieldName': key, 'key': key in keys, }) return { 'field_mappings': field_mappings, 'data': ordered, 'fields': list(ordered.values()), }
0.002116
def dismiss(self, targets, exit_when=None, sleep_interval=0.5, appearance_timeout=20, timeout=120):
        """
        Automatically dismiss the target objects

        Args:
            targets (:obj:`list`): list of poco objects to be dismissed
            exit_when: termination condition, default is None which means to automatically exit when the list of
             ``targets`` is empty
            sleep_interval: time interval between each action on the given targets, default is 0.5s
            appearance_timeout: time interval to wait for a given target to appear on the screen, automatically exit
             on timeout, default is 20s
            timeout: dismiss function timeout, default is 120s

        Raises:
            PocoTargetTimeout: when the dismiss operation times out; under normal circumstances this should not
             happen, and if it does, it will be reported
        """

        try:
            self.wait_for_any(targets, timeout=appearance_timeout)
        except PocoTargetTimeout:
            # returns only when waiting for the targets to appear has timed out
            warnings.warn('Waiting timeout when trying to dismiss something before they appear. Targets are {}'
                          .format(targets))
            return

        start_time = time.time()
        while True:
            no_target = True
            for t in targets:
                if t.exists():
                    try:
                        for n in t:
                            try:
                                n.click(sleep_interval=sleep_interval)
                                no_target = False
                            except:
                                pass
                    except:
                        # Catch the NodeHasBeenRemoved exception if some node was removed during the iteration
                        # above and just ignore it, as this will not affect the result.
                        pass
            time.sleep(sleep_interval)
            should_exit = exit_when() if exit_when else False
            if no_target or should_exit:
                return
            if time.time() - start_time > timeout:
                raise PocoTargetTimeout('dismiss', targets)
0.004673
def find_one(cls, filter=None, *args, **kwargs): """ Returns one document dict if one passes the filter. Returns None otherwise. """ return cls.collection.find_one(filter, *args, **kwargs)
0.008772
def convert_to_pypi_version(version): """ Convert a git tag version string into something compatible with `PEP-440 <https://www.python.org/dev/peps/pep-0440/>`_. :param version: The input version string, normally directly out of git describe. :return: PEP-440 version string Usage:: >>> convert_to_pypi_version('r1.0.1') # Normal Releases 1.0.1 >>> convert_to_pypi_version('r1.0.1-dev1') # Dev Releases 1.0.1.dev1 >>> convert_to_pypi_version('r1.0.1-a1') # Alpha Releases 1.0.1a1 >>> convert_to_pypi_version('r1.0.1-b4') # Beta Releases 1.0.1b4 >>> convert_to_pypi_version('r1.0.1-rc2') # RC Releases 1.0.1rc2 >>> convert_to_pypi_version('r1.0.1-12-geaea7b6') # Post Releases 1.0.1.post12 """ v = re.search('^[r,v]{0,1}(?P<final>[0-9\.]+)(\-(?P<pre>(a|b|rc)[0-9]+))?(\-(?P<dev>dev[0-9]+))?(\-(?P<post>[0-9]+))?(\-.+)?$', version) if not v: return __default_version__ # https://www.python.org/dev/peps/pep-0440/#final-releases version = v.group('final') # https://www.python.org/dev/peps/pep-0440/#pre-releases if v.group('pre'): version += v.group('pre') # https://www.python.org/dev/peps/pep-0440/#developmental-releases if v.group('dev'): version += '.%s' % v.group('dev') # https://www.python.org/dev/peps/pep-0440/#post-releases if v.group('post'): version += '.post%s' % v.group('post') return version
0.006032
def _get_auto_increment_info(self, table):
        """figure out the autoincrement value for the given table"""
        query = ''
        seq_table = ''
        seq_column = ''
        seq_name = ''
        find_query = "\n".join([
            "SELECT",
            "  t.relname as related_table,",
            "  a.attname as related_column,",
            "  s.relname as sequence_name",
            "FROM pg_class s",
            "JOIN pg_depend d ON d.objid = s.oid",
            "JOIN pg_class t ON d.objid = s.oid AND d.refobjid = t.oid",
            "JOIN pg_attribute a ON (d.refobjid, d.refobjsubid) = (a.attrelid, a.attnum)",
            "JOIN pg_namespace n ON n.oid = s.relnamespace",
            "WHERE",
            "  s.relkind = 'S'",
            "AND",
            "  n.nspname = 'public'",
            "AND",
            "  t.relname = '{}'".format(table)
        ])

        pipe = self._run_queries([find_query], popen_kwargs={'stdout': subprocess.PIPE})
        stdout, stderr = pipe.communicate()
        if stdout:
            try:
                m = re.findall(r'^\s*(\S+)\s*\|\s*(\S+)\s*\|\s*(\S+)\s*$', stdout, flags=re.MULTILINE)
                seq_table, seq_column, seq_name = m[1]

                # http://www.postgresql.org/docs/9.2/static/functions-sequence.html
                # http://www.postgresql.org/docs/9.2/static/functions-conditional.html
                query = "\n".join([
                    "SELECT",
                    "  setval('{}',".format(seq_name.strip()),
                    "  coalesce(max({}), 1),".format(seq_column.strip()),
                    "  max({}) IS NOT null)".format(seq_column.strip()),
                    "FROM \"{}\"".format(seq_table.strip())
                ])

            except IndexError:
                query = ''

        return query, seq_table, seq_column, seq_name
0.008625
def summary_by_datacenter(self): """Summary of the networks on the account, grouped by data center. The resultant dictionary is primarily useful for statistical purposes. It contains count information rather than raw data. If you want raw information, see the :func:`list_vlans` method instead. :returns: A dictionary keyed by data center with the data containing a set of counts for subnets, hardware, virtual servers, and other objects residing within that data center. """ datacenters = collections.defaultdict(lambda: { 'hardware_count': 0, 'public_ip_count': 0, 'subnet_count': 0, 'virtual_guest_count': 0, 'vlan_count': 0, }) for vlan in self.list_vlans(): name = utils.lookup(vlan, 'primaryRouter', 'datacenter', 'name') datacenters[name]['vlan_count'] += 1 datacenters[name]['public_ip_count'] += ( vlan['totalPrimaryIpAddressCount']) datacenters[name]['subnet_count'] += vlan['subnetCount'] # NOTE(kmcdonald): Only count hardware/guests once if vlan.get('networkSpace') == 'PRIVATE': datacenters[name]['hardware_count'] += ( vlan['hardwareCount']) datacenters[name]['virtual_guest_count'] += ( vlan['virtualGuestCount']) return dict(datacenters)
0.001336
def is_contradictory(self, other): """ Two lists are contradictory if the shorter one is not a prefix of the other. (Very strict definition -- could be generalized to subsequence) """ if other.size() > self.size(): return other.is_contradictory(self) # ensure self is bigger or equal size if other.size() == 0: # empty lists are fine return False # see if any values in the shorter list are contradictory or # unequal for i, oval in enumerate(other.value): if hasattr(self.value[i], 'is_contradictory') and \ self.value[i].is_contradictory(oval): # allow comparing cells return True elif self.value[i] != oval: return True return False
0.004646
def stop(self, timeout=None):
        """
        Send the GET request required to stop the scan. If timeout is not specified we just send the request and
        return. When it is, the method will wait for (at most) :timeout: seconds until the scan changes its status
        and stops. If the timeout is reached then an exception is raised.

        :param timeout: The timeout in seconds
        :return: None, an exception is raised if the timeout is exceeded
        """
        assert self.scan_id is not None, 'No scan_id has been set'

        #
        # Simple stop
        #
        if timeout is None:
            url = '/scans/%s/stop' % self.scan_id
            self.conn.send_request(url, method='GET')
            return

        #
        # Stop with timeout
        #
        self.stop()

        for _ in xrange(timeout):
            time.sleep(1)

            is_running = self.get_status()['is_running']
            if not is_running:
                return

        msg = 'Failed to stop the scan in %s seconds'
        raise ScanStopTimeoutException(msg % timeout)
0.001812
def get_firmware_manifest(self, manifest_id): """Get manifest with provided manifest_id. :param str manifest_id: ID of manifest to retrieve (Required) :return: FirmwareManifest """ api = self._get_api(update_service.DefaultApi) return FirmwareManifest(api.firmware_manifest_retrieve(manifest_id=manifest_id))
0.008403
def _update_evaluated_individuals_(self, result_score_list, eval_individuals_str, operator_counts, stats_dicts): """Update self.evaluated_individuals_ and error message during pipeline evaluation. Parameters ---------- result_score_list: list A list of CV scores for evaluated pipelines eval_individuals_str: list A list of strings for evaluated pipelines operator_counts: dict A dict where 'key' is the string representation of an individual and 'value' is the number of operators in the pipeline stats_dicts: dict A dict where 'key' is the string representation of an individual and 'value' is a dict containing statistics about the individual Returns ------- None """ for result_score, individual_str in zip(result_score_list, eval_individuals_str): if type(result_score) in [float, np.float64, np.float32]: self.evaluated_individuals_[individual_str] = self._combine_individual_stats(operator_counts[individual_str], result_score, stats_dicts[individual_str]) else: raise ValueError('Scoring function does not return a float.')
0.007042