Columns: text (string, lengths 78 to 104k) · score (float64, range 0 to 0.18)
def ui_extensions(self):
    """
    Provides access to UI extensions management methods.

    API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/ui-extensions

    :return: :class:`EnvironmentUIExtensionsProxy <contentful_management.ui_extensions_proxy.EnvironmentUIExtensionsProxy>` object.
    :rtype: contentful.ui_extensions_proxy.EnvironmentUIExtensionsProxy

    Usage:

        >>> ui_extensions_proxy = environment.ui_extensions()
        <EnvironmentUIExtensionsProxy space_id="cfexampleapi" environment_id="master">
    """

    return EnvironmentUIExtensionsProxy(self._client, self.space.id, self.id)
0.008499
def import_settings_class(setting_name):
    """
    Return the class pointed to by an app setting variable.
    """
    config_value = getattr(settings, setting_name)
    if config_value is None:
        raise ImproperlyConfigured("Required setting not found: {0}".format(setting_name))
    return import_class(config_value, setting_name)
0.005882
def from_intervals(cls, intervals):
    """
    :rtype : Node
    """
    if not intervals:
        return None
    node = Node()
    node = node.init_from_sorted(sorted(intervals))
    return node
0.008734
def pull(self, images, file_name=None, save=True, **kwargs):
    '''pull an image from a s3 storage

       Parameters
       ==========
       images: refers to the uri given by the user to pull in the format
               <collection>/<namespace>. You should have an API that is able
               to retrieve a container based on parsing this uri.
       file_name: the user's requested name for the file. It can
                  optionally be None if the user wants a default.
       save: if True, you should save the container to the database
             using self.add()

       Returns
       =======
       finished: a single container path, or list of paths
    '''
    if not isinstance(images, list):
        images = [images]

    bot.debug('Execution of PULL for %s images' % len(images))

    finished = []
    for image in images:

        image = remove_uri(image)
        names = parse_image_name(image)

        if file_name is None:
            file_name = names['storage'].replace('/', '-')

        # Assume the user provided the correct uri to start
        uri = names['storage_uri']

        # First try to get the storage uri directly.
        try:
            self.bucket.download_file(uri, file_name)

        # If we can't find the file, help the user
        except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] == "404":

                # Case 1, image not found, but no error with API
                bot.error('Cannot find %s!' % image)

                # Try to help the user with suggestions
                results = self._search_all(query=image)
                if len(results) > 0:
                    bot.info('Did you mean:\n%s' % '\n'.join(results))
                sys.exit(1)

            else:
                # Case 2: error with request, exit.
                bot.exit('Error downloading image %s' % image)

        # if we get down here, we have a uri
        found = None
        for obj in self.bucket.objects.filter(Prefix=image):
            if image in obj.key:
                found = obj

        # If we find the object, get metadata
        metadata = {}
        if found is not None:
            metadata = found.get()['Metadata']

            # Metadata bug will capitalize all fields, workaround is to lowercase
            # https://github.com/boto/boto3/issues/1709
            metadata = dict((k.lower(), v) for k, v in metadata.items())

        metadata.update(names)

        # If the user is saving to local storage
        if save is True and os.path.exists(file_name):
            container = self.add(image_path=file_name,
                                 image_uri=names['tag_uri'],
                                 metadata=metadata)
            file_name = container.image

        # If the image was pulled to either
        if os.path.exists(file_name):
            bot.custom(prefix="Success!", message=file_name)
            finished.append(file_name)

    if len(finished) == 1:
        finished = finished[0]
    return finished
0.007492
def createGroup(self, message, user_ids):
    """
    Creates a group with the given ids

    :param message: The initial message
    :param user_ids: A list of users to create the group with.
    :return: ID of the new group
    :raises: FBchatException if request failed
    """
    data = self._getSendData(message=self._oldMessage(message))

    if len(user_ids) < 2:
        raise FBchatUserError("Error when creating group: Not enough participants")

    for i, user_id in enumerate(user_ids + [self._uid]):
        data["specific_to_list[{}]".format(i)] = "fbid:{}".format(user_id)

    message_id, thread_id = self._doSendRequest(data, get_thread_id=True)
    if not thread_id:
        raise FBchatException(
            "Error when creating group: No thread_id could be found"
        )
    return thread_id
0.003378
def touch(args):
    """
    find . -type l | %prog touch

    The Linux command `touch` won't modify mtime for symlinks; this script can.
    Use find to pipe in all the symlinks.
    """
    p = OptionParser(touch.__doc__)
    opts, args = p.parse_args(args)
    fp = sys.stdin

    for link_name in fp:
        link_name = link_name.strip()
        if not op.islink(link_name):
            continue
        if not op.exists(link_name):
            continue

        source = get_abs_path(link_name)
        lnsf(source, link_name)
0.001887
def get_last(self, num=10):
    """Returns the last `num` HN stories.

    Downloads the latest HN articles and returns them as Item objects.

    Returns:
        `list` of `Item` objects for the last `num` HN stories.
    """
    max_item = self.get_max_item()
    urls = [urljoin(self.item_url, F"{i}.json") for i in range(
        max_item - num + 1, max_item + 1)]
    result = self._run_async(urls=urls)
    return [Item(r) for r in result if r]
0.004219
def execute_django(self, soql, args=()):
    """
    Fixed execute for queries coming from Django query compilers
    """
    response = None
    sqltype = soql.split(None, 1)[0].upper()
    if isinstance(self.query, subqueries.InsertQuery):
        response = self.execute_insert(self.query)
    elif isinstance(self.query, subqueries.UpdateQuery):
        response = self.execute_update(self.query)
    elif isinstance(self.query, subqueries.DeleteQuery):
        response = self.execute_delete(self.query)
    elif isinstance(self.query, RawQuery):
        self.execute_select(soql, args)
    elif sqltype in ('SAVEPOINT', 'ROLLBACK', 'RELEASE'):
        log.info("Ignored SQL command '%s'", sqltype)
        return
    elif isinstance(self.query, Query):
        self.execute_select(soql, args)
    else:
        raise DatabaseError("Unsupported query: type %s: %s" % (type(self.query), self.query))
    return response
0.002979
def clean(self):
    """
    Verify that the values entered into the two password fields match.
    Note that an error here will end up in ``non_field_errors()`` because
    it doesn't apply to a single field.
    """
    if 'dob_day' in self.cleaned_data and 'dob_month' in self.cleaned_data \
            and 'dob_year' in self.cleaned_data:
        try:
            self._gen_dob()
        except ValueError:
            self._errors['dob_day'] = (_("You provided an invalid date."),)
    if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:
        if self.cleaned_data['password1'] != self.cleaned_data['password2']:
            raise forms.ValidationError(_("The two password fields didn't match."))
    return self.cleaned_data
0.00533
def get_lambda_function_versions(self, function_name):
    """
    Simply returns the versions available for a Lambda function, given a function name.
    """
    try:
        response = self.lambda_client.list_versions_by_function(
            FunctionName=function_name
        )
        return response.get('Versions', [])
    except Exception:
        return []
0.007389
def add_delta_deltas(filterbanks, name=None):
    """Compute first- and second-order time-derivative channels.

    Args:
      filterbanks: float32 tensor with shape [batch_size, len, num_bins, 1]
      name: scope name

    Returns:
      float32 tensor with shape [batch_size, len, num_bins, 3]
    """
    delta_filter = np.array([2, 1, 0, -1, -2])
    delta_delta_filter = scipy.signal.convolve(delta_filter, delta_filter, "full")

    delta_filter_stack = np.array(
        [[0] * 4 + [1] + [0] * 4, [0] * 2 + list(delta_filter) + [0] * 2,
         list(delta_delta_filter)],
        dtype=np.float32).T[:, None, None, :]

    delta_filter_stack /= np.sqrt(
        np.sum(delta_filter_stack**2, axis=0, keepdims=True))

    filterbanks = tf.nn.conv2d(
        filterbanks, delta_filter_stack, [1, 1, 1, 1], "SAME", data_format="NHWC",
        name=name)
    return filterbanks
0.011848
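A quick sanity check of the function above, as a minimal sketch: it assumes TensorFlow with eager execution and that `add_delta_deltas` plus its module-level `np`, `scipy.signal`, and `tf` imports are in scope. The batch shape is hypothetical.

    import numpy as np
    import tensorflow as tf

    # Hypothetical batch: 2 utterances, 100 frames, 40 mel bins, 1 channel.
    fbanks = tf.constant(np.random.rand(2, 100, 40, 1), dtype=tf.float32)
    stacked = add_delta_deltas(fbanks)
    print(stacked.shape)  # expected: (2, 100, 40, 3) -- static, delta, delta-delta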
def FindPosition(node, addition, index=0):
    '''
    Method to search for children according to their position in the list.
    Similar functionality to the method above, except this is for adding items
    to the tree according to the node's limits on children or the types of
    children it can have.

    :param node: current node being searched
    :param addition: the thing being added
    :param index: index to search
    :return:
    '''
    if node is None:
        return None
    if type(addition) in node.rules:
        if len(node.children) < node.limit or node.limit == 0:
            return node
        else:
            if len(node.children) == 0:
                return None
            indexes = node.GetChildrenIndexes()
            result = FindPosition(node.GetChild(indexes[index]), addition, index)
            if result is None:
                index += 1
                child = 0
                while result is None and child < len(indexes):
                    result = FindPosition(node.GetChild(indexes[child]), addition, index)
                    child += 1
            return result
    else:
        if len(node.children) == 0:
            return None
        indexes = node.GetChildrenIndexes()
        result = FindPosition(node.GetChild(indexes[index]), addition, index)
        if result is None:
            index += 1
            child = 0
            while result is None and child < len(node.children):
                result = FindPosition(node.GetChild(indexes[child]), addition, index)
                child += 1
        return result
0.001707
def request_name(self):
    """Generate the name of the request."""
    if self.static and not self.uses_request:
        return 'Empty'
    if not self.uses_request:
        return None
    if isinstance(self.uses_request, str):
        return self.uses_request
    return to_camel_case(self.name) + "Request"
0.005797
def _lower(string):
    """Custom lower string function.

    Examples:
        FooBar -> foo_bar
    """
    if not string:
        return ""

    new_string = [string[0].lower()]
    for char in string[1:]:
        if char.isupper():
            new_string.append("_")
        new_string.append(char.lower())

    return "".join(new_string)
0.002933
def fetch_data(self, **var):
    """Retrieve data from CDMRemote for one or more variables."""
    varstr = ','.join(name + self._convert_indices(ind)
                      for name, ind in var.items())
    query = self.query().add_query_parameter(req='data', var=varstr)
    return self._fetch(query)
0.006231
def load_config(paths=DEFAULT_CONFIG_PATHS):
    """Attempt to load config from paths, in order.

    Args:
        paths (List[string]): list of paths to python files

    Return:
        Config: loaded config
    """
    config = Config()
    for path in paths:
        if os.path.isfile(path):
            config.load_pyfile(path)
    return config
0.002841
def init_app(self, app):
    """This callback can be used to initialize an application for use with
    the OpenERP server.
    """
    app.config.setdefault('OPENERP_SERVER', 'http://localhost:8069')
    app.config.setdefault('OPENERP_DATABASE', 'openerp')
    app.config.setdefault('OPENERP_DEFAULT_USER', 'admin')
    app.config.setdefault('OPENERP_DEFAULT_PASSWORD', 'admin')

    app.jinja_env.globals.update(
        get_data_from_record=get_data_from_record
    )

    cnx = Client(
        server=app.config['OPENERP_SERVER'],
        db=app.config['OPENERP_DATABASE'],
        user=app.config['OPENERP_DEFAULT_USER'],
        password=app.config['OPENERP_DEFAULT_PASSWORD']
    )
    self.default_user = cnx.user

    app.before_request(self.before_request)
0.002384
def retrieve_files():
    """Get list of files found in provided locations.

    Search through the paths provided to find files for processing.

    :returns: absolute paths of the files found
    :rtype: list
    """
    all_files = []
    for location in cfg.CONF.locations or []:
        # if local path then make sure it is absolute
        if not location.startswith('\\'):
            location = os.path.abspath(os.path.expanduser(location))

        LOG.debug('searching [%s]', location)
        for root, _, files in os.walk(location):
            LOG.debug('found file(s) %s', files)
            for name in files:
                filepath = os.path.join(root, name)
                if (os.access(filepath, os.R_OK)
                        and not _is_blacklisted_filename(filepath)
                        and _is_valid_extension(os.path.splitext(name)[1])):
                    all_files.append(filepath)
    return all_files
0.001074
def S4U2self(self, user_to_impersonate, supp_enc_methods=[EncryptionType.DES_CBC_CRC, EncryptionType.DES_CBC_MD4, EncryptionType.DES_CBC_MD5, EncryptionType.DES3_CBC_SHA1, EncryptionType.ARCFOUR_HMAC_MD5, EncryptionType.AES256_CTS_HMAC_SHA1_96, EncryptionType.AES128_CTS_HMAC_SHA1_96]):
    """
    user_to_impersonate : KerberosTarget class
    """

    if not self.kerberos_TGT:
        logger.debug('S4U2self invoked, but TGT is not available! Fetching TGT...')
        self.get_TGT()

    supp_enc = self.usercreds.get_preferred_enctype(supp_enc_methods)
    auth_package_name = 'Kerberos'
    now = datetime.datetime.utcnow()

    ###### Calculating authenticator data
    authenticator_data = {}
    authenticator_data['authenticator-vno'] = krb5_pvno
    authenticator_data['crealm'] = Realm(self.kerberos_TGT['crealm'])
    authenticator_data['cname'] = self.kerberos_TGT['cname']
    authenticator_data['cusec'] = now.microsecond
    authenticator_data['ctime'] = now

    authenticator_data_enc = self.kerberos_cipher.encrypt(self.kerberos_session_key, 7, Authenticator(authenticator_data).dump(), None)

    ap_req = {}
    ap_req['pvno'] = krb5_pvno
    ap_req['msg-type'] = MESSAGE_TYPE.KRB_AP_REQ.value
    ap_req['ap-options'] = APOptions(set())
    ap_req['ticket'] = Ticket(self.kerberos_TGT['ticket'])
    ap_req['authenticator'] = EncryptedData({'etype': self.kerberos_cipher_type, 'cipher': authenticator_data_enc})

    pa_data_auth = {}
    pa_data_auth['padata-type'] = PaDataType.TGS_REQ.value
    pa_data_auth['padata-value'] = AP_REQ(ap_req).dump()

    ###### Calculating checksum data
    S4UByteArray = NAME_TYPE.PRINCIPAL.value.to_bytes(4, 'little', signed=False)
    S4UByteArray += user_to_impersonate.username.encode()
    S4UByteArray += user_to_impersonate.domain.encode()
    S4UByteArray += auth_package_name.encode()
    logger.debug('S4U2self: S4UByteArray: %s' % S4UByteArray.hex())
    logger.debug('S4U2self: S4UByteArray: %s' % S4UByteArray)

    chksum_data = _HMACMD5.checksum(self.kerberos_session_key, 17, S4UByteArray)
    logger.debug('S4U2self: chksum_data: %s' % chksum_data.hex())
    chksum = {}
    chksum['cksumtype'] = int(CKSUMTYPE('HMAC_MD5'))
    chksum['checksum'] = chksum_data

    ###### Filling out PA-FOR-USER data for impersonation
    pa_for_user_enc = {}
    pa_for_user_enc['userName'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': user_to_impersonate.get_principalname()})
    pa_for_user_enc['userRealm'] = user_to_impersonate.domain
    pa_for_user_enc['cksum'] = Checksum(chksum)
    pa_for_user_enc['auth-package'] = auth_package_name

    pa_for_user = {}
    pa_for_user['padata-type'] = int(PADATA_TYPE('PA-FOR-USER'))
    pa_for_user['padata-value'] = PA_FOR_USER_ENC(pa_for_user_enc).dump()

    ###### Constructing body
    krb_tgs_body = {}
    krb_tgs_body['kdc-options'] = KDCOptions(set(['forwardable', 'renewable', 'canonicalize']))
    krb_tgs_body['sname'] = PrincipalName({'name-type': NAME_TYPE.UNKNOWN.value, 'name-string': [self.usercreds.username]})
    krb_tgs_body['realm'] = self.usercreds.domain.upper()
    krb_tgs_body['till'] = now + datetime.timedelta(days=1)
    krb_tgs_body['nonce'] = secrets.randbits(31)
    krb_tgs_body['etype'] = [supp_enc.value]  # selecting according to server's preferences

    krb_tgs_req = {}
    krb_tgs_req['pvno'] = krb5_pvno
    krb_tgs_req['msg-type'] = MESSAGE_TYPE.KRB_TGS_REQ.value
    krb_tgs_req['padata'] = [pa_data_auth, pa_for_user]
    krb_tgs_req['req-body'] = KDC_REQ_BODY(krb_tgs_body)

    req = TGS_REQ(krb_tgs_req)

    logger.debug('Sending S4U2self request to server')
    try:
        reply = self.ksoc.sendrecv(req.dump())
    except KerberosError as e:
        if e.errorcode.value == 16:
            logger.error('S4U2self: Failed to get S4U2self! Error code (16) indicates that delegation is not enabled for this account! Full error: %s' % e)
        raise e

    logger.debug('Got S4U2self reply, decrypting...')
    tgs = reply.native

    encTGSRepPart = EncTGSRepPart.load(self.kerberos_cipher.decrypt(self.kerberos_session_key, 8, tgs['enc-part']['cipher'])).native
    key = Key(encTGSRepPart['key']['keytype'], encTGSRepPart['key']['keyvalue'])

    self.ccache.add_tgs(tgs, encTGSRepPart)
    logger.debug('Got valid TGS reply')
    self.kerberos_TGS = tgs
    return tgs, encTGSRepPart, key
0.035565
def str_on_2_unicode_on_3(s):
    """
    argparse is way too awesome when doing repr() on choices when printing usage

    :param s: str or unicode
    :return: str on 2, unicode on 3
    """

    if not PY3:
        return str(s)
    else:  # 3+
        if not isinstance(s, str):
            return str(s, encoding="utf-8")
        return s
0.005831
def batched_index_select(target: torch.Tensor,
                         indices: torch.LongTensor,
                         flattened_indices: Optional[torch.LongTensor] = None) -> torch.Tensor:
    """
    The given ``indices`` of size ``(batch_size, d_1, ..., d_n)`` indexes into the sequence
    dimension (dimension 2) of the target, which has size ``(batch_size, sequence_length,
    embedding_size)``.

    This function returns selected values in the target with respect to the provided indices, which
    have size ``(batch_size, d_1, ..., d_n, embedding_size)``. This can use the optionally
    precomputed :func:`~flattened_indices` with size ``(batch_size * d_1 * ... * d_n)`` if given.

    An example use case of this function is looking up the start and end indices of spans in a
    sequence tensor. This is used in the
    :class:`~allennlp.models.coreference_resolution.CoreferenceResolver` model to select
    contextual word representations corresponding to the start and end indices of mentions.
    The key reason this can't be done with basic torch functions is that we want to be able to use
    look-up tensors with an arbitrary number of dimensions (for example, in the coref model, we
    don't know a-priori how many spans we are looking up).

    Parameters
    ----------
    target : ``torch.Tensor``, required.
        A 3 dimensional tensor of shape (batch_size, sequence_length, embedding_size).
        This is the tensor to be indexed.
    indices : ``torch.LongTensor``
        A tensor of shape (batch_size, ...), where each element is an index into the
        ``sequence_length`` dimension of the ``target`` tensor.
    flattened_indices : Optional[torch.Tensor], optional (default = None)
        An optional tensor representing the result of calling
        :func:~`flatten_and_batch_shift_indices` on ``indices``. This is helpful in the case
        that the indices can be flattened once and cached for many batch lookups.

    Returns
    -------
    selected_targets : ``torch.Tensor``
        A tensor with shape [indices.size(), target.size(-1)] representing the embedded indices
        extracted from the batch flattened target tensor.
    """
    if flattened_indices is None:
        # Shape: (batch_size * d_1 * ... * d_n)
        flattened_indices = flatten_and_batch_shift_indices(indices, target.size(1))

    # Shape: (batch_size * sequence_length, embedding_size)
    flattened_target = target.view(-1, target.size(-1))

    # Shape: (batch_size * d_1 * ... * d_n, embedding_size)
    flattened_selected = flattened_target.index_select(0, flattened_indices)
    selected_shape = list(indices.size()) + [target.size(-1)]
    # Shape: (batch_size, d_1, ..., d_n, embedding_size)
    selected_targets = flattened_selected.view(*selected_shape)
    return selected_targets
0.006369
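A hedged illustration of the span-lookup use case, assuming the function above and its `flatten_and_batch_shift_indices` helper are in scope; the tensor sizes are arbitrary.

    import torch

    # Batch of 2 sequences, length 5, embedding size 3.
    target = torch.randn(2, 5, 3)
    # For each sequence, look up two positions (e.g. span starts and ends).
    indices = torch.tensor([[0, 4],
                            [1, 2]])
    selected = batched_index_select(target, indices)
    print(selected.shape)  # torch.Size([2, 2, 3])
    assert torch.equal(selected[0, 1], target[0, 4])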
def has_sample(self, md5):
    """Checks if data store has this sample.

    Args:
        md5: The md5 digest of the required sample.

    Returns:
        True if sample with this md5 is present, else False.
    """
    # The easiest thing is to simply get the sample and if that
    # succeeds then return True, else return False
    sample = self.get_sample(md5)
    return True if sample else False
0.004505
def command_repo_list(self):
    """Repositories list
    """
    if len(self.args) == 1 and self.args[0] == "repo-list":
        RepoList().repos()
    else:
        usage("")
0.01
def register_fetcher(self, ctx_fetcher):
    """
    Register another context-specialized fetcher

    :param Callable ctx_fetcher: A callable that will return the id
        or raise ExecutedOutsideContext if it was executed outside its context
    """
    if ctx_fetcher not in self.ctx_fetchers:
        self.ctx_fetchers.append(ctx_fetcher)
0.008152
def cc_to_local_params(pitch, radius, oligo):
    """Returns local parameters for an oligomeric assembly.

    Parameters
    ----------
    pitch : float
        Pitch of assembly
    radius : float
        Radius of assembly
    oligo : int
        Oligomeric state of assembly

    Returns
    -------
    pitchloc : float
        Local pitch of assembly (between 2 adjacent component helices)
    rloc : float
        Local radius of assembly
    alphaloc : float
        Local pitch-angle of assembly
    """
    rloc = numpy.sin(numpy.pi / oligo) * radius
    alpha = numpy.arctan((2 * numpy.pi * radius) / pitch)
    alphaloc = numpy.cos((numpy.pi / 2) - ((numpy.pi) / oligo)) * alpha
    pitchloc = (2 * numpy.pi * rloc) / numpy.tan(alphaloc)
    return pitchloc, rloc, numpy.rad2deg(alphaloc)
0.001248
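For example, with hypothetical values chosen only to exercise the formulae above (assumes `numpy` and the function are in scope):

    import numpy

    # Hypothetical coiled-coil dimer: 140 A pitch, 5 A superhelical radius.
    pitchloc, rloc, alphaloc = cc_to_local_params(pitch=140.0, radius=5.0, oligo=2)
    print(rloc)      # sin(pi/2) * 5.0 = 5.0 for a dimer
    print(alphaloc)  # local pitch-angle, in degrees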
def convert_libraries_in_path(config_path, lib_path, target_path=None):
    """
    This function resaves all libraries found at the specified path

    :param lib_path: the path to look for libraries
    :return:
    """
    for lib in os.listdir(lib_path):
        if os.path.isdir(os.path.join(lib_path, lib)) and not '.' == lib[0]:
            if os.path.exists(os.path.join(os.path.join(lib_path, lib), "statemachine.yaml")) or \
                    os.path.exists(os.path.join(os.path.join(lib_path, lib), "statemachine.json")):
                if not target_path:
                    convert(config_path, os.path.join(lib_path, lib))
                else:
                    convert(config_path, os.path.join(lib_path, lib), os.path.join(target_path, lib))
            else:
                if not target_path:
                    convert_libraries_in_path(config_path, os.path.join(lib_path, lib))
                else:
                    convert_libraries_in_path(config_path, os.path.join(lib_path, lib),
                                              os.path.join(target_path, lib))
        else:
            if os.path.isdir(os.path.join(lib_path, lib)) and '.' == lib[0]:
                logger.debug("lib_root_path/lib_path .*-folder are ignored if within lib_path, "
                             "e.g. -> {0} -> full path is {1}".format(lib, os.path.join(lib_path, lib)))
0.005975
def collapse_umi(in_file):
    """collapse reads using UMI tags"""
    keep = defaultdict(dict)
    with open_fastq(in_file) as handle:
        for line in handle:
            if line.startswith("@"):
                m = re.search('UMI_([ATGC]*)', line.strip())
                umis = m.group(0)
                seq = handle.next().strip()
                handle.next()
                qual = handle.next().strip()
                if (umis, seq) in keep:
                    keep[(umis, seq)][1].update(qual)
                    keep[(umis, seq)][0].update(seq)
                else:
                    keep[(umis, seq)] = [umi(seq), quality(qual)]
    logger.info("Sequences loaded: %s" % len(keep))
    return keep
0.001395
def raw(self):
    """
    Red, green, and blue components of the detected color, as a tuple.

    Officially in the range 0-1020 but the values returned will never be
    that high. We do not yet know why the values returned are low, but
    pointing the color sensor at a well lit sheet of white paper will return
    values in the 250-400 range.

    If this is an issue, check out the rgb() and calibrate_white() methods.
    """
    self._ensure_mode(self.MODE_RGB_RAW)
    return self.value(0), self.value(1), self.value(2)
0.006932
def GetKeyByPath(self, key_path):
    """Retrieves the key for a specific path.

    Args:
        key_path (str): Windows Registry key path.

    Returns:
        WinRegistryKey: Windows Registry key or None if not available.

    Raises:
        RuntimeError: if the root key is not supported.
    """
    root_key_path, _, key_path = key_path.partition(
        definitions.KEY_PATH_SEPARATOR)

    # Resolve a root key alias.
    root_key_path = root_key_path.upper()
    root_key_path = self._ROOT_KEY_ALIASES.get(root_key_path, root_key_path)

    if root_key_path not in self._ROOT_KEYS:
        raise RuntimeError('Unsupported root key: {0:s}'.format(root_key_path))

    key_path = definitions.KEY_PATH_SEPARATOR.join([root_key_path, key_path])
    key_path_upper = key_path.upper()

    for virtual_key_path, virtual_key_callback in self._VIRTUAL_KEYS:
        virtual_key_path_upper = virtual_key_path.upper()
        if key_path_upper.startswith(virtual_key_path_upper):
            key_path_suffix = key_path[len(virtual_key_path):]
            callback_function = getattr(self, virtual_key_callback)
            virtual_key = callback_function(key_path_suffix)
            if not virtual_key:
                raise RuntimeError('Unable to resolve virtual key: {0:s}.'.format(
                    virtual_key_path))
            return virtual_key

    key_path_prefix_upper, registry_file = self._GetFileByPath(key_path_upper)
    if not registry_file:
        return None

    if not key_path_upper.startswith(key_path_prefix_upper):
        raise RuntimeError('Key path prefix mismatch.')

    key_path_suffix = key_path[len(key_path_prefix_upper):]
    key_path = key_path_suffix or definitions.KEY_PATH_SEPARATOR

    return registry_file.GetKeyByPath(key_path)
0.004044
def add_event(request):
    """ Public form to add an event. """
    form = AddEventForm(request.POST or None)
    if form.is_valid():
        instance = form.save(commit=False)
        instance.sites = settings.SITE_ID
        instance.submitted_by = request.user
        instance.approved = True
        instance.slug = slugify(instance.name)
        instance.save()
        messages.success(request, 'Your event has been added.')
        return HttpResponseRedirect(reverse('events_index'))
    return render(request, 'happenings/event_form.html', {
        'form': form,
        'form_title': 'Add an event'
    })
0.001618
def _ensure_directory_exists(self, directory):
    """Ensure that the passed path exists."""
    if not os.path.lexists(directory):
        os.makedirs(directory)
    return directory
0.01005
def get_security_group_id(name='', env='', region=''):
    """Get a security group ID.

    Args:
        name (str): Security Group name to find.
        env (str): Deployment environment to search.
        region (str): AWS Region to search.

    Returns:
        str: ID of Security Group, e.g. sg-xxxx.

    Raises:
        AssertionError: Call to Gate API was not successful.
        SpinnakerSecurityGroupError: Security Group _name_ was not found for
            _env_ in _region_.
    """
    vpc_id = get_vpc_id(env, region)

    LOG.info('Find %s sg in %s [%s] in %s', name, env, region, vpc_id)

    url = '{0}/securityGroups/{1}/{2}/{3}?vpcId={4}'.format(API_URL, env, region, name, vpc_id)
    response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)

    assert response.ok

    result = response.json()
    try:
        security_group_id = result['id']
    except KeyError:
        msg = 'Security group ({0}) not found'.format(name)
        raise SpinnakerSecurityGroupError(msg)

    LOG.info('Found: %s', security_group_id)
    return security_group_id
0.001838
def handle(self, object, *args, **kw):
    '''
    Calls each plugin in this PluginSet with the specified object,
    arguments, and keywords in the standard group plugin order. The
    return value from each successively invoked plugin is passed as
    the first parameter to the next plugin. The final return value
    is the object returned from the last plugin.

    If this plugin set is empty (i.e. no plugins exist or matched
    the spec), then a ValueError exception is thrown.
    '''
    if not bool(self):
        if not self.spec or self.spec == SPEC_ALL:
            raise ValueError('No plugins available in group %r' % (self.group,))
        raise ValueError(
            'No plugins in group %r matched %r' % (self.group, self.spec))
    for plugin in self.plugins:
        object = plugin.handle(object, *args, **kw)
    return object
0.004785
def authenticate(self, login=None, password=None):
    '''
    Authenticates this client instance.

    ``login`` and ``password`` default to the environment
    variables ``MS_LOGIN`` and ``MS_PASSWD`` respectively.

    :param login: Email address associated with a microsoft account
    :param password: Matching password

    :raises: :class:`~xbox.exceptions.AuthenticationException`

    :returns: Instance of :class:`~xbox.Client`
    '''
    if login is None:
        login = os.environ.get('MS_LOGIN')

    if password is None:
        password = os.environ.get('MS_PASSWD')

    if not login or not password:
        msg = (
            'Authentication credentials required. Please refer to '
            'http://xbox.readthedocs.org/en/latest/authentication.html'
        )
        raise AuthenticationException(msg)

    self.login = login

    # firstly we have to GET the login page and extract
    # certain data we need to include in our POST request.
    # sadly the data is locked away in some javascript code
    base_url = 'https://login.live.com/oauth20_authorize.srf?'

    # if the query string is percent-encoded the server
    # complains that client_id is missing
    qs = unquote(urlencode({
        'client_id': '0000000048093EE3',
        'redirect_uri': 'https://login.live.com/oauth20_desktop.srf',
        'response_type': 'token',
        'display': 'touch',
        'scope': 'service::user.auth.xboxlive.com::MBI_SSL',
        'locale': 'en',
    }))
    resp = self.session.get(base_url + qs)

    # python 3.x will error if this string is not a
    # bytes-like object
    url_re = b'urlPost:\\\'([A-Za-z0-9:\?_\-\.&/=]+)'
    ppft_re = b'sFTTag:\\\'.*value="(.*)"/>'

    login_post_url = re.search(url_re, resp.content).group(1)
    post_data = {
        'login': login,
        'passwd': password,
        'PPFT': re.search(ppft_re, resp.content).groups(1)[0],
        'PPSX': 'Passpor',
        'SI': 'Sign in',
        'type': '11',
        'NewUser': '1',
        'LoginOptions': '1',
        'i3': '36728',
        'm1': '768',
        'm2': '1184',
        'm3': '0',
        'i12': '1',
        'i17': '0',
        'i18': '__Login_Host|1',
    }
    resp = self.session.post(
        login_post_url, data=post_data, allow_redirects=False,
    )

    if 'Location' not in resp.headers:
        # we can only assume the login failed
        msg = 'Could not log in with supplied credentials'
        raise AuthenticationException(msg)

    # the access token is included in fragment of the location header
    location = resp.headers['Location']
    parsed = urlparse(location)
    fragment = parse_qs(parsed.fragment)
    access_token = fragment['access_token'][0]

    url = 'https://user.auth.xboxlive.com/user/authenticate'
    resp = self.session.post(url, data=json.dumps({
        "RelyingParty": "http://auth.xboxlive.com",
        "TokenType": "JWT",
        "Properties": {
            "AuthMethod": "RPS",
            "SiteName": "user.auth.xboxlive.com",
            "RpsTicket": access_token,
        }
    }), headers={'Content-Type': 'application/json'})

    json_data = resp.json()
    user_token = json_data['Token']
    uhs = json_data['DisplayClaims']['xui'][0]['uhs']

    url = 'https://xsts.auth.xboxlive.com/xsts/authorize'
    resp = self.session.post(url, data=json.dumps({
        "RelyingParty": "http://xboxlive.com",
        "TokenType": "JWT",
        "Properties": {
            "UserTokens": [user_token],
            "SandboxId": "RETAIL",
        }
    }), headers={'Content-Type': 'application/json'})

    response = resp.json()
    self.AUTHORIZATION_HEADER = 'XBL3.0 x=%s;%s' % (uhs, response['Token'])
    self.user_xid = response['DisplayClaims']['xui'][0]['xid']
    self.authenticated = True
    return self
0.001196
def genExampleStar(binaryLetter='', heirarchy=True):
    """ Generates an example star. If binaryLetter is set, creates a parent
    binary object; if heirarchy is True, will create a system and link
    everything up.
    """
    starPar = StarParameters()
    starPar.addParam('age', '7.6')
    starPar.addParam('magB', '9.8')
    starPar.addParam('magH', '7.4')
    starPar.addParam('magI', '7.6')
    starPar.addParam('magJ', '7.5')
    starPar.addParam('magK', '7.3')
    starPar.addParam('magV', '9.0')
    starPar.addParam('mass', '0.98')
    starPar.addParam('metallicity', '0.43')
    starPar.addParam('name', 'Example Star {0}{1}'.format(ac._ExampleSystemCount, binaryLetter))
    starPar.addParam('name', 'HD {0}{1}'.format(ac._ExampleSystemCount, binaryLetter))
    starPar.addParam('radius', '0.95')
    starPar.addParam('spectraltype', 'G5')
    starPar.addParam('temperature', '5370')

    exampleStar = Star(starPar.params)
    exampleStar.flags.addFlag('Fake')

    if heirarchy:
        if binaryLetter:
            exampleBinary = genExampleBinary()
            exampleBinary._addChild(exampleStar)
            exampleStar.parent = exampleBinary
        else:
            exampleSystem = genExampleSystem()
            exampleSystem._addChild(exampleStar)
            exampleStar.parent = exampleSystem

    return exampleStar
0.002992
def ics2task():
    """Command line tool to convert from iCalendar to Taskwarrior"""
    from argparse import ArgumentParser, FileType
    from sys import stdin

    parser = ArgumentParser(description='Converter from iCalendar to Taskwarrior syntax.')
    parser.add_argument('infile', nargs='?', type=FileType('r'), default=stdin,
                        help='Input iCalendar file (default: stdin)')
    parser.add_argument('outdir', nargs='?',
                        help='Output Taskwarrior directory (default to ~/.task)',
                        default=expanduser('~/.task'))
    args = parser.parse_args()

    vobject = readOne(args.infile.read())
    task = IcsTask(args.outdir)
    for todo in vobject.vtodo_list:
        task.to_task(todo)
0.004255
def configure(self, mount_point, mfa_type='duo', force=False):
    """Configure MFA for a supported method.

    This endpoint allows you to turn on multi-factor authentication with a given backend.
    Currently only Duo is supported.

    Supported methods:
        POST: /auth/{mount_point}/mfa_config. Produces: 204 (empty body)

    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :param mfa_type: Enables MFA with given backend (available: duo)
    :type mfa_type: str | unicode
    :param force: If True, make the "mfa_config" request regardless of circumstance. If False (the default), verify
        the provided mount_point is available and one of the types of methods supported by this feature.
    :type force: bool
    :return: The response of the configure MFA request.
    :rtype: requests.Response
    """
    if mfa_type != 'duo' and not force:
        # The situation described via this exception is not likely to change in the future.
        # However we provided that flexibility here just in case.
        error_msg = 'Unsupported mfa_type argument provided "{arg}", supported types: "{mfa_types}"'
        raise exceptions.ParamValidationError(error_msg.format(
            mfa_types=','.join(SUPPORTED_MFA_TYPES),
            arg=mfa_type,
        ))
    params = {
        'type': mfa_type,
    }

    api_path = '/v1/auth/{mount_point}/mfa_config'.format(
        mount_point=mount_point
    )
    return self._adapter.post(
        url=api_path,
        json=params,
    )
0.004142
def transform_member(self, node, results):
    """Transform for imports of specific module elements. Replaces
       the module to be imported from with the appropriate new
       module.
    """
    mod_member = results.get("mod_member")
    pref = mod_member.prefix
    member = results.get("member")

    # Simple case with only a single member being imported
    if member:
        # this may be a list of length one, or just a node
        if isinstance(member, list):
            member = member[0]
        new_name = None
        for change in MAPPING[mod_member.value]:
            if member.value in change[1]:
                new_name = change[0]
                break
        if new_name:
            mod_member.replace(Name(new_name, prefix=pref))
        else:
            self.cannot_convert(node, "This is an invalid module element")

    # Multiple members being imported
    else:
        # a dictionary for replacements, order matters
        modules = []
        mod_dict = {}
        members = results["members"]
        for member in members:
            # we only care about the actual members
            if member.type == syms.import_as_name:
                as_name = member.children[2].value
                member_name = member.children[0].value
            else:
                member_name = member.value
                as_name = None
            if member_name != u",":
                for change in MAPPING[mod_member.value]:
                    if member_name in change[1]:
                        if change[0] not in mod_dict:
                            modules.append(change[0])
                        mod_dict.setdefault(change[0], []).append(member)

        new_nodes = []
        indentation = find_indentation(node)
        first = True

        def handle_name(name, prefix):
            if name.type == syms.import_as_name:
                kids = [Name(name.children[0].value, prefix=prefix),
                        name.children[1].clone(),
                        name.children[2].clone()]
                return [Node(syms.import_as_name, kids)]
            return [Name(name.value, prefix=prefix)]

        for module in modules:
            elts = mod_dict[module]
            names = []
            for elt in elts[:-1]:
                names.extend(handle_name(elt, pref))
                names.append(Comma())
            names.extend(handle_name(elts[-1], pref))
            new = FromImport(module, names)
            if not first or node.parent.prefix.endswith(indentation):
                new.prefix = indentation
            new_nodes.append(new)
            first = False

        if new_nodes:
            nodes = []
            for new_node in new_nodes[:-1]:
                nodes.extend([new_node, Newline()])
            nodes.append(new_nodes[-1])
            node.replace(nodes)
        else:
            self.cannot_convert(node, "All module elements are invalid")
0.000934
def _islice(self, min_pos, min_idx, max_pos, max_idx, reverse):
    """
    Returns an iterator that slices `self` using two index pairs,
    `(min_pos, min_idx)` and `(max_pos, max_idx)`; the first inclusive
    and the latter exclusive. See `_pos` for details on how an index
    is converted to an index pair.

    When `reverse` is `True`, values are yielded from the iterator in
    reverse order.
    """
    _lists = self._lists

    if min_pos > max_pos:
        return iter(())

    elif min_pos == max_pos and not reverse:
        return iter(_lists[min_pos][min_idx:max_idx])

    elif min_pos == max_pos and reverse:
        return reversed(_lists[min_pos][min_idx:max_idx])

    elif min_pos + 1 == max_pos and not reverse:
        return chain(_lists[min_pos][min_idx:], _lists[max_pos][:max_idx])

    elif min_pos + 1 == max_pos and reverse:
        return chain(
            reversed(_lists[max_pos][:max_idx]),
            reversed(_lists[min_pos][min_idx:]),
        )

    elif not reverse:
        return chain(
            _lists[min_pos][min_idx:],
            chain.from_iterable(_lists[(min_pos + 1):max_pos]),
            _lists[max_pos][:max_idx],
        )

    temp = map(reversed, reversed(_lists[(min_pos + 1):max_pos]))
    return chain(
        reversed(_lists[max_pos][:max_idx]),
        chain.from_iterable(temp),
        reversed(_lists[min_pos][min_idx:]),
    )
0.001306
def cwd_decorator(func):
    """
    decorator to change cwd to directory containing rst for this function
    """
    def wrapper(*args, **kw):
        cur_dir = os.getcwd()
        found = False
        for arg in sys.argv:
            if arg.endswith(".rst"):
                found = arg
                break
        if found:
            directory = os.path.dirname(found)
            if directory:
                os.chdir(directory)
        data = func(*args, **kw)
        os.chdir(cur_dir)
        return data
    return wrapper
0.001852
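A minimal sketch of how the decorator above might be applied; the function name and the `.rst` path in `sys.argv` are hypothetical.

    @cwd_decorator
    def read_sibling(name):
        # Relative paths here resolve against the directory of the .rst
        # file passed on the command line; the original cwd is restored
        # after the call.
        with open(name) as fin:
            return fin.read()

    # e.g. invoked as: python build_docs.py docs/index.rst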
def MoveCursorToInnerPos(self, x: int = None, y: int = None, ratioX: float = 0.5, ratioY: float = 0.5, simulateMove: bool = True) -> tuple:
    """
    Move cursor to control's internal position, default to center.
    x: int, if < 0, move to self.BoundingRectangle.right + x, if not None, ignore ratioX.
    y: int, if < 0, move to self.BoundingRectangle.bottom + y, if not None, ignore ratioY.
    ratioX: float.
    ratioY: float.
    simulateMove: bool.
    Return tuple, two ints (x, y), the cursor position relative to screen (0, 0)
    after moving, or None if control's width or height == 0.
    """
    rect = self.BoundingRectangle
    if rect.width() == 0 or rect.height() == 0:
        Logger.ColorfullyWriteLine('<Color=Yellow>Can not move cursor</Color>. {}\'s BoundingRectangle is {}. SearchProperties: {}'.format(
            self.ControlTypeName, rect, self.GetColorfulSearchPropertiesStr()))
        return
    if x is None:
        x = rect.left + int(rect.width() * ratioX)
    else:
        x = (rect.left if x >= 0 else rect.right) + x
    if y is None:
        y = rect.top + int(rect.height() * ratioY)
    else:
        y = (rect.top if y >= 0 else rect.bottom) + y
    if simulateMove and MAX_MOVE_SECOND > 0:
        MoveTo(x, y, waitTime=0)
    else:
        SetCursorPos(x, y)
    return x, y
0.005638
def addExpression(self, datafields):
    """
    Adds an Expression to the db.

    Datafields is a tuple in the order:
    id, rna_quantification_id, name, expression, is_normalized,
    raw_read_count, score, units, conf_low, conf_hi
    """
    self._expressionValueList.append(datafields)
    if len(self._expressionValueList) >= self._batchSize:
        self.batchAddExpression()
0.004854
def start(self, start_loop=True):
    """
    Start a producer/consumer service
    """
    txaio.start_logging()
    runner = self.setup_runner()

    if start_loop:
        try:
            runner.run(Component)
        except EventifyHandlerInitializationFailed as initError:
            logging.error('Unable to initialize handler: %s.' % initError.message)
            sys.exit(1)
        except ConnectionRefusedError:
            logging.error('Unable to connect to crossbar instance. Is it running?')
            sys.exit(1)
        except KeyboardInterrupt:
            logging.info('User initiated shutdown')
            loop = asyncio.get_event_loop()
            loop.stop()
            sys.exit(1)
        self.check_event_loop()
        self.reconnect()
    else:
        return runner.run(
            Component,
            start_loop=start_loop
        )
0.004103
def adjustStyleSheet(self):
    """
    Adjusts the stylesheet for this widget based on whether it has a
    corner radius and/or icon.
    """
    radius = self.cornerRadius()
    icon = self.icon()

    if not self.objectName():
        self.setStyleSheet('')
    elif not (radius or icon):
        self.setStyleSheet('')
    else:
        palette = self.palette()

        options = {}
        options['corner_radius'] = radius
        options['padding'] = 5
        options['objectName'] = self.objectName()

        if icon and not icon.isNull():
            options['padding'] += self.iconSize().width() + 2

        self.setStyleSheet(LINEEDIT_STYLE % options)
0.016746
def do_files_exist(filenames):
    """Whether any of the filenames exist."""
    preexisting = [tf.io.gfile.exists(f) for f in filenames]
    return any(preexisting)
0.025157
def delete(self, event):
    """Delete an existing object"""
    try:
        data, schema, user, client = self._get_args(event)
    except AttributeError:
        return

    try:
        uuids = data['uuid']

        if not isinstance(uuids, list):
            uuids = [uuids]

        if schema not in objectmodels.keys():
            self.log("Unknown schema encountered: ", schema, lvl=warn)
            return

        for uuid in uuids:
            self.log("Looking for object to be deleted:", uuid, lvl=debug)
            storage_object = objectmodels[schema].find_one({'uuid': uuid})

            if not storage_object:
                self._cancel_by_error(event, 'not found')
                return

            self.log("Found object.", lvl=debug)

            if not self._check_permissions(user, 'write', storage_object):
                self._cancel_by_permission(schema, data, event)
                return

            # self.log("Fields:", storage_object._fields, "\n\n\n",
            #          storage_object.__dict__)

            storage_object.delete()

            self.log("Deleted. Preparing notification.", lvl=debug)
            notification = objectdeletion(uuid, schema, client)

            if uuid in self.subscriptions:
                deletion = {
                    'component': 'hfos.events.objectmanager',
                    'action': 'deletion',
                    'data': {
                        'schema': schema,
                        'uuid': uuid,
                    }
                }
                for recipient in self.subscriptions[uuid]:
                    self.fireEvent(send(recipient, deletion))

                del (self.subscriptions[uuid])

            result = {
                'component': 'hfos.events.objectmanager',
                'action': 'delete',
                'data': {
                    'schema': schema,
                    'uuid': storage_object.uuid
                }
            }
            self._respond(notification, result, event)
    except Exception as e:
        self.log("Error during delete request: ", e, type(e), lvl=error)
0.000853
def cancel_current_route(
        payment_state: InitiatorPaymentState,
        initiator_state: InitiatorTransferState,
) -> List[Event]:
    """ Cancel current route.

    This allows a new route to be tried.
    """
    assert can_cancel(initiator_state), 'Cannot cancel a route after the secret is revealed'

    transfer_description = initiator_state.transfer_description

    payment_state.cancelled_channels.append(initiator_state.channel_identifier)

    return events_for_cancel_current_route(transfer_description)
0.003824
def save(self, message):
    """
    Add version to repo object store, set repo head to version sha.

    :param message: Message string.
    """
    self.commit.message = message
    self.commit.tree = self.tree

    # TODO: store new blobs only
    for item in self.tree.items():
        self.repo.object_store.add_object(item.blob)
    self.repo.object_store.add_object(self.tree)

    # set HEAD to new commit
    self.repo.object_store.add_object(self.commit)
    self.repo.refs['refs/heads/master'] = self.commit.id
0.005254
def load_module(self, path, squash=True):
    """Load values from a Python module.

    Example module ``config.py``::

        DEBUG = True
        SQLITE = {
            "db": ":memory:"
        }

    >>> c = ConfigDict()
    >>> c.load_module('config')
    {'DEBUG': True, 'SQLITE.DB': ':memory:'}
    >>> c.load_module("config", False)
    {'DEBUG': True, 'SQLITE': {'DB': ':memory:'}}

    :param squash: If true (default), dictionary values are assumed to
                   represent namespaces (see :meth:`load_dict`).
    """
    config_obj = load(path)
    obj = {key: getattr(config_obj, key) for key in dir(config_obj)
           if key.isupper()}

    if squash:
        self.load_dict(obj)
    else:
        self.update(obj)
    return self
0.002281
def enkf(self):
    """
    Loop over time windows and apply da

    :return:
    """
    for cycle_index, time_point in enumerate(self.timeline):
        if cycle_index >= len(self.timeline) - 1:
            # Logging: last update cycle has finished
            break
        print("Print information about this assimilation Cycle ???")  # should be handled in Logger

        # each cycle should have a dictionary of template files and
        # instruction files to update the model input files

        # get current cycle update information
        current_cycle_files = self.cycle_update_files[cycle_index]

        # (1) update model input files for this cycle
        self.model_temporal_evolotion(cycle_index, current_cycle_files)

        # (2) generate new Pst object for the current time cycle
        current_pst = copy.deepcopy(self.pst)
        # update observation dataframe
        # update parameter dataframe
        # update in/out files if needed

        # At this stage the problem is equivalent to the smoother problem
        self.smoother(current_pst)
0.003433
def format_image_iter(data_fetch, x_start=0, y_start=0, width=32, height=32, frame=0, columns=1, downsample=1):
    """Return the ANSI escape sequence to render a bitmap image.

    data_fetch
        Function that takes three arguments (x position, y position, and frame)
        and returns a Colour corresponding to the pixel stored there, or
        Transparent if the requested pixel is out of bounds.

    x_start
        Offset from the left of the image data to render from. Defaults to 0.

    y_start
        Offset from the top of the image data to render from. Defaults to 0.

    width
        Width of the image data to render. Defaults to 32.

    height
        Height of the image data to render. Defaults to 32.

    frame
        Single frame number/object, or a list to render in sequence. Defaults to frame 0.

    columns
        Number of frames to render per line (useful for printing tilemaps!). Defaults to 1.

    downsample
        Shrink larger images by printing every nth pixel only. Defaults to 1.
    """
    frames = []
    try:
        frame_iter = iter(frame)
        frames = [f for f in frame_iter]
    except TypeError:
        frames = [frame]

    rows = math.ceil(len(frames) / columns)
    for r in range(rows):
        for y in range(0, height, 2 * downsample):
            result = []
            for c in range(min((len(frames) - r * columns), columns)):
                row = []
                for x in range(0, width, downsample):
                    fr = frames[r * columns + c]
                    c1 = data_fetch(x_start + x, y_start + y, fr)
                    c2 = data_fetch(x_start + x, y_start + y + downsample, fr)
                    row.append((c1, c2))
                prev_pixel = None
                pointer = 0
                while pointer < len(row):
                    start = pointer
                    pixel = row[pointer]
                    while pointer < len(row) and (row[pointer] == pixel):
                        pointer += 1
                    result.append(format_pixels(pixel[0], pixel[1], repeat=pointer - start))
            yield ''.join(result)
    return
0.02038
def getAllAsDict(self):
    """Return all the stats (dict)."""
    return {p: self._plugins[p].get_raw() for p in self._plugins}
0.014706
def _createCert(self, hostname, serial):
    """
    Create a self-signed X.509 certificate.

    @type hostname: L{unicode}
    @param hostname: The hostname this certificate should be valid for.

    @type serial: L{int}
    @param serial: The serial number the certificate should have.

    @rtype: L{bytes}
    @return: The serialized certificate in PEM format.
    """
    privateKey = rsa.generate_private_key(
        public_exponent=65537,
        key_size=2048,
        backend=default_backend())
    publicKey = privateKey.public_key()
    name = x509.Name([
        x509.NameAttribute(NameOID.COMMON_NAME, hostname)])
    certificate = (
        x509.CertificateBuilder()
        .subject_name(name)
        .issuer_name(name)
        .not_valid_before(datetime.today() - timedelta(days=1))
        .not_valid_after(datetime.today() + timedelta(days=365))
        .serial_number(serial)
        .public_key(publicKey)
        .add_extension(
            x509.BasicConstraints(ca=False, path_length=None),
            critical=True)
        .add_extension(
            x509.SubjectAlternativeName([
                x509.DNSName(hostname)]),
            critical=False)
        .add_extension(
            x509.KeyUsage(
                digital_signature=True,
                content_commitment=False,
                key_encipherment=True,
                data_encipherment=False,
                key_agreement=False,
                key_cert_sign=False,
                crl_sign=False,
                encipher_only=False,
                decipher_only=False),
            critical=True)
        .add_extension(
            x509.ExtendedKeyUsage([
                ExtendedKeyUsageOID.SERVER_AUTH]),
            critical=False)
        .sign(
            private_key=privateKey,
            algorithm=hashes.SHA256(),
            backend=default_backend()))
    return '\n'.join([
        privateKey.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.PKCS8,
            encryption_algorithm=serialization.NoEncryption()),
        certificate.public_bytes(
            encoding=serialization.Encoding.PEM),
    ])
0.000825
def _log_control(self, s):
    """Write control characters to the appropriate log files"""
    if self.encoding is not None:
        s = s.decode(self.encoding, 'replace')
    self._log(s, 'send')
0.009434
def download_era5_for_gssha(main_directory,
                            start_datetime,
                            end_datetime,
                            leftlon=-180,
                            rightlon=180,
                            toplat=90,
                            bottomlat=-90,
                            precip_only=False):
    """
    Function to download ERA5 data for GSSHA

    .. note:: https://software.ecmwf.int/wiki/display/WEBAPI/Access+ECMWF+Public+Datasets

    Args:
        main_directory(:obj:`str`): Location of the output for the forecast data.
        start_datetime(:obj:`str`): Datetime for download start.
        end_datetime(:obj:`str`): Datetime for download end.
        leftlon(Optional[:obj:`float`]): Left bound for longitude. Default is -180.
        rightlon(Optional[:obj:`float`]): Right bound for longitude. Default is 180.
        toplat(Optional[:obj:`float`]): Top bound for latitude. Default is 90.
        bottomlat(Optional[:obj:`float`]): Bottom bound for latitude. Default is -90.
        precip_only(Optional[bool]): If True, will only download precipitation.

    Example::

        from gsshapy.grid.era_to_gssha import download_era5_for_gssha

        era5_folder = '/era5'
        leftlon = -95
        rightlon = -75
        toplat = 35
        bottomlat = 30
        download_era5_for_gssha(era5_folder, leftlon, rightlon, toplat, bottomlat)

    """
    # parameters: https://software.ecmwf.int/wiki/display/CKB/ERA5_test+data+documentation#ERA5_testdatadocumentation-Parameterlistings

    # import here to make sure it is not required to run
    from ecmwfapi import ECMWFDataServer
    server = ECMWFDataServer()

    try:
        mkdir(main_directory)
    except OSError:
        pass

    download_area = "{toplat}/{leftlon}/{bottomlat}/{rightlon}".format(toplat=toplat,
                                                                       leftlon=leftlon,
                                                                       bottomlat=bottomlat,
                                                                       rightlon=rightlon)
    download_datetime = start_datetime
    while download_datetime <= end_datetime:
        download_file = path.join(main_directory, "era5_gssha_{0}.nc".format(download_datetime.strftime("%Y%m%d")))
        download_date = download_datetime.strftime("%Y-%m-%d")
        if not path.exists(download_file) and not precip_only:
            server.retrieve({
                'dataset': "era5_test",
                # 'oper' specifies the high resolution daily data, as opposed to monthly means, wave, eda edmm, etc.
                'stream': "oper",
                # We want instantaneous parameters, which are archived as type Analysis ('an') as opposed to forecast (fc)
                'type': "an",
                # Surface level, as opposed to pressure level (pl) or model level (ml)
                'levtype': "sfc",
                # For parameter codes see the ECMWF parameter database at http://apps.ecmwf.int/codes/grib/param-db
                'param': "2t/2d/sp/10u/10v/tcc",
                # The spatial resolution in ERA5 is 31 km globally on a Gaussian grid.
                # Here we use lat/long with 0.25 degrees, which is approximately the equivalent of 31km.
                'grid': "0.25/0.25",
                # ERA5 provides hourly analysis
                'time': "00/to/23/by/1",
                # area: N/W/S/E
                'area': download_area,
                'date': download_date,
                'target': download_file,
                'format': 'netcdf',
            })

        era5_request = {
            'dataset': "era5_test",
            'stream': "oper",
            'type': "fc",
            'levtype': "sfc",
            'param': "tp/ssrd",
            'grid': "0.25/0.25",
            'area': download_area,
            'format': 'netcdf',
        }
        prec_download_file = path.join(main_directory, "era5_gssha_{0}_fc.nc".format(download_datetime.strftime("%Y%m%d")))
        loc_download_file0 = path.join(main_directory, "era5_gssha_{0}_0_fc.nc".format(download_datetime.strftime("%Y%m%d")))
        loc_download_file1 = path.join(main_directory, "era5_gssha_{0}_1_fc.nc".format(download_datetime.strftime("%Y%m%d")))
        loc_download_file2 = path.join(main_directory, "era5_gssha_{0}_2_fc.nc".format(download_datetime.strftime("%Y%m%d")))

        if download_datetime <= start_datetime and not path.exists(loc_download_file0):
            loc_download_date = (download_datetime - timedelta(1)).strftime("%Y-%m-%d")
            # precipitation 0000-0600
            era5_request['step'] = "6/to/12/by/1"
            era5_request['time'] = "18"
            era5_request['target'] = loc_download_file0
            era5_request['date'] = loc_download_date
            server.retrieve(era5_request)

        if download_datetime == end_datetime and not path.exists(loc_download_file1):
            loc_download_date = download_datetime.strftime("%Y-%m-%d")
            # precipitation 0600-1800
            era5_request['step'] = "1/to/12/by/1"
            era5_request['time'] = "06"
            era5_request['target'] = loc_download_file1
            era5_request['date'] = loc_download_date
            server.retrieve(era5_request)

        if download_datetime == end_datetime and not path.exists(loc_download_file2):
            loc_download_date = download_datetime.strftime("%Y-%m-%d")
            # precipitation 1800-2300
            era5_request['step'] = "1/to/5/by/1"
            era5_request['time'] = "18"
            era5_request['target'] = loc_download_file2
            era5_request['date'] = loc_download_date
            server.retrieve(era5_request)

        if download_datetime < end_datetime and not path.exists(prec_download_file):
            # precipitation 0600-0600 (next day)
            era5_request['step'] = "1/to/12/by/1"
            era5_request['time'] = "06/18"
            era5_request['target'] = prec_download_file
            era5_request['date'] = download_date
            server.retrieve(era5_request)

        download_datetime += timedelta(1)
0.004892
def to_log(self,
           step=1.0,
           start=None,
           stop=None,
           basis=None,
           field=None,
           field_function=None,
           dtype=None,
           table=None,
           legend=None,
           legend_field=None,
           match_only=None,
           undefined=0,
           return_meta=False):
    """
    Return a fully sampled log from a striplog. Useful for crossplotting
    with log data, for example.

    Args:
        step (float): The step size. Default: 1.0.
        start (float): The start depth of the new log. You will want to
            match the logs, so use the start depth from the LAS file.
            Default: The basis if provided, else the start of the striplog.
        stop (float): The stop depth of the new log. Use the stop depth
            of the LAS file. Default: The basis if provided, else the stop
            depth of the striplog.
        field (str): If you want the data to come from one of the
            attributes of the components in the striplog, provide it.
        field_function (function): Provide a function to apply to the
            field you are asking for. It's up to you to make sure the
            function does what you want.
        legend (Legend): If you want the codes to come from a legend,
            provide one. Otherwise the codes come from the log, using
            integers in the order of prevalence. If you use a legend,
            they are assigned in the order of the legend.
        legend_field (str): If you want to get a log representing one of
            the fields in the legend, such as 'width' or 'grainsize'.
        match_only (list): If you only want to match some attributes of
            the Components (e.g. lithology), provide a list of those you
            want to match.
        undefined (number): What to fill in where no value can be
            determined, e.g. ``-999.25`` or ``np.null``. Default 0.
        return_meta (bool): If ``True``, also return the depth basis
            (np.linspace), and the component table.

    Returns:
        ndarray: If ``return_meta`` was ``True``, you get:

            * The log data as an array of ints.
            * The depth basis as an array of floats.
            * A list of the components in the order matching the ints.

        If ``return_meta`` was ``False`` (the default), you only get the
        log data.
    """
    # Make the preparations.
    if basis is not None:
        start, stop = basis[0], basis[-1]
        step = basis[1] - start
    else:
        start = start or self.start.z
        stop = stop or self.stop.z
        pts = np.ceil((stop - start)/step) + 1
        basis = np.linspace(start, stop, int(pts))

    if (field is not None) or (legend_field is not None):
        result = np.zeros_like(basis, dtype=dtype)
    else:
        result = np.zeros_like(basis, dtype=np.int)

    if np.isnan(undefined):
        try:
            result[:] = np.nan
        except:
            pass  # array type is int

    # If needed, make a look-up table for the log values.
    if table is None:
        table = [Component({})]
        if legend:
            table += [j.component for j in legend]
        elif field:
            s = set([iv.data.get(field) for iv in self])
            table = [None] + list(filter(None, s))
        else:
            table += [j[0] for j in self.unique]

    # Adjust the table if necessary. Go over all the components in the
    # table list, and remove elements that are not in the match list.
    # Careful! This results in a new table, with components that may not
    # be in the original list of components.
    if match_only is not None:
        if not isinstance(match_only, (list, tuple, set,)):
            raise StriplogError("match_only should be a list, not a string")
        table_new = []
        for c in table:
            if c == '':
                continue  # No idea why sometimes there's a ''
            c_new = Component({k: v for k, v in c.__dict__.items()
                               if k in match_only})
            # Only add unique, and preserve order.
            if c_new not in table_new:
                table_new.append(c_new)
        table = table_new
    else:
        match_only = []

    start_ix = self.read_at(start, index=True)
    stop_ix = self.read_at(stop, index=True)
    if stop_ix is not None:
        stop_ix += 1

    # Assign the values.
    for i in self[start_ix:stop_ix]:
        c = i.primary
        if match_only:
            c = Component({k: getattr(c, k, None)
                           for k in match_only})

        if legend and legend_field:
            # Use the legend field.
            try:
                key = legend.getattr(c, legend_field, undefined)
                key = key or undefined
            except ValueError:
                key = undefined
        elif field:
            # Get data directly from that field in iv.data.
            f = field_function or utils.null
            try:
                v = f(i.data.get(field, undefined)) or undefined
                key = table.index(v)
            except ValueError:
                key = undefined
        else:
            # Use the lookup table.
            try:
                key = table.index(c) or undefined
            except ValueError:
                key = undefined

        top_index = int(np.ceil((max(start, i.top.z) - start)/step))
        base_index = int(np.ceil((min(stop, i.base.z) - start)/step))

        try:
            result[top_index:base_index+1] = key
        except:
            # Have a list or array or something.
            result[top_index:base_index+1] = key[0]

    if return_meta:
        return result, basis, table
    else:
        return result
0.003017
def reset(self):
    "Reset the hidden states."
    [r.reset() for r in self.rnns if hasattr(r, 'reset')]
    if self.qrnn:
        self.hidden = [self._one_hidden(l) for l in range(self.n_layers)]
    else:
        self.hidden = [(self._one_hidden(l), self._one_hidden(l)) for l in range(self.n_layers)]
0.026316
def intersection_spectrail_arcline(spectrail, arcline):
    """Compute intersection of spectrum trail with arc line.

    Parameters
    ----------
    spectrail : SpectrumTrail object
        Instance of SpectrumTrail class.
    arcline : ArcLine object
        Instance of ArcLine class

    Returns
    -------
    xroot, yroot : tuple of floats
        (X,Y) coordinates of the intersection.
    """
    # approximate location of the solution
    expected_x = (arcline.xlower_line + arcline.xupper_line) / 2.0

    # composition of polynomials to find intersection as
    # one of the roots of a new polynomial
    rootfunct = arcline.poly_funct(spectrail.poly_funct)
    rootfunct.coef[1] -= 1

    # compute roots to find solution
    tmp_xroots = rootfunct.roots()

    # take the nearest root to the expected location
    xroot = tmp_xroots[np.abs(tmp_xroots - expected_x).argmin()]
    if np.isreal(xroot):
        xroot = xroot.real
    else:
        raise ValueError("xroot=" + str(xroot) + " is a complex number")
    yroot = spectrail.poly_funct(xroot)

    return xroot, yroot
0.000896
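A self-contained sketch of the composition trick used above, with made-up toy polynomials in place of real SpectrumTrail/ArcLine calibrations (numpy only):

import numpy as np
from numpy.polynomial import Polynomial

# Spectrum trail: y = p(x); arc line: x = q(y) (toy coefficients).
p = Polynomial([10.0, 0.01])
q = Polynomial([500.0, 2.0, 0.001])

r = q(p)           # composition gives x = q(p(x)) as a Polynomial in x
r.coef[1] -= 1.0   # solve q(p(x)) - x = 0, as in rootfunct above
roots = r.roots()
xroots = roots[np.isreal(roots)].real   # keep the real intersections
yroots = p(xroots)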
def report(self, stream): """Writes an Xunit-formatted XML file The file includes a report of test errors and failures. """ self.stats['encoding'] = self.encoding self.stats['total'] = (self.stats['errors'] + self.stats['failures'] + self.stats['passes'] + self.stats['skipped']) self.error_report_file.write( u'<?xml version="1.0" encoding="%(encoding)s"?>' u'<testsuite name="nosetests" tests="%(total)d" ' u'errors="%(errors)d" failures="%(failures)d" ' u'skip="%(skipped)d">' % self.stats) self.error_report_file.write(u''.join([self._forceUnicode(e) for e in self.errorlist])) self.error_report_file.write(u'</testsuite>') self.error_report_file.close() if self.config.verbosity > 1: stream.writeln("-" * 70) stream.writeln("XML: %s" % self.error_report_file.name)
0.001998
def tree_text(node): """ >>> tree_text(parse_minidom('<h1>one</h1>two<div>three<em>four</em></div>')) 'one two three four' """ text = [] for descendant in walk_dom(node): if is_text(descendant): text.append(descendant.nodeValue) return ' '.join(text)
0.006711
def pop_callback(obj): """Pop a single callback.""" callbacks = obj._callbacks if not callbacks: return if isinstance(callbacks, Node): node = callbacks obj._callbacks = None else: node = callbacks.first callbacks.remove(node) if not callbacks: obj._callbacks = None return node.data, node.extra
0.002639
def _pelita_member_filter(parent_name, item_names): """ Filter a list of autodoc items for which to generate documentation. Include only imports that come from the documented module or its submodules. """ filtered_names = [] if parent_name not in sys.modules: return item_names module = sys.modules[parent_name] for item_name in item_names: item = getattr(module, item_name, None) location = getattr(item, '__module__', None) if location is None or (location + ".").startswith(parent_name + "."): filtered_names.append(item_name) return filtered_names
0.00156
def handle_message(self, msg): """Handle a message from the server. Parameters ---------- msg : Message object The Message to dispatch to the handler methods. """ # log messages received so that no one else has to if self._logger.isEnabledFor(logging.DEBUG): self._logger.debug( "received from {}: {}" .format(self.bind_address_string, repr(str(msg)))) if msg.mtype == Message.INFORM: return self.handle_inform(msg) elif msg.mtype == Message.REPLY: return self.handle_reply(msg) elif msg.mtype == Message.REQUEST: return self.handle_request(msg) else: self._logger.error("Unexpected message type from server ['%s']." % (msg,))
0.002347
def predictive_variance(self, mu, variance, predictive_mean=None, Y_metadata=None):
    """
    Approximation to the predictive variance: V(Y_star)

    The following variance decomposition is used:
    V(Y_star) = E( V(Y_star|f_star) ) + V( E(Y_star|f_star) )

    :param mu: mean of posterior
    :param variance: variance of posterior
    :param predictive_mean: output's predictive mean; if None, the
        predictive_mean function will be called.
    """
    normalizer = np.sqrt(2*np.pi*variance)

    fmin_v = -np.inf
    fmin_m = -np.inf
    fmax = np.inf

    from ..util.misc import safe_exp

    # E( V(Y_star|f_star) )
    def int_var(f, m, v):
        exponent = -(0.5/v)*np.square(f - m)
        p = safe_exp(exponent)
        # If p is zero then conditional_variance will overflow
        if p < 1e-10:
            return 0.
        else:
            return self.conditional_variance(f)*p

    scaled_exp_variance = [quad(int_var, fmin_v, fmax, args=(mj, s2j))[0]
                           for mj, s2j in zip(mu, variance)]
    exp_var = np.array(scaled_exp_variance)[:, None] / normalizer

    # V( E(Y_star|f_star) ) = E( E(Y_star|f_star)**2 ) - E( E(Y_star|f_star) )**2

    # E( E(Y_star|f_star) )**2
    if predictive_mean is None:
        predictive_mean = self.predictive_mean(mu, variance)
    predictive_mean_sq = predictive_mean**2

    # E( E(Y_star|f_star)**2 )
    def int_pred_mean_sq(f, m, v, predictive_mean_sq):
        exponent = -(0.5/v)*np.square(f - m)
        p = np.exp(exponent)
        # If p is zero then conditional_mean**2 will overflow
        if p < 1e-10:
            return 0.
        else:
            return self.conditional_mean(f)**2*p

    scaled_exp_exp2 = [quad(int_pred_mean_sq, fmin_m, fmax, args=(mj, s2j, pm2j))[0]
                       for mj, s2j, pm2j in zip(mu, variance, predictive_mean_sq)]
    exp_exp2 = np.array(scaled_exp_exp2)[:, None] / normalizer
    var_exp = exp_exp2 - predictive_mean_sq

    # V(Y_star) = E[ V(Y_star|f_star) ] + V[ E(Y_star|f_star) ]
    #           = E[ V(Y_star|f_star) ] + E[ E(Y_star|f_star)**2 ] - E[ E(Y_star|f_star) ]**2
    return exp_var + var_exp
0.015291
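A quick numeric check of the variance decomposition above for the one case with a closed form: Y|f ~ N(f, s2) with posterior f ~ N(m, v), so V(Y) = s2 + v. Standalone scipy/numpy, toy numbers:

import numpy as np
from scipy.integrate import quad

m, v, s2 = 0.3, 0.5, 0.2

def post_pdf(f):
    # posterior density N(m, v)
    return np.exp(-0.5 * (f - m) ** 2 / v) / np.sqrt(2 * np.pi * v)

exp_var = quad(lambda f: s2 * post_pdf(f), -10, 10)[0]     # E[V(Y|f)] = s2
e_mean = quad(lambda f: f * post_pdf(f), -10, 10)[0]       # E[E(Y|f)] = m
e_mean2 = quad(lambda f: f**2 * post_pdf(f), -10, 10)[0]   # = m**2 + v
var_mean = e_mean2 - e_mean**2                             # V[E(Y|f)] = v

assert np.isclose(exp_var + var_mean, s2 + v)   # law of total variance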
def twofilter_smoothing(self, t, info, phi, loggamma, linear_cost=False, return_ess=False, modif_forward=None, modif_info=None): """Two-filter smoothing. Parameters ---------- t: time, in range 0 <= t < T-1 info: SMC object the information filter phi: function test function, a function of (X_t,X_{t+1}) loggamma: function a function of (X_{t+1}) linear_cost: bool if True, use the O(N) variant (basic version is O(N^2)) Returns ------- Two-filter estimate of the smoothing expectation of phi(X_t,x_{t+1}) """ ti = self.T - 2 - t # t+1 in reverse if t < 0 or t >= self.T - 1: raise ValueError( 'two-filter smoothing: t must be in range 0,...,T-2') lwinfo = info.hist.wgt[ti].lw - loggamma(info.hist.X[ti]) if linear_cost: return self._twofilter_smoothing_ON(t, ti, info, phi, lwinfo, return_ess, modif_forward, modif_info) else: return self._twofilter_smoothing_ON2(t, ti, info, phi, lwinfo)
0.005464
def installed(name,
              pkgs=None,
              user=None,
              install_global=False,
              env=None):
    '''
    Verify that the given package is installed
    and is at the correct version (if specified).

    .. code-block:: yaml

        ShellCheck-0.3.5:
          cabal:
            - installed:

    name
        The package to install

    user
        The user to run cabal install with

    install_global
        Install package globally instead of locally

    env
        A list of environment variables to be set prior to execution. The
        format is the same as the :py:func:`cmd.run <salt.states.cmd.run>`
        state function.
    '''
    ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}

    try:
        call = __salt__['cabal.update'](user=user, env=env)
    except (CommandNotFoundError, CommandExecutionError) as err:
        ret['result'] = False
        ret['comment'] = 'Could not run cabal update: {0}'.format(err)
        return ret

    if pkgs is not None:
        pkg_list = pkgs
    else:
        pkg_list = [name]

    try:
        installed_pkgs = __salt__['cabal.list'](
            user=user, installed=True, env=env)
    except (CommandNotFoundError, CommandExecutionError) as err:
        ret['result'] = False
        ret['comment'] = 'Error looking up \'{0}\': {1}'.format(name, err)
        return ret

    pkgs_satisfied = []
    pkgs_to_install = []

    for pkg in pkg_list:
        pkg_name, _, pkg_ver = _parse_pkg_string(pkg)

        if pkg_name not in installed_pkgs:
            pkgs_to_install.append(pkg)
        else:
            if pkg_ver:  # version is specified
                if installed_pkgs[pkg_name] != pkg_ver:
                    pkgs_to_install.append(pkg)
                else:
                    pkgs_satisfied.append(pkg)
            else:
                pkgs_satisfied.append(pkg)

    if __opts__['test']:
        ret['result'] = None

        comment_msg = []

        if pkgs_to_install:
            comment_msg.append(
                'Package(s) \'{0}\' are set to be installed'.format(
                    ', '.join(pkgs_to_install)))

        if pkgs_satisfied:
            comment_msg.append(
                'Package(s) \'{0}\' satisfied by {1}'.format(
                    ', '.join(pkg_list), ', '.join(pkgs_satisfied)))

        ret['comment'] = '. '.join(comment_msg)
        return ret

    if not pkgs_to_install:
        ret['result'] = True
        ret['comment'] = ('Package(s) \'{0}\' satisfied by {1}'.format(
            ', '.join(pkg_list), ', '.join(pkgs_satisfied)))

        return ret

    try:
        call = __salt__['cabal.install'](pkgs=pkg_list,
                                         user=user,
                                         install_global=install_global,
                                         env=env)
    except (CommandNotFoundError, CommandExecutionError) as err:
        ret['result'] = False
        ret['comment'] = 'Error installing \'{0}\': {1}'.format(
            ', '.join(pkg_list), err)
        return ret

    if call and isinstance(call, dict):
        ret['result'] = True
        ret['changes'] = {'old': [], 'new': pkgs_to_install}
        ret['comment'] = 'Package(s) \'{0}\' successfully installed'.format(
            ', '.join(pkgs_to_install))
    else:
        ret['result'] = False
        ret['comment'] = 'Could not install package(s) \'{0}\''.format(
            ', '.join(pkg_list))

    return ret
0.000286
def remove(self, arr):
    """Removes an array from the list

    Parameters
    ----------
    arr: str or :class:`InteractiveBase`
        The array name or the data object in this list to remove

    Raises
    ------
    ValueError
        If no array with the specified array name is in the list"""
    name = arr if isinstance(arr, six.string_types) else arr.psy.arr_name
    if arr not in self:
        raise ValueError(
            "Array {0} not in the list".format(name))
    for i, obj in enumerate(self):
        if obj.psy.arr_name == name:
            del self[i]
            return
    raise ValueError(
        "No array found with name {0}".format(name))
0.002677
def Concat(*args: Union[BitVec, List[BitVec]]) -> BitVec: """Create a concatenation expression. :param args: :return: """ # The following statement is used if a list is provided as an argument to concat if len(args) == 1 and isinstance(args[0], list): bvs = args[0] # type: List[BitVec] else: bvs = cast(List[BitVec], args) nraw = z3.Concat([a.raw for a in bvs]) annotations = [] # type: Annotations bitvecfunc = False for bv in bvs: annotations += bv.annotations if isinstance(bv, BitVecFunc): bitvecfunc = True if bitvecfunc: # Is there a better value to set func_name and input to in this case? return BitVecFunc( raw=nraw, func_name=None, input_=None, annotations=annotations ) return BitVec(nraw, annotations)
0.002347
def calculate_clock_angle(inst): """ Calculate IMF clock angle and magnitude of IMF in GSM Y-Z plane Parameters ----------- inst : pysat.Instrument Instrument with OMNI HRO data """ # Calculate clock angle in degrees clock_angle = np.degrees(np.arctan2(inst['BY_GSM'], inst['BZ_GSM'])) clock_angle[clock_angle < 0.0] += 360.0 inst['clock_angle'] = pds.Series(clock_angle, index=inst.data.index) # Calculate magnitude of IMF in Y-Z plane inst['BYZ_GSM'] = pds.Series(np.sqrt(inst['BY_GSM']**2 + inst['BZ_GSM']**2), index=inst.data.index) return
0.004367
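The same clock-angle arithmetic as a standalone numpy sketch, without pysat or pandas (toy field values):

import numpy as np

by = np.array([2.0, -3.0, 0.0])   # IMF By in GSM, toy values
bz = np.array([1.0, 1.0, -4.0])   # IMF Bz in GSM, toy values

clock_angle = np.degrees(np.arctan2(by, bz))
clock_angle[clock_angle < 0.0] += 360.0   # map (-180, 180] onto [0, 360)
byz_gsm = np.sqrt(by**2 + bz**2)          # |B| in the GSM Y-Z plane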
def get_name(self): """Accessor to service_description attribute or name if first not defined :return: service name :rtype: str """ if hasattr(self, 'service_description'): return self.service_description if hasattr(self, 'name'): return self.name return 'SERVICE-DESCRIPTION-MISSING'
0.008219
def _remove_bottleneck(net_flux, path): """ Internal function for modifying the net flux matrix by removing a particular edge, corresponding to the bottleneck of a particular path. """ net_flux = copy.copy(net_flux) bottleneck_ind = net_flux[path[:-1], path[1:]].argmin() net_flux[path[bottleneck_ind], path[bottleneck_ind + 1]] = 0.0 return net_flux
0.002571
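A toy demonstration of the fancy-indexing idiom above on a three-state network:

import numpy as np

net_flux = np.array([[0.0, 5.0, 0.0],
                     [0.0, 0.0, 2.0],
                     [0.0, 0.0, 0.0]])
path = [0, 1, 2]

edges = net_flux[path[:-1], path[1:]]    # fluxes along (0->1, 1->2): [5., 2.]
bn = edges.argmin()                      # index 1, i.e. the 1->2 edge
net_flux[path[bn], path[bn + 1]] = 0.0   # cut the bottleneck edge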
def get(self, **params):
    """Performs get request to the biomart service.

    Args:
        **params (dict of str: any): Arbitrary keyword arguments, which
            are added as parameters to the get request to biomart.

    Returns:
        requests.models.Response: Response from biomart for the request.
    """
    if self._use_cache:
        # requests_cache patches requests globally, so this plain get is
        # transparently served from (and stored in) the installed cache.
        r = requests.get(self.url, params=params)
    else:
        # Explicitly bypass the globally installed cache.
        with requests_cache.disabled():
            r = requests.get(self.url, params=params)
    r.raise_for_status()
    return r
0.003367
def _set_logger(self, name=None):
    """Adds a logger with a given `name`.

    If no name is given, it is constructed from the instance's module and
    class as ``'%s.%s' % (cls.__module__, cls.__name__)``.
    """
    if name is None:
        cls = self.__class__
        name = '%s.%s' % (cls.__module__, cls.__name__)
    self._logger = logging.getLogger(name)
0.005865
def get(self, branch='master', filename=''): """Retrieve _filename_ from GitLab. Args: branch (str): Git Branch to find file. filename (str): Name of file to retrieve relative to root of Git repository, or _runway_dir_ if specified. Returns: str: Contents of file. """ file_contents = '' if self.runway_dir: file_contents = self.local_file(filename=filename) else: file_contents = self.remote_file(branch=branch, filename=filename) return file_contents
0.00335
def trainHMM_computeStatistics(features, labels):
    '''
    This function computes the statistics used to train an HMM joint
    segmentation-classification model using a sequence of sequential
    features and respective labels

    ARGUMENTS:
     - features:   a numpy matrix of feature vectors (numOfDimensions x n_wins)
     - labels:     a numpy array of class indices (n_wins x 1)
    RETURNS:
     - start_prob: vector of prior class probabilities (n_classes, )
     - transmat:   transition matrix (n_classes x n_classes)
     - means:      class means matrix (n_classes x numOfDimensions)
     - cov:        per-dimension standard deviations (n_classes x numOfDimensions)
    '''
    u_labels = numpy.unique(labels)
    n_comps = len(u_labels)

    n_feats = features.shape[0]

    if features.shape[1] < labels.shape[0]:
        print("trainHMM warning: number of short-term feature vectors "
              "must be greater or equal to the labels length!")
        labels = labels[0:features.shape[1]]

    # compute prior probabilities:
    start_prob = numpy.zeros((n_comps,))
    for i, u in enumerate(u_labels):
        start_prob[i] = numpy.count_nonzero(labels == u)

    # normalize prior probabilities
    start_prob = start_prob / start_prob.sum()

    # compute transition matrix:
    transmat = numpy.zeros((n_comps, n_comps))
    for i in range(labels.shape[0]-1):
        transmat[int(labels[i]), int(labels[i + 1])] += 1
    # normalize rows of transition matrix:
    for i in range(n_comps):
        transmat[i, :] /= transmat[i, :].sum()

    means = numpy.zeros((n_comps, n_feats))
    for i in range(n_comps):
        means[i, :] = \
            features[:, numpy.nonzero(labels == u_labels[i])[0]].mean(axis=1)

    cov = numpy.zeros((n_comps, n_feats))
    for i in range(n_comps):
        # use numpy.cov(...) here instead if HMMs with full gaussian
        # distributions are to be used:
        cov[i, :] = numpy.std(features[:, numpy.nonzero(labels ==
                                                        u_labels[i])[0]],
                              axis=1)

    return start_prob, transmat, means, cov
0.004012
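The prior and transition estimates above, reduced to a standalone numpy sketch on a toy two-class label sequence:

import numpy as np

labels = np.array([0, 0, 1, 1, 1, 0])
classes = np.unique(labels)

start_prob = np.array([np.count_nonzero(labels == u) for u in classes],
                      dtype=float)
start_prob /= start_prob.sum()           # -> [0.5, 0.5]

transmat = np.zeros((2, 2))
for a, b in zip(labels[:-1], labels[1:]):
    transmat[a, b] += 1.0                # count consecutive-label pairs
transmat /= transmat.sum(axis=1, keepdims=True)   # row-normalize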
def parse_table(document, tbl):
    "Parse table element."

    def _change(rows, pos_x):
        if len(rows) == 1:
            return rows

        count_x = 1
        for x in rows[-1]:
            if count_x == pos_x:
                x.row_span += 1
            count_x += x.grid_span

        return rows

    table = doc.Table()

    tbl_pr = tbl.find(_name('{{{w}}}tblPr'))
    if tbl_pr is not None:
        parse_table_properties(document, table, tbl_pr)

    for tr in tbl.xpath('./w:tr', namespaces=NAMESPACES):
        columns = []
        pos_x = 0

        for tc in tr.xpath('./w:tc', namespaces=NAMESPACES):
            cell = doc.TableCell()

            tc_pr = tc.find(_name('{{{w}}}tcPr'))
            if tc_pr is not None:
                parse_table_column_properties(doc, cell, tc_pr)

            # Advance the grid position; arguably this should happen
            # after the vmerge check below, so that _change receives the
            # position of the merged cell itself.
            pos_x += cell.grid_span

            if cell.vmerge is not None and cell.vmerge == "":
                table.rows = _change(table.rows, pos_x)
            else:
                for p in tc.xpath('./w:p', namespaces=NAMESPACES):
                    cell.elements.append(parse_paragraph(document, p))

                columns.append(cell)

        table.rows.append(columns)

    return table
0.000806
def join(self, target): """join a channel""" password = self.config.passwords.get( target.strip(self.server_config['CHANTYPES'])) if password: target += ' ' + password self.send_line('JOIN %s' % target)
0.007752
def _filesec(self, files=None): """ Returns fileSec Element containing all files grouped by use. """ if files is None: files = self.all_files() filesec = etree.Element(utils.lxmlns("mets") + "fileSec") filegrps = {} for file_ in files: if file_.type.lower() not in ("item", AIP_ENTRY_TYPE): continue # Get fileGrp, or create if not exist filegrp = filegrps.get(file_.use) if filegrp is None: filegrp = etree.SubElement( filesec, utils.lxmlns("mets") + "fileGrp", USE=file_.use ) filegrps[file_.use] = filegrp file_el = file_.serialize_filesec() if file_el is not None: filegrp.append(file_el) return filesec
0.002331
def validate_value_type(value, spec): """ c_value_type = {'base': 'string', 'enumeration': ['Permit', 'Deny', 'Indeterminate']} {'member': 'anyURI', 'base': 'list'} {'base': 'anyURI'} {'base': 'NCName'} {'base': 'string'} """ if "maxlen" in spec: return len(value) <= int(spec["maxlen"]) if spec["base"] == "string": if "enumeration" in spec: if value not in spec["enumeration"]: raise NotValid("value not in enumeration") else: return valid_string(value) elif spec["base"] == "list": # comma separated list of values for val in [v.strip() for v in value.split(",")]: valid(spec["member"], val) else: return valid(spec["base"], value) return True
0.001164
def __set_no_protein(self, hgvs_string): """Set a flag for no protein expected. ("p.0" or "p.0?") Args: hgvs_string (str): hgvs syntax with "p." removed """ no_protein_list = ['0', '0?'] # no protein symbols if hgvs_string in no_protein_list: self.is_no_protein = True self.is_non_silent = True else: self.is_no_protein = False
0.004706
def get_from_package(package_name, path): """ Get the absolute path to a file in a package. Parameters ---------- package_name : str e.g. 'mpu' path : str Path within a package Returns ------- filepath : str """ filepath = pkg_resources.resource_filename(package_name, path) return os.path.abspath(filepath)
0.002681
def read_igpar(self):
    """
    Renders accessible:
        er_ev = e<r>_ev (dictionary with Spin.up/Spin.down as keys)
        er_bp = e<r>_bp (dictionary with Spin.up/Spin.down as keys)
        er_ev_tot = spin up + spin down summed
        er_bp_tot = spin up + spin down summed
        p_elc = spin up + spin down summed
        p_ion = spin up + spin down summed

    (See VASP section "LBERRY, IGPAR, NPPSTR, DIPOL tags" for info on
    what these are).
    """

    # variables to be filled
    self.er_ev = {}  # will be dict (Spin.up/down) of array(3*float)
    self.er_bp = {}  # will be dict (Spin.up/down) of array(3*float)
    self.er_ev_tot = None  # will be array(3*float)
    self.er_bp_tot = None  # will be array(3*float)
    self.p_elc = None
    self.p_ion = None
    try:
        search = []

        # Nonspin cases
        def er_ev(results, match):
            results.er_ev[Spin.up] = np.array([float(match.group(i))
                                               for i in range(1, 4)]) / 2
            results.er_ev[Spin.down] = results.er_ev[Spin.up]
            results.context = 2

        search.append([r"^ *e<r>_ev=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
                       r"*([-0-9.Ee+]*) *\)",
                       None, er_ev])

        def er_bp(results, match):
            results.er_bp[Spin.up] = np.array([float(match.group(i))
                                               for i in range(1, 4)]) / 2
            results.er_bp[Spin.down] = results.er_bp[Spin.up]

        search.append([r"^ *e<r>_bp=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
                       r"*([-0-9.Ee+]*) *\)",
                       lambda results, line: results.context == 2, er_bp])

        # Spin cases
        def er_ev_up(results, match):
            results.er_ev[Spin.up] = np.array([float(match.group(i))
                                               for i in range(1, 4)])
            results.context = Spin.up

        search.append([r"^.*Spin component 1 *e<r>_ev=\( *([-0-9.Ee+]*) "
                       r"*([-0-9.Ee+]*) *([-0-9.Ee+]*) *\)",
                       None, er_ev_up])

        def er_bp_up(results, match):
            results.er_bp[Spin.up] = np.array([float(match.group(1)),
                                               float(match.group(2)),
                                               float(match.group(3))])

        search.append([r"^ *e<r>_bp=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
                       r"*([-0-9.Ee+]*) *\)",
                       lambda results, line: results.context == Spin.up,
                       er_bp_up])

        def er_ev_dn(results, match):
            results.er_ev[Spin.down] = np.array([float(match.group(1)),
                                                 float(match.group(2)),
                                                 float(match.group(3))])
            results.context = Spin.down

        search.append([r"^.*Spin component 2 *e<r>_ev=\( *([-0-9.Ee+]*) "
                       r"*([-0-9.Ee+]*) *([-0-9.Ee+]*) *\)",
                       None, er_ev_dn])

        def er_bp_dn(results, match):
            results.er_bp[Spin.down] = np.array([float(match.group(i))
                                                 for i in range(1, 4)])

        search.append([r"^ *e<r>_bp=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
                       r"*([-0-9.Ee+]*) *\)",
                       lambda results, line: results.context == Spin.down,
                       er_bp_dn])

        # Always present spin/non-spin
        def p_elc(results, match):
            results.p_elc = np.array([float(match.group(i))
                                      for i in range(1, 4)])

        search.append([r"^.*Total electronic dipole moment: "
                       r"*p\[elc\]=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
                       r"*([-0-9.Ee+]*) *\)", None, p_elc])

        def p_ion(results, match):
            results.p_ion = np.array([float(match.group(i))
                                      for i in range(1, 4)])

        search.append([r"^.*ionic dipole moment: "
                       r"*p\[ion\]=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
                       r"*([-0-9.Ee+]*) *\)", None, p_ion])

        self.context = None
        self.er_ev = {Spin.up: None, Spin.down: None}
        self.er_bp = {Spin.up: None, Spin.down: None}

        micro_pyawk(self.filename, search, self)

        if self.er_ev[Spin.up] is not None and \
                self.er_ev[Spin.down] is not None:
            self.er_ev_tot = self.er_ev[Spin.up] + self.er_ev[Spin.down]

        if self.er_bp[Spin.up] is not None and \
                self.er_bp[Spin.down] is not None:
            self.er_bp_tot = self.er_bp[Spin.up] + self.er_bp[Spin.down]

    except Exception:
        self.er_ev_tot = None
        self.er_bp_tot = None
        raise Exception("IGPAR OUTCAR could not be parsed.")
0.001142
def reconnectUnitSignalsToModel(synthesisedUnitOrIntf, modelCls): """ Reconnect model signals to unit to run simulation with simulation model but use original unit interfaces for communication :param synthesisedUnitOrIntf: interface where should be signals replaced from signals from modelCls :param modelCls: simulation model form where signals for synthesisedUnitOrIntf should be taken """ obj = synthesisedUnitOrIntf subInterfaces = obj._interfaces if subInterfaces: for intf in subInterfaces: # proxies are destroyed on original interfaces and only proxies on # array items will remain reconnectUnitSignalsToModel(intf, modelCls) else: # reconnect signal from model s = synthesisedUnitOrIntf s._sigInside = getattr(modelCls, s._sigInside.name)
0.002288
def filter_scanline(type, line, fo, prev=None): """Apply a scanline filter to a scanline. `type` specifies the filter type (0 to 4); `line` specifies the current (unfiltered) scanline as a sequence of bytes; `prev` specifies the previous (unfiltered) scanline as a sequence of bytes. `fo` specifies the filter offset; normally this is size of a pixel in bytes (the number of bytes per sample times the number of channels), but when this is < 1 (for bit depths < 8) then the filter offset is 1. """ assert 0 <= type < 5 # The output array. Which, pathetically, we extend one-byte at a # time (fortunately this is linear). out = array('B', [type]) def sub(): ai = -fo for x in line: if ai >= 0: x = (x - line[ai]) & 0xff out.append(x) ai += 1 def up(): for i,x in enumerate(line): x = (x - prev[i]) & 0xff out.append(x) def average(): ai = -fo for i,x in enumerate(line): if ai >= 0: x = (x - ((line[ai] + prev[i]) >> 1)) & 0xff else: x = (x - (prev[i] >> 1)) & 0xff out.append(x) ai += 1 def paeth(): # http://www.w3.org/TR/PNG/#9Filter-type-4-Paeth ai = -fo # also used for ci for i,x in enumerate(line): a = 0 b = prev[i] c = 0 if ai >= 0: a = line[ai] c = prev[ai] p = a + b - c pa = abs(p - a) pb = abs(p - b) pc = abs(p - c) if pa <= pb and pa <= pc: Pr = a elif pb <= pc: Pr = b else: Pr = c x = (x - Pr) & 0xff out.append(x) ai += 1 if not prev: # We're on the first line. Some of the filters can be reduced # to simpler cases which makes handling the line "off the top" # of the image simpler. "up" becomes "none"; "paeth" becomes # "left" (non-trivial, but true). "average" needs to be handled # specially. if type == 2: # "up" type = 0 elif type == 3: prev = [0]*len(line) elif type == 4: # "paeth" type = 1 if type == 0: out.extend(line) elif type == 1: sub() elif type == 2: up() elif type == 3: average() else: # type == 4 paeth() return out
0.004292
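An assumed usage of filter_scanline above: filtering one 8-bit RGB scanline of two pixels (so fo = 3) with the 'sub' filter, type 1:

from array import array

line = array('B', [10, 20, 30, 13, 24, 36])
out = filter_scanline(1, line, fo=3, prev=None)
# out[0] is the filter-type byte, then the filtered bytes:
# array('B', [1, 10, 20, 30, 3, 4, 6])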
def update(self, ip_address=values.unset, friendly_name=values.unset, cidr_prefix_length=values.unset): """ Update the IpAddressInstance :param unicode ip_address: An IP address in dotted decimal notation from which you want to accept traffic. Any SIP requests from this IP address will be allowed by Twilio. IPv4 only supported today. :param unicode friendly_name: A human readable descriptive text for this resource, up to 64 characters long. :param unicode cidr_prefix_length: An integer representing the length of the CIDR prefix to use with this IP address when accepting traffic. By default the entire IP address is used. :returns: Updated IpAddressInstance :rtype: twilio.rest.api.v2010.account.sip.ip_access_control_list.ip_address.IpAddressInstance """ data = values.of({ 'IpAddress': ip_address, 'FriendlyName': friendly_name, 'CidrPrefixLength': cidr_prefix_length, }) payload = self._version.update( 'POST', self._uri, data=data, ) return IpAddressInstance( self._version, payload, account_sid=self._solution['account_sid'], ip_access_control_list_sid=self._solution['ip_access_control_list_sid'], sid=self._solution['sid'], )
0.00571
def reqPnL(self, account: str, modelCode: str = '') -> PnL: """ Start a subscription for profit and loss events. Returns a :class:`.PnL` object that is kept live updated. The result can also be queried from :meth:`.pnl`. https://interactivebrokers.github.io/tws-api/pnl.html Args: account: Subscribe to this account. modelCode: If specified, filter for this account model. """ key = (account, modelCode) assert key not in self.wrapper.pnlKey2ReqId reqId = self.client.getReqId() self.wrapper.pnlKey2ReqId[key] = reqId pnl = PnL(account, modelCode) self.wrapper.pnls[reqId] = pnl self.client.reqPnL(reqId, account, modelCode) return pnl
0.002558
def get_nts_strpval(self, fmt="{:8.2e}"): """Given GOEA namedtuples, return nts w/P-value in string format.""" objntmgr = MgrNts(self.goea_results) dcts = objntmgr.init_dicts() # pylint: disable=line-too-long pval_flds = set(k for k in self._get_fieldnames(next(iter(self.goea_results))) if k[:2] == 'p_') for fld_float in pval_flds: fld_str = "s_" + fld_float[2:] objntmgr.add_f2str(dcts, fld_float, fld_str, fmt) return objntmgr.mknts(dcts)
0.005747
def process_read_exception(exc, path, ignore=None): ''' Common code for raising exceptions when reading a file fails The ignore argument can be an iterable of integer error codes (or a single integer error code) that should be ignored. ''' if ignore is not None: if isinstance(ignore, six.integer_types): ignore = (ignore,) else: ignore = () if exc.errno in ignore: return if exc.errno == errno.ENOENT: raise CommandExecutionError('{0} does not exist'.format(path)) elif exc.errno == errno.EACCES: raise CommandExecutionError( 'Permission denied reading from {0}'.format(path) ) else: raise CommandExecutionError( 'Error {0} encountered reading from {1}: {2}'.format( exc.errno, path, exc.strerror ) )
0.001142
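An assumed calling pattern for process_read_exception: tolerate a missing file, turn anything else into a CommandExecutionError (the path is a placeholder):

import errno

path = '/etc/myapp/config'   # placeholder
try:
    with open(path) as fh:
        data = fh.read()
except OSError as exc:
    process_read_exception(exc, path, ignore=errno.ENOENT)
    data = None   # reached only for the ignored ENOENT case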
def get_weights(model_hparams, vocab_size, hidden_dim=None):
    """Create or get concatenated embedding or softmax variable.

    Args:
      model_hparams: HParams, model hyperparameters.
      vocab_size: int, vocabulary size.
      hidden_dim: dim of the variable. Defaults to model_hparams' hidden_size

    Returns:
      a single Tensor of shape [vocab_size, hidden_dim], concatenated from
      num_shards variable shards.
    """
    if hidden_dim is None:
        hidden_dim = model_hparams.hidden_size
    num_shards = model_hparams.symbol_modality_num_shards
    shards = []
    for i in range(num_shards):
        shard_size = (vocab_size // num_shards) + (
            1 if i < vocab_size % num_shards else 0)
        var_name = "weights_%d" % i
        shards.append(
            tf.get_variable(
                var_name, [shard_size, hidden_dim],
                initializer=tf.random_normal_initializer(0.0, hidden_dim**-0.5)))
    if num_shards == 1:
        ret = shards[0]
    else:
        ret = tf.concat(shards, 0)
    # Convert ret to tensor.
    if not tf.executing_eagerly():
        ret = common_layers.convert_gradient_to_tensor(ret)
    return ret
0.010711
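How the shard-size expression above splits a vocabulary, checked in pure Python with toy numbers: each of the first vocab_size % num_shards shards gets one extra row.

vocab_size, num_shards = 10, 3
sizes = [(vocab_size // num_shards) + (1 if i < vocab_size % num_shards else 0)
         for i in range(num_shards)]
assert sizes == [4, 3, 3]
assert sum(sizes) == vocab_size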
def threshold_monitor_hidden_threshold_monitor_security_policy_area_sec_area_value(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") threshold_monitor_hidden = ET.SubElement(config, "threshold-monitor-hidden", xmlns="urn:brocade.com:mgmt:brocade-threshold-monitor") threshold_monitor = ET.SubElement(threshold_monitor_hidden, "threshold-monitor") security = ET.SubElement(threshold_monitor, "security") policy = ET.SubElement(security, "policy") sec_policy_name_key = ET.SubElement(policy, "sec_policy_name") sec_policy_name_key.text = kwargs.pop('sec_policy_name') area = ET.SubElement(policy, "area") sec_area_value = ET.SubElement(area, "sec_area_value") sec_area_value.text = kwargs.pop('sec_area_value') callback = kwargs.pop('callback', self._callback) return callback(config)
0.005441
def _filter_variant_motif_res( motif_res, variant_start, variant_end, motif_length, seq, ): """ Remove MOODS motif hits that don't overlap the variant of interest. Parameters ---------- motif_res : list Result from MOODS search like [(21, 3.947748787969809), (-38, 3.979759977155675)]. variant_start : int Relative start position of the allele string in seq (not genomic coordinates). In other words, seq[variant_start:variant_end] should give the allele. variant_end : int Relative end position of the allele string in seq (not genomic coordinates). In other words, seq[variant_start:variant_end] should give the allele. motif_length : int Length of the motif. seq : str Sequence searched for motifs for this allele. Returns ------- motif_res : list List in the same format as motif_res input but with entries that don't overlap the variant removed. """ import MOODS remove = [] for r in motif_res: motif_start = r[0] motif_end = r[0] + motif_length if r[0] < 0: motif_start += len(seq) motif_end += len(seq) if motif_end <= variant_start or motif_start >= variant_end: remove.append(r) motif_res = list(set(motif_res) - set(remove)) return motif_res
0.007545
def _convert_eta_to_c(eta, ref_position): """ Parameters ---------- eta : 1D or 2D ndarray. The elements of the array should be this model's 'transformed' shape parameters, i.e. the natural log of (the corresponding shape parameter divided by the reference shape parameter). This array's elements will be real valued. If `eta` is 2D, then its shape should be (num_estimated_shapes, num_parameter_samples). ref_position : int. Specifies the position in the resulting array of shape == `(eta.shape[0] + 1,)` that should be equal to 1 - the sum of the other elements in the resulting array. Returns ------- c_vector : 1D or 2D ndarray based on `eta`. If `eta` is 1D then `c_vector` should have shape `(eta.shape[0] + 1, )`. If `eta` is 2D then `c_vector` should have shape `(eta.shape[0] + 1, eta.shape[1])`. The returned array will contains the 'natural' shape parameters that correspond to `eta`. """ # Exponentiate eta exp_eta = np.exp(eta) # Guard against overflow exp_eta[np.isposinf(exp_eta)] = max_comp_value # Guard against underflow exp_eta[exp_eta == 0] = min_comp_value # Calculate the denominator in a logistic transformation # Note the +1 is for the reference alternative which has been # constrained so that its corresponding eta = 0 and exp(0) = 1 denom = exp_eta.sum(axis=0) + 1 # Get a list of all the indices (or row indices) corresponding to the # alternatives whose shape parameters are being estimated. replace_list = list(range(eta.shape[0] + 1)) replace_list.remove(ref_position) # Initialize an array for the vector of shape parameters, c if len(eta.shape) > 1 and eta.shape[1] > 1: # Get an array of zeros with shape # (num_possible_alternatives, num_parameter_samples). This is used when # working with samples from a Bayesian posterior distribution c_vector = np.zeros((eta.shape[0] + 1, eta.shape[1])) # Calculate the natural shape parameters c_vector[replace_list, :] = exp_eta / denom c_vector[ref_position, :] = 1.0 / denom else: # Get an array of zeros with shape (num_possible_alternatives,) c_vector = np.zeros(eta.shape[0] + 1) # Calculate the natural shape parameters c_vector[replace_list] = exp_eta / denom c_vector[ref_position] = 1.0 / denom return c_vector
0.000395
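A quick sanity check of _convert_eta_to_c above; it assumes the module-level max_comp_value/min_comp_value constants exist, as in the source module:

import numpy as np

eta = np.array([0.25, -1.0])                # two estimated shape parameters
c = _convert_eta_to_c(eta, ref_position=0)  # reference alternative at index 0

assert c.shape == (3,)
assert np.isclose(c.sum(), 1.0)   # shares sum to one
assert np.all(c > 0)              # and are strictly positive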
def _register_endpoints(self, providers): """ Register methods to endpoints :type providers: list[str] :rtype: list[(str, ((satosa.context.Context, Any) -> satosa.response.Response, Any))] :param providers: A list of backend providers :return: A list of endpoint/method pairs """ url_map = [] for endp_category in self.endpoints: for binding, endp in self.endpoints[endp_category].items(): valid_providers = "" for provider in providers: valid_providers = "{}|^{}".format(valid_providers, provider) valid_providers = valid_providers.lstrip("|") parsed_endp = urlparse(endp) url_map.append(("(%s)/%s$" % (valid_providers, parsed_endp.path), functools.partial(self.handle_authn_request, binding_in=binding))) if self.expose_entityid_endpoint(): parsed_entity_id = urlparse(self.idp.config.entityid) url_map.append(("^{0}".format(parsed_entity_id.path[1:]), self._metadata_endpoint)) return url_map
0.005076
def pick_two_individuals_eligible_for_crossover(population): """Pick two individuals from the population which can do crossover, that is, they share a primitive. Parameters ---------- population: array of individuals Returns ---------- tuple: (individual, individual) Two individuals which are not the same, but share at least one primitive. Alternatively, if no such pair exists in the population, (None, None) is returned instead. """ primitives_by_ind = [set([node.name for node in ind if isinstance(node, gp.Primitive)]) for ind in population] pop_as_str = [str(ind) for ind in population] eligible_pairs = [(i, i+1+j) for i, ind1_prims in enumerate(primitives_by_ind) for j, ind2_prims in enumerate(primitives_by_ind[i+1:]) if not ind1_prims.isdisjoint(ind2_prims) and pop_as_str[i] != pop_as_str[i+1+j]] # Pairs are eligible in both orders, this ensures that both orders are considered eligible_pairs += [(j, i) for (i, j) in eligible_pairs] if not eligible_pairs: # If there are no eligible pairs, the caller should decide what to do return None, None pair = np.random.randint(0, len(eligible_pairs)) idx1, idx2 = eligible_pairs[pair] return population[idx1], population[idx2]
0.007763
def command(self, command, capture=True):
    """
    Execute command on *Vim*.
    .. warning:: Do not use ``redir`` command if ``capture`` is ``True``.
        It's already enabled for internal use.

    If ``capture`` argument is set ``False``,
    the command execution becomes slightly faster.

    Example:
        >>> import headlessvim
        >>> with headlessvim.open() as vim:
        ...     vim.command('echo 0')
        ...
        '0'
        >>> with headlessvim.open() as vim:
        ...     vim.command('let g:spam = "ham"', False)
        ...     vim.echo('g:spam')
        ...
        'ham'

    :param string command: a command to execute
    :param boolean capture: ``True`` if command's output needs to be
        captured, else ``False``
    :return: the output of the given command
    :rtype: string
    """
    if capture:
        self.command('redir! >> {0}'.format(self._tempfile.name), False)
    self.set_mode('command')
    self.send_keys('{0}\n'.format(command))
    if capture:
        self.command('redir END', False)
        return self._tempfile.read().strip('\n')
0.001668
def recoverPubkeyParameter(message, digest, signature, pubkey): """ Use to derive a number that allows to easily recover the public key from the signature """ if not isinstance(message, bytes): message = bytes(message, "utf-8") # pragma: no cover for i in range(0, 4): if SECP256K1_MODULE == "secp256k1": # pragma: no cover sig = pubkey.ecdsa_recoverable_deserialize(signature, i) p = secp256k1.PublicKey(pubkey.ecdsa_recover(message, sig)) if p.serialize() == pubkey.serialize(): return i elif SECP256K1_MODULE == "cryptography" and not isinstance(pubkey, PublicKey): p = recover_public_key(digest, signature, i, message) p_comp = hexlify(compressedPubkey(p)) pubkey_comp = hexlify(compressedPubkey(pubkey)) if p_comp == pubkey_comp: return i else: # pragma: no cover p = recover_public_key(digest, signature, i) p_comp = hexlify(compressedPubkey(p)) p_string = hexlify(p.to_string()) if isinstance(pubkey, PublicKey): # pragma: no cover pubkey_string = bytes(repr(pubkey), "ascii") else: # pragma: no cover pubkey_string = hexlify(pubkey.to_string()) if p_string == pubkey_string or p_comp == pubkey_string: # pragma: no cover return i
0.002091
def info(self, name, args): """Interfaces with the info dumpers (DBGFInfo). This feature is not implemented in the 4.0.0 release but it may show up in a dot release. in name of type str The name of the info item. in args of type str Arguments to the info dumper. return info of type str The into string. """ if not isinstance(name, basestring): raise TypeError("name can only be an instance of type basestring") if not isinstance(args, basestring): raise TypeError("args can only be an instance of type basestring") info = self._call("info", in_p=[name, args]) return info
0.005305
def find_indices(lst, element): """ Returns the indices for all occurrences of 'element' in 'lst'. Args: lst (list): List to search. element: Element to find. Returns: list: List of indices or values """ result = [] offset = -1 while True: try: offset = lst.index(element, offset+1) except ValueError: return result result.append(offset)
0.002273
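Two quick examples of find_indices above:

assert find_indices([1, 2, 1, 3, 1], 1) == [0, 2, 4]
assert find_indices([1, 2, 3], 9) == []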
def create_api_environment(self): """Get an instance of Api Environment services facade.""" return ApiEnvironment( self.networkapi_url, self.user, self.password, self.user_ldap)
0.008299
def froms(self): """Group metrics according to the `from` property. """ eax = {} for name, config in six.iteritems(self._metrics): from_ = self._get_property(config, 'from', default=self.stdout) eax.setdefault(from_, {})[name] = config return eax
0.006452