Columns: text (string, lengths 78 to 104k); score (float64, range 0 to 0.18)
def model_to_json(self, object, cleanup=True): """Take a model instance and return it as a json struct""" model_name = type(object).__name__ if model_name not in self.swagger_dict['definitions']: raise ValidationError("Swagger spec has no definition for model %s" % model_name) model_def = self.swagger_dict['definitions'][model_name] log.debug("Marshalling %s into json" % model_name) m = marshal_model(self.spec, model_def, object) if cleanup: self.cleanup_model(m) return m
0.005319
def body_as_json(self, encoding='UTF-8'): """ The body of the event loaded as a JSON object if the data is compatible. :param encoding: The encoding to use for decoding message data. Default is 'UTF-8' :rtype: dict """ data_str = self.body_as_str(encoding=encoding) try: return json.loads(data_str) except Exception as e: raise TypeError("Event data is not compatible with JSON type: {}".format(e))
0.008065
def mirror_file(self, path_to, path_from, from_quick_server=True): """Mirrors a file to a different location. Each time the file changes while the process is running it will be copied to 'path_to', overwriting the destination. Parameters ---------- path_to : string The mirror destination. path_from : string The mirror origin. from_quick_server : bool If set the origin path is relative to *this* script otherwise it is relative to the process. """ full_path = path_from if not from_quick_server else os.path.join( os.path.dirname(__file__), path_from) if self._mirror is None: if not self._symlink_mirror(path_to, full_path, init=True): self._poll_mirror(path_to, full_path, init=True) return impl = self._mirror["impl"] if impl == "symlink": self._symlink_mirror(path_to, full_path, init=False) elif impl == "poll": self._poll_mirror(path_to, full_path, init=False) else: raise ValueError("unknown mirror implementation: {0}".format(impl))
0.001657
def set_device_name(self, new_name): """Sets a new BLE device name for this SK8. Args: new_name (str): the new device name as an ASCII string, max 20 characters. Returns: True if the name was updated successfully, False otherwise. """ device_name = self.get_characteristic_handle_from_uuid(UUID_DEVICE_NAME) if device_name is None: logger.warn('Failed to find handle for device name') return False if len(new_name) > MAX_DEVICE_NAME_LEN: logger.error('Device name exceeds maximum length ({} > {})'.format(len(new_name), MAX_DEVICE_NAME_LEN)) return False if self.dongle._write_attribute(self.conn_handle, device_name, new_name.encode('ascii')): self.name = new_name return True return False
0.008028
def mass_fraction_within_radius(self, kwargs_lens, center_x, center_y, theta_E, numPix=100): """ computes the mean convergence of all the different lens model components within a spherical aperture :param kwargs_lens: lens model keyword argument list :param center_x: center of the aperture :param center_y: center of the aperture :param theta_E: radius of aperture :return: list of average convergences for all the model components """ x_grid, y_grid = util.make_grid(numPix=numPix, deltapix=2.*theta_E / numPix) x_grid += center_x y_grid += center_y mask = mask_util.mask_sphere(x_grid, y_grid, center_x, center_y, theta_E) kappa_list = [] for i in range(len(kwargs_lens)): kappa = self.LensModel.kappa(x_grid, y_grid, kwargs_lens, k=i) kappa_mean = np.sum(kappa * mask) / np.sum(mask) kappa_list.append(kappa_mean) return kappa_list
0.006048
def verify(xml, stream): """ Verify the signature of an XML document with the given certificate. Returns `True` if the document is signed with a valid signature. Returns `False` if the document is not signed or if the signature is invalid. :param lxml.etree._Element xml: The document to verify :param file stream: The certificate used to verify the document's signature :rtype: Boolean """ # Import xmlsec here to delay initializing the C library in # case we don't need it. import xmlsec # Find the <Signature/> node. signature_node = xmlsec.tree.find_node(xml, xmlsec.Node.SIGNATURE) if signature_node is None: # No `signature` node found; we cannot verify return False # Create a digital signature context (no key manager is needed). ctx = xmlsec.SignatureContext() # Register <Response/> and <Assertion/> ctx.register_id(xml) for assertion in xml.xpath("//*[local-name()='Assertion']"): ctx.register_id(assertion) # Load the public key. key = None for fmt in [ xmlsec.KeyFormat.PEM, xmlsec.KeyFormat.CERT_PEM]: stream.seek(0) try: key = xmlsec.Key.from_memory(stream, fmt) break except ValueError: # xmlsec now throws when it can't load the key pass # Set the key on the context. ctx.key = key # Verify the signature. try: ctx.verify(signature_node) return True except Exception: return False
0.001292
def help_content(self): """Return the content of help for this step wizard. We only need to re-implement this method in each wizard step. :returns: A message object containing help. :rtype: m.Message """ help_message = m.Message() help_message.add(m.Text(tr('No help text for this wizard step, yet.'))) return help_message
0.005115
def get_log_entries_by_ids(self, log_entry_ids): """Gets a ``LogEntryList`` corresponding to the given ``IdList``. In plenary mode, the returned list contains all of the entries specified in the ``Id`` list, in the order of the list, including duplicates, or an error results if an ``Id`` in the supplied list is not found or inaccessible. Otherwise, inaccessible logentries may be omitted from the list and may present the elements in any order including returning a unique set. arg: log_entry_ids (osid.id.IdList): the list of ``Ids`` to retrieve return: (osid.logging.LogEntryList) - the returned ``LogEntry list`` raise: NotFound - an ``Id was`` not found raise: NullArgument - ``log_entry_ids`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceLookupSession.get_resources_by_ids # NOTE: This implementation currently ignores plenary view collection = JSONClientValidated('logging', collection='LogEntry', runtime=self._runtime) object_id_list = [] for i in log_entry_ids: object_id_list.append(ObjectId(self._get_id(i, 'logging').get_identifier())) result = collection.find( dict({'_id': {'$in': object_id_list}}, **self._view_filter())) result = list(result) sorted_result = [] for object_id in object_id_list: for object_map in result: if object_map['_id'] == object_id: sorted_result.append(object_map) break return objects.LogEntryList(sorted_result, runtime=self._runtime, proxy=self._proxy)
0.001968
def list_pools(self): """Fetches a list of all floating IP pools. :returns: List of FloatingIpPool objects """ search_opts = {'router:external': True} return [FloatingIpPool(pool) for pool in self.client.list_networks(**search_opts).get('networks')]
0.006536
def node_sub(self, node_self, node_other): '''node_sub High-level api: Compute the delta of two config nodes. This method is recursive. Assume two config nodes are different. Parameters ---------- node_self : `Element` A config node in the destination config that is being processed. node_self cannot be a leaf node. node_other : `Element` A config node in the source config that is being processed. Returns ------- tuple There are three elements in the tuple: a list of Restconf DELETE Requests, a list of Restconf PUT Requests, and a list of Restconf PATCH Requests. ''' deletes = [] puts = [] patches = [] # if a leaf-list node, delete the leaf-list totally # if a list node, by default delete the list instance # if a list node and delete_whole=True, delete the list totally def generate_delete(node, instance=True): composer = RestconfComposer(self.device, node) url = 'https://{}:{}'.format(self.ip, self.port) url += composer.get_url(instance=instance) deletes.append(requests.Request('DELETE', url, headers=header_json)) # if a leaf-list node, replace the leaf-list totally # if a list node, replace the list totally def generate_put(node, instance=True): composer = RestconfComposer(self.device, node) url = 'https://{}:{}'.format(self.ip, self.port) url += composer.get_url(instance=instance) data_json = composer.get_json(instance=instance) puts.append(requests.Request('PUT', url, headers=header_json, data=data_json)) # if a leaf-list node, update the leaf-list totally # if a list node, by default update the list instance # if a list node and update_whole=True, update the list totally def generate_patch(node, instance=True): composer = RestconfComposer(self.device, node) url = 'https://{}:{}'.format(self.ip, self.port) url += composer.get_url(instance=instance) data_json = composer.get_json(instance=instance) patches.append(requests.Request('PATCH', url, headers=header_json, data=data_json)) # the sequence of list instances under node_self is different from the # one under node_other def list_seq_is_different(tag): s_list = [i for i in node_self.iterchildren(tag=tag)] o_list = [i for i in node_other.iterchildren(tag=tag)] if [self.device.get_xpath(n) for n in s_list] == \ [self.device.get_xpath(n) for n in o_list]: return False else: return True # all list instances under node_self have peers under node_other, and # the sequence of list instances under node_self that have peers under # node_other is same as the sequence of list instances under node_other def list_seq_is_inclusive(tag): s_list = [i for i in node_self.iterchildren(tag=tag)] o_list = [i for i in node_other.iterchildren(tag=tag)] s_seq = [self.device.get_xpath(n) for n in s_list] o_seq = [self.device.get_xpath(n) for n in o_list] if set(s_seq) <= set(o_seq) and \ [i for i in s_seq if i in o_seq] == o_seq: return True else: return False in_s_not_in_o, in_o_not_in_s, in_s_and_in_o = \ self._group_kids(node_self, node_other) for child_s in in_s_not_in_o: schema_node = self.device.get_schema_node(child_s) if schema_node.get('type') == 'leaf' or \ schema_node.get('type') == 'container': generate_patch(child_s) elif schema_node.get('type') == 'leaf-list' or \ schema_node.get('type') == 'list': if schema_node.get('ordered-by') == 'user': return ([], [generate_put(node_self, instance=True)], []) else: generate_put(child_s, instance=True) for child_o in in_o_not_in_s: schema_node = self.device.get_schema_node(child_o) if schema_node.get('type') == 'leaf' or \ schema_node.get('type') == 'container': generate_delete(child_o) elif 
schema_node.get('type') == 'leaf-list' or \ schema_node.get('type') == 'list': if schema_node.get('ordered-by') == 'user': if list_seq_is_inclusive(child_o.tag): generate_delete(child_o, instance=True) else: return ([], [generate_put(node_self, instance=True)], []) else: generate_delete(child_o, instance=True) for child_s, child_o in in_s_and_in_o: schema_node = self.device.get_schema_node(child_s) if schema_node.get('type') == 'leaf': if child_s.text != child_o.text: generate_patch(child_s) elif schema_node.get('type') == 'leaf-list': if schema_node.get('ordered-by') == 'user': if list_seq_is_different(child_s.tag): return ([], [generate_put(node_self, instance=True)], []) elif schema_node.get('type') == 'container': if BaseCalculator(self.device, child_s, child_o).ne: x, y, z = self.node_sub(child_s, child_o) deletes += x puts += y patches += z elif schema_node.get('type') == 'list': if schema_node.get('ordered-by') == 'user': if list_seq_is_different(child_s.tag): return ([], [generate_put(node_self, instance=True)], []) else: if BaseCalculator(self.device, child_s, child_o).ne: x, y, z = self.node_sub(child_s, child_o) deletes += x puts += y patches += z else: if BaseCalculator(self.device, child_s, child_o).ne: x, y, z = self.node_sub(child_s, child_o) deletes += x puts += y patches += z return (deletes, puts, patches)
0.000733
def map_to_precursors_on_fly(seqs, names, loci, args): """map sequences to precursors with franpr algorithm to avoid writting on disk""" precursor = precursor_sequence(loci, args.ref).upper() dat = dict() for s, n in itertools.izip(seqs, names): res = pyMatch.Match(precursor, str(s), 1, 3) if res > -1: dat[n] = [res, res + len(s)] logger.debug("mapped in %s: %s out of %s" % (loci, len(dat), len(seqs))) return dat
0.004274
def request_control(self, device_id, access_mode=True): """ Request exclusive control of device :param device_id: id of device :type device_id: int :param access_mode: True=exclusive, False=shared :type access_mode: bool :returns: true if successful :rtype: bool """ if access_mode: if not request_control(self.corsair_sdk, device_id): self._raise_corsair_error() return True else: self.reload()
0.003731
def replace_all_caps(x:Collection[str]) -> Collection[str]: "Replace tokens in ALL CAPS in `x` by their lower version and add `TK_UP` before." res = [] for t in x: if t.isupper() and len(t) > 1: res.append(TK_UP); res.append(t.lower()) else: res.append(t) return res
0.020134
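For illustration, a minimal standalone sketch of the same transformation; TK_UP is assumed here to be the literal marker string 'xxup', which may differ from the library's actual constant:

TK_UP = 'xxup'  # assumed marker token; the real constant may differ

def _replace_all_caps(tokens):
    out = []
    for t in tokens:
        if t.isupper() and len(t) > 1:
            # replace an ALL-CAPS token by the marker plus its lowercase form
            out.extend([TK_UP, t.lower()])
        else:
            out.append(t)
    return out

print(_replace_all_caps(['I', 'SHOUTED', 'loudly']))
# ['I', 'xxup', 'shouted', 'loudly']  ('I' is a single character, so it is kept)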
def Define(self, name, value = None, comment = None): """ Define a preprocessor symbol name, with the optional given value in the current config header. If value is None (default), then #define name is written. If value is not None, then #define name value is written. comment is a string which will be put as a C comment in the header, to explain the meaning of the value (appropriate C comments will be added automatically). """ lines = [] if comment: comment_str = "/* %s */" % comment lines.append(comment_str) if value is not None: define_str = "#define %s %s" % (name, value) else: define_str = "#define %s" % name lines.append(define_str) lines.append('') self.config_h_text = self.config_h_text + '\n'.join(lines)
0.010067
def _get_upload_input_manager_cls(self, transfer_future): """Retrieves a class for managing input for an upload based on file type :type transfer_future: s3transfer.futures.TransferFuture :param transfer_future: The transfer future for the request :rtype: class of UploadInputManager :returns: The appropriate class to use for managing a specific type of input for uploads. """ upload_manager_resolver_chain = [ UploadFilenameInputManager, UploadSeekableInputManager, UploadNonSeekableInputManager ] fileobj = transfer_future.meta.call_args.fileobj for upload_manager_cls in upload_manager_resolver_chain: if upload_manager_cls.is_compatible(fileobj): return upload_manager_cls raise RuntimeError( 'Input %s of type: %s is not supported.' % ( fileobj, type(fileobj)))
0.003119
def fix_variable(self, v, value): """Fix the value of a variable and remove it from the constraint. Args: v (variable): Variable in the constraint to be set to a constant value. val (int): Value assigned to the variable. Values must match the :class:`.Vartype` of the constraint. Examples: This example creates a constraint that :math:`a \\ne b` on binary variables, fixes variable a to 0, and tests two candidate solutions. >>> import dwavebinarycsp >>> const = dwavebinarycsp.Constraint.from_func(operator.ne, ... ['a', 'b'], dwavebinarycsp.BINARY) >>> const.fix_variable('a', 0) >>> const.check({'b': 1}) True >>> const.check({'b': 0}) False """ variables = self.variables try: idx = variables.index(v) except ValueError: raise ValueError("given variable {} is not part of the constraint".format(v)) if value not in self.vartype.value: raise ValueError("expected value to be in {}, received {} instead".format(self.vartype.value, value)) configurations = frozenset(config[:idx] + config[idx + 1:] # exclude the fixed var for config in self.configurations if config[idx] == value) if not configurations: raise UnsatError("fixing {} to {} makes this constraint unsatisfiable".format(v, value)) variables = variables[:idx] + variables[idx + 1:] self.configurations = configurations self.variables = variables def func(*args): return args in configurations self.func = func self.name = '{} ({} fixed to {})'.format(self.name, v, value)
0.004217
def get_switchable_as_dense(network, component, attr, snapshots=None, inds=None): """ Return a Dataframe for a time-varying component attribute with values for all non-time-varying components filled in with the default values for the attribute. Parameters ---------- network : pypsa.Network component : string Component object name, e.g. 'Generator' or 'Link' attr : string Attribute name snapshots : pandas.Index Restrict to these snapshots rather than network.snapshots. inds : pandas.Index Restrict to these components rather than network.components.index Returns ------- pandas.DataFrame Examples -------- >>> get_switchable_as_dense(network, 'Generator', 'p_max_pu') """ df = network.df(component) pnl = network.pnl(component) index = df.index varying_i = pnl[attr].columns fixed_i = df.index.difference(varying_i) if inds is not None: index = index.intersection(inds) varying_i = varying_i.intersection(inds) fixed_i = fixed_i.intersection(inds) if snapshots is None: snapshots = network.snapshots return (pd.concat([ pd.DataFrame(np.repeat([df.loc[fixed_i, attr].values], len(snapshots), axis=0), index=snapshots, columns=fixed_i), pnl[attr].loc[snapshots, varying_i] ], axis=1, sort=False).reindex(columns=index))
0.002789
def sql( state, host, sql, database=None, # Details for speaking to MySQL via `mysql` CLI mysql_user=None, mysql_password=None, mysql_host=None, mysql_port=None, ): ''' Execute arbitrary SQL against MySQL. + sql: SQL command(s) to execute + database: optional database to open the connection with + mysql_*: global module arguments, see above ''' yield make_execute_mysql_command( sql, database=database, user=mysql_user, password=mysql_password, host=mysql_host, port=mysql_port, )
0.001712
def load_with_cache(file_, recache=False, sampling=1, columns=None, temp_dir='.', data_type='int16'): """@brief This function loads a file from the current directory and saves a cached copy for later executions. It is also possible to force a re-cache, subsample the signal, or read only a few columns, to accelerate the opening process. @param file String: the name of the file to open. @param recache Boolean: whether to force re-caching (default = False). @param sampling Integer: the sampling step. If 1, the signal isn't sampled (default = 1). @param columns Array-Like: the columns to read from the file. If None, all columns are considered (default = None). @return data Array-Like: the data from the file. TODO: Should save cache in a different directory TODO: Create test function and check size of generated files TODO: receive a file handle """ cfile = '%s.npy' % file_ if (not path.exists(cfile)) or recache: if columns is None: data = np.loadtxt(file_)[::sampling, :] else: data = np.loadtxt(file_)[::sampling, columns] np.save(cfile, data.astype(data_type)) else: data = np.load(cfile) return data
0.003785
def get_schema_input_version(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_schema = ET.Element("get_schema") config = get_schema input = ET.SubElement(get_schema, "input") version = ET.SubElement(input, "version") version.text = kwargs.pop('version') callback = kwargs.pop('callback', self._callback) return callback(config)
0.004577
def save(self, vs, filetype): 'Copy rows to the system clipboard.' # use NTF to generate filename and delete file on context exit with tempfile.NamedTemporaryFile(suffix='.'+filetype) as temp: saveSheets(temp.name, vs) sync(1) p = subprocess.Popen( self.command, stdin=open(temp.name, 'r', encoding=options.encoding), stdout=subprocess.DEVNULL, close_fds=True) p.communicate()
0.003899
def render(self, rows): """ Join the HTML rows. """ if not rows: return '' li_tags = mark_safe(u"\n".join(format_html(u'<li>{0}</li>', force_text(row)) for row in rows)) if self.title: return format_html(u'<div class="toolbar-title">{0}</div>\n<ul>\n{1}\n</ul>', self.title, li_tags) else: return format_html(u'<ul>\n{0}\n</ul>', li_tags)
0.009281
def project(self, projection): ''' Return coordinates transformed to a given projection Projection should be a basemap or pyproj projection object or similar ''' x, y = projection(self.lon.decimal_degree, self.lat.decimal_degree) return (x, y)
0.006873
def _apply_gradients(self, grads, x, optim_state): """Refer to parent class documentation.""" new_x = [None] * len(x) new_optim_state = { "t": optim_state["t"] + 1., "m": [None] * len(x), "u": [None] * len(x) } t = new_optim_state["t"] for i in xrange(len(x)): g = grads[i] m_old = optim_state["m"][i] u_old = optim_state["u"][i] new_optim_state["m"][i] = ( self._beta1 * m_old + (1. - self._beta1) * g) new_optim_state["u"][i] = ( self._beta2 * u_old + (1. - self._beta2) * g * g) m_hat = new_optim_state["m"][i] / (1. - tf.pow(self._beta1, t)) u_hat = new_optim_state["u"][i] / (1. - tf.pow(self._beta2, t)) new_x[i] = ( x[i] - self._lr * m_hat / (tf.sqrt(u_hat) + self._epsilon)) return new_x, new_optim_state
0.010689
def _initialize_system_sync(self): """Initialize the device adapter by removing all active connections and resetting scan and advertising to have a clean starting state.""" connected_devices = self.bable.list_connected_devices() for device in connected_devices: self.disconnect_sync(device.connection_handle) self.stop_scan() self.set_advertising(False) # Register the GATT table to send the right services and characteristics when probed (like an IOTile device) self.register_gatt_table()
0.007042
def setup_user_mapping(pid, uid=os.getuid(), gid=os.getgid()): """Write uid_map and gid_map in /proc to create a user mapping that maps our user from outside the container to the same user inside the container (and no other users are mapped). @see: http://man7.org/linux/man-pages/man7/user_namespaces.7.html @param pid: The PID of the process in the container. """ proc_child = os.path.join("/proc", str(pid)) try: uid_map = "{0} {1} 1".format(uid, os.getuid()) # map uid internally to our uid externally util.write_file(uid_map, proc_child, "uid_map") except IOError as e: logging.warning("Creating UID mapping into container failed: %s", e) try: util.write_file("deny", proc_child, "setgroups") except IOError as e: # Not all systems have this file (depends on the kernel version), # but if it does not exist, we do not need to write to it. if e.errno != errno.ENOENT: logging.warning("Could not write to setgroups file in /proc: %s", e) try: gid_map = "{0} {1} 1".format(gid, os.getgid()) # map gid internally to our gid externally util.write_file(gid_map, proc_child, "gid_map") except IOError as e: logging.warning("Creating GID mapping into container failed: %s", e)
0.005307
def create(self, name, *args, **kwargs): """ Create an instance of this resource type. """ resource_name = self._resource_name(name) log.info( "Creating {} '{}'...".format(self._model_name, resource_name)) resource = self.collection.create(*args, name=resource_name, **kwargs) self._ids.add(resource.id) return resource
0.005063
def update(self, data, default=False): """Update this :attr:`Config` with ``data``. :param data: must be a ``Mapping``-like object exposing the ``items`` method for iterating through key-value pairs. :param default: if ``True`` the updated :attr:`settings` will also set their :attr:`~Setting.default` attribute with the updating value (provided it is a valid one). """ for name, value in data.items(): if value is not None: self.set(name, value, default)
0.003578
def download_image(self, image_type, image): """ Read file of a project and download it :param image_type: Image type :param image: The path of the image :returns: A file stream """ url = self._getUrl("/{}/images/{}".format(image_type, image)) response = yield from self._session().request("GET", url, auth=self._auth) if response.status == 404: raise aiohttp.web.HTTPNotFound(text="{} not found on compute".format(image)) return response
0.007519
def get_jpp_env(jpp_dir): """Return the environment dict of a loaded Jpp env. The returned env can be passed to `subprocess.Popen("J...", env=env)` to execute Jpp commands. """ env = { v[0]: ''.join(v[1:]) for v in [ l.split('=') for l in os.popen( "source {0}/setenv.sh {0} && env".format(jpp_dir) ).read().split('\n') if '=' in l ] } return env
0.004545
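As an illustration of the parsing step only (the shell invocation is replaced here by a hard-coded, hypothetical env dump), the dict comprehension above reduces to the following; note that joining the tail with an empty string drops any '=' characters embedded in a value:

# Hypothetical captured output of `env`; in the real function this comes from
# sourcing setenv.sh in a subshell.
env_output = "JPP_DIR=/opt/jpp\nPATH=/opt/jpp/bin:/usr/bin\n\n"
env = {
    parts[0]: ''.join(parts[1:])
    for parts in (line.split('=') for line in env_output.split('\n'))
    if len(parts) > 1  # skip lines without '='
}
print(env['JPP_DIR'])  # /opt/jpp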
def get_piles(allgenes): """ Before running uniq, we need to compute all the piles. The piles are a set of redundant features we want to get rid of. Input are a list of GffLines features. Output are list of list of features distinct "piles". """ from jcvi.utils.range import Range, range_piles ranges = [Range(a.seqid, a.start, a.end, 0, i) \ for i, a in enumerate(allgenes)] for pile in range_piles(ranges): yield [allgenes[x] for x in pile]
0.005952
def spawn(self, owner, *args, **kwargs): """Spawns a new subordinate actor of `owner` and stores it in this container. jobs = Container() ... jobs.spawn(self, Job) jobs.spawn(self, Job, some_param=123) jobs = Container(Job) ... jobs.spawn(self) jobs.spawn(self, some_param=123) jobs = Container(Job.using('abc', some_kwarg=321)) ... jobs.spawn(self, extra_kwarg=123) jobs.spawn(self, some_kwarg=123, extra_kwarg=123) jobs.spawn(self, 'xyz', some_kwarg=345, extra_kwarg=567) """ return (self._spawn(owner, self.factory, *args, **kwargs) if self.factory else self._spawn(owner, *args, **kwargs))
0.003963
def ring_is_clockwise(ring): """ Determine if polygon ring coordinates are clockwise. Clockwise signifies outer ring, counter-clockwise an inner ring or hole. This logic was found at http://stackoverflow.com/questions/1165647/how-to-determine-if-a-list-of-polygon-points-are-in-clockwise-order This code was taken from http://esri.github.com/geojson-utils/src/jsonConverters.js by James Cardona (MIT license) """ total = 0 for (pt1, pt2) in pairwise(ring): total += (pt2[0] - pt1[0]) * (pt2[1] + pt1[1]) return total >= 0
0.005348
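A self-contained restatement of the orientation test, assuming the ring repeats its first coordinate at the end (the usual GeoJSON convention):

def _is_clockwise(ring):
    # shoelace-style sum: non-negative means the ring is wound clockwise
    return sum((x2 - x1) * (y2 + y1)
               for (x1, y1), (x2, y2) in zip(ring, ring[1:])) >= 0

clockwise_square = [(0, 0), (0, 1), (1, 1), (1, 0), (0, 0)]
counter_clockwise_square = [(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)]
print(_is_clockwise(clockwise_square))          # True  -> outer ring
print(_is_clockwise(counter_clockwise_square))  # False -> inner ring / hole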
def isAudio(self): """ Is this stream labelled as an audio stream? """ val=False if self.__dict__['codec_type']: if str(self.__dict__['codec_type']) == 'audio': val=True return val
0.015625
def _init_count_terms(self, annots): ''' Fills in the counts and overall aspect counts. ''' gonotindag = set() gocnts = self.gocnts go2obj = self.go2obj # Fill gocnts with GO IDs in annotations and their corresponding counts for terms in annots.values(): # key is 'gene' # Make a union of all the terms for a gene, if term parents are # propagated but they won't get double-counted for the gene allterms = set() for go_id in terms: goobj = go2obj.get(go_id, None) if goobj is not None: allterms.add(go_id) allterms |= goobj.get_all_parents() else: gonotindag.add(go_id) for parent in allterms: gocnts[parent] += 1 if gonotindag: print("{N} Assc. GO IDs not found in the GODag\n".format(N=len(gonotindag)))
0.004094
def offload_service_containers(self, service): """ :param service: :return: """ def anonymous(anonymous_service): if not isinstance(anonymous_service, Service): raise TypeError("service must be an instance of Service.") if anonymous_service.containers: logger.info("Deleting service: {0} containers.".format(anonymous_service.name)) for container_name in list(anonymous_service.containers.keys()): del anonymous_service.containers[container_name] self._service_map(service, anonymous, descending=True)
0.006221
def get_osds(service, device_class=None): """Return a list of all Ceph Object Storage Daemons currently in the cluster (optionally filtered by storage device class). :param device_class: Class of storage device for OSD's :type device_class: str """ luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0 if luminous_or_later and device_class: out = check_output(['ceph', '--id', service, 'osd', 'crush', 'class', 'ls-osd', device_class, '--format=json']) else: out = check_output(['ceph', '--id', service, 'osd', 'ls', '--format=json']) if six.PY3: out = out.decode('UTF-8') return json.loads(out)
0.001235
def expand_dims(self, dim=None, axis=None, **dim_kwargs): """Return a new object with an additional axis (or axes) inserted at the corresponding position in the array shape. If dim is already a scalar coordinate, it will be promoted to a 1D coordinate consisting of a single value. Parameters ---------- dim : str, sequence of str, dict, or None Dimensions to include on the new variable. If provided as str or sequence of str, then dimensions are inserted with length 1. If provided as a dict, then the keys are the new dimensions and the values are either integers (giving the length of the new dimensions) or sequence/ndarray (giving the coordinates of the new dimensions). **WARNING** for python 3.5, if ``dim`` is dict-like, then it must be an ``OrderedDict``. This is to ensure that the order in which the dims are given is maintained. axis : integer, list (or tuple) of integers, or None Axis position(s) where new axis is to be inserted (position(s) on the result array). If a list (or tuple) of integers is passed, multiple axes are inserted. In this case, dim arguments should be same length list. If axis=None is passed, all the axes will be inserted to the start of the result array. **dim_kwargs : int or sequence/ndarray The keywords are arbitrary dimensions being inserted and the values are either the lengths of the new dims (if int is given), or their coordinates. Note, this is an alternative to passing a dict to the dim kwarg and will only be used if dim is None. **WARNING** for python 3.5 ``dim_kwargs`` is not available. Returns ------- expanded : same type as caller This object, but with an additional dimension(s). """ if isinstance(dim, int): raise TypeError('dim should be str or sequence of strs or dict') elif isinstance(dim, str): dim = OrderedDict(((dim, 1),)) elif isinstance(dim, (list, tuple)): if len(dim) != len(set(dim)): raise ValueError('dims should not contain duplicate values.') dim = OrderedDict(((d, 1) for d in dim)) # TODO: get rid of the below code block when python 3.5 is no longer # supported. python36_plus = sys.version_info[0] == 3 and sys.version_info[1] > 5 not_ordereddict = dim is not None and not isinstance(dim, OrderedDict) if not python36_plus and not_ordereddict: raise TypeError("dim must be an OrderedDict for python <3.6") elif not python36_plus and dim_kwargs: raise ValueError("dim_kwargs isn't available for python <3.6") dim_kwargs = OrderedDict(dim_kwargs) dim = either_dict_or_kwargs(dim, dim_kwargs, 'expand_dims') ds = self._to_temp_dataset().expand_dims(dim, axis) return self._from_temp_dataset(ds)
0.000644
def from_json_dict(cls, json_dict # type: Dict[str, Any] ): # type: (...) -> StringSpec """ Make a StringSpec object from a dictionary containing its properties. :param dict json_dict: This dictionary must contain an `'encoding'` key associated with a Python-conformant encoding. It must also contain a `'hashing'` key, whose contents are passed to :class:`FieldHashingProperties`. Permitted keys also include `'pattern'`, `'case'`, `'minLength'`, and `'maxLength'`. :raises InvalidSchemaError: When a regular expression is provided but is not a valid pattern. """ # noinspection PyCompatibility result = cast(StringSpec, # Go away, Mypy. super().from_json_dict(json_dict)) format_ = json_dict['format'] if 'encoding' in format_ and result.hashing_properties: result.hashing_properties.encoding = format_['encoding'] if 'pattern' in format_: pattern = format_['pattern'] try: result.regex = re_compile_full(pattern) except (SyntaxError, re.error) as e: msg = "Invalid regular expression '{}.'".format(pattern) e_new = InvalidSchemaError(msg) raise_from(e_new, e) result.regex_based = True else: result.case = format_.get('case', StringSpec._DEFAULT_CASE) result.min_length = format_.get('minLength') result.max_length = format_.get('maxLength') result.regex_based = False return result
0.002867
def x11(self, data: ['SASdata', str] = None, arima: str = None, by: [str, list] = None, id: [str, list] = None, macurves: str = None, monthly: str = None, output: [str, bool, 'SASdata'] = None, pdweights: str = None, quarterly: str = None, sspan: str = None, tables: str = None, var: str = None, procopts: [str, list] = None, stmtpassthrough: [str, list] = None, **kwargs: dict) -> 'SASresults': """ Python method to call the X11 procedure Documentation link: https://go.documentation.sas.com/?cdcId=pgmsascdc&cdcVersion=9.4_3.4&docsetId=etsug&docsetTarget=etsug_x11_syntax.htm&locale=en Either the MONTHLY or QUARTERLY statement must be specified, depending on the type of time series data you have. The PDWEIGHTS and MACURVES statements can be used only with the MONTHLY statement. The TABLES statement controls the printing of tables, while the OUTPUT statement controls the creation of the OUT= data set. :param data: SASdata object or string. This parameter is required. :parm arima: The arima variable can only be a string type. :parm by: The by variable can be a string or list type. :parm id: The id variable can be a string or list type. :parm macurves: The macurves variable can only be a string type. :parm monthly: The monthly variable can only be a string type. :parm output: The output variable can be a string, boolean or SASdata type. The member name for a boolean is "_output". :parm pdweights: The pdweights variable can only be a string type. :parm quarterly: The quarterly variable can only be a string type. :parm sspan: The sspan variable can only be a string type. :parm tables: The tables variable can only be a string type. :parm var: The var variable can only be a string type. :parm procopts: The procopts variable is a generic option available for advanced use. It can only be a string type. :parm stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type. :return: SAS Result Object """
0.00985
def weighted_choice(self, probabilities, key): """Makes a weighted choice between several options. Probabilities is a list of 2-tuples, (probability, option). The probabilities don't need to add up to anything, they are automatically scaled.""" try: choice = self.values[key].lower() except KeyError: # override not set. return super(RecordingParameters, self)\ .weighted_choice(probabilities, key) # Find the matching key (case insensitive) for probability, option in probabilities: if str(option).lower() == choice: return option # for function or class-type choices, also check __name__ for probability, option in probabilities: if option.__name__.lower() == choice: return option assert False, "Invalid value provided"
0.002169
def _get_envelopes_min_maxes(envelopes): """ Returns the extrema of the inputted polygonal envelopes. Used for setting chart extent where appropriate. Note tha the ``Quadtree.bounds`` object property serves a similar role. Parameters ---------- envelopes : GeoSeries The envelopes of the given geometries, as would be returned by e.g. ``data.geometry.envelope.exterior``. Returns ------- (xmin, xmax, ymin, ymax) : tuple The data extrema. """ xmin = np.min(envelopes.map(lambda linearring: np.min([linearring.coords[1][0], linearring.coords[2][0], linearring.coords[3][0], linearring.coords[4][0]]))) xmax = np.max(envelopes.map(lambda linearring: np.max([linearring.coords[1][0], linearring.coords[2][0], linearring.coords[3][0], linearring.coords[4][0]]))) ymin = np.min(envelopes.map(lambda linearring: np.min([linearring.coords[1][1], linearring.coords[2][1], linearring.coords[3][1], linearring.coords[4][1]]))) ymax = np.max(envelopes.map(lambda linearring: np.max([linearring.coords[1][1], linearring.coords[2][1], linearring.coords[3][1], linearring.coords[4][1]]))) return xmin, xmax, ymin, ymax
0.01008
def works(self, member_id): """ This method retrieve a iterable of Works of the given member. args: Member ID (Integer) return: Works() """ context = '%s/%s' % (self.ENDPOINT, str(member_id)) return Works(context=context)
0.007168
def GetRosettaResidueMap(self, ConvertMSEToAtom = False, RemoveIncompleteFinalResidues = False, RemoveIncompleteResidues = False): '''Note: This function ignores any DNA.''' raise Exception('This code looks to be deprecated. Use construct_pdb_to_rosetta_residue_map instead.') chain = None sequences = {} residue_map = {} resid_set = set() resid_list = [] DNA_residues = set([' DA', ' DC', ' DG', ' DT']) chains = [] self.RAW_ATOM_SEQUENCE = [] essential_atoms_1 = set(['CA', 'C', 'N'])#, 'O']) essential_atoms_2 = set(['CA', 'C', 'N'])#, 'OG']) current_atoms = set() atoms_read = {} oldchainID = None removed_residue = {} for line in self.lines: if line[0:4] == 'ATOM' or (ConvertMSEToAtom and (line[0:6] == 'HETATM') and (line[17:20] == 'MSE')): chainID = line[21] if missing_chain_ids.get(self.pdb_id): chainID = missing_chain_ids[self.pdb_id] if chainID not in chains: chains.append(chainID) residue_longname = line[17:20] if residue_longname in DNA_residues: # Skip DNA continue if residue_longname == 'UNK': # Skip unknown residues continue if residue_longname not in allowed_PDB_residues_types and not(ConvertMSEToAtom and residue_longname == 'MSE'): if not self.strict: # Skip unknown residues continue else: raise NonCanonicalResidueException("Residue %s encountered: %s" % (line[17:20], line)) else: resid = line[21:27] #print(chainID, residue_longname, resid) #print(line) #print(resid_list) if resid not in resid_set: removed_residue[chainID] = False add_residue = True if current_atoms: if RemoveIncompleteResidues and essential_atoms_1.intersection(current_atoms) != essential_atoms_1 and essential_atoms_2.intersection(current_atoms) != essential_atoms_2: oldChain = resid_list[-1][0] oldResidueID = resid_list[-1][1:] print("The last residue '%s', %s, in chain %s is missing these atoms: %s." % (resid_list[-1], residue_longname, oldChain, essential_atoms_1.difference(current_atoms) or essential_atoms_2.difference(current_atoms))) resid_set.remove(resid_list[-1]) #print("".join(resid_list)) resid_list = resid_list[:-1] if oldchainID: removed_residue[oldchainID] = True #print("".join(resid_list)) #print(sequences[oldChain]) if sequences.get(oldChain): sequences[oldChain] = sequences[oldChain][:-1] if residue_map.get(oldChain): residue_map[oldChain] = residue_map[oldChain][:-1] #print(sequences[oldChain] else: assert(not(resid_set)) current_atoms = set() atoms_read[chainID] = set() atoms_read[chainID].add(line[12:15].strip()) resid_set.add(resid) resid_list.append(resid) chainID = line[21] sequences[chainID] = sequences.get(chainID, []) if residue_longname in non_canonical_amino_acids: sequences[chainID].append(non_canonical_amino_acids[residue_longname]) else: sequences[chainID].append(residue_type_3to1_map[residue_longname]) residue_map[chainID] = residue_map.get(chainID, []) if residue_longname in non_canonical_amino_acids: residue_map[chainID].append((resid, non_canonical_amino_acids[residue_longname])) else: residue_map[chainID].append((resid, residue_type_3to1_map[residue_longname])) oldchainID = chainID else: #atoms_read[chainID] = atoms_read.get(chainID, set()) atoms_read[chainID].add(line[12:15].strip()) current_atoms.add(line[12:15].strip()) if RemoveIncompleteFinalResidues: # These are (probably) necessary for Rosetta to keep the residue. Rosetta does throw away residues where only the N atom is present if that residue is at the end of a chain. 
for chainID, sequence_list in sequences.iteritems(): if not(removed_residue[chainID]): if essential_atoms_1.intersection(atoms_read[chainID]) != essential_atoms_1 and essential_atoms_2.intersection(atoms_read[chainID]) != essential_atoms_2: print("The last residue %s of chain %s is missing these atoms: %s." % (sequence_list[-1], chainID, essential_atoms_1.difference(atoms_read[chainID]) or essential_atoms_2.difference(atoms_read[chainID]))) oldResidueID = sequence_list[-1][1:] residue_map[chainID] = residue_map[chainID][0:-1] sequences[chainID] = sequence_list[0:-1] for chainID, sequence_list in sequences.iteritems(): sequences[chainID] = "".join(sequence_list) assert(sequences[chainID] == "".join([res_details[1] for res_details in residue_map[chainID]])) for chainID in chains: for a_acid in sequences.get(chainID, ""): self.RAW_ATOM_SEQUENCE.append((chainID, a_acid)) residue_objects = {} for chainID in residue_map.keys(): residue_objects[chainID] = [] for chainID, residue_list in residue_map.iteritems(): for res_pair in residue_list: resid = res_pair[0] resaa = res_pair[1] assert(resid[0] == chainID) residue_objects[chainID].append((resid[1:].strip(), resaa)) return sequences, residue_objects
0.00564
def choose_template(self, template): '''Choose a template Args: template: String, choose which template you would like. Returns: None Raises: None ''' n1 = int(template)//10 n2 = int(template)%10 self.send('^TS'+'0'+str(n1)+str(n2))
0.011765
def rlcomplete(self, text, state): """Return the state-th possible completion for 'text'. This is called successively with state == 0, 1, 2, ... until it returns None. The completion should begin with 'text'. Parameters ---------- text : string Text to perform the completion on. state : int Counter used by readline. """ if state==0: self.line_buffer = line_buffer = self.readline.get_line_buffer() cursor_pos = self.readline.get_endidx() #io.rprint("\nRLCOMPLETE: %r %r %r" % # (text, line_buffer, cursor_pos) ) # dbg # if there is only a tab on a line with only whitespace, instead of # the mostly useless 'do you want to see all million completions' # message, just do the right thing and give the user his tab! # Incidentally, this enables pasting of tabbed text from an editor # (as long as autoindent is off). # It should be noted that at least pyreadline still shows file # completions - is there a way around it? # don't apply this on 'dumb' terminals, such as emacs buffers, so # we don't interfere with their own tab-completion mechanism. if not (self.dumb_terminal or line_buffer.strip()): self.readline.insert_text('\t') sys.stdout.flush() return None # Note: debugging exceptions that may occur in completion is very # tricky, because readline unconditionally silences them. So if # during development you suspect a bug in the completion code, turn # this flag on temporarily by uncommenting the second form (don't # flip the value in the first line, as the '# dbg' marker can be # automatically detected and is used elsewhere). DEBUG = False #DEBUG = True # dbg if DEBUG: try: self.complete(text, line_buffer, cursor_pos) except: import traceback; traceback.print_exc() else: # The normal production version is here # This method computes the self.matches array self.complete(text, line_buffer, cursor_pos) try: return self.matches[state] except IndexError: return None
0.002798
def run(self, cmd, sudo=False, ignore_error=False, success_status=(0,), error_callback=None, custom_log=None, retry=0): """Run a command on the remote host. The command is run on the remote host, if there is a redirected host then the command will be run on that redirected host. See __init__. :param cmd: the command to run :type cmd: str :param sudo: True if the command should be run with sudo, this parameter disable the use of environment files. :type sudo: str :param success_status: the list of the possible success status :type success_status: list :param error_callback: if provided, the callback to call in case of a failure. it will be called with two args, the output of the command and the returned error code. :return: the tuple (output of the command, returned code) :rtype: tuple :param custom_log: a optional string to record in the log instead of the command. This is useful for example if you want to hide a password. :type custom_log: str """ self._check_started() cmd_output = io.StringIO() channel = self._get_channel() cmd = self._prepare_cmd(cmd, sudo=sudo) if not custom_log: custom_log = cmd LOG.info("%s run '%s'" % (self.description, custom_log)) channel.exec_command(cmd) while True: received = None rl, _, _ = select.select([channel], [], [], 30) if rl: received = channel.recv(1024).decode('UTF-8', 'ignore').strip() if received: LOG.debug(received) cmd_output.write(received) if channel.exit_status_ready() and not received: break cmd_output = cmd_output.getvalue() exit_status = channel.exit_status try: return self._evaluate_run_result( exit_status, cmd_output, ignore_error=ignore_error, success_status=success_status, error_callback=error_callback, custom_log=custom_log) except (paramiko.ssh_exception.SSHException, socket.error) as e: if not retry: raise e else: return self.run( cmd, sudo=sudo, ignore_error=ignore_error, success_status=success_status, error_callback=error_callback, custom_log=custom_log, retry=(retry - 1))
0.001944
def can_vote(self, request): """ Determines whether or not the current user can vote. Returns a bool as well as a string indicating the current vote status, with vote status being one of: 'closed', 'disabled', 'auth_required', 'can_vote', 'voted' """ modelbase_obj = self.modelbase_obj # can't vote if liking is closed if modelbase_obj.likes_closed: return False, 'closed' # can't vote if liking is disabled if not modelbase_obj.likes_enabled: return False, 'disabled' # anonymous users can't vote if anonymous likes are disabled if not request.user.is_authenticated() and not modelbase_obj.anonymous_likes: return False, 'auth_required' # return false if existing votes are found if Vote.objects.filter(object_id=modelbase_obj.id, token=request.secretballot_token).count() == 0: return True, 'can_vote' else: return False, 'voted'
0.007729
def dihedral(x, dih): """ Perform any of 8 permutations of 90-degrees rotations or flips for image x. """ x = np.rot90(x, dih%4) return x if dih<4 else np.fliplr(x)
0.022727
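A standalone sketch showing that dih values 0-7 enumerate the eight symmetries of a square image (pure rotations for 0-3, rotation plus a left-right flip for 4-7):

import numpy as np

def _dihedral(x, dih):
    x = np.rot90(x, dih % 4)          # rotate by (dih % 4) * 90 degrees
    return x if dih < 4 else np.fliplr(x)  # mirror for dih >= 4

img = np.arange(4).reshape(2, 2)      # four distinct pixel values
transforms = [_dihedral(img, d) for d in range(8)]
print(len({t.tobytes() for t in transforms}))  # 8 distinct results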
def reload_accelerators(self, *args): """Reassign an accel_group to guake main window and guake context menu and calls the load_accelerators method. """ if self.accel_group: self.guake.window.remove_accel_group(self.accel_group) self.accel_group = Gtk.AccelGroup() self.guake.window.add_accel_group(self.accel_group) self.load_accelerators()
0.00489
def raw_shell(s): 'Not a member of ShellQuoted so we get a useful error for raw strings' if isinstance(s, ShellQuoted): return s.do_not_use_raw_str raise RuntimeError('{0} should have been ShellQuoted'.format(s))
0.00431
def get_infobox(ptree, boxterm="box"): """ Returns parse tree template with title containing <boxterm> as dict: <box> = {<name>: <value>, ...} If simple transform fails, attempts more general assembly: <box> = {'boxes': [{<title>: <parts>}, ...], 'count': <len(boxes)>} """ boxes = [] for item in lxml.etree.fromstring(ptree).xpath("//template"): title = item.find('title').text if title and boxterm in title: box = template_to_dict(item) if box: return box alt = template_to_dict_alt(item, title) if alt: boxes.append(alt) if boxes: return {'boxes': boxes, 'count': len(boxes)}
0.001333
def GetFunctionText(heading, name): """Returns the needed text to automatically document a function in RSF/sphinx""" und = '-'*len(heading) return r''' %s %s .. autofunction:: %s ''' % (heading, und, name)
0.012931
def to_basestring(value): """Converts a string argument to a subclass of basestring. In python2, byte and unicode strings are mostly interchangeable, so functions that deal with a user-supplied argument in combination with ascii string constants can use either and should return the type the user supplied. In python3, the two types are not interchangeable, so this method is needed to convert byte strings to unicode. """ if isinstance(value, _BASESTRING_TYPES): return value if not isinstance(value, bytes_type): raise TypeError( "Expected bytes, unicode, or None; got %r" % type(value) ) return value.decode("utf-8")
0.001435
def bestseqs(self,thresh=None): """ m.bestseqs(thresh=None) -- Return all k-mers that match motif with a score >= thresh """ if not thresh: if self._bestseqs: return self._bestseqs if not thresh: thresh = 0.8 * self.maxscore self._bestseqs = bestseqs(self,thresh) return self._bestseqs
0.016173
def fetch(self): """ Fetch a StepContextInstance :returns: Fetched StepContextInstance :rtype: twilio.rest.studio.v1.flow.engagement.step.step_context.StepContextInstance """ params = values.of({}) payload = self._version.fetch( 'GET', self._uri, params=params, ) return StepContextInstance( self._version, payload, flow_sid=self._solution['flow_sid'], engagement_sid=self._solution['engagement_sid'], step_sid=self._solution['step_sid'], )
0.004847
def create_job_template(self, template): """ Creates a job template """ endpoint = self._build_url('jobTemplates') data = self._query_api('POST', endpoint, None, {'Content-Type': 'application/json'}, json.dumps(template)) return data['results']
0.004808
def status_mercurial(path, ignore_set, options): """Run hg status. Returns a 2-element tuple: * Text lines describing the status of the repository. * Empty sequence of subrepos, since hg does not support them. """ lines = run(['hg', '--config', 'extensions.color=!', 'st'], cwd=path) subrepos = () return [b' ' + l for l in lines if not l.startswith(b'?')], subrepos
0.005013
def public_dsn(dsn): '''Transform a standard Sentry DSN into a public one''' m = RE_DSN.match(dsn) if not m: log.error('Unable to parse Sentry DSN') return None public = '{scheme}://{client_id}@{domain}/{site_id}'.format( **m.groupdict()) return public
0.003623
def __create_index(self, keys, index_options): """Internal create index helper. :Parameters: - `keys`: a list of tuples [(key, type), (key, type), ...] - `index_options`: a dict of index options. """ index_doc = helpers._index_document(keys) index = {"key": index_doc} collation = validate_collation_or_none( index_options.pop('collation', None)) index.update(index_options) with self._socket_for_writes() as sock_info: if collation is not None: if sock_info.max_wire_version < 5: raise ConfigurationError( 'Must be connected to MongoDB 3.4+ to use collations.') else: index['collation'] = collation cmd = SON([('createIndexes', self.name), ('indexes', [index])]) try: self._command( sock_info, cmd, read_preference=ReadPreference.PRIMARY, codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, write_concern=self.write_concern, parse_write_concern_error=True) except OperationFailure as exc: if exc.code in common.COMMAND_NOT_FOUND_CODES: index["ns"] = self.__full_name wcn = (self.write_concern if self.write_concern.acknowledged else WriteConcern()) self.__database.system.indexes._insert( sock_info, index, True, False, False, wcn) else: raise
0.00122
def apply_settings(self): """Apply settings changed in 'Preferences' dialog box""" qapp = QApplication.instance() # Set 'gtk+' as the default theme in Gtk-based desktops # Fixes Issue 2036 if is_gtk_desktop() and ('GTK+' in QStyleFactory.keys()): try: qapp.setStyle('gtk+') except: pass else: style_name = CONF.get('appearance', 'windows_style', self.default_style) style = QStyleFactory.create(style_name) if style is not None: style.setProperty('name', style_name) qapp.setStyle(style) default = self.DOCKOPTIONS if CONF.get('main', 'vertical_tabs'): default = default|QMainWindow.VerticalTabs if CONF.get('main', 'animated_docks'): default = default|QMainWindow.AnimatedDocks self.setDockOptions(default) self.apply_panes_settings() self.apply_statusbar_settings() if CONF.get('main', 'use_custom_cursor_blinking'): qapp.setCursorFlashTime(CONF.get('main', 'custom_cursor_blinking')) else: qapp.setCursorFlashTime(self.CURSORBLINK_OSDEFAULT)
0.003858
def generate_bio_assembly(data_api, struct_inflator): """Generate the bioassembly data. :param data_api the interface to the decoded data :param struct_inflator the interface to put the data into the client object""" bioassembly_count = 0 for bioassembly in data_api.bio_assembly: bioassembly_count += 1 for transform in bioassembly["transformList"]: struct_inflator.set_bio_assembly_trans(bioassembly_count, transform["chainIndexList"], transform["matrix"])
0.003289
def language(cls): """ Return language of the comic as a human-readable language name instead of a 2-character ISO639-1 code. """ lang = 'Unknown (%s)' % cls.lang if pycountry is None: if cls.lang in languages.Languages: lang = languages.Languages[cls.lang] else: try: lang = pycountry.languages.get(alpha2 = cls.lang).name except KeyError: try: lang = pycountry.languages.get(iso639_1_code = cls.lang).name except KeyError: pass return lang
0.010853
def send_cf_response(event, context, response_status, reason=None, response_data=None, physical_resource_id=None): """Responds to Cloudformation after a create/update/delete operation.""" response_data = response_data or {} reason = reason or "See the details in CloudWatch Log Stream: " + \ context.log_stream_name physical_resource_id = physical_resource_id or context.log_stream_name response_body = json.dumps( { 'Status': response_status, 'Reason': reason, 'PhysicalResourceId': physical_resource_id, 'StackId': event['StackId'], 'RequestId': event['RequestId'], 'LogicalResourceId': event['LogicalResourceId'], 'Data': response_data } ) opener = build_opener(HTTPHandler) request = Request(event["ResponseURL"], data=response_body) request.add_header("Content-Type", "") request.add_header("Content-Length", len(response_body)) request.get_method = lambda: 'PUT' try: response = opener.open(request) print("Status code: {}".format(response.getcode())) print("Status message: {}".format(response.msg)) return True except HTTPError as exc: print("Failed executing HTTP request: {}".format(exc.code)) return False
0.000743
def list_virtual_networks(call=None, kwargs=None): ''' List virtual networks. ''' if kwargs is None: kwargs = {} if call == 'action': raise SaltCloudSystemExit( 'The avail_sizes function must be called with ' '-f or --function' ) netconn = get_conn(client_type='network') resource_groups = list_resource_groups() ret = {} for group in resource_groups: try: networks = netconn.virtual_networks.list( resource_group_name=group ) except CloudError: networks = {} for network_obj in networks: network = network_obj.as_dict() ret[network['name']] = network ret[network['name']]['subnets'] = list_subnets( kwargs={'resource_group': group, 'network': network['name']} ) return ret
0.001105
def binary_to_float(binary_list, lower_bound, upper_bound): """Return a floating point number between lower and upper bounds, from binary. Args: binary_list: list<int>; List of 0s and 1s. The number of bits in this list determine the number of possible values between lower and upper bound. Increase the size of binary_list for more precise floating points. lower_bound: Minimum value for output, inclusive. A binary list of 0s will have this value. upper_bound: Maximum value for output, inclusive. A binary list of 1s will have this value. Returns: float; A floating point number. """ # Edge case for empty binary_list if binary_list == []: # With 0 bits, only one value can be represented, # and we default to lower_bound return lower_bound # A little bit of math gets us a floating point # number between upper and lower bound # We look at the relative position of # the integer corresponding to our binary list # between the upper and lower bound, # and offset that by lower bound return (( # Range between lower and upper bound float(upper_bound - lower_bound) # Divided by the maximum possible integer / (2**len(binary_list) - 1) # Times the integer represented by the given binary * binary_to_int(binary_list)) # Plus the lower bound + lower_bound)
0.002
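A worked instance of the scaling formula, assuming binary_to_int reads the list most-significant-bit first (that helper is not shown above):

lower, upper = 0.0, 10.0
bits = [1, 0, 1]                                  # 3 bits -> 8 possible values
as_int = int(''.join(str(b) for b in bits), 2)    # 5, assuming MSB-first order
value = lower + (upper - lower) / (2 ** len(bits) - 1) * as_int
print(value)  # roughly 7.14, i.e. 5/7 of the way from 0 to 10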
def _add_additional_properties(position, properties_dict): """ Sets AdditionalProperties of the ProbModelXML. """ add_prop = etree.SubElement(position, 'AdditionalProperties') for key, value in properties_dict.items(): etree.SubElement(add_prop, 'Property', attrib={'name': key, 'value': value})
0.008646
def _has_fr_route(self): """Encapsulating the rules for whether the request was to a Flask endpoint""" # 404's, 405's, which might not have a url_rule if self._should_use_fr_error_handler(): return True # for all other errors, just check if FR dispatched the route if not request.url_rule: return False return self.owns_endpoint(request.url_rule.endpoint)
0.007026
def value(self, item): # type: (Any) -> Any """ Return value stored in weakref. :param item: Object from which get the value. :return: Value stored in the weakref, otherwise original value. :raise TreeDeletedException: when weakref is already deleted. """ if isinstance(item, weakref.ReferenceType): if item() is None: raise TreeDeletedException() return item() return item
0.006186
def delete(ctx, schema, uuid, object_filter, yes): """Delete stored objects (CAUTION!)""" database = ctx.obj['db'] if schema is None: log('No schema given. Read the help', lvl=warn) return model = database.objectmodels[schema] if uuid: count = model.count({'uuid': uuid}) obj = model.find({'uuid': uuid}) elif object_filter: count = model.count(literal_eval(object_filter)) obj = model.find(literal_eval(object_filter)) else: count = model.count() obj = model.find() if count == 0: log('No objects to delete found') return if not yes and not _ask("Are you sure you want to delete %i objects" % count, default=False, data_type="bool", show_hint=True): return for item in obj: item.delete() log('Done')
0.002283
def entries(self):
    """ reading box configuration entries for all boxes managed by Synergy Supervisor """
    list_of_rows = []
    try:
        list_of_rows = self.bc_dao.get_all()
    except LookupError as e:
        self.logger.error('MX Exception {0}'.format(e), exc_info=True)
    return list_of_rows
0.008929
def __createHTMNetwork(self, sensorParams, spEnable, spParams, tmEnable, tmParams, clEnable, clParams, anomalyParams): """ Create a CLA network and return it. description: HTMPredictionModel description dictionary (TODO: define schema) Returns: NetworkInfo instance; """ #-------------------------------------------------- # Create the network n = Network() #-------------------------------------------------- # Add the Sensor n.addRegion("sensor", "py.RecordSensor", json.dumps(dict(verbosity=sensorParams['verbosity']))) sensor = n.regions['sensor'].getSelf() enabledEncoders = copy.deepcopy(sensorParams['encoders']) for name, params in enabledEncoders.items(): if params is not None: classifierOnly = params.pop('classifierOnly', False) if classifierOnly: enabledEncoders.pop(name) # Disabled encoders are encoders that are fed to SDRClassifierRegion but not # SP or TM Regions. This is to handle the case where the predicted field # is not fed through the SP/TM. We typically just have one of these now. disabledEncoders = copy.deepcopy(sensorParams['encoders']) for name, params in disabledEncoders.items(): if params is None: disabledEncoders.pop(name) else: classifierOnly = params.pop('classifierOnly', False) if not classifierOnly: disabledEncoders.pop(name) encoder = MultiEncoder(enabledEncoders) sensor.encoder = encoder sensor.disabledEncoder = MultiEncoder(disabledEncoders) sensor.dataSource = DataBuffer() prevRegion = "sensor" prevRegionWidth = encoder.getWidth() # SP is not enabled for spatial classification network if spEnable: spParams = spParams.copy() spParams['inputWidth'] = prevRegionWidth self.__logger.debug("Adding SPRegion; spParams: %r" % spParams) n.addRegion("SP", "py.SPRegion", json.dumps(spParams)) # Link SP region n.link("sensor", "SP", "UniformLink", "") n.link("sensor", "SP", "UniformLink", "", srcOutput="resetOut", destInput="resetIn") n.link("SP", "sensor", "UniformLink", "", srcOutput="spatialTopDownOut", destInput="spatialTopDownIn") n.link("SP", "sensor", "UniformLink", "", srcOutput="temporalTopDownOut", destInput="temporalTopDownIn") prevRegion = "SP" prevRegionWidth = spParams['columnCount'] if tmEnable: tmParams = tmParams.copy() if prevRegion == 'sensor': tmParams['inputWidth'] = tmParams['columnCount'] = prevRegionWidth else: assert tmParams['columnCount'] == prevRegionWidth tmParams['inputWidth'] = tmParams['columnCount'] self.__logger.debug("Adding TMRegion; tmParams: %r" % tmParams) n.addRegion("TM", "py.TMRegion", json.dumps(tmParams)) # Link TM region n.link(prevRegion, "TM", "UniformLink", "") if prevRegion != "sensor": n.link("TM", prevRegion, "UniformLink", "", srcOutput="topDownOut", destInput="topDownIn") else: n.link("TM", prevRegion, "UniformLink", "", srcOutput="topDownOut", destInput="temporalTopDownIn") n.link("sensor", "TM", "UniformLink", "", srcOutput="resetOut", destInput="resetIn") prevRegion = "TM" prevRegionWidth = tmParams['inputWidth'] if clEnable and clParams is not None: clParams = clParams.copy() clRegionName = clParams.pop('regionName') self.__logger.debug("Adding %s; clParams: %r" % (clRegionName, clParams)) n.addRegion("Classifier", "py.%s" % str(clRegionName), json.dumps(clParams)) # SDR Classifier-specific links if str(clRegionName) == "SDRClassifierRegion": n.link("sensor", "Classifier", "UniformLink", "", srcOutput="actValueOut", destInput="actValueIn") n.link("sensor", "Classifier", "UniformLink", "", srcOutput="bucketIdxOut", destInput="bucketIdxIn") # This applies to all (SDR and KNN) classifiers n.link("sensor", "Classifier", 
"UniformLink", "", srcOutput="categoryOut", destInput="categoryIn") n.link(prevRegion, "Classifier", "UniformLink", "") if self.getInferenceType() == InferenceType.TemporalAnomaly: anomalyClParams = dict( trainRecords=anomalyParams.get('autoDetectWaitRecords', None), cacheSize=anomalyParams.get('anomalyCacheRecords', None) ) self._addAnomalyClassifierRegion(n, anomalyClParams, spEnable, tmEnable) #-------------------------------------------------- # NuPIC doesn't initialize the network until you try to run it # but users may want to access components in a setup callback n.initialize() return NetworkInfo(net=n, statsCollectors=[])
0.011405
def __get_aws_metric(table_name, lookback_window_start, lookback_period, metric_name):
    """ Returns a metric list from the AWS CloudWatch service, may return
    None if no metric exists

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type lookback_window_start: int
    :param lookback_window_start: How many minutes to look at
    :type lookback_period: int
    :param lookback_period: Length of the lookback period in minutes
    :type metric_name: str
    :param metric_name: Name of the metric to retrieve from CloudWatch
    :returns: list -- A list of time series data for the given metric, may
        be None if there was no data
    """
    try:
        now = datetime.utcnow()
        start_time = now - timedelta(minutes=lookback_window_start)
        end_time = now - timedelta(
            minutes=lookback_window_start - lookback_period)
        return cloudwatch_connection.get_metric_statistics(
            period=lookback_period * 60,
            start_time=start_time,
            end_time=end_time,
            metric_name=metric_name,
            namespace='AWS/DynamoDB',
            statistics=['Sum'],
            dimensions={'TableName': table_name},
            unit='Count')
    except BotoServerError as error:
        logger.error(
            'Unknown boto error. Status: "{0}". '
            'Reason: "{1}". Message: {2}'.format(
                error.status, error.reason, error.message))
        raise
0.000657
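The lookback window arithmetic above is easy to misread; this small sketch (with made-up numbers) shows how the start and end of the CloudWatch query window are positioned.

from datetime import datetime, timedelta

lookback_window_start = 15   # the window begins 15 minutes ago
lookback_period = 5          # and is 5 minutes long

now = datetime.utcnow()
start_time = now - timedelta(minutes=lookback_window_start)
end_time = now - timedelta(minutes=lookback_window_start - lookback_period)
# i.e. a window covering 15 to 10 minutes ago, queried with period = 5 * 60 seconds
print(start_time, end_time)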
def _update_project(self, project):
    '''update one project'''
    if project['name'] not in self.projects:
        self.projects[project['name']] = Project(self, project)
    else:
        self.projects[project['name']].update(project)

    project = self.projects[project['name']]

    if project._send_on_get_info:
        # update project runtime info from processor by sending a _on_get_info
        # request, result is in status_page.track.save
        project._send_on_get_info = False
        self.on_select_task({
            'taskid': '_on_get_info',
            'project': project.name,
            'url': 'data:,_on_get_info',
            'status': self.taskdb.SUCCESS,
            'fetch': {
                'save': self.get_info_attributes,
            },
            'process': {
                'callback': '_on_get_info',
            },
        })

    # load task queue when project is running and delete task_queue when project is stopped
    if project.active:
        if not project.task_loaded:
            self._load_tasks(project)
            project.task_loaded = True
    else:
        if project.task_loaded:
            project.task_queue = TaskQueue()
            project.task_loaded = False

    if project not in self._cnt['all']:
        self._update_project_cnt(project.name)
0.002759
def get_commands(self):
    """
    Returns commands available to execute

    :return: list of (name, doc) tuples
    """
    commands = []
    for name, value in inspect.getmembers(self):
        if not inspect.isgeneratorfunction(value):
            continue
        if name.startswith('_') or name == 'run':
            continue
        doc = inspect.getdoc(value)
        commands.append((name, doc))
    return commands
0.004202
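A self-contained sketch of the same discovery logic: only public generator methods other than run are reported as commands. The class and method names here are made up for illustration.

import inspect

class Demo:
    def run(self):           # excluded: the name 'run' is reserved
        yield
    def _hidden(self):       # excluded: leading underscore
        yield
    def helper(self):        # excluded: not a generator function
        return "not a command"
    def status(self):
        """Show current status."""
        yield "ok"

commands = [(name, inspect.getdoc(value))
            for name, value in inspect.getmembers(Demo(), inspect.isgeneratorfunction)
            if not name.startswith('_') and name != 'run']
print(commands)  # [('status', 'Show current status.')]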
def claimInterface(self, interface):
    """
    Claim (= get exclusive access to) given interface number. Required to
    receive/send data.

    Can be used as a context manager:
        with handle.claimInterface(0):
            # do stuff
        # handle.releaseInterface(0) gets automatically called
    """
    mayRaiseUSBError(
        libusb1.libusb_claim_interface(self.__handle, interface),
    )
    return _ReleaseInterface(self, interface)
0.003992
def grid_destroy_from_name(job_name):
    """Destroy all the jobs with a given name.

    Args:
        job_name (str): the job name
    """
    jobs = grid_reload_from_name(job_name)
    for job in jobs:
        job.delete()
        logger.info("Killing the job (%s, %s)" % (job.site, job.uid))
0.003401
def _get_videoname(cls, videofile):
    """parse the `videofile` and return its base name without the extension
    """
    name = os.path.basename(videofile)
    name = os.path.splitext(name)[0]
    return name
0.009569
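For example (the path is made up):

import os

videofile = '/movies/The.Matrix.1999.mkv'
print(os.path.splitext(os.path.basename(videofile))[0])  # The.Matrix.1999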
def release(self):
    """ Release the lock.

    Removes the PID file to release the lock, or raises an error if the
    current process does not hold the lock.
    """
    if not self.is_locked():
        raise NotLocked("%s is not locked" % self.path)
    if not self.i_am_locking():
        raise NotMyLock("%s is locked, but not by me" % self.path)
    remove_existing_pidfile(self.path)
0.004535
def shl(computation: BaseComputation) -> None:
    """
    Bitwise left shift
    """
    shift_length, value = computation.stack_pop(num_items=2, type_hint=constants.UINT256)
    if shift_length >= 256:
        result = 0
    else:
        result = (value << shift_length) & constants.UINT_256_MAX

    computation.stack_push(result)
0.00597
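A pure-Python view of the SHL semantics above; the EVM stack and the constants module are replaced by plain values here.

UINT_256_MAX = 2**256 - 1

def shl_result(shift_length, value):
    # Shifts of 256 bits or more always yield 0; otherwise the result wraps to 256 bits.
    return 0 if shift_length >= 256 else (value << shift_length) & UINT_256_MAX

print(hex(shl_result(4, 0xff)))   # 0xff0
print(shl_result(256, 1))         # 0 -- the shift width saturates
print(hex(shl_result(255, 3)))    # 2**255 -- the higher bit of 3 falls outside 256 bits and is masked away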
def is_valid(self):
    """Returns True if this form and all subforms (if any) are valid.

    If all standard form-validation tests pass, uses
    :class:`~eulxml.xmlmap.XmlObject` validation methods to check for
    schema-validity (if a schema is associated) and reporting errors.

    Additional notes:
     * schema validation requires that the :class:`~eulxml.xmlmap.XmlObject`
       be initialized with the cleaned form data, so if normal validation
       checks pass, the associated :class:`~eulxml.xmlmap.XmlObject` instance
       will be updated with data via :meth:`update_instance`
     * schema validation errors SHOULD NOT happen in a production system

    :rtype: boolean
    """
    valid = super(XmlObjectForm, self).is_valid() and \
        all(s.is_valid() for s in six.itervalues(self.subforms)) and \
        all(s.is_valid() for s in six.itervalues(self.formsets))
    # schema validation can only be done after regular validation passes,
    # because xmlobject must be updated with cleaned_data
    if valid and self.instance is not None:
        # update instance required to check schema-validity
        instance = self.update_instance()
        if instance.is_valid():
            return True
        else:
            # if not schema-valid, add validation errors to error dictionary
            # NOTE: not overriding _get_errors because that is used by the built-in validation
            # append to any existing non-field errors
            if NON_FIELD_ERRORS not in self._errors:
                self._errors[NON_FIELD_ERRORS] = self.error_class()
            self._errors[NON_FIELD_ERRORS].append(
                "There was an unexpected schema validation error. " +
                "This should not happen! Please report the following errors:")
            for err in instance.validation_errors():
                self._errors[NON_FIELD_ERRORS].append('VALIDATION ERROR: %s' % err.message)
            return False
    return valid
0.006689
def crossMatchTo(self, reference, radius=1*u.arcsec, visualize=False):
    '''
    Cross-match this catalog onto another reference catalog.
    If proper motions are included in the reference, then its coordinates
    will be propagated to the obstime/epoch of this current catalog.

    Parameters
    ----------
    reference : Constellation
        A reference Constellation to which we want to cross-match the
        stars in this catalog. Most likely, you'll want to use Gaia
        for this (since it has good astrometry and good proper motions).

    radius : float, with astropy units of angle
        How close do objects need to be in the cross-match for them to
        be considered a matched pair?

    Returns
    -------
    i_this : array of indices
        The elements of this catalog that are matched.

    i_ref : array of indices
        The elements of the reference catalog, corresponding to the
        matched elements of this catalog.
    '''

    # find the closest match for each star in this constellation
    i_ref, d2d_ref, d3d_ref = self.coordinates.match_to_catalog_sky(
        reference.atEpoch(self.coordinates.obstime))

    # extract only those within the specified radius
    ok = d2d_ref < radius
    self.speak('found {} matches within {}'.format(np.sum(ok), radius))

    # make a plot, if desired
    if visualize:
        self.speak('p')
        plt.hist(d2d_ref.arcsec, range=(0, 15))
        plt.axvline(radius.arcsec)
        plt.xlabel('Separation (arcsec)')
        plt.ylabel('Number of Matched Sources')

    # return the indices (of this, and of the reference) for the matches
    return ok, i_ref[ok]
0.002273
def format_choices(self):
    """Return the choices in string form."""
    ce = enumerate(self.choices)
    f = lambda i, c: '%s (%d)' % (c, i + 1)
    # apply formatter and append help token
    toks = [f(i, c) for i, c in ce] + ['Help (?)']
    return ' '.join(toks)
0.013841
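A standalone sketch of the same formatting (the original reads self.choices):

choices = ['Red', 'Green', 'Blue']
tokens = ['%s (%d)' % (c, i + 1) for i, c in enumerate(choices)] + ['Help (?)']
print(' '.join(tokens))  # Red (1) Green (2) Blue (3) Help (?)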
def expectation(self, operator: Union[PauliTerm, PauliSum]):
    """
    Compute the expectation of an operator.

    :param operator: The operator
    :return: The operator's expectation value
    """
    if not isinstance(operator, PauliSum):
        operator = PauliSum([operator])

    return sum(_term_expectation(self.wf, term, n_qubits=self.n_qubits) for term in operator)
0.007299
def create_results_dirs(base_path):
    """Create base path dir and subdirectories

    Parameters
    ----------
    base_path : str
        The base path has subdirectories for raw and processed results
    """
    if not os.path.exists(base_path):
        print("Creating directory {} for results data.".format(base_path))
        os.mkdir(base_path)
    if not os.path.exists(os.path.join(base_path, 'results')):
        os.mkdir(os.path.join(base_path, 'results'))
    if not os.path.exists(os.path.join(base_path, 'plots')):
        os.mkdir(os.path.join(base_path, 'plots'))
    if not os.path.exists(os.path.join(base_path, 'info')):
        os.mkdir(os.path.join(base_path, 'info'))
    if not os.path.exists(os.path.join(base_path, 'log')):
        os.mkdir(os.path.join(base_path, 'log'))
0.00125
def controversial(self, limit=None):
    """GETs controversial links from this subreddit.  Calls :meth:`narwal.Reddit.controversial`.

    :param limit: max number of links to return
    """
    return self._reddit.controversial(self.display_name, limit=limit)
0.014085
def put_metric_alarm(AlarmName=None, AlarmDescription=None, ActionsEnabled=None, OKActions=None, AlarmActions=None, InsufficientDataActions=None, MetricName=None, Namespace=None, Statistic=None, ExtendedStatistic=None, Dimensions=None, Period=None, Unit=None, EvaluationPeriods=None, Threshold=None, ComparisonOperator=None, TreatMissingData=None, EvaluateLowSampleCountPercentile=None): """ Creates or updates an alarm and associates it with the specified metric. Optionally, this operation can associate one or more Amazon SNS resources with the alarm. When this operation creates an alarm, the alarm state is immediately set to INSUFFICIENT_DATA . The alarm is evaluated and its state is set appropriately. Any actions associated with the state are then executed. When you update an existing alarm, its state is left unchanged, but the update completely overwrites the previous configuration of the alarm. If you are an AWS Identity and Access Management (IAM) user, you must have Amazon EC2 permissions for some operations: If you have read/write permissions for Amazon CloudWatch but not for Amazon EC2, you can still create an alarm, but the stop or terminate actions won't be performed. However, if you are later granted the required permissions, the alarm actions that you created earlier will be performed. If you are using an IAM role (for example, an Amazon EC2 instance profile), you cannot stop or terminate the instance using alarm actions. However, you can still see the alarm state and perform any other actions such as Amazon SNS notifications or Auto Scaling policies. If you are using temporary security credentials granted using the AWS Security Token Service (AWS STS), you cannot stop or terminate an Amazon EC2 instance using alarm actions. Note that you must create at least one stop, terminate, or reboot alarm using the Amazon EC2 or CloudWatch console to create the EC2ActionsAccess IAM role. After this IAM role is created, you can create stop, terminate, or reboot alarms using a command-line interface or an API. See also: AWS API Documentation :example: response = client.put_metric_alarm( AlarmName='string', AlarmDescription='string', ActionsEnabled=True|False, OKActions=[ 'string', ], AlarmActions=[ 'string', ], InsufficientDataActions=[ 'string', ], MetricName='string', Namespace='string', Statistic='SampleCount'|'Average'|'Sum'|'Minimum'|'Maximum', ExtendedStatistic='string', Dimensions=[ { 'Name': 'string', 'Value': 'string' }, ], Period=123, Unit='Seconds'|'Microseconds'|'Milliseconds'|'Bytes'|'Kilobytes'|'Megabytes'|'Gigabytes'|'Terabytes'|'Bits'|'Kilobits'|'Megabits'|'Gigabits'|'Terabits'|'Percent'|'Count'|'Bytes/Second'|'Kilobytes/Second'|'Megabytes/Second'|'Gigabytes/Second'|'Terabytes/Second'|'Bits/Second'|'Kilobits/Second'|'Megabits/Second'|'Gigabits/Second'|'Terabits/Second'|'Count/Second'|'None', EvaluationPeriods=123, Threshold=123.0, ComparisonOperator='GreaterThanOrEqualToThreshold'|'GreaterThanThreshold'|'LessThanThreshold'|'LessThanOrEqualToThreshold', TreatMissingData='string', EvaluateLowSampleCountPercentile='string' ) :type AlarmName: string :param AlarmName: [REQUIRED] The name for the alarm. This name must be unique within the AWS account. :type AlarmDescription: string :param AlarmDescription: The description for the alarm. :type ActionsEnabled: boolean :param ActionsEnabled: Indicates whether actions should be executed during any changes to the alarm state. 
:type OKActions: list :param OKActions: The actions to execute when this alarm transitions to an OK state from any other state. Each action is specified as an Amazon Resource Name (ARN). Valid Values: arn:aws:automate:region :ec2:stop | arn:aws:automate:region :ec2:terminate | arn:aws:automate:region :ec2:recover Valid Values (for use with IAM roles): arn:aws:swf:us-east-1:{customer-account }:action/actions/AWS_EC2.InstanceId.Stop/1.0 | arn:aws:swf:us-east-1:{customer-account }:action/actions/AWS_EC2.InstanceId.Terminate/1.0 | arn:aws:swf:us-east-1:{customer-account }:action/actions/AWS_EC2.InstanceId.Reboot/1.0 (string) -- :type AlarmActions: list :param AlarmActions: The actions to execute when this alarm transitions to the ALARM state from any other state. Each action is specified as an Amazon Resource Name (ARN). Valid Values: arn:aws:automate:region :ec2:stop | arn:aws:automate:region :ec2:terminate | arn:aws:automate:region :ec2:recover Valid Values (for use with IAM roles): arn:aws:swf:us-east-1:{customer-account }:action/actions/AWS_EC2.InstanceId.Stop/1.0 | arn:aws:swf:us-east-1:{customer-account }:action/actions/AWS_EC2.InstanceId.Terminate/1.0 | arn:aws:swf:us-east-1:{customer-account }:action/actions/AWS_EC2.InstanceId.Reboot/1.0 (string) -- :type InsufficientDataActions: list :param InsufficientDataActions: The actions to execute when this alarm transitions to the INSUFFICIENT_DATA state from any other state. Each action is specified as an Amazon Resource Name (ARN). Valid Values: arn:aws:automate:region :ec2:stop | arn:aws:automate:region :ec2:terminate | arn:aws:automate:region :ec2:recover Valid Values (for use with IAM roles): arn:aws:swf:us-east-1:{customer-account }:action/actions/AWS_EC2.InstanceId.Stop/1.0 | arn:aws:swf:us-east-1:{customer-account }:action/actions/AWS_EC2.InstanceId.Terminate/1.0 | arn:aws:swf:us-east-1:{customer-account }:action/actions/AWS_EC2.InstanceId.Reboot/1.0 (string) -- :type MetricName: string :param MetricName: [REQUIRED] The name for the metric associated with the alarm. :type Namespace: string :param Namespace: [REQUIRED] The namespace for the metric associated with the alarm. :type Statistic: string :param Statistic: The statistic for the metric associated with the alarm, other than percentile. For percentile statistics, use ExtendedStatistic . :type ExtendedStatistic: string :param ExtendedStatistic: The percentile statistic for the metric associated with the alarm. Specify a value between p0.0 and p100. :type Dimensions: list :param Dimensions: The dimensions for the metric associated with the alarm. (dict) --Expands the identity of a metric. Name (string) -- [REQUIRED]The name of the dimension. Value (string) -- [REQUIRED]The value representing the dimension measurement. :type Period: integer :param Period: [REQUIRED] The period, in seconds, over which the specified statistic is applied. :type Unit: string :param Unit: The unit of measure for the statistic. For example, the units for the Amazon EC2 NetworkIn metric are Bytes because NetworkIn tracks the number of bytes that an instance receives on all network interfaces. You can also specify a unit when you create a custom metric. Units help provide conceptual meaning to your data. Metric data points that specify a unit of measure, such as Percent, are aggregated separately. If you specify a unit, you must use a unit that is appropriate for the metric. Otherwise, the Amazon CloudWatch alarm can get stuck in the INSUFFICIENT DATA state. 
:type EvaluationPeriods: integer :param EvaluationPeriods: [REQUIRED] The number of periods over which data is compared to the specified threshold. :type Threshold: float :param Threshold: [REQUIRED] The value against which the specified statistic is compared. :type ComparisonOperator: string :param ComparisonOperator: [REQUIRED] The arithmetic operation to use when comparing the specified statistic and threshold. The specified statistic value is used as the first operand. :type TreatMissingData: string :param TreatMissingData: Sets how this alarm is to handle missing data points. If TreatMissingData is omitted, the default behavior of missing is used. For more information, see Configuring How CloudWatch Alarms Treats Missing Data . Valid Values: breaching | notBreaching | ignore | missing :type EvaluateLowSampleCountPercentile: string :param EvaluateLowSampleCountPercentile: Used only for alarms based on percentiles. If you specify ignore , the alarm state will not change during periods with too few data points to be statistically significant. If you specify evaluate or omit this parameter, the alarm will always be evaluated and possibly change state no matter how many data points are available. For more information, see Percentile-Based CloudWatch Alarms and Low Data Samples . Valid Values: evaluate | ignore :returns: AlarmName (string) -- [REQUIRED] The name for the alarm. This name must be unique within the AWS account. AlarmDescription (string) -- The description for the alarm. ActionsEnabled (boolean) -- Indicates whether actions should be executed during any changes to the alarm state. OKActions (list) -- The actions to execute when this alarm transitions to an OK state from any other state. Each action is specified as an Amazon Resource Name (ARN). Valid Values: arn:aws:automate:region :ec2:stop | arn:aws:automate:region :ec2:terminate | arn:aws:automate:region :ec2:recover Valid Values (for use with IAM roles): arn:aws:swf:us-east-1:{customer-account }:action/actions/AWS_EC2.InstanceId.Stop/1.0 | arn:aws:swf:us-east-1:{customer-account }:action/actions/AWS_EC2.InstanceId.Terminate/1.0 | arn:aws:swf:us-east-1:{customer-account }:action/actions/AWS_EC2.InstanceId.Reboot/1.0 (string) -- AlarmActions (list) -- The actions to execute when this alarm transitions to the ALARM state from any other state. Each action is specified as an Amazon Resource Name (ARN). Valid Values: arn:aws:automate:region :ec2:stop | arn:aws:automate:region :ec2:terminate | arn:aws:automate:region :ec2:recover Valid Values (for use with IAM roles): arn:aws:swf:us-east-1:{customer-account }:action/actions/AWS_EC2.InstanceId.Stop/1.0 | arn:aws:swf:us-east-1:{customer-account }:action/actions/AWS_EC2.InstanceId.Terminate/1.0 | arn:aws:swf:us-east-1:{customer-account }:action/actions/AWS_EC2.InstanceId.Reboot/1.0 (string) -- InsufficientDataActions (list) -- The actions to execute when this alarm transitions to the INSUFFICIENT_DATA state from any other state. Each action is specified as an Amazon Resource Name (ARN). 
Valid Values: arn:aws:automate:region :ec2:stop | arn:aws:automate:region :ec2:terminate | arn:aws:automate:region :ec2:recover Valid Values (for use with IAM roles): arn:aws:swf:us-east-1:{customer-account }:action/actions/AWS_EC2.InstanceId.Stop/1.0 | arn:aws:swf:us-east-1:{customer-account }:action/actions/AWS_EC2.InstanceId.Terminate/1.0 | arn:aws:swf:us-east-1:{customer-account }:action/actions/AWS_EC2.InstanceId.Reboot/1.0 (string) -- MetricName (string) -- [REQUIRED] The name for the metric associated with the alarm. Namespace (string) -- [REQUIRED] The namespace for the metric associated with the alarm. Statistic (string) -- The statistic for the metric associated with the alarm, other than percentile. For percentile statistics, use ExtendedStatistic . ExtendedStatistic (string) -- The percentile statistic for the metric associated with the alarm. Specify a value between p0.0 and p100. Dimensions (list) -- The dimensions for the metric associated with the alarm. (dict) --Expands the identity of a metric. Name (string) -- [REQUIRED]The name of the dimension. Value (string) -- [REQUIRED]The value representing the dimension measurement. Period (integer) -- [REQUIRED] The period, in seconds, over which the specified statistic is applied. Unit (string) -- The unit of measure for the statistic. For example, the units for the Amazon EC2 NetworkIn metric are Bytes because NetworkIn tracks the number of bytes that an instance receives on all network interfaces. You can also specify a unit when you create a custom metric. Units help provide conceptual meaning to your data. Metric data points that specify a unit of measure, such as Percent, are aggregated separately. If you specify a unit, you must use a unit that is appropriate for the metric. Otherwise, the Amazon CloudWatch alarm can get stuck in the INSUFFICIENT DATA state. EvaluationPeriods (integer) -- [REQUIRED] The number of periods over which data is compared to the specified threshold. Threshold (float) -- [REQUIRED] The value against which the specified statistic is compared. ComparisonOperator (string) -- [REQUIRED] The arithmetic operation to use when comparing the specified statistic and threshold. The specified statistic value is used as the first operand. TreatMissingData (string) -- Sets how this alarm is to handle missing data points. If TreatMissingData is omitted, the default behavior of missing is used. For more information, see Configuring How CloudWatch Alarms Treats Missing Data . Valid Values: breaching | notBreaching | ignore | missing EvaluateLowSampleCountPercentile (string) -- Used only for alarms based on percentiles. If you specify ignore , the alarm state will not change during periods with too few data points to be statistically significant. If you specify evaluate or omit this parameter, the alarm will always be evaluated and possibly change state no matter how many data points are available. For more information, see Percentile-Based CloudWatch Alarms and Low Data Samples . Valid Values: evaluate | ignore """ pass
0.006917
def send_to_default_exchange(self, sess_id, message=None):
    """
    Send messages through RabbitMQ's default exchange,
    which will be delivered through routing_key (sess_id).

    This method is only used for unauthenticated users, i.e. during the login process.

    Args:
        sess_id string: Session id
        message dict: Message object.
    """
    msg = json.dumps(message, cls=ZEngineJSONEncoder)
    log.debug("Sending following message to %s queue through default exchange:\n%s" % (
        sess_id, msg))
    self.get_channel().publish(exchange='', routing_key=sess_id, body=msg)
0.004724
def dug(obj, key, value):
    """
    Inverse of dig: recursively set a value in a dictionary, using dot notation.

    >>> test = {"a":{"b":{"c":1}}}
    >>> dug(test, "a.b.c", 10)
    >>> test
    {'a': {'b': {'c': 10}}}
    """
    array = key.split(".")
    return _dug(obj, value, *array)
0.003367
def _process_facet_terms(facet_terms):
    """ Build Elasticsearch "terms" facet definitions from the given facet terms """
    elastic_facets = {}
    for facet in facet_terms:
        facet_term = {"field": facet}
        if facet_terms[facet]:
            for facet_option in facet_terms[facet]:
                facet_term[facet_option] = facet_terms[facet][facet_option]

        elastic_facets[facet] = {
            "terms": facet_term
        }

    return elastic_facets
0.002188
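A worked example of the expansion performed above (the field names and options are made up):

def expand_facets(facet_terms):
    # Produces the same shape of output as _process_facet_terms above.
    out = {}
    for facet, options in facet_terms.items():
        term = {'field': facet}
        if options:
            term.update(options)
        out[facet] = {'terms': term}
    return out

print(expand_facets({'language': {}, 'org': {'size': 10}}))
# {'language': {'terms': {'field': 'language'}}, 'org': {'terms': {'field': 'org', 'size': 10}}}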
def get_session_list(self, account):
    """
    Get the list of sessions handled by a customer service account

    For details see
    http://mp.weixin.qq.com/wiki/2/6c20f3e323bdf5986cfcb33cbd3b829a.html

    :param account: the full customer service account
    :return: the customer service account's session list
    """
    res = self._get(
        'https://api.weixin.qq.com/customservice/kfsession/getsessionlist',
        params={'kf_account': account},
        result_processor=lambda x: x['sessionlist']
    )
    return res
0.004348
def get_smart_contract_event_by_height(self, height: int, is_full: bool = False) -> List[dict]:
    """
    This interface is used to get the corresponding smart contract event based on the height of block.

    :param height: a decimal height value.
    :param is_full:
    :return: the information of smart contract event in dictionary form.
    """
    payload = self.generate_json_rpc_payload(RpcMethod.GET_SMART_CONTRACT_EVENT, [height, 1])
    response = self.__post(self.__url, payload)
    if is_full:
        return response
    event_list = response['result']
    if event_list is None:
        event_list = list()
    return event_list
0.007123
def equals(self, junc):
    """test equality with another junction"""
    # return False as soon as either side differs, True only if both sides match
    if not self.left.equals(junc.left):
        return False
    if not self.right.equals(junc.right):
        return False
    return True
0.021739
def spec_update_loaderplugin_registry(spec, default=None):
    """
    Resolve a BasePluginLoaderRegistry instance from spec, and update
    spec[CALMJS_LOADERPLUGIN_REGISTRY] with that value before returning
    it.
    """
    registry = spec.get(CALMJS_LOADERPLUGIN_REGISTRY)
    if isinstance(registry, BaseLoaderPluginRegistry):
        logger.debug(
            "loaderplugin registry '%s' already assigned to spec",
            registry.registry_name)
        return registry
    elif not registry:
        # resolving registry
        registry = get_registry(spec.get(CALMJS_LOADERPLUGIN_REGISTRY_NAME))
        if isinstance(registry, BaseLoaderPluginRegistry):
            logger.info(
                "using loaderplugin registry '%s'", registry.registry_name)
            spec[CALMJS_LOADERPLUGIN_REGISTRY] = registry
            return registry

    # acquire the real default instance, if possible.
    if not isinstance(default, BaseLoaderPluginRegistry):
        default = get_registry(default)
        if not isinstance(default, BaseLoaderPluginRegistry):
            logger.info(
                "provided default is not a valid loaderplugin registry")
            default = None

    if default is None:
        default = BaseLoaderPluginRegistry('<default_loaderplugins>')

    # TODO determine the best way to optionally warn about this for
    # toolchains that require this.
    if registry:
        logger.info(
            "object referenced in spec is not a valid loaderplugin registry; "
            "using default loaderplugin registry '%s'", default.registry_name)
    else:
        logger.info(
            "no loaderplugin registry referenced in spec; "
            "using default loaderplugin registry '%s'", default.registry_name)

    spec[CALMJS_LOADERPLUGIN_REGISTRY] = registry = default
    return registry
0.000542
def write(self, data):
    """
    Send all of *data* to the socket, retrying partial writes until
    everything has been sent.

    Args:
        data(bytes): The data to send.

    Raises:
        EOFError: If the socket was closed.
    """
    while data:
        try:
            n = self._socket.send(data)
        except socket.error:
            n = None
        if not n:
            raise EOFError('Socket closed')
        data = data[n:]
0.004535
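The loop exists because socket.send() may accept only part of the buffer. A stub socket (made up purely for illustration) makes the retry behaviour visible:

class ShortWriteSocket:
    def __init__(self):
        self.sent = b''
    def send(self, data):
        n = min(3, len(data))      # pretend the kernel accepted only 3 bytes
        self.sent += data[:n]
        return n

sock = ShortWriteSocket()
data = b'hello world'
while data:
    n = sock.send(data)
    if not n:
        raise EOFError('Socket closed')
    data = data[n:]
print(sock.sent)  # b'hello world' -- reassembled from several partial writes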
def validate_data(file_type, bs_data):
    """
    Validates json basis set data against a schema

    Parameters
    ----------
    file_type : str
        Type of file to read. May be 'component', 'element', 'table', or 'references'
    bs_data:
        Data to be validated

    Raises
    ------
    RuntimeError
        If the file_type is not valid (and/or a schema doesn't exist)
    ValidationError
        If the given data does not pass validation
    """
    if file_type not in _validate_map:
        raise RuntimeError("{} is not a valid file_type".format(file_type))

    schema = api.get_schema(file_type)
    jsonschema.validate(bs_data, schema)
    _validate_map[file_type](bs_data)
0.002581
def join(self, other):
    """
    Returns the smallest possible range spanning both this range and other.

    Raises :exc:`ValueError` if the ranges do not belong to the same
    :class:`Buffer`.
    """
    if self.source_buffer != other.source_buffer:
        raise ValueError
    if self.expanded_from == other.expanded_from:
        expanded_from = self.expanded_from
    else:
        expanded_from = None
    return Range(self.source_buffer,
                 min(self.begin_pos, other.begin_pos),
                 max(self.end_pos, other.end_pos),
                 expanded_from=expanded_from)
0.00303
def add_tokens_for_pass(self):
    """Add tokens for a pass to result"""
    # Make sure pass not added to group again
    self.groups.empty = False

    # Remove existing newline/indentation
    while self.result[-1][0] in (INDENT, NEWLINE):
        self.result.pop()

    # Add pass and indentation
    self.add_tokens(
        [ (NAME, 'pass')
        , (NEWLINE, '\n')
        , (INDENT, self.indent_type * self.current.scol)
        ]
    )
0.016097
def to_ulcer_performance_index(prices, rf=0., nperiods=None):
    """
    Converts from prices -> `ulcer performance index
    <https://www.investopedia.com/terms/u/ulcerindex.asp>`_.

    See https://en.wikipedia.org/wiki/Ulcer_index

    Args:
        * prices (Series, DataFrame): Prices
        * rf (float, Series): `Risk-free rate of return
          <https://www.investopedia.com/terms/r/risk-freerate.asp>`_.
          Assumed to be expressed in yearly (annualized) terms or return series
        * nperiods (int): Used to deannualize rf if rf is provided (non-zero)
    """
    if type(rf) is float and rf != 0 and nperiods is None:
        raise Exception('nperiods must be set if rf != 0 and rf is not a price series')

    er = prices.to_returns().to_excess_returns(rf, nperiods=nperiods)
    return np.divide(er.mean(), prices.to_ulcer_index())
0.004723
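The function above leans on the library's to_returns/to_excess_returns/to_ulcer_index helpers. The sketch below computes the same quantities from first principles with NumPy (the prices are made up, and the drawdown convention may differ slightly from the library's), which can help when sanity-checking results:

import numpy as np

prices = np.array([100.0, 102.0, 99.0, 101.0, 105.0, 103.0])
returns = prices[1:] / prices[:-1] - 1.0

running_max = np.maximum.accumulate(prices)
drawdown_pct = (prices - running_max) / running_max * 100.0
ulcer_index = np.sqrt(np.mean(drawdown_pct ** 2))

rf = 0.0  # risk-free rate already expressed per period
upi = (returns.mean() - rf) / ulcer_index
print(ulcer_index, upi)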
def approximate_split(x, num_splits, axis=0):
    """Split approximately equally into num_splits parts.

    Args:
        x: a Tensor
        num_splits: an integer
        axis: an integer.

    Returns:
        a list of num_splits Tensors.
    """
    size = shape_list(x)[axis]
    size_splits = [tf.div(size + i, num_splits) for i in range(num_splits)]
    return tf.split(x, size_splits, axis=axis)
0.013333
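The size_splits arithmetic above guarantees the pieces sum to the original size and differ by at most one; here is a pure-Python view of the same computation:

size, num_splits = 10, 3
size_splits = [(size + i) // num_splits for i in range(num_splits)]
print(size_splits, sum(size_splits))  # [3, 3, 4] 10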