text: string (length 78 to 104k) | score: float64 (0 to 0.18)
def gauss_fit(X, Y):
    """Fit the data to a Gaussian.

    Parameters
    ----------
    X: 1d array
        X values
    Y: 1d array
        Y values

    Returns
    -------
    (The return from scipy.optimize.curve_fit)
    popt : array
        Optimal values for the parameters
    pcov : 2d array
        The estimated covariance of popt.

    Notes
    -----
    This uses the relatively slow ``curve_fit`` function; do not use it
    where speed matters.
    """
    X = np.asarray(X)
    Y = np.asarray(Y)
    # Cannot have negative values
    Y[Y < 0] = 0

    # Define the Gaussian model
    def gauss(x, a, x0, sigma):
        return a * np.exp(-(x - x0)**2 / (2 * sigma**2))

    # First estimate of the parameters
    mean = (X * Y).sum() / Y.sum()
    sigma = np.sqrt((Y * ((X - mean)**2)).sum() / Y.sum())
    height = Y.max()
    # Fit with curve_fit
    return curve_fit(gauss, X, Y, p0=[height, mean, sigma])
0.0022
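A minimal usage sketch for the `gauss_fit` helper above, assuming it lives in a module that imports `numpy as np` and `scipy.optimize.curve_fit`; the synthetic data and parameter values are made up for illustration.

import numpy as np

# Synthetic noisy Gaussian: height 3.0, centre 1.5, sigma 0.4 (all made up)
x = np.linspace(-2.0, 5.0, 200)
rng = np.random.default_rng(0)
y = 3.0 * np.exp(-(x - 1.5) ** 2 / (2 * 0.4 ** 2)) + rng.normal(0.0, 0.05, x.size)

popt, pcov = gauss_fit(x, y)
height, mean, sigma = popt
print("height=%.2f mean=%.2f sigma=%.2f" % (height, mean, sigma))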
def process_spawn_qty(self, name):
    """Return the number of processes to spawn for the given consumer name.

    :param str name: The consumer name
    :rtype: int

    """
    return self.consumers[name].qty - self.process_count(name)
0.007752
def track_end(self):
    """Ends tracking of attribute changes.

    Returns the changes that occurred to the attributes. Only the final
    state of each attribute is reported.
    """
    self.__tracking = False
    changes = self.__changes
    self.__changes = {}
    return changes
0.006116
def modify_endpoint(EndpointArn=None, EndpointIdentifier=None, EndpointType=None, EngineName=None, Username=None, Password=None, ServerName=None, Port=None, DatabaseName=None, ExtraConnectionAttributes=None, CertificateArn=None, SslMode=None, DynamoDbSettings=None, S3Settings=None, MongoDbSettings=None): """ Modifies the specified endpoint. See also: AWS API Documentation :example: response = client.modify_endpoint( EndpointArn='string', EndpointIdentifier='string', EndpointType='source'|'target', EngineName='string', Username='string', Password='string', ServerName='string', Port=123, DatabaseName='string', ExtraConnectionAttributes='string', CertificateArn='string', SslMode='none'|'require'|'verify-ca'|'verify-full', DynamoDbSettings={ 'ServiceAccessRoleArn': 'string' }, S3Settings={ 'ServiceAccessRoleArn': 'string', 'ExternalTableDefinition': 'string', 'CsvRowDelimiter': 'string', 'CsvDelimiter': 'string', 'BucketFolder': 'string', 'BucketName': 'string', 'CompressionType': 'none'|'gzip' }, MongoDbSettings={ 'Username': 'string', 'Password': 'string', 'ServerName': 'string', 'Port': 123, 'DatabaseName': 'string', 'AuthType': 'no'|'password', 'AuthMechanism': 'default'|'mongodb_cr'|'scram_sha_1', 'NestingLevel': 'none'|'one', 'ExtractDocId': 'string', 'DocsToInvestigate': 'string', 'AuthSource': 'string' } ) :type EndpointArn: string :param EndpointArn: [REQUIRED] The Amazon Resource Name (ARN) string that uniquely identifies the endpoint. :type EndpointIdentifier: string :param EndpointIdentifier: The database endpoint identifier. Identifiers must begin with a letter; must contain only ASCII letters, digits, and hyphens; and must not end with a hyphen or contain two consecutive hyphens. :type EndpointType: string :param EndpointType: The type of endpoint. :type EngineName: string :param EngineName: The type of engine for the endpoint. Valid values, depending on the EndPointType, include MYSQL, ORACLE, POSTGRES, MARIADB, AURORA, REDSHIFT, S3, DYNAMODB, MONGODB, SYBASE, and SQLSERVER. :type Username: string :param Username: The user name to be used to login to the endpoint database. :type Password: string :param Password: The password to be used to login to the endpoint database. :type ServerName: string :param ServerName: The name of the server where the endpoint database resides. :type Port: integer :param Port: The port used by the endpoint database. :type DatabaseName: string :param DatabaseName: The name of the endpoint database. :type ExtraConnectionAttributes: string :param ExtraConnectionAttributes: Additional attributes associated with the connection. :type CertificateArn: string :param CertificateArn: The Amazon Resource Name (ARN) of the certificate used for SSL connection. :type SslMode: string :param SslMode: The SSL mode to be used. SSL mode can be one of four values: none, require, verify-ca, verify-full. The default value is none. :type DynamoDbSettings: dict :param DynamoDbSettings: Settings in JSON format for the target Amazon DynamoDB endpoint. For more information about the available settings, see the Using Object Mapping to Migrate Data to DynamoDB section at Using an Amazon DynamoDB Database as a Target for AWS Database Migration Service . ServiceAccessRoleArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) used by the service access IAM role. :type S3Settings: dict :param S3Settings: Settings in JSON format for the target S3 endpoint. 
For more information about the available settings, see the Extra Connection Attributes section at Using Amazon S3 as a Target for AWS Database Migration Service . ServiceAccessRoleArn (string) --The Amazon Resource Name (ARN) used by the service access IAM role. ExternalTableDefinition (string) -- CsvRowDelimiter (string) --The delimiter used to separate rows in the source files. The default is a carriage return (n). CsvDelimiter (string) --The delimiter used to separate columns in the source files. The default is a comma. BucketFolder (string) --An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path bucketFolder/schema_name/table_name/. If this parameter is not specified, then the path used is schema_name/table_name/. BucketName (string) --The name of the S3 bucket. CompressionType (string) --An optional parameter to use GZIP to compress the target files. Set to GZIP to compress the target files. Set to NONE (the default) or do not use to leave the files uncompressed. :type MongoDbSettings: dict :param MongoDbSettings: Settings in JSON format for the source MongoDB endpoint. For more information about the available settings, see the Configuration Properties When Using MongoDB as a Source for AWS Database Migration Service section at Using Amazon S3 as a Target for AWS Database Migration Service . Username (string) --The user name you use to access the MongoDB source endpoint. Password (string) --The password for the user account you use to access the MongoDB source endpoint. ServerName (string) --The name of the server on the MongoDB source endpoint. Port (integer) --The port value for the MongoDB source endpoint. DatabaseName (string) --The database name on the MongoDB source endpoint. AuthType (string) --The authentication type you use to access the MongoDB source endpoint. Valid values: NO, PASSWORD When NO is selected, user name and password parameters are not used and can be empty. AuthMechanism (string) --The authentication mechanism you use to access the MongoDB source endpoint. Valid values: DEFAULT, MONGODB_CR, SCRAM_SHA_1 DEFAULT For MongoDB version 2.x, use MONGODB_CR. For MongoDB version 3.x, use SCRAM_SHA_1. This attribute is not used when authType=No. NestingLevel (string) --Specifies either document or table mode. Valid values: NONE, ONE Default value is NONE. Specify NONE to use document mode. Specify ONE to use table mode. ExtractDocId (string) --Specifies the document ID. Use this attribute when NestingLevel is set to NONE. Default value is false. DocsToInvestigate (string) --Indicates the number of documents to preview to determine the document organization. Use this attribute when NestingLevel is set to ONE. Must be a positive value greater than 0. Default value is 1000. AuthSource (string) --The MongoDB database name. This attribute is not used when authType=NO . The default is admin. 
:rtype: dict :return: { 'Endpoint': { 'EndpointIdentifier': 'string', 'EndpointType': 'source'|'target', 'EngineName': 'string', 'Username': 'string', 'ServerName': 'string', 'Port': 123, 'DatabaseName': 'string', 'ExtraConnectionAttributes': 'string', 'Status': 'string', 'KmsKeyId': 'string', 'EndpointArn': 'string', 'CertificateArn': 'string', 'SslMode': 'none'|'require'|'verify-ca'|'verify-full', 'ExternalId': 'string', 'DynamoDbSettings': { 'ServiceAccessRoleArn': 'string' }, 'S3Settings': { 'ServiceAccessRoleArn': 'string', 'ExternalTableDefinition': 'string', 'CsvRowDelimiter': 'string', 'CsvDelimiter': 'string', 'BucketFolder': 'string', 'BucketName': 'string', 'CompressionType': 'none'|'gzip' }, 'MongoDbSettings': { 'Username': 'string', 'Password': 'string', 'ServerName': 'string', 'Port': 123, 'DatabaseName': 'string', 'AuthType': 'no'|'password', 'AuthMechanism': 'default'|'mongodb_cr'|'scram_sha_1', 'NestingLevel': 'none'|'one', 'ExtractDocId': 'string', 'DocsToInvestigate': 'string', 'AuthSource': 'string' } } } """ pass
0.004688
def list_user_access(self, user):
    """
    Returns a list of all database names for which the specified user
    has access rights.
    """
    user = utils.get_name(user)
    uri = "/%s/%s/databases" % (self.uri_base, user)
    try:
        resp, resp_body = self.api.method_get(uri)
    except exc.NotFound as e:
        raise exc.NoSuchDatabaseUser("User '%s' does not exist." % user)
    dbs = resp_body.get("databases", {})
    return [CloudDatabaseDatabase(self, db) for db in dbs]
0.003717
def tarbell_configure(command, args): """ Tarbell configuration routine. """ puts("Configuring Tarbell. Press ctrl-c to bail out!") # Check if there's settings configured settings = Settings() path = settings.path prompt = True if len(args): prompt = False config = _get_or_create_config(path) if prompt or "drive" in args: config.update(_setup_google_spreadsheets(config, path, prompt)) if prompt or "s3" in args: config.update(_setup_s3(config, path, prompt)) if prompt or "path" in args: config.update(_setup_tarbell_project_path(config, path, prompt)) if prompt or "templates" in args: if "project_templates" in config: override_templates = raw_input("\nFound Base Template config. Would you like to override them? [Default: No, 'none' to skip]") if override_templates and override_templates != "No" and override_templates != "no" and override_templates != "N" and override_templates != "n": config.update(_setup_default_templates(config, path, prompt)) else: puts("\nPreserving Base Template config...") else: config.update(_setup_default_templates(config, path, prompt)) settings.config = config with open(path, 'w') as f: puts("\nWriting {0}".format(colored.green(path))) settings.save() if all: puts("\n- Done configuring Tarbell. Type `{0}` for help.\n" .format(colored.green("tarbell"))) return settings
0.002566
def filter_inactive_ports(query):
    """Filter ports that aren't in active status."""
    port_model = models_v2.Port
    query = (query
             .filter(port_model.status == n_const.PORT_STATUS_ACTIVE))
    return query
0.004444
def get_history_tags(self, exp, rep=0):
    """Returns all available tags (logging keys) of the given experiment
    repetition.

    Note: Technically, each repetition could have different tags, therefore
    the rep number can be passed in as parameter, even though usually all
    repetitions have the same tags. The default repetition is 0 and in most
    cases, can be omitted.
    """
    history = self.get_history(exp, rep, 'all')
    return history.keys()
0.013035
def to_yaml_file(obj, f):
    """Convert Python objects (including rpcq messages) to yaml and write it to `f`."""
    yaml.dump(rapidjson.loads(to_json(obj)), f)
0.011696
def load_machine(self, descriptor): """ Load a complete register machine. The descriptor is a map, unspecified values are loaded from the default values. """ def get_cfg(name): if(name in descriptor): return descriptor[name] else: return defaults[name] self.processor = Processor(width = get_cfg("width")) self.rom = ROM(get_cfg("rom_size"), get_cfg("rom_width")) self.processor.register_memory_device(self.rom) self.registers = [] if(get_cfg("ram_enable")): self.ram = RAM(get_cfg("ram_size"), get_cfg("ram_width")) self.processor.register_memory_device(self.ram) else: self.ram = None if(get_cfg("flash_enable")): self.flash = Flash(get_cfg("flash_size"), get_cfg("flash_width")) self.processor.register_device(self.flash) else: self.flash = None for register in get_cfg("registers"): self.processor.add_register(register) self.registers.append(register) for command in get_cfg("commands"): self.processor.register_command(command) self.processor.setup_done()
0.032724
def rerank(args: argparse.Namespace): """ Reranks a list of hypotheses according to a sentence-level metric. Writes all output to STDOUT. :param args: Namespace object holding CLI arguments. """ reranker = Reranker(args.metric, args.return_score) with utils.smart_open(args.reference) as reference, utils.smart_open(args.hypotheses) as hypotheses: for i, (reference_line, hypothesis_line) in enumerate(zip(reference, hypotheses), 1): reference = reference_line.strip() # Expects a JSON object with keys containing at least 'translations', # as returned by sockeye.translate's nbest output hypotheses = json.loads(hypothesis_line.strip()) utils.check_condition('translations' in hypotheses, "Reranking requires nbest JSON input with 'translations' key present.") num_hypotheses = len(hypotheses['translations']) if not num_hypotheses > 1: logger.info("Line %d contains %d hypotheses. Nothing to rerank.", i, num_hypotheses) reranked_hypotheses = hypotheses else: reranked_hypotheses = reranker.rerank(hypotheses, reference) if args.output_best: if not num_hypotheses: print() else: print(reranked_hypotheses['translations'][0]) else: print(json.dumps(reranked_hypotheses, sort_keys=True))
0.003955
def numericalize(self, t: Collection[str]) -> List[int]:
    "Convert a list of tokens `t` to their ids."
    return [self.stoi[w] for w in t]
0.020134
def matches_address(self, address):
    """returns whether this account knows about an email address

    :param str address: address to look up
    :rtype: bool
    """
    if self.address == address:
        return True
    for alias in self.aliases:
        if alias == address:
            return True
    if self._alias_regexp and self._alias_regexp.match(address):
        return True
    return False
0.004396
def finish(self):
    """Finishes transcoding and returns the video.

    Returns:
      bytes

    Raises:
      IOError: in case of transcoding error.
    """
    if self.proc is None:
        return None
    self.proc.stdin.close()
    for thread in (self._out_thread, self._err_thread):
        thread.join()
    (out, err) = [
        b"".join(chunks)
        for chunks in (self._out_chunks, self._err_chunks)
    ]
    self.proc.stdout.close()
    self.proc.stderr.close()
    if self.proc.returncode:
        err = "\n".join([" ".join(self.cmd), err.decode("utf8")])
        raise IOError(err)
    del self.proc
    self.proc = None
    return out
0.007813
def _bcbio_variation_ensemble(vrn_files, out_file, ref_file, config_file, base_dir, data): """Run a variant comparison using the bcbio.variation toolkit, given an input configuration. """ vrn_files = [_handle_somatic_ensemble(v, data) for v in vrn_files] tmp_dir = utils.safe_makedir(os.path.join(base_dir, "tmp")) resources = config_utils.get_resources("bcbio_variation", data["config"]) jvm_opts = resources.get("jvm_opts", ["-Xms750m", "-Xmx2g"]) java_args = ["-Djava.io.tmpdir=%s" % tmp_dir] cmd = ["bcbio-variation"] + jvm_opts + java_args + \ ["variant-ensemble", config_file, ref_file, out_file] + vrn_files with utils.chdir(base_dir): cmd = "%s %s" % (utils.local_path_export(), " ".join(str(x) for x in cmd)) do.run(cmd, "Ensemble calling: %s" % os.path.basename(base_dir))
0.004739
def compute(self, tdb, tdb2, derivative=True): """Generate angles and derivatives for time `tdb` plus `tdb2`. If ``derivative`` is true, return a tuple containing both the angle and its derivative; otherwise simply return the angles. """ scalar = not getattr(tdb, 'shape', 0) and not getattr(tdb2, 'shape', 0) if scalar: tdb = array((tdb,)) data = self._data if data is None: self._data = data = self._load() initial_epoch, interval_length, coefficients = data component_count, n, coefficient_count = coefficients.shape # Subtracting tdb before adding tdb2 affords greater precision. index, offset = divmod((tdb - initial_epoch) + tdb2, interval_length) index = index.astype(int) if (index < 0).any() or (index > n).any(): final_epoch = initial_epoch + interval_length * n raise ValueError('segment only covers dates %.1f through %.1f' % (initial_epoch, final_epoch)) omegas = (index == n) index[omegas] -= 1 offset[omegas] += interval_length coefficients = coefficients[:,index] # Chebyshev polynomial. T = empty((coefficient_count, len(index))) T[0] = 1.0 T[1] = t1 = 2.0 * offset / interval_length - 1.0 twot1 = t1 + t1 for i in range(2, coefficient_count): T[i] = twot1 * T[i-1] - T[i-2] components = (T.T * coefficients).sum(axis=2) if scalar: components = components[:,0] if not derivative: return components # Chebyshev differentiation. dT = empty_like(T) dT[0] = 0.0 dT[1] = 1.0 if coefficient_count > 2: dT[2] = twot1 + twot1 for i in range(3, coefficient_count): dT[i] = twot1 * dT[i-1] - dT[i-2] + T[i-1] + T[i-1] dT *= 2.0 dT /= interval_length rates = (dT.T * coefficients).sum(axis=2) if scalar: rates = rates[:,0] return components, rates
0.002818
def _call(self, x, out=None):
    """Apply resampling operator.

    The element ``x`` is resampled using the sampling and interpolation
    operators of the underlying spaces.
    """
    if out is None:
        return x.interpolation
    else:
        out.sampling(x.interpolation)
0.006349
def type_from_ast(schema, type_node): # noqa: F811 """Get the GraphQL type definition from an AST node. Given a Schema and an AST node describing a type, return a GraphQLType definition which applies to that type. For example, if provided the parsed AST node for `[User]`, a GraphQLList instance will be returned, containing the type called "User" found in the schema. If a type called "User" is not found in the schema, then None will be returned. """ if isinstance(type_node, ListTypeNode): inner_type = type_from_ast(schema, type_node.type) return GraphQLList(inner_type) if inner_type else None if isinstance(type_node, NonNullTypeNode): inner_type = type_from_ast(schema, type_node.type) return GraphQLNonNull(inner_type) if inner_type else None if isinstance(type_node, NamedTypeNode): return schema.get_type(type_node.name.value) # Not reachable. All possible type nodes have been considered. raise TypeError( # pragma: no cover f"Unexpected type node: '{inspect(type_node)}'." )
0.004587
def vcenter_id(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    vcenter = ET.SubElement(config, "vcenter", xmlns="urn:brocade.com:mgmt:brocade-vswitch")
    id = ET.SubElement(vcenter, "id")
    id.text = kwargs.pop('id')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
0.007958
def process_data(data, models):
    """Convert ``data`` to processed data using ``models``.

    Data from dictionary ``data`` is processed by each model in list
    ``models``, and the results collected into a new dictionary ``pdata``
    for use in :meth:`MultiFitter.lsqfit` and
    :meth:`MultiFitter.chained_lsqfit`.
    """
    pdata = gvar.BufferDict()
    for m in MultiFitter.flatten_models(models):
        pdata[m.datatag] = (
            m.builddata(data) if m.ncg <= 1 else
            MultiFitter.coarse_grain(m.builddata(data), ncg=m.ncg)
        )
    return pdata
0.003145
def _iter_from_annotations_dict(graph: BELGraph,
                                annotations_dict: AnnotationsDict,
                                ) -> Iterable[Tuple[str, Set[str]]]:
    """Iterate over the key/value pairs in this edge data dictionary normalized to their source URLs."""
    for key, names in annotations_dict.items():
        if key in graph.annotation_url:
            url = graph.annotation_url[key]
        elif key in graph.annotation_list:
            continue  # skip those
        elif key in graph.annotation_pattern:
            log.debug('pattern annotations in the database are not implemented yet')  # FIXME
            continue
        else:
            raise ValueError('Graph resources do not contain keyword: {}'.format(key))
        yield url, set(names)
0.008168
def _generate_docker_image_version(layers, runtime):
    """
    Generate the Docker TAG that will be used to create the image

    Parameters
    ----------
    layers list(samcli.commands.local.lib.provider.Layer)
        List of the layers

    runtime str
        Runtime of the image to create

    Returns
    -------
    str
        String representing the TAG to be attached to the image
    """

    # Docker has a concept of a TAG on an image. The TAG together with the REPOSITORY determines
    # a version of the image. We produce a TAG for a combination of the runtime with the layers
    # specified in the template. This allows reuse of the runtime and layers across different
    # functions that are defined. If two functions use the same runtime with the same layers (in the
    # same order), SAM CLI will only produce one image and use this image across both functions for invoke.
    return runtime + '-' + hashlib.sha256(
        "-".join([layer.name for layer in layers]).encode('utf-8')).hexdigest()[0:25]
0.007136
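The tag construction above can be reproduced with nothing but `hashlib`; a hedged sketch with hypothetical layer names (the real code reads them from the SAM template):

import hashlib

# Hypothetical layer names, e.g. ARNs as they might appear in a template
layer_names = [
    "arn:aws:lambda:us-east-1:111122223333:layer:common:3",
    "arn:aws:lambda:us-east-1:111122223333:layer:deps:7",
]
runtime = "python3.9"

digest = hashlib.sha256("-".join(layer_names).encode("utf-8")).hexdigest()[0:25]
tag = runtime + "-" + digest
print(tag)  # e.g. "python3.9-" followed by 25 hex characters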
def unpack_rawr_zip_payload(table_sources, payload):
    """unpack a zipfile and turn it into a callable "tables" object."""
    # the io we get from S3 is streaming, so we can't seek on it, but zipfile
    # seems to require that. so we buffer it all in memory. RAWR tiles are
    # generally up to around 100MB in size, which should be safe to store in
    # RAM.
    from tilequeue.query.common import Table
    from io import BytesIO

    zfh = zipfile.ZipFile(BytesIO(payload), 'r')

    def get_table(table_name):
        # need to extract the whole compressed file from zip reader, as it
        # doesn't support .tell() on the filelike, which gzip requires.
        data = zfh.open(table_name, 'r').read()
        unpacker = Unpacker(file_like=BytesIO(data))
        source = table_sources[table_name]
        return Table(source, unpacker)

    return get_table
0.001148
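The buffer-then-open pattern used by `unpack_rawr_zip_payload` above, shown standalone with only the standard library; the member names and contents are made up:

import zipfile
from io import BytesIO

# Build a tiny zip in memory to stand in for the payload fetched from S3
buf = BytesIO()
with zipfile.ZipFile(buf, "w") as zf:
    zf.writestr("water", b"polygon rows")
    zf.writestr("roads", b"line rows")
payload = buf.getvalue()

# Wrap the bytes in BytesIO so zipfile can seek, then read members on demand
zfh = zipfile.ZipFile(BytesIO(payload), "r")
print(zfh.open("roads", "r").read())  # b'line rows'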
def create_was_invalidated_by_relation(self, activity_id, entity_kind, entity_id):
    """
    Create a was-invalidated-by relationship between an activity and an entity (file).
    :param activity_id: str: uuid of the activity
    :param entity_kind: str: kind of entity ('dds-file')
    :param entity_id: str: uuid of the entity
    :return: requests.Response containing the successful result
    """
    return self._create_activity_relation(activity_id, entity_kind, entity_id,
                                          ActivityRelationTypes.WAS_INVALIDATED_BY)
0.009058
def _validate_options(self): ''' Make sure there are no conflicting or invalid options ''' if self.obfuscate_hostname and not self.obfuscate: raise ValueError( 'Option `obfuscate_hostname` requires `obfuscate`') if self.analyze_image_id is not None and len(self.analyze_image_id) < 12: raise ValueError( 'Image/Container ID must be at least twelve characters long.') if self.enable_schedule and self.disable_schedule: raise ValueError( 'Conflicting options: --enable-schedule and --disable-schedule') if self.analyze_container and (self.register or self.unregister): raise ValueError('Registration not supported with ' 'image or container analysis.') if self.to_json and self.to_stdout: raise ValueError( 'Conflicting options: --to-stdout and --to-json') if self.payload and not self.content_type: raise ValueError( '--payload requires --content-type') if not self.legacy_upload: if self.group: raise ValueError( '--group is not supported at this time.') if self.analyze_image_id: raise ValueError( '--analyze-image-id is not supported at this time.') if self.analyze_file: raise ValueError( '--analyze-file is not supported at this time.') if self.analyze_mountpoint: raise ValueError( '--analyze-mountpoint is not supported at this time.') if self.analyze_container: raise ValueError( '--analyze-container is not supported at this time.')
0.002173
def scatter_plot(self, ax, topic_dims, t=None, ms_limits=True, **kwargs_plot): """ 2D or 3D scatter plot. :param axes ax: matplotlib axes (use Axes3D if 3D data) :param tuple topic_dims: list of (topic, dims) tuples, where topic is a string and dims is a list of dimensions to be plotted for that topic. :param int t: time indexes to be plotted :param dict kwargs_plot: argument to be passed to matplotlib's plot function, e.g. the style of the plotted points 'or' :param bool ms_limits: if set to True, automatically set axes boundaries to the sensorimotor boundaries (default: True) """ plot_specs = {'marker': 'o', 'linestyle': 'None'} plot_specs.update(kwargs_plot) # t_bound = float('inf') # if t is None: # for topic, _ in topic_dims: # t_bound = min(t_bound, self.counts[topic]) # t = range(t_bound) # data = self.pack(topic_dims, t) data = self.data_t(topic_dims, t) ax.plot(*(data.T), **plot_specs) if ms_limits: ax.axis(self.axes_limits(topic_dims))
0.006908
def _finish_fragment(self): """ Creates fragment """ if self.fragment: self.fragment.finish() if self.fragment.headers: # Regardless of what's been seen to this point, if we encounter a headers fragment, # all the previous fragments should be marked hidden and found_visible set to False. self.found_visible = False for f in self.fragments: f.hidden = True if not self.found_visible: if self.fragment.quoted \ or self.fragment.headers \ or self.fragment.signature \ or (len(self.fragment.content.strip()) == 0): self.fragment.hidden = True else: self.found_visible = True self.fragments.append(self.fragment) self.fragment = None
0.00426
def listPrimaryDsTypes(self, primary_ds_type="", dataset=""): """ API to list primary dataset types :param primary_ds_type: List that primary dataset type (Optional) :type primary_ds_type: str :param dataset: List the primary dataset type for that dataset (Optional) :type dataset: str :returns: List of dictionaries containing the following keys (primary_ds_type_id, data_type) :rtype: list of dicts """ if primary_ds_type: primary_ds_type = primary_ds_type.replace("*", "%") if dataset: dataset = dataset.replace("*", "%") try: return self.dbsPrimaryDataset.listPrimaryDSTypes(primary_ds_type, dataset) except dbsException as de: dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.message) except Exception as ex: sError = "DBSReaderModel/listPrimaryDsTypes. %s\n. Exception trace: \n %s" \ % (ex, traceback.format_exc()) dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
0.006838
def RandomGraph(nodes=range(10), min_links=2, width=400, height=300, curvature=lambda: random.uniform(1.1, 1.5)): """Construct a random graph, with the specified nodes, and random links. The nodes are laid out randomly on a (width x height) rectangle. Then each node is connected to the min_links nearest neighbors. Because inverse links are added, some nodes will have more connections. The distance between nodes is the hypotenuse times curvature(), where curvature() defaults to a random number between 1.1 and 1.5.""" g = UndirectedGraph() g.locations = {} ## Build the cities for node in nodes: g.locations[node] = (random.randrange(width), random.randrange(height)) ## Build roads from each city to at least min_links nearest neighbors. for i in range(min_links): for node in nodes: if len(g.get(node)) < min_links: here = g.locations[node] def distance_to_node(n): if n is node or g.get(node,n): return infinity return distance(g.locations[n], here) neighbor = argmin(nodes, distance_to_node) d = distance(g.locations[neighbor], here) * curvature() g.connect(node, neighbor, int(d)) return g
0.005263
def server_update(s_name, s_ip, **connection_args): ''' Update a server's attributes CLI Example: .. code-block:: bash salt '*' netscaler.server_update 'serverName' 'serverIP' ''' altered = False cur_server = _server_get(s_name, **connection_args) if cur_server is None: return False alt_server = NSServer() alt_server.set_name(s_name) if cur_server.get_ipaddress() != s_ip: alt_server.set_ipaddress(s_ip) altered = True # Nothing to update, the server is already idem if altered is False: return False # Perform the update nitro = _connect(**connection_args) if nitro is None: return False ret = True try: NSServer.update(nitro, alt_server) except NSNitroError as error: log.debug('netscaler module error - NSServer.update() failed: %s', error) ret = False _disconnect(nitro) return ret
0.002119
def add_to_buffer(self, content, read_position):
    """Add additional bytes content as read from the read_position.

    Args:
        content (bytes): data to be added to the working buffer (BufferWorkSpace).
        read_position (int): where in the file pointer the data was read from.
    """
    self.read_position = read_position
    if self.read_buffer is None:
        self.read_buffer = content
    else:
        self.read_buffer = content + self.read_buffer
0.005988
def show_linkinfo_output_show_link_info_linkinfo_version(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") show_linkinfo = ET.Element("show_linkinfo") config = show_linkinfo output = ET.SubElement(show_linkinfo, "output") show_link_info = ET.SubElement(output, "show-link-info") linkinfo_rbridgeid_key = ET.SubElement(show_link_info, "linkinfo-rbridgeid") linkinfo_rbridgeid_key.text = kwargs.pop('linkinfo_rbridgeid') linkinfo_version = ET.SubElement(show_link_info, "linkinfo-version") linkinfo_version.text = kwargs.pop('linkinfo_version') callback = kwargs.pop('callback', self._callback) return callback(config)
0.004027
def get_storer(self, key):
    """ return the storer object for a key, raise if not in the file """
    group = self.get_node(key)
    if group is None:
        raise KeyError('No object named {key} in the file'.format(key=key))

    s = self._create_storer(group)
    s.infer_axes()
    return s
0.006173
def redraw_now(self, whence=0): """Redraw the displayed image. Parameters ---------- whence See :meth:`get_rgb_object`. """ try: time_start = time.time() self.redraw_data(whence=whence) # finally update the window drawable from the offscreen surface self.update_image() time_done = time.time() time_delta = time_start - self.time_last_redraw time_elapsed = time_done - time_start self.time_last_redraw = time_done self.logger.debug( "widget '%s' redraw (whence=%d) delta=%.4f elapsed=%.4f sec" % ( self.name, whence, time_delta, time_elapsed)) except Exception as e: self.logger.error("Error redrawing image: %s" % (str(e))) try: # log traceback, if possible (type, value, tb) = sys.exc_info() tb_str = "".join(traceback.format_tb(tb)) self.logger.error("Traceback:\n%s" % (tb_str)) except Exception: tb_str = "Traceback information unavailable." self.logger.error(tb_str)
0.002457
def splitclass(classofdevice):
    """
    Splits the given class of device to return a 3-item tuple with the
    major service class, major device class and minor device class values.

    These values indicate the device's major services and the type of the
    device (e.g. mobile phone, laptop, etc.). If you google for "assigned
    numbers bluetooth baseband" you might find some documents that discuss
    how to extract this information from the class of device.

    Example:
        >>> splitclass(1057036)
        (129, 1, 3)

    """
    if not isinstance(classofdevice, int):
        try:
            classofdevice = int(classofdevice)
        except (TypeError, ValueError):
            raise TypeError("Given device class '%s' cannot be split" %
                            str(classofdevice))

    data = classofdevice >> 2   # skip over the 2 "format" bits
    service = data >> 11
    major = (data >> 6) & 0x1F
    minor = data & 0x3F
    return (service, major, minor)
0.003033
def clear_routing_table_entries(self, x, y, app_id):
    """Clear the routing table entries associated with a given application.
    """
    # Construct the arguments
    arg1 = (app_id << 8) | consts.AllocOperations.free_rtr_by_app
    self._send_scp(x, y, 0, SCPCommands.alloc_free, arg1, 0x1)
0.006349
def insert_import_path_to_sys_modules(import_path):
    """
    When importing a module, Python searches the directories in sys.path.
    The default value of sys.path varies between systems, but when Python is
    started with a script, the script's directory is inserted at sys.path[0].
    We therefore have to extend sys.path to import objects from the
    specified scripts.
    """
    abspath = os.path.abspath(import_path)
    if os.path.isdir(abspath):
        sys.path.insert(0, abspath)
    else:
        sys.path.insert(0, os.path.dirname(abspath))
0.003565
def get_private(self):
    """ Derive private key from the brain key and the current sequence
        number
    """
    encoded = "%s %d" % (self.brainkey, self.sequence)
    a = _bytes(encoded)
    s = hashlib.sha256(hashlib.sha512(a).digest()).digest()
    return PrivateKey(hexlify(s).decode("ascii"), prefix=self.prefix)
0.005666
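The double-hash derivation in `get_private` above, sketched standalone with `hashlib`; the brain key and sequence number are made up and the final `PrivateKey` wrapping is omitted:

import hashlib
from binascii import hexlify

brainkey = "WORD1 WORD2 WORD3"   # made-up brain key
sequence = 0

encoded = "%s %d" % (brainkey, sequence)
digest = hashlib.sha256(hashlib.sha512(encoded.encode("ascii")).digest()).digest()
print(hexlify(digest).decode("ascii"))  # 64 hex chars that would seed the private key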
def read_inquiry_mode(sock): """returns the current mode, or -1 on failure""" # save current filter old_filter = sock.getsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, 14) # Setup socket filter to receive only events related to the # read_inquiry_mode command flt = bluez.hci_filter_new() opcode = bluez.cmd_opcode_pack(bluez.OGF_HOST_CTL, bluez.OCF_READ_INQUIRY_MODE) bluez.hci_filter_set_ptype(flt, bluez.HCI_EVENT_PKT) bluez.hci_filter_set_event(flt, bluez.EVT_CMD_COMPLETE); bluez.hci_filter_set_opcode(flt, opcode) sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, flt ) # first read the current inquiry mode. bluez.hci_send_cmd(sock, bluez.OGF_HOST_CTL, bluez.OCF_READ_INQUIRY_MODE ) pkt = sock.recv(255) status,mode = struct.unpack("xxxxxxBB", pkt) if status != 0: mode = -1 # restore old filter sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, old_filter ) return mode
0.0143
def parseBranches(self, descendants):
    """
    Parse top level of markdown

    :param list descendants: list of source objects
    :return: list of filtered TreeOfContents objects
    """
    parsed, parent, cond = [], False, lambda b: (b.string or '').strip()
    for branch in filter(cond, descendants):
        if self.getHeadingLevel(branch) == self.depth:
            parsed.append({'root': branch.string, 'source': branch})
            parent = True
        elif not parent:
            parsed.append({'root': branch.string, 'source': branch})
        else:
            parsed[-1].setdefault('descendants', []).append(branch)
    return [TOC(depth=self.depth + 1, **kwargs) for kwargs in parsed]
0.007937
def showhtml(): """Open your web browser and display the generated html documentation. """ import webbrowser # copy from paver opts = options docroot = path(opts.get('docroot', 'docs')) if not docroot.exists(): raise BuildFailure("Sphinx documentation root (%s) does not exist." % docroot) builddir = docroot / opts.get("builddir", ".build") # end of copy builddir=builddir / 'html' if not builddir.exists(): raise BuildFailure("Sphinx build directory (%s) does not exist." % builddir) webbrowser.open(builddir / 'index.html')
0.003091
def repack(self):
    """Removes any blank ranks in the order."""
    items = self.grouped_filter().order_by('rank').select_for_update()
    for count, item in enumerate(items):
        item.rank = count + 1
        item.save(rerank=False)
0.007722
def grouper(self, n, iterable, fillvalue=None):
    """This generator yields a set of tuples, where the iterable is broken
    into n sized chunks. If the iterable is not evenly sized then fillvalue
    will be appended to the last tuple to make up the difference.

    This function is copied from the standard docs on itertools.
    """
    args = [iter(iterable)] * n
    if hasattr(itertools, 'zip_longest'):
        return itertools.zip_longest(*args, fillvalue=fillvalue)
    return itertools.izip_longest(*args, fillvalue=fillvalue)
0.003384
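The same chunking idiom as `grouper` above, written for Python 3 only (where `zip_longest` always exists); the input data is illustrative:

import itertools

def chunks(n, iterable, fillvalue=None):
    # Repeat one iterator n times so zip_longest pulls n items per tuple
    args = [iter(iterable)] * n
    return itertools.zip_longest(*args, fillvalue=fillvalue)

print(list(chunks(3, range(8), fillvalue=0)))
# [(0, 1, 2), (3, 4, 5), (6, 7, 0)]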
def defconfig_filename(self):
    """
    See the class documentation.
    """
    if self.defconfig_list:
        for filename, cond in self.defconfig_list.defaults:
            if expr_value(cond):
                try:
                    with self._open_config(filename.str_value) as f:
                        return f.name
                except IOError:
                    continue

    return None
0.004415
def _init_metadata(self): """stub""" self._choices_metadata = { 'element_id': Id(self.my_osid_object_form._authority, self.my_osid_object_form._namespace, 'choices'), 'element_label': 'Choices', 'instructions': 'Enter as many choices as you wish', 'required': True, 'read_only': False, 'linked': False, 'array': True, 'default_object_values': [''], 'syntax': 'OBJECT', 'object_set': [] } self._choice_name_metadata = { 'element_id': Id(self.my_osid_object_form._authority, self.my_osid_object_form._namespace, 'question_string'), 'element_label': 'choice name', 'instructions': 'enter a short label for this choice', 'required': False, 'read_only': False, 'linked': False, 'array': False, 'default_string_values': [''], 'syntax': 'STRING', 'minimum_string_length': 0, 'maximum_string_length': 1024, 'string_set': [] } self._multi_answer_metadata = { 'element_id': Id(self.my_osid_object_form._authority, self.my_osid_object_form._namespace, 'multi_answer'), 'element_label': 'Is Multi-Answer', 'instructions': 'accepts a boolean (True/False) value', 'required': True, 'read_only': False, 'linked': True, 'array': False, 'default_boolean_values': ['False'], 'syntax': 'BOOLEAN', 'id_set': [] }
0.001107
def gen(mimetype):
    """``gen`` is a decorator factory function, you just need to set a
    mimetype before using::

        @app.route('/')
        @gen('')
        def index():
            pass

    A full demo for creating an image stream is available on
    `GitHub <https://github.com/kxxoling/flask-video-streaming>`__ .
    """
    def streaming(func, *args, **kwargs):
        @wraps(func)
        def _():
            return Response(func(*args, **kwargs), mimetype=mimetype)
        return _
    return streaming
0.001815
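A hedged usage sketch for the `gen` decorator factory above, assuming it is defined in a module that imports `functools.wraps` and `flask.Response`; the app, route, and mimetype are illustrative:

from flask import Flask

app = Flask(__name__)

@app.route('/numbers')
@gen('text/plain')       # the decorated view simply yields chunks
def numbers():
    for i in range(3):
        yield "line %d\n" % i

# Each request to /numbers now streams the generator's output in a
# flask.Response with the given mimetype.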
def URLField(default=NOTHING, required=True, repr=True, cmp=True, key=None):
    """
    Create new URL field on a model.

    :param default: any value
    :param bool required: whether or not the object is invalid if not provided.
    :param bool repr: whether this field should appear in the object's repr.
    :param bool cmp: include this field in generated comparison.
    :param string key: override name of the value when converted to dict.
    """
    cls = ParseResult
    default = _init_fields.init_default(required, default, None)
    validator = _init_fields.init_validator(required, cls)
    return attrib(default=default, converter=converters.str_to_url,
                  validator=validator, repr=repr, cmp=cmp,
                  metadata=dict(key=key))
0.001302
def set_initial_status(self, configuration=None): """ Override behaviour of methods in class DiffusionModel. Overwrites initial status using random real values. Generates random node profiles. """ super(CognitiveOpDynModel, self).set_initial_status(configuration) # set node status for node in self.status: self.status[node] = np.random.random_sample() self.initial_status = self.status.copy() # set new node parameters self.params['nodes']['cognitive'] = {} # first correct the input model parameters and retreive T_range, B_range and R_distribution T_range = (self.params['model']['T_range_min'], self.params['model']['T_range_max']) if self.params['model']['T_range_min'] > self.params['model']['T_range_max']: T_range = (self.params['model']['T_range_max'], self.params['model']['T_range_min']) B_range = (self.params['model']['B_range_min'], self.params['model']['B_range_max']) if self.params['model']['B_range_min'] > self.params['model']['B_range_max']: B_range = (self.params['model']['B_range_max'], self.params['model']['B_range_min']) s = float(self.params['model']['R_fraction_negative'] + self.params['model']['R_fraction_neutral'] + self.params['model']['R_fraction_positive']) R_distribution = (self.params['model']['R_fraction_negative']/s, self.params['model']['R_fraction_neutral']/s, self.params['model']['R_fraction_positive']/s) # then sample parameters from the ranges and distribution for node in self.graph.nodes(): R_prob = np.random.random_sample() if R_prob < R_distribution[0]: R = -1 elif R_prob < (R_distribution[0] + R_distribution[1]): R = 0 else: R = 1 # R, B and T parameters in a tuple self.params['nodes']['cognitive'][node] = (R, B_range[0] + (B_range[1] - B_range[0])*np.random.random_sample(), T_range[0] + (T_range[1] - T_range[0])*np.random.random_sample())
0.005717
def from_index_amount(cls, idx, amount): """ Like Deformation.from_index_amount, except generates a strain from the zero 3x3 tensor or voigt vector with the amount specified in the index location. Ensures symmetric strain. Args: idx (tuple or integer): index to be perturbed, can be voigt or full-tensor notation amount (float): amount to perturb selected index """ if np.array(idx).ndim == 0: v = np.zeros(6) v[idx] = amount return cls.from_voigt(v) elif np.array(idx).ndim == 1: v = np.zeros((3, 3)) for i in itertools.permutations(idx): v[i] = amount return cls(v) else: raise ValueError("Index must either be 2-tuple or integer " "corresponding to full-tensor or voigt index")
0.002148
def get_exception_error():
    '''Get the exception info

    Sample usage:
        try:
            raise Exception("asdfsdfsdf")
        except:
            print common.get_exception_error()

    Return:
        return the exception information.
    '''
    error_message = ""
    for i in range(len(inspect.trace())):
        error_line = u"""
        File:      %s - [%s]
        Function:  %s
        Statement: %s
        -""" % (inspect.trace()[i][1], inspect.trace()[i][2],
                inspect.trace()[i][3], inspect.trace()[i][4])
        error_message = "%s%s" % (error_message, error_line)
    error_message = u"""Error!\n%s\n\t%s\n\t%s\n-------------------------------------------------------------------------------------------\n\n""" % (error_message, sys.exc_info()[0], sys.exc_info()[1])
    return error_message
0.009667
def raw_conf_process_pyramid(raw_conf):
    """
    Loads the process pyramid of a raw configuration.

    Parameters
    ----------
    raw_conf : dict
        Raw mapchete configuration as dictionary.

    Returns
    -------
    BufferedTilePyramid
    """
    return BufferedTilePyramid(
        raw_conf["pyramid"]["grid"],
        metatiling=raw_conf["pyramid"].get("metatiling", 1),
        pixelbuffer=raw_conf["pyramid"].get("pixelbuffer", 0)
    )
0.002188
def value(self) -> Union[bool, None]:
    """Returns the concrete value of this bool if concrete, otherwise None.

    :return: Concrete value or None
    """
    self.simplify()
    if self.is_true:
        return True
    elif self.is_false:
        return False
    else:
        return None
0.005988
def check_boto_reqs(boto_ver=None, boto3_ver=None, botocore_ver=None, check_boto=True, check_boto3=True): ''' Checks for the version of various required boto libs in one central location. Most boto states and modules rely on a single version of the boto, boto3, or botocore libs. However, some require newer versions of any of these dependencies. This function allows the module to pass in a version to override the default minimum required version. This function is useful in centralizing checks for ``__virtual__()`` functions in the various, and many, boto modules and states. boto_ver The minimum required version of the boto library. Defaults to ``2.0.0``. boto3_ver The minimum required version of the boto3 library. Defaults to ``1.2.6``. botocore_ver The minimum required version of the botocore library. Defaults to ``1.3.23``. check_boto Boolean defining whether or not to check for boto deps. This defaults to ``True`` as most boto modules/states rely on boto, but some do not. check_boto3 Boolean defining whether or not to check for boto3 (and therefore botocore) deps. This defaults to ``True`` as most boto modules/states rely on boto3/botocore, but some do not. ''' if check_boto is True: try: # Late import so we can only load these for this function import boto has_boto = True except ImportError: has_boto = False if boto_ver is None: boto_ver = '2.0.0' if not has_boto or version_cmp(boto.__version__, boto_ver) == -1: return False, 'A minimum version of boto {0} is required.'.format(boto_ver) if check_boto3 is True: try: # Late import so we can only load these for this function import boto3 import botocore has_boto3 = True except ImportError: has_boto3 = False # boto_s3_bucket module requires boto3 1.2.6 and botocore 1.3.23 for # idempotent ACL operations via the fix in https://github.com/boto/boto3/issues/390 if boto3_ver is None: boto3_ver = '1.2.6' if botocore_ver is None: botocore_ver = '1.3.23' if not has_boto3 or version_cmp(boto3.__version__, boto3_ver) == -1: return False, 'A minimum version of boto3 {0} is required.'.format(boto3_ver) elif version_cmp(botocore.__version__, botocore_ver) == -1: return False, 'A minimum version of botocore {0} is required'.format(botocore_ver) return True
0.005867
def LogBinomialCoef(n, k):
    """Computes the log of the binomial coefficient.

    http://math.stackexchange.com/questions/64716/
    approximating-the-logarithm-of-the-binomial-coefficient

    n: number of trials
    k: number of successes

    Returns: float
    """
    return n * log(n) - k * log(k) - (n - k) * log(n - k)
0.00304
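A quick check of the approximation above against the exact value via `math.lgamma`; the formula is the leading (entropy) term of Stirling's approximation, so it is only close for large n with k away from the boundaries:

from math import log, lgamma

def log_binom_approx(n, k):
    return n * log(n) - k * log(k) - (n - k) * log(n - k)

def log_binom_exact(n, k):
    return lgamma(n + 1) - lgamma(k + 1) - lgamma(n - k + 1)

n, k = 1000, 250
print(log_binom_approx(n, k))  # roughly 562.3
print(log_binom_exact(n, k))   # roughly 558.8, close but not identical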
def FillDeviceAttributes(device, descriptor): """Fill out the attributes of the device. Fills the devices HidAttributes and product string into the descriptor. Args: device: A handle to the open device descriptor: The DeviceDescriptor to populate with the attributes. Returns: None Raises: WindowsError when unable to obtain attributes or product string. """ attributes = HidAttributes() result = hid.HidD_GetAttributes(device, ctypes.byref(attributes)) if not result: raise ctypes.WinError() buf = ctypes.create_string_buffer(1024) result = hid.HidD_GetProductString(device, buf, 1024) if not result: raise ctypes.WinError() descriptor.vendor_id = attributes.VendorID descriptor.product_id = attributes.ProductID descriptor.product_string = ctypes.wstring_at(buf)
0.013111
def from_payload(self, payload):
    """Init frame from binary data."""
    self.major_version = payload[0] * 256 + payload[1]
    self.minor_version = payload[2] * 256 + payload[3]
0.010363
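The same big-endian decoding as `from_payload` above, shown standalone; the payload bytes are made up, and `struct.unpack` is an equivalent alternative:

import struct

payload = bytes([0x00, 0x02, 0x00, 0x0F])   # made-up version payload

major = payload[0] * 256 + payload[1]       # 2
minor = payload[2] * 256 + payload[3]       # 15

# Equivalent: two big-endian unsigned 16-bit integers
major2, minor2 = struct.unpack(">HH", payload)
assert (major, minor) == (major2, minor2) == (2, 15)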
def get_comment_admin_session(self, proxy): """Gets the ``OsidSession`` associated with the comment administration service. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.commenting.CommentAdminSession) - a ``CommentAdminSession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_comment_admin()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_comment_admin()`` is ``true``.* """ if not self.supports_comment_admin(): raise errors.Unimplemented() # pylint: disable=no-member return sessions.CommentAdminSession(proxy=proxy, runtime=self._runtime)
0.003663
def _notify_media_transport_available(self, path, transport):
    """
    Called by the endpoint when a new media transport is
    available
    """
    self.source = BTAudioSource(dev_path=path)
    self.state = self.source.State
    self.source.add_signal_receiver(self._property_change_event_handler,
                                    BTAudioSource.SIGNAL_PROPERTY_CHANGED,  # noqa
                                    transport)
0.004264
def dump_privatekey(type, pkey, cipher=None, passphrase=None): """ Dump the private key *pkey* into a buffer string encoded with the type *type*. Optionally (if *type* is :const:`FILETYPE_PEM`) encrypting it using *cipher* and *passphrase*. :param type: The file type (one of :const:`FILETYPE_PEM`, :const:`FILETYPE_ASN1`, or :const:`FILETYPE_TEXT`) :param PKey pkey: The PKey to dump :param cipher: (optional) if encrypted PEM format, the cipher to use :param passphrase: (optional) if encrypted PEM format, this can be either the passphrase to use, or a callback for providing the passphrase. :return: The buffer with the dumped key in :rtype: bytes """ bio = _new_mem_buf() if not isinstance(pkey, PKey): raise TypeError("pkey must be a PKey") if cipher is not None: if passphrase is None: raise TypeError( "if a value is given for cipher " "one must also be given for passphrase") cipher_obj = _lib.EVP_get_cipherbyname(_byte_string(cipher)) if cipher_obj == _ffi.NULL: raise ValueError("Invalid cipher name") else: cipher_obj = _ffi.NULL helper = _PassphraseHelper(type, passphrase) if type == FILETYPE_PEM: result_code = _lib.PEM_write_bio_PrivateKey( bio, pkey._pkey, cipher_obj, _ffi.NULL, 0, helper.callback, helper.callback_args) helper.raise_if_problem() elif type == FILETYPE_ASN1: result_code = _lib.i2d_PrivateKey_bio(bio, pkey._pkey) elif type == FILETYPE_TEXT: if _lib.EVP_PKEY_id(pkey._pkey) != _lib.EVP_PKEY_RSA: raise TypeError("Only RSA keys are supported for FILETYPE_TEXT") rsa = _ffi.gc( _lib.EVP_PKEY_get1_RSA(pkey._pkey), _lib.RSA_free ) result_code = _lib.RSA_print(bio, rsa, 0) else: raise ValueError( "type argument must be FILETYPE_PEM, FILETYPE_ASN1, or " "FILETYPE_TEXT") _openssl_assert(result_code != 0) return _bio_to_string(bio)
0.000472
def find_table_links(self):
    """
    When given a url, this function will find all the available
    table names for that EPA dataset.
    """
    html = urlopen(self.model_url).read()
    doc = lh.fromstring(html)
    href_list = [area.attrib['href'] for area in doc.cssselect('map area')]
    tables = self._inception_table_links(href_list)
    return tables
0.005013
def primes(n):
    """
    Simple test function
    Taken from http://www.huyng.com/posts/python-performance-analysis/
    """
    if n == 2:
        return [2]
    elif n < 2:
        return []
    s = list(range(3, n + 1, 2))
    mroot = n ** 0.5
    half = (n + 1) // 2 - 1
    i = 0
    m = 3
    while m <= mroot:
        if s[i]:
            j = (m * m - 3) // 2
            s[j] = 0
            while j < half:
                s[j] = 0
                j += m
        i = i + 1
        m = 2 * i + 3
    return [2] + [x for x in s if x]
0.031068
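A quick sanity check of the `primes` sieve above against a brute-force primality test; purely illustrative:

def is_prime(x):
    return x >= 2 and all(x % d for d in range(2, int(x ** 0.5) + 1))

print(primes(30))
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
assert primes(200) == [x for x in range(2, 201) if is_prime(x)]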
def Entry(self, name, directory=None, create=1):
    """
    Create `SCons.Node.FS.Entry`
    """
    return self._create_node(name, self.env.fs.Entry, directory, create)
0.011765
def calculate_prorated_values():
    """
    A utility function to prompt for a rate (a string in units per
    unit time), and print that same rate for various time periods.
    """
    rate = six.moves.input("Enter the rate (3/hour, 50/month)> ")
    res = re.match(r'(?P<value>[\d.]+)/(?P<period>\w+)$', rate).groupdict()
    value = float(res['value'])
    value_per_second = value / get_period_seconds(res['period'])
    for period in ('minute', 'hour', 'day', 'month', 'year'):
        period_value = value_per_second * get_period_seconds(period)
        print("per {period}: {period_value}".format(**locals()))
0.020725
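The rate-parsing step of `calculate_prorated_values` above, shown standalone; `get_period_seconds` is replaced by a small hypothetical lookup table:

import re

# Hypothetical stand-in for get_period_seconds
PERIOD_SECONDS = {'minute': 60, 'hour': 3600, 'day': 86400,
                  'month': 30 * 86400, 'year': 365 * 86400}

rate = "50/month"
res = re.match(r'(?P<value>[\d.]+)/(?P<period>\w+)$', rate).groupdict()
value_per_second = float(res['value']) / PERIOD_SECONDS[res['period']]

for period in ('minute', 'hour', 'day', 'month', 'year'):
    print("per {}: {:.4f}".format(period, value_per_second * PERIOD_SECONDS[period]))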
def perform_create(self, serializer):
    """ determine user when node is added """
    if serializer.instance is None:
        serializer.save(user=self.request.user)
0.011173
def leave_group(self, group_jid):
    """
    Leaves a specific group

    :param group_jid: The JID of the group to leave
    """
    log.info("[+] Leaving group {}".format(group_jid))
    return self._send_xmpp_element(group_adminship.LeaveGroupRequest(group_jid))
0.010345
def one_storage_per_feeder(edisgo, storage_timeseries, storage_nominal_power=None, **kwargs): """ Allocates the given storage capacity to multiple smaller storages. For each feeder with load or voltage issues it is checked if integrating a storage will reduce peaks in the feeder, starting with the feeder with the highest theoretical grid expansion costs. A heuristic approach is used to estimate storage sizing and siting while storage operation is carried over from the given storage operation. Parameters ----------- edisgo : :class:`~.grid.network.EDisGo` storage_timeseries : :pandas:`pandas.DataFrame<dataframe>` Total active and reactive power time series that will be allocated to the smaller storages in feeders with load or voltage issues. Columns of the dataframe are 'p' containing active power time series in kW and 'q' containing the reactive power time series in kvar. Index is a :pandas:`pandas.DatetimeIndex<datetimeindex>`. storage_nominal_power : :obj:`float` or None Nominal power in kW that will be allocated to the smaller storages in feeders with load or voltage issues. If no nominal power is provided the maximum active power given in `storage_timeseries` is used. Default: None. debug : :obj:`Boolean`, optional If dedug is True a dataframe with storage size and path to storage of all installed and possibly discarded storages is saved to a csv file and a plot with all storage positions is created and saved, both to the current working directory with filename `storage_results_{MVgrid_id}`. Default: False. check_costs_reduction : :obj:`Boolean` or :obj:`str`, optional This parameter specifies when and whether it should be checked if a storage reduced grid expansion costs or not. It can be used as a safety check but can be quite time consuming. Possible options are: * 'each_feeder' Costs reduction is checked for each feeder. If the storage did not reduce grid expansion costs it is discarded. * 'once' Costs reduction is checked after the total storage capacity is allocated to the feeders. If the storages did not reduce grid expansion costs they are all discarded. * False Costs reduction is never checked. Default: False. """ def _feeder_ranking(grid_expansion_costs): """ Get feeder ranking from grid expansion costs DataFrame. MV feeders are ranked descending by grid expansion costs that are attributed to that feeder. Parameters ---------- grid_expansion_costs : :pandas:`pandas.DataFrame<dataframe>` grid_expansion_costs DataFrame from :class:`~.grid.network.Results` of the copied edisgo object. Returns ------- :pandas:`pandas.Series<series>` Series with ranked MV feeders (in the copied graph) of type :class:`~.grid.components.Line`. Feeders are ranked by total grid expansion costs of all measures conducted in the feeder. The feeder with the highest costs is in the first row and the feeder with the lowest costs in the last row. """ return grid_expansion_costs.groupby( ['mv_feeder'], sort=False).sum().reset_index().sort_values( by=['total_costs'], ascending=False)['mv_feeder'] def _shortest_path(node): if isinstance(node, LVStation): return len(nx.shortest_path( node.mv_grid.graph, node.mv_grid.station, node)) else: return len(nx.shortest_path( node.grid.graph, node.grid.station, node)) def _find_battery_node(edisgo, critical_lines_feeder, critical_nodes_feeder): """ Evaluates where to install the storage. Parameters ----------- edisgo : :class:`~.grid.network.EDisGo` The original edisgo object. 
critical_lines_feeder : :pandas:`pandas.DataFrame<dataframe>` Dataframe containing over-loaded lines in MV feeder, their maximum relative over-loading and the corresponding time step. See :func:`edisgo.flex_opt.check_tech_constraints.mv_line_load` for more information. critical_nodes_feeder : :obj:`list` List with all nodes in MV feeder with voltage issues. Returns ------- :obj:`float` Node where storage is installed. """ # if there are overloaded lines in the MV feeder the battery storage # will be installed at the node farthest away from the MV station if not critical_lines_feeder.empty: logger.debug("Storage positioning due to overload.") # dictionary with nodes and their corresponding path length to # MV station path_length_dict = {} for l in critical_lines_feeder.index: nodes = l.grid.graph.nodes_from_line(l) for node in nodes: path_length_dict[node] = _shortest_path(node) # return node farthest away return [_ for _ in path_length_dict.keys() if path_length_dict[_] == max( path_length_dict.values())][0] # if there are voltage issues in the MV grid the battery storage will # be installed at the first node in path that exceeds 2/3 of the line # length from station to critical node with highest voltage deviation if critical_nodes_feeder: logger.debug("Storage positioning due to voltage issues.") node = critical_nodes_feeder[0] # get path length from station to critical node get_weight = lambda u, v, data: data['line'].length path_length = dijkstra_shortest_path_length( edisgo.network.mv_grid.graph, edisgo.network.mv_grid.station, get_weight, target=node) # find first node in path that exceeds 2/3 of the line length # from station to critical node farthest away from the station path = nx.shortest_path(edisgo.network.mv_grid.graph, edisgo.network.mv_grid.station, node) return next(j for j in path if path_length[j] >= path_length[node] * 2 / 3) return None def _calc_storage_size(edisgo, feeder, max_storage_size): """ Calculates storage size that reduces residual load. Parameters ----------- edisgo : :class:`~.grid.network.EDisGo` The original edisgo object. feeder : :class:`~.grid.components.Line` MV feeder the storage will be connected to. The line object is an object from the copied graph. Returns ------- :obj:`float` Storage size that reduced the residual load in the feeder. 
""" step_size = 200 sizes = [0] + list(np.arange( p_storage_min, max_storage_size + 0.5 * step_size, step_size)) p_feeder = edisgo.network.results.pfa_p.loc[:, repr(feeder)] q_feeder = edisgo.network.results.pfa_q.loc[:, repr(feeder)] p_slack = edisgo.network.pypsa.generators_t.p.loc[ :, 'Generator_slack'] * 1e3 # get sign of p and q l = edisgo.network.pypsa.lines.loc[repr(feeder), :] mv_station_bus = 'bus0' if l.loc['bus0'] == 'Bus_'.format( repr(edisgo.network.mv_grid.station)) else 'bus1' if mv_station_bus == 'bus0': diff = edisgo.network.pypsa.lines_t.p1.loc[:, repr(feeder)] - \ edisgo.network.pypsa.lines_t.p0.loc[:, repr(feeder)] diff_q = edisgo.network.pypsa.lines_t.q1.loc[:, repr(feeder)] - \ edisgo.network.pypsa.lines_t.q0.loc[:, repr(feeder)] else: diff = edisgo.network.pypsa.lines_t.p0.loc[:, repr(feeder)] - \ edisgo.network.pypsa.lines_t.p1.loc[:, repr(feeder)] diff_q = edisgo.network.pypsa.lines_t.q0.loc[:, repr(feeder)] - \ edisgo.network.pypsa.lines_t.q1.loc[:, repr(feeder)] p_sign = pd.Series([-1 if _ < 0 else 1 for _ in diff], index=p_feeder.index) q_sign = pd.Series([-1 if _ < 0 else 1 for _ in diff_q], index=p_feeder.index) # get allowed load factors per case lf = {'feedin_case': edisgo.network.config[ 'grid_expansion_load_factors']['mv_feedin_case_line'], 'load_case': network.config[ 'grid_expansion_load_factors']['mv_load_case_line']} # calculate maximum apparent power for each storage size to find # storage size that minimizes apparent power in the feeder p_feeder = p_feeder.multiply(p_sign) q_feeder = q_feeder.multiply(q_sign) s_max = [] for size in sizes: share = size / storage_nominal_power p_storage = storage_timeseries.p * share q_storage = storage_timeseries.q * share p_total = p_feeder + p_storage q_total = q_feeder + q_storage p_hv_mv_station = p_slack - p_storage lf_ts = p_hv_mv_station.apply( lambda _: lf['feedin_case'] if _ < 0 else lf['load_case']) s_max_ts = (p_total ** 2 + q_total ** 2).apply( sqrt).divide(lf_ts) s_max.append(max(s_max_ts)) return sizes[pd.Series(s_max).idxmin()] def _critical_nodes_feeder(edisgo, feeder): """ Returns all nodes in MV feeder with voltage issues. Parameters ----------- edisgo : :class:`~.grid.network.EDisGo` The original edisgo object. feeder : :class:`~.grid.components.Line` MV feeder the storage will be connected to. The line object is an object from the copied graph. Returns ------- :obj:`list` List with all nodes in MV feeder with voltage issues. """ # get all nodes with voltage issues in MV grid critical_nodes = check_tech_constraints.mv_voltage_deviation( edisgo.network, voltage_levels='mv') if critical_nodes: critical_nodes = critical_nodes[edisgo.network.mv_grid] else: return [] # filter nodes with voltage issues in feeder critical_nodes_feeder = [] for n in critical_nodes.index: if repr(n.mv_feeder) == repr(feeder): critical_nodes_feeder.append(n) return critical_nodes_feeder def _critical_lines_feeder(edisgo, feeder): """ Returns all lines in MV feeder with overload issues. Parameters ----------- edisgo : :class:`~.grid.network.EDisGo` The original edisgo object. feeder : :class:`~.grid.components.Line` MV feeder the storage will be connected to. The line object is an object from the copied graph. Returns ------- :pandas:`pandas.DataFrame<dataframe>` Dataframe containing over-loaded lines in MV feeder, their maximum relative over-loading and the corresponding time step. See :func:`edisgo.flex_opt.check_tech_constraints.mv_line_load` for more information. 
""" # get all overloaded MV lines critical_lines = check_tech_constraints.mv_line_load(edisgo.network) # filter overloaded lines in feeder critical_lines_feeder = [] for l in critical_lines.index: if repr(tools.get_mv_feeder_from_line(l)) == repr(feeder): critical_lines_feeder.append(l) return critical_lines.loc[critical_lines_feeder, :] def _estimate_new_number_of_lines(critical_lines_feeder): number_parallel_lines = 0 for crit_line in critical_lines_feeder.index: number_parallel_lines += ceil(critical_lines_feeder.loc[ crit_line, 'max_rel_overload'] * crit_line.quantity) - \ crit_line.quantity return number_parallel_lines debug = kwargs.get('debug', False) check_costs_reduction = kwargs.get('check_costs_reduction', False) # global variables # minimum and maximum storage power to be connected to the MV grid p_storage_min = 300 p_storage_max = 4500 # remaining storage nominal power if storage_nominal_power is None: storage_nominal_power = max(abs(storage_timeseries.p)) p_storage_remaining = storage_nominal_power if debug: feeder_repr = [] storage_path = [] storage_repr = [] storage_size = [] # rank MV feeders by grid expansion costs # conduct grid reinforcement on copied edisgo object on worst-case time # steps grid_expansion_results_init = edisgo.reinforce( copy_graph=True, timesteps_pfa='snapshot_analysis') # only analyse storage integration if there were any grid expansion needs if grid_expansion_results_init.equipment_changes.empty: logger.debug('No storage integration necessary since there are no ' 'grid expansion needs.') return else: equipment_changes_reinforcement_init = \ grid_expansion_results_init.equipment_changes.loc[ grid_expansion_results_init.equipment_changes.iteration_step > 0] total_grid_expansion_costs = \ grid_expansion_results_init.grid_expansion_costs.total_costs.sum() if equipment_changes_reinforcement_init.empty: logger.debug('No storage integration necessary since there are no ' 'grid expansion needs.') return else: network = equipment_changes_reinforcement_init.index[ 0].grid.network # calculate grid expansion costs without costs for new generators # to be used in feeder ranking grid_expansion_costs_feeder_ranking = costs.grid_expansion_costs( network, without_generator_import=True) ranked_feeders = _feeder_ranking(grid_expansion_costs_feeder_ranking) count = 1 storage_obj_list = [] total_grid_expansion_costs_new = 'not calculated' for feeder in ranked_feeders.values: logger.debug('Feeder: {}'.format(count)) count += 1 # first step: find node where storage will be installed critical_nodes_feeder = _critical_nodes_feeder(edisgo, feeder) critical_lines_feeder = _critical_lines_feeder(edisgo, feeder) # get node the storage will be connected to (in original graph) battery_node = _find_battery_node(edisgo, critical_lines_feeder, critical_nodes_feeder) if battery_node: # add to output lists if debug: feeder_repr.append(repr(feeder)) storage_path.append(nx.shortest_path( edisgo.network.mv_grid.graph, edisgo.network.mv_grid.station, battery_node)) # second step: calculate storage size max_storage_size = min(p_storage_remaining, p_storage_max) p_storage = _calc_storage_size(edisgo, feeder, max_storage_size) # if p_storage is greater than or equal to the minimum storage # power required, do storage integration if p_storage >= p_storage_min: # third step: integrate storage share = p_storage / storage_nominal_power edisgo.integrate_storage( timeseries=storage_timeseries.p * share, position=battery_node, voltage_level='mv', timeseries_reactive_power=storage_timeseries.q * 
share) tools.assign_mv_feeder_to_nodes(edisgo.network.mv_grid) # get new storage object storage_obj = [_ for _ in edisgo.network.mv_grid.graph.nodes_by_attribute( 'storage') if _ in edisgo.network.mv_grid.graph.neighbors( battery_node)][0] storage_obj_list.append(storage_obj) logger.debug( 'Storage with nominal power of {} kW connected to ' 'node {} (path to HV/MV station {}).'.format( p_storage, battery_node, nx.shortest_path( battery_node.grid.graph, battery_node.grid.station, battery_node))) # fourth step: check if storage integration reduced grid # reinforcement costs or number of issues if check_costs_reduction == 'each_feeder': # calculate new grid expansion costs grid_expansion_results_new = edisgo.reinforce( copy_graph=True, timesteps_pfa='snapshot_analysis') total_grid_expansion_costs_new = \ grid_expansion_results_new.grid_expansion_costs.\ total_costs.sum() costs_diff = total_grid_expansion_costs - \ total_grid_expansion_costs_new if costs_diff > 0: logger.debug( 'Storage integration in feeder {} reduced grid ' 'expansion costs by {} kEuro.'.format( feeder, costs_diff)) if debug: storage_repr.append(repr(storage_obj)) storage_size.append(storage_obj.nominal_power) total_grid_expansion_costs = \ total_grid_expansion_costs_new else: logger.debug( 'Storage integration in feeder {} did not reduce ' 'grid expansion costs (costs increased by {} ' 'kEuro).'.format(feeder, -costs_diff)) tools.disconnect_storage(edisgo.network, storage_obj) p_storage = 0 if debug: storage_repr.append(None) storage_size.append(0) edisgo.integrate_storage( timeseries=storage_timeseries.p * 0, position=battery_node, voltage_level='mv', timeseries_reactive_power= storage_timeseries.q * 0) tools.assign_mv_feeder_to_nodes( edisgo.network.mv_grid) else: number_parallel_lines_before = \ _estimate_new_number_of_lines(critical_lines_feeder) edisgo.analyze() critical_lines_feeder_new = _critical_lines_feeder( edisgo, feeder) critical_nodes_feeder_new = _critical_nodes_feeder( edisgo, feeder) number_parallel_lines = _estimate_new_number_of_lines( critical_lines_feeder_new) # if there are critical lines check if number of parallel # lines was reduced if not critical_lines_feeder.empty: diff_lines = number_parallel_lines_before - \ number_parallel_lines # if it was not reduced check if there are critical # nodes and if the number was reduced if diff_lines <= 0: # if there are no critical nodes remove storage if not critical_nodes_feeder: logger.debug( 'Storage integration in feeder {} did not ' 'reduce number of critical lines (number ' 'increased by {}), storage ' 'is therefore removed.'.format( feeder, -diff_lines)) tools.disconnect_storage(edisgo.network, storage_obj) p_storage = 0 if debug: storage_repr.append(None) storage_size.append(0) edisgo.integrate_storage( timeseries=storage_timeseries.p * 0, position=battery_node, voltage_level='mv', timeseries_reactive_power= storage_timeseries.q * 0) tools.assign_mv_feeder_to_nodes( edisgo.network.mv_grid) else: logger.debug( 'Critical nodes in feeder {} ' 'before and after storage integration: ' '{} vs. 
{}'.format( feeder, critical_nodes_feeder, critical_nodes_feeder_new)) if debug: storage_repr.append(repr(storage_obj)) storage_size.append( storage_obj.nominal_power) else: logger.debug( 'Storage integration in feeder {} reduced ' 'number of critical lines.'.format(feeder)) if debug: storage_repr.append(repr(storage_obj)) storage_size.append(storage_obj.nominal_power) # if there are no critical lines else: logger.debug( 'Critical nodes in feeder {} ' 'before and after storage integration: ' '{} vs. {}'.format( feeder, critical_nodes_feeder, critical_nodes_feeder_new)) if debug: storage_repr.append(repr(storage_obj)) storage_size.append(storage_obj.nominal_power) # fifth step: if there is storage capacity left, rerun # the past steps for the next feeder in the ranking # list p_storage_remaining = p_storage_remaining - p_storage if not p_storage_remaining > p_storage_min: break else: logger.debug('No storage integration in feeder {}.'.format( feeder)) if debug: storage_repr.append(None) storage_size.append(0) edisgo.integrate_storage( timeseries=storage_timeseries.p * 0, position=battery_node, voltage_level='mv', timeseries_reactive_power=storage_timeseries.q * 0) tools.assign_mv_feeder_to_nodes(edisgo.network.mv_grid) else: logger.debug('No storage integration in feeder {} because there ' 'are neither overloading nor voltage issues.'.format( feeder)) if debug: storage_repr.append(None) storage_size.append(0) feeder_repr.append(repr(feeder)) storage_path.append([]) if check_costs_reduction == 'once': # check costs reduction and discard all storages if costs were not # reduced grid_expansion_results_new = edisgo.reinforce( copy_graph=True, timesteps_pfa='snapshot_analysis') total_grid_expansion_costs_new = \ grid_expansion_results_new.grid_expansion_costs. \ total_costs.sum() costs_diff = total_grid_expansion_costs - \ total_grid_expansion_costs_new if costs_diff > 0: logger.info( 'Storage integration in grid {} reduced grid ' 'expansion costs by {} kEuro.'.format( edisgo.network.id, costs_diff)) else: logger.info( 'Storage integration in grid {} did not reduce ' 'grid expansion costs (costs increased by {} ' 'kEuro).'.format(edisgo.network.id, -costs_diff)) for storage in storage_obj_list: tools.disconnect_storage(edisgo.network, storage) elif check_costs_reduction == 'each_feeder': # if costs redcution was checked after each storage only give out # total costs reduction if total_grid_expansion_costs_new == 'not calculated': costs_diff = 0 else: total_grid_expansion_costs = grid_expansion_results_init.\ grid_expansion_costs.total_costs.sum() costs_diff = total_grid_expansion_costs - \ total_grid_expansion_costs_new logger.info( 'Storage integration in grid {} reduced grid ' 'expansion costs by {} kEuro.'.format( edisgo.network.id, costs_diff)) if debug: plots.storage_size(edisgo.network.mv_grid, edisgo.network.pypsa, filename='storage_results_{}.pdf'.format( edisgo.network.id), lopf=False) storages_df = pd.DataFrame({'path': storage_path, 'repr': storage_repr, 'p_nom': storage_size}, index=feeder_repr) storages_df.to_csv('storage_results_{}.csv'.format(edisgo.network.id)) edisgo.network.results.storages_costs_reduction = pd.DataFrame( {'grid_expansion_costs_initial': total_grid_expansion_costs, 'grid_expansion_costs_with_storages': total_grid_expansion_costs_new}, index=[edisgo.network.id])
0.000549
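A minimal sketch of the "first node beyond 2/3 of the path length" placement rule used above for voltage issues. The node names and line lengths below are hypothetical and no edisgo objects are required.

# Path from the MV station to the critical node (made-up names).
path = ['station', 'n1', 'n2', 'n3', 'critical_node']
# Cumulative line length in km from the station to each node (made-up values).
path_length = {'station': 0.0, 'n1': 1.0, 'n2': 2.5, 'n3': 4.0,
               'critical_node': 5.0}

# First node whose distance from the station reaches 2/3 of the total path
# length, mirroring the next(...) expression in _find_battery_node above.
battery_node = next(
    node for node in path
    if path_length[node] >= path_length['critical_node'] * 2 / 3)
print(battery_node)  # 'n3' (4.0 km >= 3.33 km)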
def set_model_domain(model, domain): """ Sets the domain on the ONNX model. :param model: instance of an ONNX model :param domain: string containing the domain name of the model Example: :: from onnxmltools.utils import set_model_domain onnx_model = load_model("SqueezeNet.onnx") set_model_domain(onnx_model, "com.acme") """ if model is None or not isinstance(model, onnx_proto.ModelProto): raise ValueError("Model is not a valid ONNX model.") if not convert_utils.is_string_type(domain): raise ValueError("Domain must be a string type.") model.domain = domain
0.001555
def format_for_baseline_output(self): """ :rtype: dict """ results = self.json() for key in results: results[key] = sorted(results[key], key=lambda x: x['line_number']) plugins_used = list(map( lambda x: x.__dict__, self.plugins, )) plugins_used = sorted(plugins_used, key=lambda x: x['name']) return { 'generated_at': strftime("%Y-%m-%dT%H:%M:%SZ", gmtime()), 'exclude': { 'files': self.exclude_files, 'lines': self.exclude_lines, }, 'plugins_used': plugins_used, 'results': results, 'version': self.version, }
0.002729
def is_suspended(order_book_id, count=1):
    """
    Check whether a stock is suspended from trading for the whole day.

    :param str order_book_id: the order_book_id or symbol of a single stock
    :param int count: number of trading days to look back; defaults to the most
        recent data currently available
    :return: `bool` when count is 1; `pandas.DataFrame` when count > 1
    """
    dt = Environment.get_instance().calendar_dt.date()
    order_book_id = assure_stock_order_book_id(order_book_id)
    return Environment.get_instance().data_proxy.is_suspended(order_book_id, dt, count)
0.00432
def ids(cls, values, itype=None): ''' http://www.elasticsearch.org/guide/reference/query-dsl/ids-filter.html Filters documents that only have the provided ids. Note, this filter does not require the _id field to be indexed since it works using the _uid field. ''' instance = cls(ids={'values': values}) if itype is not None: instance['ids']['type'] = itype return instance
0.006912
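For reference, a minimal sketch of the filter body that the `ids` classmethod above builds; the plain dict below stands in for the library's Filter class, which is assumed to behave like a dict.

# Stand-in illustrating the ids-filter body built by the classmethod above.
# Real usage would look like SomeFilterClass.ids([1, 2, 3], itype='tweet').
values = [1, 2, 3]
itype = 'tweet'

ids_filter = {'ids': {'values': values}}
if itype is not None:
    ids_filter['ids']['type'] = itype

print(ids_filter)  # {'ids': {'values': [1, 2, 3], 'type': 'tweet'}}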
def start(self): """ Start the sensor """ # open device openni2.initialize(PrimesenseSensor.OPENNI2_PATH) self._device = openni2.Device.open_any() # open depth stream self._depth_stream = self._device.create_depth_stream() self._depth_stream.configure_mode(PrimesenseSensor.DEPTH_IM_WIDTH, PrimesenseSensor.DEPTH_IM_HEIGHT, PrimesenseSensor.FPS, openni2.PIXEL_FORMAT_DEPTH_1_MM) self._depth_stream.start() # open color stream self._color_stream = self._device.create_color_stream() self._color_stream.configure_mode(PrimesenseSensor.COLOR_IM_WIDTH, PrimesenseSensor.COLOR_IM_HEIGHT, PrimesenseSensor.FPS, openni2.PIXEL_FORMAT_RGB888) self._color_stream.camera.set_auto_white_balance(self._auto_white_balance) self._color_stream.camera.set_auto_exposure(self._auto_exposure) self._color_stream.start() # configure device if self._registration_mode == PrimesenseRegistrationMode.DEPTH_TO_COLOR: self._device.set_image_registration_mode(openni2.IMAGE_REGISTRATION_DEPTH_TO_COLOR) else: self._device.set_image_registration_mode(openni2.IMAGE_REGISTRATION_OFF) self._device.set_depth_color_sync_enabled(self._enable_depth_color_sync) self._running = True
0.005682
def fixMissingPythonLib(self, binaries): """Add the Python library if missing from the binaries. Some linux distributions (e.g. debian-based) statically build the Python executable to the libpython, so bindepend doesn't include it in its output. Darwin custom builds could possibly also have non-framework style libraries, so this method also checks for that variant as well. """ if is_aix: # Shared libs on AIX are archives with shared object members, thus the ".a" suffix. names = ('libpython%d.%d.a' % sys.version_info[:2],) elif is_unix: # Other *nix platforms. names = ('libpython%d.%d.so' % sys.version_info[:2],) elif is_darwin: names = ('Python', 'libpython%d.%d.dylib' % sys.version_info[:2]) else: return for (nm, fnm, typ) in binaries: for name in names: if typ == 'BINARY' and name in fnm: # lib found return # resume search using the first item in names name = names[0] if is_unix: lib = bindepend.findLibrary(name) if lib is None: raise IOError("Python library not found!") elif is_darwin: # On MacPython, Analysis.assemble is able to find the libpython with # no additional help, asking for config['python'] dependencies. # However, this fails on system python, because the shared library # is not listed as a dependency of the binary (most probably it's # opened at runtime using some dlopen trickery). lib = os.path.join(sys.exec_prefix, 'Python') if not os.path.exists(lib): raise IOError("Python library not found!") binaries.append((os.path.split(lib)[1], lib, 'BINARY'))
0.003132
def get_node_list(self) -> list: """Get a list of nodes. Only the manager nodes can retrieve all the nodes Returns: list, all the ids of the nodes in swarm """ # Initialising empty list nodes = [] # Raise an exception if we are not a manager if not self._manager: raise RuntimeError('Only the Swarm manager node ' 'can retrieve all the nodes.') node_list = self._client.nodes.list() for n_list in node_list: nodes.append(n_list.id) return nodes
0.003311
def process_field(elt, ascii=False): """Process a 'field' tag. """ # NOTE: if there is a variable defined in this field and it is different # from the default, we could change the value and restart. scale = np.uint8(1) if elt.get("type") == "bitfield" and not ascii: current_type = ">u" + str(int(elt.get("length")) // 8) scale = np.dtype(current_type).type(1) elif (elt.get("length") is not None): if ascii: add = 33 else: add = 0 current_type = "S" + str(int(elt.get("length")) + add) else: current_type = TYPEC[elt.get("type")] try: scale = (10 / float(elt.get("scaling-factor", "10").replace("^", "e"))) except ValueError: scale = (10 / np.array( elt.get("scaling-factor").replace("^", "e").split(","), dtype=np.float)) return ((elt.get("name"), current_type, scale))
0.001026
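The dtype and scale arithmetic in `process_field` above can be checked in isolation; the field lengths and scaling factor below are made-up values and numpy is the only dependency.

import numpy as np

# A 16-bit bitfield maps to a big-endian unsigned integer dtype, as above.
bitfield_dtype = ">u" + str(16 // 8)
print(np.dtype(bitfield_dtype))        # 2-byte big-endian unsigned integer

# An ASCII field of length 5 gets 33 extra bytes -> fixed-width byte string.
ascii_dtype = "S" + str(5 + 33)
print(np.dtype(ascii_dtype))           # 38-byte string dtype

# "10^-2" parses as 10e-2 == 0.1, so the scale becomes 10 / 0.1 = 100.0.
scale = 10 / float("10^-2".replace("^", "e"))
print(scale)                           # 100.0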
def check_misc(text): """Avoid mixing metaphors. source: Garner's Modern American Usage source_url: http://bit.ly/1T4alrY """ err = "mixed_metaphors.misc.misc" msg = u"Mixed metaphor. Try '{}'." preferences = [ ["cream rises to the top", ["cream rises to the crop"]], ["fasten your seatbelts", ["button your seatbelts"]], ["a minute to decompress", ["a minute to decompose"]], ["sharpest tool in the shed", ["sharpest marble in the (shed|box)"]], ["not rocket science", ["not rocket surgery"]], ] return preferred_forms_check(text, preferences, err, msg)
0.001527
def get_transition(self, # suppress(too-many-arguments) line, line_index, column, is_escaped, comment_system_transitions, eof=False): """Return a parser state, a move-ahead amount, and an append range. If this parser state should terminate and return back to the TEXT state, then return that state and also any corresponding chunk that would have been yielded as a result. """ raise NotImplementedError("""Cannot instantiate base ParserState""")
0.012638
def wait(self, timeout=None): """ Block until the container stops, then return its exit code. Similar to the ``podman wait`` command. :param timeout: int, microseconds to wait before polling for completion :return: int, exit code """ timeout = ["--interval=%s" % timeout] if timeout else [] cmdline = ["podman", "wait"] + timeout + [self._id or self.get_id()] return run_cmd(cmdline, return_output=True)
0.004202
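The helper above shells out to the podman CLI; an equivalent call with only the standard library is sketched below. The container id is a placeholder and the snippet's `run_cmd` helper is not required.

import subprocess

container_id = "my-container"  # placeholder container id or name

# Block until the container exits; `podman wait` prints the exit code.
out = subprocess.run(
    ["podman", "wait", container_id],
    check=True, capture_output=True, text=True,
).stdout
exit_code = int(out.strip())
print(exit_code)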
def ratio_beyond_r_sigma(x, r):
    """
    Ratio of values that are more than r*std(x) (so r sigma) away from the mean of x.

    :param x: the time series to calculate the feature of
    :type x: iterable
    :param r: the multiple of the standard deviation used as the distance threshold
    :type r: float
    :return: the value of this feature
    :return type: float
    """
    if not isinstance(x, (np.ndarray, pd.Series)):
        x = np.asarray(x)
    return np.sum(np.abs(x - np.mean(x)) > r * np.std(x))/x.size
0.004773
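A quick sanity check of the feature above: for roughly normal data the fraction of points beyond 2 standard deviations should land near 0.05. The function is restated inline so the sketch runs on its own.

import numpy as np

def ratio_beyond_r_sigma(x, r):
    # Fraction of samples farther than r standard deviations from the mean.
    x = np.asarray(x)
    return np.sum(np.abs(x - np.mean(x)) > r * np.std(x)) / x.size

rng = np.random.default_rng(0)
x = rng.normal(size=100_000)
print(ratio_beyond_r_sigma(x, 2))  # close to the theoretical value ~0.0455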
def from_indra_statements(stmts, name: Optional[str] = None, version: Optional[str] = None, description: Optional[str] = None, authors: Optional[str] = None, contact: Optional[str] = None, license: Optional[str] = None, copyright: Optional[str] = None, disclaimer: Optional[str] = None, ): """Import a model from :mod:`indra`. :param List[indra.statements.Statement] stmts: A list of statements :param name: The graph's name :param version: The graph's version. Recommended to use `semantic versioning <http://semver.org/>`_ or ``YYYYMMDD`` format. :param description: The description of the graph :param authors: The authors of this graph :param contact: The contact email for this graph :param license: The license for this graph :param copyright: The copyright for this graph :param disclaimer: The disclaimer for this graph :rtype: pybel.BELGraph """ from indra.assemblers.pybel import PybelAssembler pba = PybelAssembler( stmts=stmts, name=name, version=version, description=description, authors=authors, contact=contact, license=license, copyright=copyright, disclaimer=disclaimer, ) graph = pba.make_model() return graph
0.001309
def _handle(self, nick, target, message, **kwargs):
        """ client callback entry point """
        for regex, (func, pattern) in self.routes.items():
            match = regex.match(message)
            if match:
                self.client.loop.create_task(
                    func(nick, target, message, match, **kwargs))
0.006135
def _maybe_classify(self, y, k, cutoffs): '''Helper method for classifying continuous data. ''' rows, cols = y.shape if cutoffs is None: if self.fixed: mcyb = mc.Quantiles(y.flatten(), k=k) yb = mcyb.yb.reshape(y.shape) cutoffs = mcyb.bins k = len(cutoffs) return yb, cutoffs[:-1], k else: yb = np.array([mc.Quantiles(y[:, i], k=k).yb for i in np.arange(cols)]).transpose() return yb, None, k else: cutoffs = list(cutoffs) + [np.inf] cutoffs = np.array(cutoffs) yb = mc.User_Defined(y.flatten(), np.array(cutoffs)).yb.reshape( y.shape) k = len(cutoffs) return yb, cutoffs[:-1], k
0.002315
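When mapclassify is not available, the quantile step in `_maybe_classify` can be approximated with plain numpy; the sketch below is a rough stand-in for `mc.Quantiles`, shown only to make the binning concrete, not a drop-in replacement.

import numpy as np

y = np.array([3.0, 1.0, 7.0, 4.0, 9.0, 2.0, 8.0, 5.0, 6.0, 0.0])
k = 4

# Upper bin edges at the k quantiles (mc.Quantiles exposes these as .bins).
cutoffs = np.percentile(y, np.linspace(100 / k, 100, k))
# Bin index for each observation (mc.Quantiles exposes these as .yb).
yb = np.searchsorted(cutoffs, y)
print(cutoffs)  # upper edges: 2.25, 4.5, 6.75, 9.0
print(yb)       # [1 0 3 1 3 0 3 2 2 0]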
def add_role(self, role, term, start_date=None, end_date=None, **kwargs): """ Examples: leg.add_role('member', term='2009', chamber='upper', party='Republican', district='10th') """ self['roles'].append(dict(role=role, term=term, start_date=start_date, end_date=end_date, **kwargs))
0.006993
def list_rooms(self, message): """what are the rooms?: List all the rooms I know about.""" context = {"rooms": self.available_rooms.values(), } self.say(rendered_template("rooms.html", context), message=message, html=True)
0.012195
def GetMemBalloonedMB(self): '''Retrieves the amount of memory that has been reclaimed from this virtual machine by the vSphere memory balloon driver (also referred to as the "vmmemctl" driver).''' counter = c_uint() ret = vmGuestLib.VMGuestLib_GetMemBalloonedMB(self.handle.value, byref(counter)) if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret) return counter.value
0.013423
def _send(self, messages): """A helper method that does the actual sending.""" if len(messages) == 1: to_send = self._build_message(messages[0]) if to_send is False: # The message was missing recipients. # Bail. return False else: pm_messages = list(map(self._build_message, messages)) pm_messages = [m for m in pm_messages if m] if len(pm_messages) == 0: # If after filtering, there aren't any messages # to send, bail. return False to_send = PMBatchMail(messages=pm_messages) try: to_send.send(test=self.test_mode) except: if self.fail_silently: return False raise return True
0.003546
def get_requirements(filename=None):
    """
    Read requirements from 'requirements.txt'
    :return: requirements
    :rtype: list
    """
    if filename is None:
        filename = 'requirements.txt'
    file = WORK_DIR / filename
    install_reqs = parse_requirements(str(file), session='hack')
    return [str(ir.req) for ir in install_reqs]
0.002849
def setObjectName( self, objectName ): """ Updates the style sheet for this line edit when the name changes. :param objectName | <str> """ super(XLineEdit, self).setObjectName(objectName) self.adjustStyleSheet()
0.017857
def crop(self, doy, depth, lat, lon, var):
        """ Crop a subset of the dataset for each var

            Given doy, depth, lat and lon, it returns the smallest subset
            that still contains the requested coordinates inside it.
            It handles special cases like a region around Greenwich and
            the international date line.

            Accepts 0 to 360 and -180 to 180 longitude reference.

            It extends time and longitude coordinates, simplifying the use
            of series. For example, a ship track can be requested with
            a longitude sequence like [352, 358, 364, 369, 380], and
            the equivalent for day of year above 365.
        """
        dims, idx = cropIndices(self.dims, lat, lon, depth)
        dims['time'] = np.atleast_1d(doy)
        idx['tn'] = np.arange(dims['time'].size)
        # Temporary solution. Create an object for CARS dataset
        xn = idx['xn']
        yn = idx['yn']
        zn = idx['zn']
        tn = idx['tn']
        subset = {}
        for v in var:
            if v == 'mn':
                mn = []
                for d in doy:
                    t = 2 * np.pi * d/366
                    # Naive solution
                    # FIXME: This is not an efficient solution.
                    value = self.ncs[0]['mean'][:, yn, xn]
                    value[:64] += self.ncs[0]['an_cos'][:, yn, xn] * np.cos(t) + \
                        self.ncs[0]['an_sin'][:, yn, xn] * np.sin(t)
                    value[:55] += self.ncs[0]['sa_cos'][:, yn, xn] * np.cos(2*t) + \
                        self.ncs[0]['sa_sin'][:, yn, xn] * np.sin(2*t)
                    mn.append(value[zn])
                subset['mn'] = ma.asanyarray(mn)
            else:
                subset[v] = ma.asanyarray(
                    doy.size * [self[v][zn, yn, xn]])
        return subset, dims
0.002112
def read(self, file_or_filename): """ Loads a pickled case. """ if isinstance(file_or_filename, basestring): fname = os.path.basename(file_or_filename) logger.info("Unpickling case file [%s]." % fname) file = None try: file = open(file_or_filename, "rb") except: logger.error("Error opening %s." % fname) return None finally: if file is not None: case = pickle.load(file) file.close() else: file = file_or_filename case = pickle.load(file) return case
0.004329
def random_peak_magnitudes(
        log,
        peakMagnitudeDistributions,
        snTypesArray,
        plot=True):
    """
    *Generate a numpy array of random (distribution weighted) peak magnitudes for the given sn types.*

    **Key Arguments:**
        - ``log`` -- logger
        - ``peakMagnitudeDistributions`` -- yaml style dictionary of peak magnitude distributions
        - ``snTypesArray`` -- the pre-generated array of random sn types
        - ``plot`` -- generate plot?

    **Return:**
        - ``peakMagArray`` -- numpy array of randomly drawn peak magnitudes
    """
    ################ > IMPORTS ################
    ## STANDARD LIB ##
    ## THIRD PARTY ##
    import matplotlib.pyplot as plt
    import numpy as np
    ## LOCAL APPLICATION ##

    ################ >ACTION(S) ################
    magDistributions = {}
    for snType, peakMag in peakMagnitudeDistributions['magnitude'].iteritems():
        sigma = peakMagnitudeDistributions['sigma'][snType]
        magDistributions[snType] = [peakMag, sigma]

    peakMagList = []
    for item in snTypesArray:
        thisPeak = magDistributions[item][
            1] * np.random.randn() + magDistributions[item][0]
        peakMagList.append(thisPeak)

    peakMagArray = np.array(peakMagList)
    # log.debug('peakMagArray %s' % (peakMagArray,))

    return peakMagArray
0.006192
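The core of the generator above is a per-type Gaussian draw; the sketch below reproduces just that step. The type names and peak-magnitude parameters are illustrative, not taken from the source.

import numpy as np

# Hypothetical peak-magnitude distributions: type -> (mean peak mag, sigma).
mag_distributions = {"Ia": (-19.3, 0.3), "II": (-16.8, 1.0)}

sn_types = np.array(["Ia", "II", "Ia", "II", "Ia"])
peak_mags = np.array([
    mag_distributions[t][1] * np.random.randn() + mag_distributions[t][0]
    for t in sn_types
])
print(peak_mags)  # five draws centred on -19.3 or -16.8 depending on type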
def save(self, config=None): """Saves config to config files. Args: config (configobj.ConfigObj): optional config object to save. Raises: dvc.config.ConfigError: thrown if failed to write config file. """ if config is not None: clist = [config] else: clist = [ self._system_config, self._global_config, self._repo_config, self._local_config, ] for conf in clist: if conf.filename is None: continue try: logger.debug("Writing '{}'.".format(conf.filename)) dname = os.path.dirname(os.path.abspath(conf.filename)) try: os.makedirs(dname) except OSError as exc: if exc.errno != errno.EEXIST: raise conf.write() except Exception as exc: msg = "failed to write config '{}'".format(conf.filename) raise ConfigError(msg, exc)
0.001771
def save(self, request, connect=False): """ Saves a new account. Note that while the account is new, the user may be an existing one (when connecting accounts) """ assert not self.is_existing user = self.user user.save() self.account.user = user self.account.save() if app_settings.STORE_TOKENS and self.token: self.token.account = self.account self.token.save() if connect: # TODO: Add any new email addresses automatically? pass else: setup_user_email(request, user, self.email_addresses)
0.003096
def build_uri(orig_uriparts, kwargs): """ Build the URI from the original uriparts and kwargs. Modifies kwargs. """ uriparts = [] for uripart in orig_uriparts: # If this part matches a keyword argument (starting with _), use # the supplied value. Otherwise, just use the part. if uripart.startswith("_"): part = (str(kwargs.pop(uripart, uripart))) else: part = uripart uriparts.append(part) uri = '/'.join(uriparts) # If an id kwarg is present and there is no id to fill in in # the list of uriparts, assume the id goes at the end. id = kwargs.pop('id', None) if id: uri += "/%s" % (id) return uri
0.001399
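Assuming the `build_uri` helper above is in scope, the calls below show how templated parts and a trailing id are filled in; the endpoint names are illustrative.

# Templated parts (leading underscore) are replaced from kwargs, and the
# helper modifies kwargs in place by popping the consumed entries.
kwargs = {"_id": "12345", "count": 20}
print(build_uri(["statuses", "_id", "retweets"], kwargs))
# -> statuses/12345/retweets   (kwargs is now {"count": 20})

# With no templated slot, an "id" kwarg is appended at the end.
kwargs = {"id": "54321"}
print(build_uri(["users", "show"], kwargs))
# -> users/show/54321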
def render(self, name, value, attrs=None, renderer=None): """Include a hidden input to store the serialized upload value.""" location = getattr(value, '_seralized_location', '') if location and not hasattr(value, 'url'): value.url = '#' if hasattr(self, 'get_template_substitution_values'): # Django 1.8-1.10 self.template_with_initial = ( '%(initial_text)s: %(initial)s %(clear_template)s' '<br />%(input_text)s: %(input)s') attrs = attrs or {} attrs.update({'data-upload-url': self.url}) hidden_name = self.get_hidden_name(name) kwargs = {} if django_version >= (1, 11): kwargs['renderer'] = renderer parent = super(StickyUploadWidget, self).render(name, value, attrs=attrs, **kwargs) hidden = forms.HiddenInput().render(hidden_name, location, **kwargs) return mark_safe(parent + '\n' + hidden)
0.003018
def _get_reference_classnames(self, classname, namespace, resultclass_name,
                                  role):
        """
        Get list of classnames that are references for which this classname
        is a target, filtered by the result_class and role parameters if they
        are not None.

        This is a common method used by all of the other reference and
        associator methods to create a list of reference classnames.

        Returns:
            list of classnames that satisfy the criteria.
        """
        self._validate_namespace(namespace)
        result_classes = self._classnamedict(resultclass_name, namespace)

        rtn_classnames_set = set()
        role = role.lower() if role else role

        for cl in self._get_association_classes(namespace):
            for prop in six.itervalues(cl.properties):
                if prop.type == 'reference' and \
                        self._ref_prop_matches(prop, classname, cl.classname,
                                               result_classes,
                                               role):
                    rtn_classnames_set.add(cl.classname)
        return list(rtn_classnames_set)
0.002433
def long_path_formatter(line, max_width=pd.get_option('max_colwidth')):
    """
    If a path is longer than max_width, it substitutes it with the first and last
    elements, joined by "...". For example 'this.is.a.long.path.which.we.want.to.shorten'
    becomes 'this...shorten'

    :param line: the dotted path to shorten
    :param max_width: maximum allowed width of the returned string
    :return: the (possibly shortened) path
    """

    if len(line) > max_width:

        tokens = line.split(".")
        trial1 = "%s...%s" % (tokens[0], tokens[-1])

        if len(trial1) > max_width:

            # keep only the tail of the last token so "..." + tail fits
            return "...%s" % (tokens[-1][-(max_width - 3):])

        else:

            return trial1

    else:

        return line
0.006359
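A short demonstration of the formatter above, assuming it is importable; a narrow width is passed explicitly so both branches are exercised.

path = "this.is.a.long.path.which.we.want.to.shorten"

# Fits after joining the first and last tokens.
print(long_path_formatter(path, max_width=20))
# -> this...shorten

# Too long even as "first...last", so only the tail of the last token is kept.
print(long_path_formatter("averyveryverylongmodulename.attribute", max_width=10))
# -> ...tribute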
def ap_state(value, failure_string=None):
    """
    Converts a state's name, postal abbreviation or FIPS to A.P. style.

    Example usage:

        >>> ap_state("California")
        'Calif.'

    """
    try:
        return statestyle.get(value).ap
    except:
        if failure_string:
            return failure_string
        else:
            return value
0.013333
def logged_in(): """ Method called by Strava (redirect) that includes parameters. - state - code - error """ error = request.args.get('error') state = request.args.get('state') if error: return render_template('login_error.html', error=error) else: code = request.args.get('code') client = Client() access_token = client.exchange_code_for_token(client_id=app.config['STRAVA_CLIENT_ID'], client_secret=app.config['STRAVA_CLIENT_SECRET'], code=code) # Probably here you'd want to store this somewhere -- e.g. in a database. strava_athlete = client.get_athlete() return render_template('login_results.html', athlete=strava_athlete, access_token=access_token)
0.005814
def _insert(self, dom_group, idx=None, prepend=False, name=None):
        """Inserts a DOMGroup inside this element.
        If idx is given, the group is inserted at that index; if prepend is
        True, it is inserted at the start of the childs list; by default it
        is appended at the end.
        If the child is a DOMElement, correctly links the child.
        If the DOMGroup has a name, an attribute containing the child is
        created in this instance.
        """
        if idx and idx < 0:
            idx = 0
        if prepend:
            idx = 0
        else:
            idx = idx if idx is not None else len(self.childs)
        if dom_group is not None:
            if not isinstance(dom_group, Iterable) or isinstance(
                dom_group, (DOMElement, str)
            ):
                dom_group = [dom_group]
            for i_group, elem in enumerate(dom_group):
                if elem is not None:
                    # Element insertion in this DOMElement childs
                    self.childs.insert(idx + i_group, elem)
                    # Managing child attributes if needed
                    if issubclass(elem.__class__, DOMElement):
                        elem.parent = self
                    if name:
                        setattr(self, name, elem)
0.003263
def primitive_datatype_nostring(self, t: URIRef, v: Optional[str] = None) -> Optional[URIRef]: """ Return the data type for primitive type t, if any, defaulting string to no type :param t: type :param v: value - for munging dates if we're doing FHIR official output :return: corresponding data type """ vt = self.primitive_datatype(t) if self.fhir_dates and vt == XSD.dateTime and v: return XSD.gYear if len(v) == 4 else XSD.gYearMonth if len(v) == 7 \ else XSD.date if (len(v) == 10 or (len(v) > 10 and v[10] in '+-')) else XSD.dateTime # For some reason the oid datatype is represented as a string as well if self.fhir_oids and vt == XSD.anyURI: vt = None return None if vt == XSD.string else vt
0.007273
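The date narrowing above keys off the lexical length of the value; the stand-alone sketch below mirrors that dispatch with plain strings in place of rdflib XSD URIRefs.

def fhir_date_type(value):
    # Mirror of the length-based dispatch above, using plain names
    # instead of XSD URIRefs.
    if len(value) == 4:
        return "xsd:gYear"
    if len(value) == 7:
        return "xsd:gYearMonth"
    if len(value) == 10 or (len(value) > 10 and value[10] in "+-"):
        return "xsd:date"
    return "xsd:dateTime"

for v in ("2019", "2019-07", "2019-07-21",
          "2019-07-21+02:00", "2019-07-21T10:30:00Z"):
    print(v, "->", fhir_date_type(v))
# 2019 -> xsd:gYear, 2019-07 -> xsd:gYearMonth, 2019-07-21 -> xsd:date,
# 2019-07-21+02:00 -> xsd:date, 2019-07-21T10:30:00Z -> xsd:dateTime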