Columns:
    text  (string, lengths 78 to 104k) : source of one Python function
    score (float64, range 0 to 0.18)   : score assigned to that function
import random
import string

def random_str(length=16, only_digits=False):
    """
    Generate a random string.
    :return:
    """
    choices = string.digits
    if not only_digits:
        choices += string.ascii_uppercase
    return ''.join(random.SystemRandom().choice(choices) for _ in range(length))
0.003546
def autoscale_y(ax, margin=0.1):
    """This function rescales the y-axis based on the data that is visible
    given the current xlim of the axis.

    ax -- a matplotlib axes object
    margin -- the fraction of the total height of the y-data to pad the upper and lower ylims"""
    # Thanks to http://stackoverflow.com/questions/29461608/matplotlib-fixing-x-axis-scale-and-autoscale-y-axis for inspiring this!
    import numpy as np

    def get_bottom_top(line, lo, hi):
        xd = line.get_xdata()
        yd = line.get_ydata()
        y_displayed = yd[((xd > lo) & (xd < hi))]
        if len(y_displayed) == 0:  # No plotted data is inside the xlims
            return None, None
        else:
            h = np.max(y_displayed) - np.min(y_displayed)
            bot = np.min(y_displayed) - margin * h
            top = np.max(y_displayed) + margin * h
            return bot, top

    lines = ax.get_lines()
    lo, hi = ax.get_xlim()
    # Do a quick check to see if the x-axis has been inverted
    if lo > hi:
        # Reverse them for now
        lo, hi = hi, lo
    # Initialize limits
    bot, top = np.inf, -np.inf
    for line in lines:
        new_bot, new_top = get_bottom_top(line, lo, hi)
        if new_bot is not None:
            if new_bot < bot:
                bot = new_bot
            if new_top > top:
                top = new_top
    # Check to see if it is appropriate to change the boundaries
    if bot != np.inf and top != -np.inf:
        ax.set_ylim(bot, top)
    return ax
0.020188
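A minimal usage sketch for the function above, assuming it is defined alongside matplotlib; the data and limits are illustrative only:

    import numpy as np
    import matplotlib.pyplot as plt

    x = np.linspace(0, 10, 500)
    fig, ax = plt.subplots()
    ax.plot(x, np.sin(x) * np.exp(-x / 5))
    ax.set_xlim(2, 4)            # zoom the x-axis in on a sub-range
    autoscale_y(ax, margin=0.1)  # y-limits now fit only the visible data
    plt.show()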
def log_local_message(message_format, *args):
    """
    Log a request so that it matches our local log format.
    """
    prefix = '{} {}'.format(color('INFO', fg=248), color('request', fg=5))
    message = message_format % args
    sys.stderr.write('{} {}\n'.format(prefix, message))
0.003472
def tempo_account_add_customer(self, data=None):
    """
    Add a new Customer to Tempo Accounts.
    :param data: customer payload
    :return: the API response on success; errors (e.g. failed validation) are reported in the error log.
    """
    if data is None:
        return """Please, set the data as {
            isNew: boolean
            name: string
            key: string
            id: number
        } or you can put only name and key parameters"""
    url = 'rest/tempo-accounts/1/customer'
    return self.post(url, data=data)
0.006859
def field(ctx, text, index, delimiter=' '):
    """
    Reference a field in string separated by a delimiter
    """
    splits = text.split(delimiter)
    # remove our delimiters and whitespace
    splits = [f for f in splits if f != delimiter and len(f.strip()) > 0]
    index = conversions.to_integer(index, ctx)
    if index < 1:
        raise ValueError('Field index cannot be less than 1')
    if index <= len(splits):
        return splits[index - 1]
    else:
        return ''
0.002058
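A self-contained sketch of the same field-extraction logic without the ctx/conversions dependencies (the _std helper name is hypothetical, added here for illustration):

    def field_std(text, index, delimiter=' '):
        # 1-based field lookup that ignores empty splits, mirroring the record above.
        parts = [p for p in text.split(delimiter) if p.strip()]
        if index < 1:
            raise ValueError('Field index cannot be less than 1')
        return parts[index - 1] if index <= len(parts) else ''

    assert field_std('a  b   c', 2) == 'b'
    assert field_std('a,b,c', 5, delimiter=',') == ''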
def save_translations(self, instances):
    """
    Saves cached translations (cached in model instances as dictionaries).
    """
    if not isinstance(instances, (list, tuple)):
        instances = [instances]

    for instance in instances:
        translations = []
        for obj in instance._linguist.translation_instances:
            if obj.field_name:
                obj.object_id = instance.pk
                if (obj.is_new and obj.field_value) or (
                    obj.has_changed and not obj.is_new
                ):
                    field = instance.get_field_object(obj.field_name, obj.language)
                    if hasattr(field, "pre_save") and callable(field.pre_save):
                        obj.field_value = field.pre_save(instance, True)
                translations.append(obj)

        to_create = [
            (obj, self.model(**obj.attrs))
            for obj in translations
            if obj.is_new and obj.field_value
        ]
        to_update = [
            obj for obj in translations if obj.has_changed and not obj.is_new
        ]
        to_delete = [obj for obj in translations if obj.deleted]

        created = True
        if to_create:
            objects = [obj for cached, obj in to_create]
            try:
                with transaction.atomic():
                    self.bulk_create(objects)
            except IntegrityError:
                created = False

        if to_update:
            for obj in to_update:
                self.filter(**obj.lookup).update(field_value=obj.field_value)
                obj.has_changed = False

        if created:
            for cached, obj in to_create:
                cached.is_new = False
                cached.has_changed = False

        if to_delete:
            for obj in to_delete:
                self.filter(**obj.lookup).delete()
                obj.has_changed = False
0.002887
def _datalog(self, parameter, run, maxrun, det_id):
    "Extract data from database"
    values = {
        'parameter_name': parameter,
        'minrun': run,
        'maxrun': maxrun,
        'detid': det_id,
    }
    data = urlencode(values)
    content = self._get_content('streamds/datalognumbers.txt?' + data)
    if content.startswith('ERROR'):
        log.error(content)
        return None
    try:
        dataframe = read_csv(content)
    except ValueError:
        log.warning("Empty dataset")  # ...probably. Waiting for more info
        return make_empty_dataset()
    else:
        add_datetime(dataframe)
        try:
            self._add_converted_units(dataframe, parameter)
        except KeyError:
            log.warning("Could not add converted units for {0}".format(parameter))
        return dataframe
0.002012
def _do_prioritize(items):
    """Determine if we should perform prioritization.

    Currently done on tumor-only input samples and feeding into PureCN
    which needs the germline annotations.
    """
    if not any("tumoronly-prioritization" in dd.get_tools_off(d) for d in items):
        if vcfutils.get_paired_phenotype(items[0]):
            has_tumor = False
            has_normal = False
            for sub_data in items:
                if vcfutils.get_paired_phenotype(sub_data) == "tumor":
                    has_tumor = True
                elif vcfutils.get_paired_phenotype(sub_data) == "normal":
                    has_normal = True
            return has_tumor and not has_normal
0.002857
def parse(args):
    """Parse command-line arguments. Arguments may consist of any
    combination of directories, files, and options."""
    import argparse
    parser = argparse.ArgumentParser(
        add_help=False,
        description="Remove spam and advertising from subtitle files.",
        usage="%(prog)s [OPTION]... TARGET...")
    parser.add_argument(
        "-a", "--aeidon", action="store_true", dest="aeidon",
        help="use python3-aeidon to process subtitles")
    parser.add_argument(
        "-f", "--file", action="append", dest="pattern_files",
        help="obtain matches from FILE")
    parser.add_argument(
        "--fix", action="store_true", dest="fix",
        help="repair potentially damaged subtitle files with aeidon")
    parser.add_argument(
        "-g", "--gui", action="store_true", dest="gui",
        help="indicate use from a GUI")
    parser.add_argument(
        "-h", "--help", action="help", help=argparse.SUPPRESS)
    parser.add_argument(
        "-r", "--regex", action="store_true", dest="regex",
        help="perform regex matching")
    parser.add_argument(
        "-s", "--case-sensitive", action="store_true", default=False,
        dest="case_sensitive", help="match case-sensitively")
    parser.add_argument(
        "-y", "--yes", action="store_true", dest="autoyes",
        help="automatic yes to prompts")
    parser.add_argument(
        "--version", action="version", version='%(prog)s ' + __version__)
    parser.add_argument(dest="targets", help=argparse.SUPPRESS, nargs="*")
    options = parser.parse_args(args)
    arguments = options.targets
    return options, arguments
0.000547
def appendGraph(self, graph_name, graph):
    """Utility method to associate Graph Object to Plugin.

    This utility method is for use in constructor of child classes for
    associating a MuninGraph instances to the plugin.

    @param graph_name: Graph Name
    @param graph:      MuninGraph Instance
    """
    self._graphDict[graph_name] = graph
    self._graphNames.append(graph_name)
    if not self.isMultigraph and len(self._graphNames) > 1:
        raise AttributeError("Simple Munin Plugins cannot have more than one graph.")
0.010033
def reset(self):
    """Clear ConfigObj instance and restore to 'freshly created' state."""
    self.clear()
    self._initialise()
    # FIXME: Should be done by '_initialise', but ConfigObj constructor
    #        (and reload) requires an empty dictionary
    self.configspec = None
    # Just to be sure ;-)
    self._original_configspec = None
0.007895
def _plugin_get(self, plugin_name):
    """
    Find plugins in controller

    :param plugin_name: Name of the plugin to find
    :type plugin_name: str | None
    :return: Plugin or None and error message
    :rtype: (settable_plugin.SettablePlugin | None, str)
    """
    if not plugin_name:
        return None, u"Plugin name not set"
    for plugin in self.controller.plugins:
        if not isinstance(plugin, SettablePlugin):
            continue
        if plugin.name == plugin_name:
            return plugin, ""
    return None, u"Settable plugin '{}' not found".format(plugin_name)
0.003058
def bsrchi(value, ndim, array):
    """
    Do a binary search for a key value within an integer array,
    assumed to be in increasing order. Return the index of the
    matching array entry, or -1 if the key value is not found.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/bsrchi_c.html

    :param value: Value to find in array.
    :type value: int
    :param ndim: Dimension of array.
    :type ndim: int
    :param array: Array to be searched.
    :type array: Array of ints
    :return: index
    :rtype: int
    """
    value = ctypes.c_int(value)
    ndim = ctypes.c_int(ndim)
    array = stypes.toIntVector(array)
    return libspice.bsrchi_c(value, ndim, array)
0.001453
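The CSPICE call above reduces to an ordinary binary search; a pure-Python equivalent (hypothetical helper, not part of the wrapper) behaves the same way:

    from bisect import bisect_left

    def bsrchi_py(value, array):
        # Binary search in a sorted list of ints; index of the match or -1.
        i = bisect_left(array, value)
        return i if i < len(array) and array[i] == value else -1

    assert bsrchi_py(7, [1, 3, 7, 9]) == 2
    assert bsrchi_py(5, [1, 3, 7, 9]) == -1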
def print_version(ctx, value):
    """Print the current version of sandman and exit."""
    if not value:
        return
    import pkg_resources
    version = None
    try:
        version = pkg_resources.get_distribution('sandman').version
    finally:
        del pkg_resources
    click.echo(version)
    ctx.exit()
0.003135
def returner(ret):
    '''
    Write the return data to a file on the minion.
    '''
    opts = _get_options(ret)
    try:
        with salt.utils.files.flopen(opts['filename'], 'a') as logfile:
            salt.utils.json.dump(ret, logfile)
            logfile.write(str('\n'))  # future lint: disable=blacklisted-function
    except Exception:
        log.error('Could not write to rawdata_json file %s', opts['filename'])
        raise
0.004556
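Stripped of the Salt utilities, the record above is the append-one-JSON-object-per-line pattern; a plain-stdlib sketch (the file path is illustrative):

    import json

    with open('/tmp/returns.jsonl', 'a') as logfile:
        json.dump({'id': 'job1', 'success': True}, logfile)
        logfile.write('\n')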
def write(self, *args, **kwargs):
    """Deprecated, use :meth:`~chemcoord.Zmat.to_zmat`
    """
    message = 'Will be removed in the future. Please use to_zmat().'
    with warnings.catch_warnings():
        warnings.simplefilter("always")
        warnings.warn(message, DeprecationWarning)
    return self.to_zmat(*args, **kwargs)
0.00554
def _derive(self, record, hist=None):
    """
    Derivation filters like 'deriveValue' to replace given input values from
    one or more fields. In case 'copyValue' copy value to the target field
    from given an input value from one field. 'deriveRegex' replace given an
    input value from one field, derive target field value using regular
    expressions. If 'deriveIncludes' applies then given an input value from
    one field, derive target field based on at least one of the following:
    includes strings, excludes strings, starts with string, ends with string

    :param dict record: dictionary of values to validate
    :param dict hist: existing input of history values
    """
    def check_derive_options(option, derive_set_config):
        """
        Check derive option is exist into options list and return relevant flag.

        :param str option: drive options value
        :param list derive_set_config: options list
        :return boolean: True or False based on option exist into options list
        """
        return option in derive_set_config

    hist_obj = {}
    if hist is None:
        hist = {}
    for field in record:
        field_val_new = field_val = record[field]
        if field in self.fields:
            for derive_set in self.fields[field]['derive']:
                check_match = False
                derive_set_config = derive_set
                if set.issubset(set(derive_set_config['fieldSet']), record.keys()):
                    # sorting here to ensure sub document match from query
                    derive_input = {val: record[val] for val in derive_set_config['fieldSet']}
                    if derive_set_config['type'] == 'deriveValue':
                        overwrite_flag = check_derive_options(
                            'overwrite', derive_set_config["options"])
                        blank_if_no_match_flag = check_derive_options(
                            'blankIfNoMatch', derive_set_config["options"])
                        field_val_new, hist_obj, check_match = DeriveDataLookup(
                            fieldName=field,
                            db=self.mongo,
                            deriveInput=derive_input,
                            overwrite=overwrite_flag,
                            fieldVal=record[field],
                            histObj=hist,
                            blankIfNoMatch=blank_if_no_match_flag)
                    elif derive_set_config['type'] == 'copyValue':
                        overwrite_flag = check_derive_options(
                            'overwrite', derive_set_config["options"])
                        field_val_new, hist_obj, check_match = DeriveDataCopyValue(
                            fieldName=field,
                            deriveInput=derive_input,
                            overwrite=overwrite_flag,
                            fieldVal=record[field],
                            histObj=hist)
                    elif derive_set_config['type'] == 'deriveRegex':
                        overwrite_flag = check_derive_options(
                            'overwrite', derive_set_config["options"])
                        blank_if_no_match_flag = check_derive_options(
                            'blankIfNoMatch', derive_set_config["options"])
                        field_val_new, hist_obj, check_match = DeriveDataRegex(
                            fieldName=field,
                            db=self.mongo,
                            deriveInput=derive_input,
                            overwrite=overwrite_flag,
                            fieldVal=record[field],
                            histObj=hist,
                            blankIfNoMatch=blank_if_no_match_flag)
                    elif derive_set_config['type'] == 'deriveIncludes':
                        overwrite_flag = check_derive_options(
                            'overwrite', derive_set_config["options"])
                        blank_if_no_match_flag = check_derive_options(
                            'blankIfNoMatch', derive_set_config["options"])
                        field_val_new, hist_obj, check_match = IncludesLookup(
                            fieldVal=record[field],
                            lookupType='deriveIncludes',
                            deriveFieldName=derive_set_config['fieldSet'][0],
                            deriveInput=derive_input,
                            db=self.mongo,
                            fieldName=field,
                            histObj=hist,
                            overwrite=overwrite_flag,
                            blankIfNoMatch=blank_if_no_match_flag)
                    if check_match or field_val_new != field_val:
                        record[field] = field_val_new
                        break
    return record, hist_obj
0.000854
def locally(self):
    """
    Executes the current queryset and passes it to the python backend so the
    user can run queries on the local dataset (instead of contacting the store).
    """
    from .backends import python
    from . import models
    store = python.IterableStore(values=self)
    return store.query(self.manager.model).all()
0.008021
import numpy as np

def load_ply(fileobj):
    """Load an ASCII PLY mesh from a file-like object."""
    def nextline():
        """Read next line, skip comments"""
        while True:
            line = fileobj.readline()
            assert line != ''  # eof
            if not line.startswith('comment'):
                return line.strip()

    assert nextline() == 'ply'
    assert nextline() == 'format ascii 1.0'
    line = nextline()
    assert line.startswith('element vertex')
    nverts = int(line.split()[2])
    # print 'nverts : ', nverts
    assert nextline() == 'property float x'
    assert nextline() == 'property float y'
    assert nextline() == 'property float z'
    line = nextline()
    assert line.startswith('element face')
    nfaces = int(line.split()[2])
    # print 'nfaces : ', nfaces
    assert nextline() == 'property list uchar int vertex_indices'
    line = nextline()
    has_texcoords = line == 'property list uchar float texcoord'
    if has_texcoords:
        assert nextline() == 'end_header'
    else:
        assert line == 'end_header'

    # Verts
    verts = np.zeros((nverts, 3))
    for i in range(nverts):
        vals = nextline().split()
        verts[i, :] = [float(v) for v in vals[:3]]

    # Faces
    faces = []
    faces_uv = []
    for i in range(nfaces):
        vals = nextline().split()
        assert int(vals[0]) == 3
        faces.append([int(v) for v in vals[1:4]])
        if has_texcoords:
            assert len(vals) == 11
            assert int(vals[4]) == 6
            faces_uv.append([(float(vals[5]), float(vals[6])),
                             (float(vals[7]), float(vals[8])),
                             (float(vals[9]), float(vals[10]))])
            # faces_uv.append([float(v) for v in vals[5:]])
        else:
            assert len(vals) == 4
    return verts, faces, faces_uv
0.000544
def isFile(self):
    """
    Check if the given object is a file
    """
    try:
        filetype = file
    except NameError:
        filetype = io.IOBase
    return self._wrap(type(self.obj) is filetype)
0.008621
def volume_correlation(results, references):
    r"""
    Volume correlation.

    Computes the linear correlation in binary object volume between the
    contents of the successive binary images supplied. Measured through
    the Pearson product-moment correlation coefficient.

    Parameters
    ----------
    results : sequence of array_like
        Ordered list of input data containing objects. Each array_like will be
        converted into binary: background where 0, object everywhere else.
    references : sequence of array_like
        Ordered list of input data containing objects. Each array_like will be
        converted into binary: background where 0, object everywhere else.
        The order must be the same as for ``results``.

    Returns
    -------
    r : float
        The correlation coefficient between -1 and 1.
    p : float
        The two-sided p value.
    """
    # plain `bool` here: `numpy.bool` was removed in NumPy 1.24
    results = numpy.atleast_2d(numpy.array(results).astype(bool))
    references = numpy.atleast_2d(numpy.array(references).astype(bool))

    results_volumes = [numpy.count_nonzero(r) for r in results]
    references_volumes = [numpy.count_nonzero(r) for r in references]

    return pearsonr(results_volumes, references_volumes)
0.006314
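A usage sketch, assuming the function above is importable with numpy and scipy.stats.pearsonr in scope; the random masks are illustrative:

    import numpy as np

    rng = np.random.default_rng(0)
    # Paired segmentations whose object volumes grow together.
    results = [(rng.random((8, 8)) < p).astype(int) for p in (0.2, 0.5, 0.8)]
    references = [(rng.random((8, 8)) < p).astype(int) for p in (0.25, 0.5, 0.75)]
    r, p = volume_correlation(results, references)
    print(r)  # close to 1: volumes are strongly positively correlated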
def superpose(ras, rbs, weights=None):
    """Compute the transformation that minimizes the RMSD between the points ras and rbs

    Arguments:
     | ``ras`` -- a ``np.array`` with 3D coordinates of geometry A, shape=(N,3)
     | ``rbs`` -- a ``np.array`` with 3D coordinates of geometry B, shape=(N,3)

    Optional arguments:
     | ``weights`` -- a numpy array with fitting weights for each coordinate, shape=(N,)

    Return value:
     | ``transformation`` -- the transformation that brings geometry A into overlap with geometry B

    Each row in ras and rbs represents a 3D coordinate. Corresponding rows
    contain the points that are brought into overlap by the fitting procedure.
    The implementation is based on the Kabsch Algorithm:

    http://dx.doi.org/10.1107%2FS0567739476001873
    """
    if weights is None:
        ma = ras.mean(axis=0)
        mb = rbs.mean(axis=0)
    else:
        total_weight = weights.sum()
        ma = np.dot(weights, ras) / total_weight
        mb = np.dot(weights, rbs) / total_weight

    # Kabsch
    if weights is None:
        A = np.dot((rbs - mb).transpose(), ras - ma)
    else:
        weights = weights.reshape((-1, 1))
        A = np.dot(((rbs - mb) * weights).transpose(), (ras - ma) * weights)
    v, s, wt = np.linalg.svd(A)
    s[:] = 1
    if np.linalg.det(np.dot(v, wt)) < 0:
        s[2] = -1
    r = np.dot(wt.T * s, v.T)
    return Complete(r, np.dot(r, -mb) + ma)
0.002554
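Since the returned Complete transformation type is library-specific, here is a self-contained check of just the unweighted Kabsch rotation step (the helper name is hypothetical):

    import numpy as np

    def kabsch_rotation(ras, rbs):
        # Rotation r minimizing ||r @ (rbs - mb) - (ras - ma)||, as in superpose().
        A = np.dot((rbs - rbs.mean(axis=0)).T, ras - ras.mean(axis=0))
        v, s, wt = np.linalg.svd(A)
        d = np.sign(np.linalg.det(np.dot(v, wt)))
        return np.dot(wt.T * np.array([1.0, 1.0, d]), v.T)

    rng = np.random.default_rng(1)
    theta = np.pi / 6
    R0 = np.array([[np.cos(theta), -np.sin(theta), 0],
                   [np.sin(theta),  np.cos(theta), 0],
                   [0, 0, 1]])
    a = rng.random((10, 3))
    b = a @ R0.T  # geometry B is a rotated copy of geometry A
    assert np.allclose(kabsch_rotation(a, b), R0.T)  # recovers the inverse rotation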
def timed_loop(name=None,
               rgstr_stamps=None,
               save_itrs=SET['SI'],
               loop_end_stamp=None,
               end_stamp_unique=SET['UN'],
               keep_prev_subdivisions=SET['KS'],
               keep_end_subdivisions=SET['KS'],
               quick_print=SET['QP']):
    """
    Instantiate a TimedLoop object for measuring loop iteration timing data.
    Can be used with either for or while loops.

    Example::

        loop = timed_loop()
        while x > 0:  # or for x in <iterable>:
            next(loop)  # or loop.next()
            <body of loop, with gtimer stamps>
        loop.exit()

    Notes:
        Can be used as a context manager around the loop, without requiring
        separate call to exit().  Redundant calls to exit() do no harm.  Loop
        functionality is implemented in the next() or __next__() methods.
        Each instance can only be used once, so for an inner loop, this
        function must be called within the outer loop.

        Any awaiting subdivisions kept at entrance to a loop section will go
        to the 'UNASSIGNED' position to indicate that they are not properly
        accounted for in the hierarchy.  Likewise for any awaiting
        subdivisions kept at the end of loop iterations without a named stamp.

    Args:
        name (any, optional): Identifier (makes the loop a subdivision), passed through str().
        rgstr_stamps (list, tuple, optional): Identifiers, see subdivision().
        save_itrs (bool, optional): see subdivision().
        loop_end_stamp (any, optional): Identifier, automatic stamp at end of every iteration.
        end_stamp_unique (bool, optional): see stamp().
        keep_prev_subdivisions (bool, optional): Keep awaiting subdivisions on entering loop.
        keep_end_subdivisions (bool, optional): Keep awaiting subdivisions at end of iterations.
        quick_print (bool, optional): Named loop only, print at end of each iteration.

    Returns:
        TimedLoop: Custom gtimer object for measuring loops.
    """
    return TimedLoop(name=name,
                     rgstr_stamps=rgstr_stamps,
                     save_itrs=save_itrs,
                     loop_end_stamp=loop_end_stamp,
                     end_stamp_unique=end_stamp_unique,
                     keep_prev_subdivisions=keep_prev_subdivisions,
                     keep_end_subdivisions=keep_end_subdivisions)
0.002021
def desaturate(c, k=0):
    """
    Utility function to desaturate a color c by an amount k.
    """
    from matplotlib.colors import ColorConverter
    c = ColorConverter().to_rgb(c)
    intensity = 0.299 * c[0] + 0.587 * c[1] + 0.114 * c[2]
    return [intensity * k + i * (1 - k) for i in c]
0.00339
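A quick illustration, assuming matplotlib is installed; k=0 leaves the color untouched while larger k pulls it toward its gray intensity:

    import matplotlib.pyplot as plt

    x = range(10)
    for k in (0.0, 0.5, 0.9):
        plt.plot(x, [xi + 10 * k for xi in x], color=desaturate('crimson', k))
    plt.show()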
def body(self, features):
    """Body of the model.

    Args:
      features: a dictionary with the tensors.

    Returns:
      A pair (predictions, losses) where predictions is the generated image
      and losses is a dictionary of losses (that get added for the final loss).
    """
    features["targets"] = features["inputs"]
    is_training = self.hparams.mode == tf.estimator.ModeKeys.TRAIN

    # Input images.
    inputs = tf.to_float(features["targets_raw"])

    # Noise vector.
    z = tf.random_uniform([self.hparams.batch_size,
                           self.hparams.bottleneck_bits],
                          minval=-1, maxval=1, name="z")

    # Generator output: fake images.
    out_shape = common_layers.shape_list(inputs)[1:4]
    g = self.generator(z, is_training, out_shape)

    losses = self.losses(inputs, g)  # pylint: disable=not-callable

    summary_g_image = tf.reshape(
        g[0, :], [1] + common_layers.shape_list(inputs)[1:])
    tf.summary.image("generated", summary_g_image, max_outputs=1)

    if is_training:
        # Returns a dummy output and the losses dictionary.
        return tf.zeros_like(inputs), losses
    return tf.reshape(g, tf.shape(inputs)), losses
0.001667
def get_func(fullFuncName):
    """Retrieve a function object from a full dotted-package name."""
    # Parse out the path, module, and function
    lastDot = fullFuncName.rfind(u".")
    funcName = fullFuncName[lastDot + 1:]
    modPath = fullFuncName[:lastDot]

    aMod = get_mod(modPath)
    aFunc = getattr(aMod, funcName)

    # Assert that the function is a *callable* attribute.
    assert callable(aFunc), u"%s is not callable." % fullFuncName

    # Return a reference to the function itself, not the results of the function.
    return aFunc
0.001789
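The record above depends on an external get_mod(); an equivalent self-contained variant using only the standard library (the _std name is hypothetical):

    import importlib

    def get_func_std(full_name):
        # Resolve 'pkg.module.func' to the function object via importlib.
        mod_path, _, func_name = full_name.rpartition('.')
        func = getattr(importlib.import_module(mod_path), func_name)
        assert callable(func), '%s is not callable.' % full_name
        return func

    print(get_func_std('os.path.join')('a', 'b'))  # -> 'a/b' (or 'a\\b' on Windows)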
def put_pipeline_definition(pipeline_id, pipeline_objects, parameter_objects=None,
                            parameter_values=None, region=None, key=None,
                            keyid=None, profile=None):
    '''
    Add tasks, schedules, and preconditions to the specified pipeline. This
    function is idempotent and will replace an existing definition.

    CLI example:

    .. code-block:: bash

        salt myminion boto_datapipeline.put_pipeline_definition my_pipeline_id my_pipeline_objects
    '''
    parameter_objects = parameter_objects or []
    parameter_values = parameter_values or []
    client = _get_client(region, key, keyid, profile)
    r = {}
    try:
        response = client.put_pipeline_definition(
            pipelineId=pipeline_id,
            pipelineObjects=pipeline_objects,
            parameterObjects=parameter_objects,
            parameterValues=parameter_values,
        )
        if response['errored']:
            r['error'] = response['validationErrors']
        else:
            r['result'] = response
    except (botocore.exceptions.BotoCoreError,
            botocore.exceptions.ClientError) as e:
        r['error'] = six.text_type(e)
    return r
0.005155
def schedule(self, schedule_time):
    """Add a specific enqueue time to the message.

    :param schedule_time: The scheduled time to enqueue the message.
    :type schedule_time: ~datetime.datetime
    """
    if not self.properties.message_id:
        self.properties.message_id = str(uuid.uuid4())
    if not self.message.annotations:
        self.message.annotations = {}
    self.message.annotations[types.AMQPSymbol(self._x_OPT_SCHEDULED_ENQUEUE_TIME)] = schedule_time
0.005871
def network_protocol(self, layer: Optional[Layer] = None) -> str:
    """Get a random network protocol from the OSI model.

    :param layer: Enum object Layer.
    :return: Protocol name.

    :Example:
        AMQP
    """
    key = self._validate_enum(item=layer, enum=Layer)
    protocols = NETWORK_PROTOCOLS[key]
    return self.random.choice(protocols)
0.005128
def list_directory2(self, mdir, limit=None, marker=None):
    """A lower-level version of `list_directory` that returns the
    response object (which includes the headers).

    ...
    @returns (res, dirents) {2-tuple}
    """
    log.debug('ListDirectory %r', mdir)
    query = {}
    if limit:
        query["limit"] = limit
    if marker:
        query["marker"] = marker
    res, content = self._request(mdir, "GET", query=query)
    if res["status"] != "200":
        raise errors.MantaAPIError(res, content)
    lines = content.splitlines(False)
    dirents = []
    for line in lines:
        if not line.strip():
            continue
        try:
            dirents.append(json.loads(line.decode("utf-8")))
        except ValueError:
            raise errors.MantaError('invalid directory entry: %r' % line)
    return res, dirents
0.002128
def copyPage(self, pno, to=-1):
    """Copy a page to before some other page of the document.
    Specify 'to = -1' to copy after last page.
    """
    pl = list(range(len(self)))
    if pno < 0 or pno > pl[-1]:
        raise ValueError("'from' page number out of range")
    if to < -1 or to > pl[-1]:
        raise ValueError("'to' page number out of range")
    if to == -1:
        pl.append(pno)
    else:
        pl.insert(to, pno)
    return self.select(pl)
0.005894
def connected_subgraphs(self, directed=True, ordered=False):
    '''Generates connected components as subgraphs.
    When ordered=True, subgraphs are ordered by number of vertices.
    '''
    num_ccs, labels = self.connected_components(directed=directed)
    # check the trivial case first
    if num_ccs == 1:
        yield self
        return  # `raise StopIteration` inside a generator is an error under PEP 479
    if ordered:
        # sort by descending size (num vertices)
        order = np.argsort(np.bincount(labels))[::-1]
    else:
        order = range(num_ccs)
    # don't use self.subgraph() here, because we can reuse adj
    adj = self.matrix('dense', 'csr', 'csc')
    for c in order:
        mask = labels == c
        sub_adj = adj[mask][:, mask]
        yield self.__class__.from_adj_matrix(sub_adj)
0.013369
def train_by_stream(self, stream: StreamWrapper) -> None:
    """
    Train the model with the given stream.

    :param stream: stream to train with
    """
    self._run_epoch(stream=stream, train=True)
0.008929
def ensure_flat_galactic(f):
    """
    A decorator for class methods of the form

    .. code-block:: python

        Class.method(self, coords, **kwargs)

    where ``coords`` is an :obj:`astropy.coordinates.SkyCoord` object.

    The decorator ensures that the ``coords`` that gets passed to
    ``Class.method`` is a flat array of Galactic coordinates. It also reshapes
    the output of ``Class.method`` to have the same shape (possibly scalar) as
    the input ``coords``. If the output of ``Class.method`` is a tuple or list
    (instead of an array), each element in the output is reshaped instead.

    Args:
        f (class method): A function with the signature
            ``(self, coords, **kwargs)``, where ``coords`` is a
            :obj:`SkyCoord` object containing an array.

    Returns:
        A function that takes :obj:`SkyCoord` input with any shape (including
        scalar).
    """
    @wraps(f)
    def _wrapper_func(self, coords, **kwargs):
        if coords.frame.name != 'galactic':
            gal = coords.transform_to('galactic')
        else:
            gal = coords

        is_array = not coords.isscalar
        if is_array:
            orig_shape = coords.shape
            shape_flat = (np.prod(orig_shape),)
            gal = gal_to_shape(gal, shape_flat)
        else:
            gal = gal_to_shape(gal, (1,))

        out = f(self, gal, **kwargs)

        if is_array:
            if isinstance(out, list) or isinstance(out, tuple):
                # Apply to each array in output list
                for o in out:
                    o.shape = orig_shape + o.shape[1:]
            else:  # Only one array in output
                out.shape = orig_shape + out.shape[1:]
        else:
            if isinstance(out, list) or isinstance(out, tuple):
                out = list(out)
                # Apply to each array in output list
                for k, o in enumerate(out):
                    out[k] = o[0]
            else:  # Only one array in output
                out = out[0]

        return out

    return _wrapper_func
0.002137
def update_object_with_data(content, record):
    """Update the content with the record data

    :param content: A single folderish catalog brain or content object
    :type content: ATContentType/DexterityContentType/CatalogBrain
    :param record: The data to update
    :type record: dict
    :returns: The updated content object
    :rtype: object
    :raises: :class:`~plone.jsonapi.routes.exceptions.APIError`
    """
    # ensure we have a full content object
    content = get_object(content)

    # get the proper data manager
    dm = IDataManager(content)

    if dm is None:
        fail(400, "Update for this object is not allowed")

    # Iterate through record items
    for k, v in record.items():
        try:
            success = dm.set(k, v, **record)
        except Unauthorized:
            fail(401, "Not allowed to set the field '%s'" % k)
        except ValueError as exc:  # `except ValueError, exc` is Python-2-only syntax
            fail(400, str(exc))
        if not success:
            logger.warn("update_object_with_data::skipping key=%r", k)
            continue
        logger.debug("update_object_with_data::field %r updated", k)

    # Validate the entire content object
    invalid = validate_object(content, record)
    if invalid:
        fail(400, u.to_json(invalid))

    # do a wf transition
    if record.get("transition", None):
        t = record.get("transition")
        logger.debug(">>> Do Transition '%s' for Object %s", t, content.getId())
        do_transition_for(content, t)

    # reindex the object
    content.reindexObject()
    return content
0.001267
def invites(self):
    """
    Access the invites

    :returns: twilio.rest.chat.v1.service.channel.invite.InviteList
    :rtype: twilio.rest.chat.v1.service.channel.invite.InviteList
    """
    if self._invites is None:
        self._invites = InviteList(
            self._version,
            service_sid=self._solution['service_sid'],
            channel_sid=self._solution['sid'],
        )
    return self._invites
0.004255
def get_text(self, locator, params=None, timeout=None, visible=True):
    """
    Get text or value from element based on locator with optional parameters.

    :param locator: element identifier
    :param params: (optional) locator parameters
    :param timeout: (optional) time to wait for text (default: None)
    :param visible: should element be visible before getting text (default: True)
    :return: element text, value or empty string
    """
    element = locator
    if not isinstance(element, WebElement):
        element = self.get_present_element(locator, params, timeout, visible)
    if element and element.text:
        return element.text
    else:
        try:
            return element.get_attribute('value')
        except AttributeError:
            return ""
0.005841
def to_dict(self, column_names=None, selection=None, strings=True, virtual=False):
    """Return a dict containing the ndarray corresponding to the evaluated data

    :param column_names: list of column names, to export, when None
        DataFrame.get_column_names(strings=strings, virtual=virtual) is used
    :param selection: {selection}
    :param strings: argument passed to DataFrame.get_column_names when column_names is None
    :param virtual: argument passed to DataFrame.get_column_names when column_names is None
    :return: dict
    """
    return dict(self.to_items(column_names=column_names, selection=selection,
                              strings=strings, virtual=virtual))
0.011611
def init_from_wave_file(wavpath):
    """Init a sonic visualiser environment structure based on the analysis
    of the main audio file. The audio file has to be encoded in wave.

    Args:
        wavpath(str): the full path to the wavfile
    """
    try:
        samplerate, data = SW.read(wavpath)
        nframes = data.shape[0]
    except:
        # scipy cannot handle 24 bit wav files
        # and wave cannot handle 32 bit wav files
        try:
            w = wave.open(wavpath)
            samplerate = w.getframerate()
            nframes = w.getnframes()
        except:
            raise Exception('Cannot decode wavefile ' + wavpath)
    return SVEnv(samplerate, nframes, wavpath)
0.009103
def show_channel(self, channel, owner):
    '''Show a single channel for owner

    If owner is None, the currently logged in user is used
    '''
    url = '%s/channels/%s/%s' % (self.domain, owner, channel)
    res = self.session.get(url)
    self._check_response(res, [200])
    return res.json()
0.006192
def mBank_set_iph_id(transactions, tag, tag_dict, *args):
    """
    mBank Collect uses ID IPH to distinguish between virtual accounts;
    adding iph_id may be helpful in further processing
    """
    matches = iph_id_re.search(tag_dict[tag.slug])
    if matches:  # pragma no branch
        tag_dict['iph_id'] = matches.groupdict()['iph_id']
    return tag_dict
0.002725
def draw_polygon(
    self,
    *pts,
    close_path=True,
    stroke=None,
    stroke_width=1,
    stroke_dash=None,
    fill=None
) -> None:
    """Draws the given polygon."""
    c = self.c
    c.saveState()
    if stroke is not None:
        c.setStrokeColorRGB(*stroke)
        c.setLineWidth(stroke_width)
        c.setDash(stroke_dash)
    if fill is not None:
        c.setFillColorRGB(*fill)
    p = c.beginPath()
    fn = p.moveTo
    for x, y in zip(*[iter(pts)] * 2):
        fn(x, y)
        fn = p.lineTo
    if close_path:
        p.close()
    c.drawPath(p, stroke=(stroke is not None), fill=(fill is not None))
    c.restoreState()
0.003807
def index(self, elem):
    """Find the index of elem in the reversed iterator."""
    return _coconut.len(self._iter) - self._iter.index(elem) - 1
0.012987
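The identity the one-liner relies on, checked on a concrete list:

    seq = [10, 20, 30, 40]
    rev = list(reversed(seq))
    # index in the reversed view == len(seq) - forward index - 1
    assert rev.index(20) == len(seq) - seq.index(20) - 1 == 2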
def addScalarBar3D(
    self,
    obj=None,
    at=0,
    pos=(0, 0, 0),
    normal=(0, 0, 1),
    sx=0.1,
    sy=2,
    nlabels=9,
    ncols=256,
    cmap=None,
    c=None,
    alpha=1,
):
    """Draw a 3D scalar bar.

    ``obj`` input can be:

    - a list of numbers,
    - a list of two numbers in the form `(min, max)`,
    - a ``vtkActor`` already containing a set of scalars associated to vertices or cells,
    - if ``None`` the last actor in the list of actors will be used.

    .. hint:: |scalbar| |mesh_coloring.py|_
    """
    return addons.addScalarBar3D(obj, at, pos, normal, sx, sy, nlabels, ncols, cmap, c, alpha)
0.006803
def SUSSelection(self, mating_pool_size):
    '''
    Make Selection of the mating pool with the stochastic universal sampling algorithm
    '''
    A = numpy.zeros(self.length)
    mating_pool = numpy.zeros(mating_pool_size)
    r = numpy.random.random() / float(mating_pool_size)
    [F, S, P] = self.rankingEval()
    P_Sorted = numpy.zeros(self.length)
    for i in range(self.length):
        P_Sorted[i] = P[S[i]]
    for i in range(self.length):
        A[i] = P_Sorted[0:(i + 1)].sum()
    i = 0
    j = 0
    while j < mating_pool_size:
        i = 0
        while A[i] <= r:
            i += 1
        mating_pool[j] = S[i]
        j += 1
        r += (1 / float(mating_pool_size))
    return mating_pool
0.049612
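A standalone sketch of stochastic universal sampling over a plain probability vector, without the ranking-evaluation plumbing above (the helper name is hypothetical):

    import numpy as np

    def sus(probs, n, seed=0):
        # n evenly spaced pointers over the cumulative distribution; one random offset.
        rng = np.random.default_rng(seed)
        cum = np.cumsum(probs)
        pointers = (rng.random() + np.arange(n)) / n
        return np.searchsorted(cum, pointers)

    print(sus([0.1, 0.2, 0.3, 0.4], n=4))  # fitter individuals (indices 2, 3) dominate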
def bounds(self):
    """
    Return the axis aligned bounding box of the current path.

    Returns
    ----------
    bounds: (2, dimension) float, (min, max) coordinates
    """
    # get the exact bounds of each entity
    # some entities (aka 3-point Arc) have bounds that can't
    # be generated from just the bounding box of their vertices
    points = np.array([e.bounds(self.vertices) for e in self.entities],
                      dtype=np.float64)
    # flatten bound extrema into (n, dimension) array
    points = points.reshape((-1, self.vertices.shape[1]))
    # get the max and min of all bounds
    bounds = np.array([points.min(axis=0), points.max(axis=0)],
                      dtype=np.float64)
    return bounds
0.00237
def _request_activity_list(self, athlete):
    """Actually do the request for activity list

    This call is slow and therefore this method is memory cached.

    Keyword arguments:
    athlete -- Full name of athlete
    """
    response = self._get_request(self._athlete_endpoint(athlete))
    response_buffer = StringIO(response.text)
    activity_list = pd.read_csv(
        filepath_or_buffer=response_buffer,
        parse_dates={'datetime': ['date', 'time']},
        sep=r',\s*',  # raw string: '\s' is an invalid escape in a plain string
        engine='python'
    )
    activity_list.rename(columns=lambda x: x.lower(), inplace=True)
    activity_list.rename(
        columns=lambda x: '_' + x if x[0].isdigit() else x, inplace=True)

    activity_list['has_hr'] = activity_list.average_heart_rate.map(bool)
    activity_list['has_spd'] = activity_list.average_speed.map(bool)
    activity_list['has_pwr'] = activity_list.average_power.map(bool)
    activity_list['has_cad'] = activity_list.average_heart_rate.map(bool)

    activity_list['data'] = pd.Series(dtype=np.dtype("object"))

    return activity_list
0.002613
def position_target_global_int_encode(self, time_boot_ms, coordinate_frame, type_mask,
                                      lat_int, lon_int, alt, vx, vy, vz,
                                      afx, afy, afz, yaw, yaw_rate):
    '''
    Reports the current commanded vehicle position, velocity, and
    acceleration as specified by the autopilot. This should match the
    commands sent in SET_POSITION_TARGET_GLOBAL_INT if the vehicle is being
    controlled this way.

    time_boot_ms     : Timestamp in milliseconds since system boot. The rationale for the timestamp in the setpoint is to allow the system to compensate for the transport delay of the setpoint. This allows the system to compensate processing latency. (uint32_t)
    coordinate_frame : Valid options are: MAV_FRAME_GLOBAL_INT = 5, MAV_FRAME_GLOBAL_RELATIVE_ALT_INT = 6, MAV_FRAME_GLOBAL_TERRAIN_ALT_INT = 11 (uint8_t)
    type_mask        : Bitmask to indicate which dimensions should be ignored by the vehicle: a value of 0b0000000000000000 or 0b0000001000000000 indicates that none of the setpoint dimensions should be ignored. If bit 10 is set the floats afx afy afz should be interpreted as force instead of acceleration. Mapping: bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy, bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az, bit 10: is force setpoint, bit 11: yaw, bit 12: yaw rate (uint16_t)
    lat_int          : X Position in WGS84 frame in 1e7 * meters (int32_t)
    lon_int          : Y Position in WGS84 frame in 1e7 * meters (int32_t)
    alt              : Altitude in meters in AMSL altitude, not WGS84 if absolute or relative, above terrain if GLOBAL_TERRAIN_ALT_INT (float)
    vx               : X velocity in NED frame in meter / s (float)
    vy               : Y velocity in NED frame in meter / s (float)
    vz               : Z velocity in NED frame in meter / s (float)
    afx              : X acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
    afy              : Y acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
    afz              : Z acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
    yaw              : yaw setpoint in rad (float)
    yaw_rate         : yaw rate setpoint in rad/s (float)
    '''
    return MAVLink_position_target_global_int_message(
        time_boot_ms, coordinate_frame, type_mask, lat_int, lon_int, alt,
        vx, vy, vz, afx, afy, afz, yaw, yaw_rate)
0.00566
def disconnect(self, instance, another_instance):
    '''
    Disconnect an *instance* from *another_instance*.
    '''
    if instance not in self:
        return False
    if another_instance not in self[instance]:
        return False
    self[instance].remove(another_instance)
    return True
0.011628
def get(self, name="", owner=None, app=None, sharing=None, **query):
    """Performs a GET request to the server on the collection.

    If *owner*, *app*, and *sharing* are omitted, this method takes a
    default namespace from the :class:`Service` object for this
    :class:`Endpoint`. All other keyword arguments are included in the URL
    as query parameters.

    :raises AuthenticationError: Raised when the ``Service`` is not logged in.
    :raises HTTPError: Raised when an error in the request occurs.
    :param path_segment: A path segment relative to this endpoint.
    :type path_segment: ``string``
    :param owner: The owner context of the namespace (optional).
    :type owner: ``string``
    :param app: The app context of the namespace (optional).
    :type app: ``string``
    :param sharing: The sharing mode for the namespace (optional).
    :type sharing: "global", "system", "app", or "user"
    :param query: All other keyword arguments, which are used as query
        parameters.
    :type query: ``string``
    :return: The response from the server.
    :rtype: ``dict`` with keys ``body``, ``headers``, ``reason``, and ``status``

    Example:

        import splunklib.client
        s = client.service(...)
        saved_searches = s.saved_searches
        saved_searches.get("my/saved/search") == \\
            {'body': ...a response reader object...,
             'headers': [('content-length', '26208'),
                         ('expires', 'Fri, 30 Oct 1998 00:00:00 GMT'),
                         ('server', 'Splunkd'),
                         ('connection', 'close'),
                         ('cache-control', 'no-store, max-age=0, must-revalidate, no-cache'),
                         ('date', 'Fri, 11 May 2012 16:30:35 GMT'),
                         ('content-type', 'text/xml; charset=utf-8')],
             'reason': 'OK',
             'status': 200}
        saved_searches.get('nonexistant/search') # raises HTTPError
        s.logout()
        saved_searches.get() # raises AuthenticationError
    """
    name = UrlEncoded(name, encode_slash=True)
    return super(Collection, self).get(name, owner, app, sharing, **query)
0.00256
def _update_capacity(self, data):
    """Update the consumed capacity metrics"""
    if 'ConsumedCapacity' in data:
        # This is all for backwards compatibility
        consumed = data['ConsumedCapacity']
        if not isinstance(consumed, list):
            consumed = [consumed]
        for cap in consumed:
            self.capacity += cap.get('CapacityUnits', 0)
            self.table_capacity += cap.get('Table', {}).get('CapacityUnits', 0)
            local_indexes = cap.get('LocalSecondaryIndexes', {})
            for k, v in six.iteritems(local_indexes):
                self.indexes.setdefault(k, 0)
                self.indexes[k] += v['CapacityUnits']
            global_indexes = cap.get('GlobalSecondaryIndexes', {})
            for k, v in six.iteritems(global_indexes):
                self.global_indexes.setdefault(k, 0)
                self.global_indexes[k] += v['CapacityUnits']
0.001953
def outline(self, face_ids=None, **kwargs):
    """
    Given a list of face indexes find the outline of those faces and
    return it as a Path3D.

    The outline is defined here as every edge which is only included
    by a single triangle.

    Note that this implies a non-watertight mesh as the outline of a
    watertight mesh is an empty path.

    Parameters
    ----------
    face_ids : (n,) int
        Indices to compute the outline of.
        If None, outline of full mesh will be computed.
    **kwargs : passed to Path3D constructor

    Returns
    ----------
    path : Path3D
        Curve in 3D of the outline
    """
    from .path.exchange.misc import faces_to_path
    from .path.exchange.load import _create_path
    path = _create_path(**faces_to_path(self, face_ids, **kwargs))
    return path
0.00202
def __get_start_stop_iterations(sync_output_dynamic, start_iteration, stop_iteration):
    """!
    @brief Apply rule of preparation for start iteration and stop iteration values.

    @param[in] sync_output_dynamic (sync_dynamic): Output dynamic of the Sync network.
    @param[in] start_iteration (uint): The first iteration that is used for calculation.
    @param[in] stop_iteration (uint): The last iteration that is used for calculation.

    @return (tuple) New values of start and stop iterations.
    """
    if start_iteration is None:
        start_iteration = 0
    if stop_iteration is None:
        stop_iteration = len(sync_output_dynamic)
    return (start_iteration, stop_iteration)
0.018428
def make_transformer(self, umap_kwargs={}):
    """
    Creates an internal transformer pipeline to project the data set into
    2D space using UMAP. This method will reset the transformer on the class.

    Parameters
    ----------
    umap_kwargs : dict
        Keyword arguments passed through to the UMAP constructor.

    Returns
    -------
    transformer : Pipeline
        Pipelined transformer for UMAP projections
    """
    # Create the pipeline steps
    steps = []

    # Add the UMAP manifold
    steps.append(('umap', UMAP(
        n_components=2,
        random_state=self.random_state,
        **umap_kwargs)))

    # return the pipeline
    return Pipeline(steps)
0.003044
def decode(self, name, as_map_key=False):
    """Return the cached value for the given key if present; otherwise cache
    the name if it is cacheable, or return it unchanged."""
    if is_cache_key(name) and (name in self.key_to_value):
        return self.key_to_value[name]
    return self.encache(name) if is_cacheable(name, as_map_key) else name
0.007605
def is_subscribed(user, obj):
    """
    Returns ``True`` if the user is subscribed to the given object.

    :param user: A ``User`` instance.
    :param obj: Any object.
    """
    if not user.is_authenticated():
        return False
    ctype = ContentType.objects.get_for_model(obj)
    try:
        Subscription.objects.get(
            user=user, content_type=ctype, object_id=obj.pk)
    except Subscription.DoesNotExist:
        return False
    return True
0.002123
def RegisterPathSpec(cls, path_spec_type):
    """Registers a path specification type.

    Args:
        path_spec_type (type): path specification type.

    Raises:
        KeyError: if path specification is already registered.
    """
    type_indicator = path_spec_type.TYPE_INDICATOR
    if type_indicator in cls._path_spec_types:
        raise KeyError(
            'Path specification type: {0:s} already set.'.format(type_indicator))

    cls._path_spec_types[type_indicator] = path_spec_type

    if getattr(path_spec_type, '_IS_SYSTEM_LEVEL', False):
        cls._system_level_type_indicators[type_indicator] = path_spec_type
0.004688
def pairwise_tukey(dv=None, between=None, data=None, alpha=.05,
                   tail='two-sided', effsize='hedges'):
    '''Pairwise Tukey-HSD post-hoc test.

    Parameters
    ----------
    dv : string
        Name of column containing the dependant variable.
    between : string
        Name of column containing the between factor.
    data : pandas DataFrame
        DataFrame
    alpha : float
        Significance level
    tail : string
        Indicates whether to return the 'two-sided' or 'one-sided' p-values
    effsize : string or None
        Effect size type. Available methods are ::

        'none' : no effect size
        'cohen' : Unbiased Cohen d
        'hedges' : Hedges g
        'glass' : Glass delta
        'eta-square' : Eta-square
        'odds-ratio' : Odds ratio
        'AUC' : Area Under the Curve

    Returns
    -------
    stats : DataFrame
        Stats summary ::

        'A' : Name of first measurement
        'B' : Name of second measurement
        'mean(A)' : Mean of first measurement
        'mean(B)' : Mean of second measurement
        'diff' : Mean difference
        'SE' : Standard error
        'tail' : indicate whether the p-values are one-sided or two-sided
        'T' : T-values
        'p-tukey' : Tukey-HSD corrected p-values
        'efsize' : effect sizes
        'eftype' : type of effect size

    Notes
    -----
    Tukey HSD post-hoc is best for balanced one-way ANOVA. It has been proven
    to be conservative for one-way ANOVA with unequal sample sizes. However,
    it is not robust if the groups have unequal variances, in which case the
    Games-Howell test is more adequate. Tukey HSD is not valid for repeated
    measures ANOVA.

    Note that when the sample sizes are unequal, this function actually
    performs the Tukey-Kramer test (which allows for unequal sample sizes).

    The T-values are defined as:

    .. math::

        t = \\frac{\\overline{x}_i - \\overline{x}_j}
            {\\sqrt{2 \\cdot MS_w / n}}

    where :math:`\\overline{x}_i` and :math:`\\overline{x}_j` are the means of
    the first and second group, respectively, :math:`MS_w` the mean squares of
    the error (computed using ANOVA) and :math:`n` the sample size.

    If the sample sizes are unequal, the Tukey-Kramer procedure is
    automatically used:

    .. math::

        t = \\frac{\\overline{x}_i - \\overline{x}_j}
            {\\sqrt{\\frac{MS_w}{n_i} + \\frac{MS_w}{n_j}}}

    where :math:`n_i` and :math:`n_j` are the sample sizes of the first and
    second group, respectively.

    The p-values are then approximated using the Studentized range
    distribution :math:`Q(\\sqrt2 \\cdot |t_i|, r, N - r)` where :math:`r` is
    the total number of groups and :math:`N` is the total sample size.

    Note that the p-values might be slightly different than those obtained
    using R or Matlab since the studentized range approximation is done using
    the Gleason (1999) algorithm, which is more efficient and accurate than
    the algorithms used in Matlab or R.

    References
    ----------
    .. [1] Tukey, John W. "Comparing individual means in the analysis of
           variance." Biometrics (1949): 99-114.

    .. [2] Gleason, John R. "An accurate, non-iterative approximation for
           studentized range quantiles." Computational statistics & data
           analysis 31.2 (1999): 147-158.

    Examples
    --------
    Pairwise Tukey post-hocs on the pain threshold dataset.

        >>> from pingouin import pairwise_tukey, read_dataset
        >>> df = read_dataset('anova')
        >>> pt = pairwise_tukey(dv='Pain threshold', between='Hair color', data=df)
    '''
    from pingouin.external.qsturng import psturng

    # First compute the ANOVA
    aov = anova(dv=dv, data=data, between=between, detailed=True)
    df = aov.loc[1, 'DF']
    ng = aov.loc[0, 'DF'] + 1
    grp = data.groupby(between)[dv]
    n = grp.count().values
    gmeans = grp.mean().values
    gvar = aov.loc[1, 'MS'] / n

    # Pairwise combinations
    g1, g2 = np.array(list(combinations(np.arange(ng), 2))).T
    mn = gmeans[g1] - gmeans[g2]
    se = np.sqrt(gvar[g1] + gvar[g2])
    tval = mn / se

    # Critical values and p-values
    # from pingouin.external.qsturng import qsturng
    # crit = qsturng(1 - alpha, ng, df) / np.sqrt(2)
    pval = psturng(np.sqrt(2) * np.abs(tval), ng, df)
    pval *= 0.5 if tail == 'one-sided' else 1

    # Uncorrected p-values
    # from scipy.stats import t
    # punc = t.sf(np.abs(tval), n[g1].size + n[g2].size - 2) * 2

    # Effect size
    d = tval * np.sqrt(1 / n[g1] + 1 / n[g2])
    ef = convert_effsize(d, 'cohen', effsize, n[g1], n[g2])

    # Create dataframe
    # Careful: pd.unique does NOT sort whereas numpy does
    stats = pd.DataFrame({
        'A': np.unique(data[between])[g1],
        'B': np.unique(data[between])[g2],
        'mean(A)': gmeans[g1],
        'mean(B)': gmeans[g2],
        'diff': mn,
        'SE': np.round(se, 3),
        'tail': tail,
        'T': np.round(tval, 3),
        # 'alpha': alpha,
        # 'crit': np.round(crit, 3),
        'p-tukey': pval,
        'efsize': np.round(ef, 3),
        'eftype': effsize,
    })
    return stats
0.000184
def cmd(admin_only=False, acl='*', aliases=None, while_ignored=False, *args, **kwargs):
    """
    Decorator to mark plugin functions as commands in the form of !<cmd_name>

    * admin_only - indicates only users in bot_admin are allowed to execute
      (only used if AuthManager is loaded)
    * acl - indicates which ACL to perform permission checks against
      (only used if AuthManager is loaded)
    * aliases - register function with additional commands (i.e. !alias1, !alias2, etc)
    * while_ignored - allows a command to be run, even if channel has been !sleep
    """
    def wrapper(func):
        func.is_cmd = True
        func.is_subcmd = len(func.__name__.split('_')) > 1
        func.cmd_name = func.__name__.replace('_', ' ')
        func.admin_only = admin_only
        func.acl = acl
        func.aliases = aliases
        func.while_ignored = while_ignored
        return func
    return wrapper
0.006601
def cleanup_defenses(self):
    """Cleans up all data about defense work in current round."""
    print_header('CLEANING UP DEFENSES DATA')
    work_ancestor_key = self.datastore_client.key('WorkType', 'AllDefenses')
    keys_to_delete = [
        e.key
        for e in self.datastore_client.query_fetch(kind=u'ClassificationBatch')
    ] + [
        e.key
        for e in self.datastore_client.query_fetch(kind=u'Work',
                                                   ancestor=work_ancestor_key)
    ]
    self._cleanup_keys_with_confirmation(keys_to_delete)
0.001773
def add_bundle(self, *args):
    """
    Add some bundle to build group

    :type bundle: static_bundle.bundles.AbstractBundle
    @rtype: BuildGroup
    """
    for bundle in args:
        if not self.multitype and self.has_bundles():
            first_bundle = self.get_first_bundle()
            if first_bundle.get_type() != bundle.get_type():
                raise Exception(
                    'Different bundle types for one Asset: %s [%s -> %s]; '
                    'check types or set multitype parameter to True'
                    % (self.name, first_bundle.get_type(), bundle.get_type())
                )
        self.bundles.append(bundle)
    return self
0.004082
def get_data(self, start=None, length=None):
    """Get data chunk from a section.

    Allows to query data from the section by passing the addresses where the
    PE file would be loaded by default. It is then possible to retrieve code
    and data by their real addresses as they would be if loaded.

    Returns bytes() under Python 3.x and str() under Python 2.7
    """
    PointerToRawData_adj = self.pe.adjust_FileAlignment(
        self.PointerToRawData, self.pe.OPTIONAL_HEADER.FileAlignment)
    VirtualAddress_adj = self.pe.adjust_SectionAlignment(
        self.VirtualAddress,
        self.pe.OPTIONAL_HEADER.SectionAlignment,
        self.pe.OPTIONAL_HEADER.FileAlignment)

    if start is None:
        offset = PointerToRawData_adj
    else:
        offset = (start - VirtualAddress_adj) + PointerToRawData_adj

    if length is not None:
        end = offset + length
    else:
        end = offset + self.SizeOfRawData
    # PointerToRawData is not adjusted here as we might want to read any
    # possible extra bytes that might get cut off by aligning the start
    # (and hence cutting something off the end)
    if end > self.PointerToRawData + self.SizeOfRawData:
        end = self.PointerToRawData + self.SizeOfRawData
    return self.pe.__data__[offset:end]
0.010901
def loader(filepath, logger=None, **kwargs):
    """
    Load an object from an ASDF file.
    See :func:`ginga.util.loader` for more info.

    TODO: kwargs may contain info about what part of the file to load
    """
    # see ginga.util.loader module
    # TODO: return an AstroTable if loading a table, etc.
    #       for now, assume always an image
    from ginga import AstroImage
    image = AstroImage.AstroImage(logger=logger)
    with asdf.open(filepath) as asdf_f:
        # image.load_asdf(asdf_f, **kwargs)
        image.load_asdf(asdf_f)
    return image
0.003527
def _FloatingPointEncoder(wire_type, format):
    """Return a constructor for an encoder for float fields.

    This is like StructPackEncoder, but catches errors that may be due to
    passing non-finite floating-point values to struct.pack, and makes a
    second attempt to encode those values.

    Args:
        wire_type: The field's wire type, for encoding tags.
        format: The format string to pass to struct.pack().
    """
    value_size = struct.calcsize(format)
    if value_size == 4:
        def EncodeNonFiniteOrRaise(write, value):
            # Remember that the serialized form uses little-endian byte order.
            if value == _POS_INF:
                write(b'\x00\x00\x80\x7F')
            elif value == _NEG_INF:
                write(b'\x00\x00\x80\xFF')
            elif value != value:  # NaN
                write(b'\x00\x00\xC0\x7F')
            else:
                raise
    elif value_size == 8:
        def EncodeNonFiniteOrRaise(write, value):
            if value == _POS_INF:
                write(b'\x00\x00\x00\x00\x00\x00\xF0\x7F')
            elif value == _NEG_INF:
                write(b'\x00\x00\x00\x00\x00\x00\xF0\xFF')
            elif value != value:  # NaN
                write(b'\x00\x00\x00\x00\x00\x00\xF8\x7F')
            else:
                raise
    else:
        raise ValueError('Can\'t encode floating-point values that are '
                         '%d bytes long (only 4 or 8)' % value_size)

    def SpecificEncoder(field_number, is_repeated, is_packed):
        local_struct_pack = struct.pack
        if is_packed:
            tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
            local_EncodeVarint = _EncodeVarint
            def EncodePackedField(write, value):
                write(tag_bytes)
                local_EncodeVarint(write, len(value) * value_size)
                for element in value:
                    # This try/except block is going to be faster than any code
                    # that we could write to check whether element is finite.
                    try:
                        write(local_struct_pack(format, element))
                    except SystemError:
                        EncodeNonFiniteOrRaise(write, element)
            return EncodePackedField
        elif is_repeated:
            tag_bytes = TagBytes(field_number, wire_type)
            def EncodeRepeatedField(write, value):
                for element in value:
                    write(tag_bytes)
                    try:
                        write(local_struct_pack(format, element))
                    except SystemError:
                        EncodeNonFiniteOrRaise(write, element)
            return EncodeRepeatedField
        else:
            tag_bytes = TagBytes(field_number, wire_type)
            def EncodeField(write, value):
                write(tag_bytes)
                try:
                    write(local_struct_pack(format, value))
                except SystemError:
                    EncodeNonFiniteOrRaise(write, value)
            return EncodeField

    return SpecificEncoder
0.014301
def dumpf(obj, path):
    """
    Write an nginx configuration to file.

    :param obj obj: nginx object (Conf, Server, Container)
    :param str path: path to nginx configuration on disk
    :returns: path the configuration was written to
    """
    with open(path, 'w') as f:
        dump(obj, f)
    return path
0.003165
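A usage sketch; it assumes the python-nginx-style objects (Conf, Server, Key, Location) that dumpf's docstring refers to, and the output path is illustrative:

    import nginx

    conf = nginx.Conf()
    server = nginx.Server()
    server.add(
        nginx.Key('listen', '80'),
        nginx.Key('server_name', 'example.com'),
        nginx.Location('/', nginx.Key('return', '200')),
    )
    conf.add(server)
    dumpf(conf, '/tmp/example.conf')  # returns '/tmp/example.conf'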
def show(self, func_name, values, labels=None):
    """Prints out nice representations of the given values."""
    s = self.Stanza(self.indent)
    if func_name == '<module>' and self.in_console:
        func_name = '<console>'
    s.add([func_name + ': '])
    reprs = map(self.safe_repr, values)
    if labels:
        sep = ''
        for label, repr in zip(labels, reprs):
            s.add([label + '=', self.CYAN, repr, self.NORMAL], sep)
            sep = ', '
    else:
        sep = ''
        for repr in reprs:
            s.add([self.CYAN, repr, self.NORMAL], sep)
            sep = ', '
    self.writer.write(s.chunks)
0.002861
def get_objective_bank_query_session(self, proxy):
    """Gets the OsidSession associated with the objective bank query service.

    :param proxy: a proxy
    :type proxy: ``osid.proxy.Proxy``
    :return: an ``ObjectiveBankQuerySession``
    :rtype: ``osid.learning.ObjectiveBankQuerySession``
    :raise: ``NullArgument`` -- ``proxy`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``Unimplemented`` -- ``supports_objective_bank_query()`` is ``false``

    *compliance: optional -- This method must be implemented if
    ``supports_objective_bank_query()`` is true.*
    """
    if not self.supports_objective_bank_query():
        raise Unimplemented()
    try:
        from . import sessions
    except ImportError:
        raise OperationFailed()
    proxy = self._convert_proxy(proxy)
    try:
        session = sessions.ObjectiveBankQuerySession(proxy=proxy, runtime=self._runtime)
    except AttributeError:
        raise OperationFailed()
    return session
0.00552
def check_package(self, package, package_dir):
    """Check namespace packages' __init__ for declare_namespace"""
    try:
        return self.packages_checked[package]
    except KeyError:
        pass

    init_py = orig.build_py.check_package(self, package, package_dir)
    self.packages_checked[package] = init_py

    if not init_py or not self.distribution.namespace_packages:
        return init_py

    for pkg in self.distribution.namespace_packages:
        if pkg == package or pkg.startswith(package + '.'):
            break
    else:
        return init_py

    with io.open(init_py, 'rb') as f:
        contents = f.read()
    if b'declare_namespace' not in contents:
        raise distutils.errors.DistutilsError(
            "Namespace package problem: %s is a namespace package, but "
            "its\n__init__.py does not call declare_namespace()! Please "
            'fix it.\n(See the setuptools manual under '
            '"Namespace Packages" for details.)\n"' % (package,)
        )
    return init_py
0.001779
def _spawn(self, command, args=[], preexec_fn=None, dimensions=None):
    '''This starts the given command in a child process. This does all the
    fork/exec type of stuff for a pty. This is called by __init__. If args
    is empty then command will be parsed (split on spaces) and args will be
    set to parsed arguments.
    '''
    # The pid and child_fd of this object get set by this method.
    # Note that it is difficult for this method to fail.
    # You cannot detect if the child process cannot start.
    # So the only way you can tell if the child process started
    # or not is to try to read from the file descriptor. If you get
    # EOF immediately then it means that the child is already dead.
    # That may not necessarily be bad because you may have spawned a child
    # that performs some task; creates no stdout output; and then dies.

    # If command is an int type then it may represent a file descriptor.
    if isinstance(command, type(0)):
        raise ExceptionPexpect('Command is an int type. ' +
                               'If this is a file descriptor then maybe you want to ' +
                               'use fdpexpect.fdspawn which takes an existing ' +
                               'file descriptor instead of a command string.')

    if not isinstance(args, type([])):
        raise TypeError('The argument, args, must be a list.')

    if args == []:
        self.args = split_command_line(command)
        self.command = self.args[0]
    else:
        # Make a shallow copy of the args list.
        self.args = args[:]
        self.args.insert(0, command)
        self.command = command

    command_with_path = which(self.command, env=self.env)
    if command_with_path is None:
        raise ExceptionPexpect('The command was not found or was not ' +
                               'executable: %s.' % self.command)
    self.command = command_with_path
    self.args[0] = self.command

    self.name = '<' + ' '.join(self.args) + '>'

    assert self.pid is None, 'The pid member must be None.'
    assert self.command is not None, 'The command member must not be None.'

    kwargs = {'echo': self.echo, 'preexec_fn': preexec_fn}
    if self.ignore_sighup:
        def preexec_wrapper():
            "Set SIGHUP to be ignored, then call the real preexec_fn"
            signal.signal(signal.SIGHUP, signal.SIG_IGN)
            if preexec_fn is not None:
                preexec_fn()
        kwargs['preexec_fn'] = preexec_wrapper

    if dimensions is not None:
        kwargs['dimensions'] = dimensions

    if self.encoding is not None:
        # Encode command line using the specified encoding
        self.args = [a if isinstance(a, bytes) else a.encode(self.encoding)
                     for a in self.args]

    self.ptyproc = self._spawnpty(self.args, env=self.env,
                                  cwd=self.cwd, **kwargs)

    self.pid = self.ptyproc.pid
    self.child_fd = self.ptyproc.fd

    self.terminated = False
    self.closed = False
0.002517
async def finalize_request(
        self,
        result: ResponseReturnValue,
        request_context: Optional[RequestContext]=None,
        from_error_handler: bool=False,
) -> Response:
    """Turns the view response return value into a response.

    Arguments:
        result: The result of the request to finalize into a response.
        request_context: The request context, optional as Flask
            omits this argument.
        from_error_handler: Whether the response is being finalized
            from an error handler; if so, errors raised while
            finalizing are logged rather than re-raised.
    """
    response = await self.make_response(result)
    try:
        response = await self.process_response(response, request_context)
        await request_finished.send(self, response=response)
    except Exception:
        if not from_error_handler:
            raise
        self.logger.exception('Request finalizing errored')
    return response
0.008206
def append(self, item):
    """Add item to the end of the item list.
    """
    if not self.items:       # was list empty ?
        self.items = item    # then this is the new head
    item.insert(self.items)
0.008696
def packet_is_for_me(self, m): '''returns true if this packet is appropriately addressed''' if m.target_system != self.master.mav.srcSystem: return False if m.target_component != self.master.mav.srcComponent: return False # if have a sender we can also check the source address: if self.sender is not None: if (m.get_srcSystem(), m.get_srcComponent()) != self.sender: return False return True
0.004065
def clean(self):
    '''Check to make sure password fields match.'''
    data = super(SignupForm, self).clean()

    # basic check for now
    if 'username' in data:
        if User.objects.filter(
                username=data['username'], email=data['email']).exists():
            raise validators.ValidationError(
                _('Username or email exists in database.'))

    if 'password' in data:
        if data['password'] != data.get('confirm_password', None):
            raise validators.ValidationError(_('Passwords do not match.'))
        else:
            data.pop('confirm_password', None)

    return data
0.002882
def bar(self, serie, rescale=False): """Draw a bar graph for a serie""" serie_node = self.svg.serie(serie) bars = self.svg.node(serie_node['plot'], class_="bars") if rescale and self.secondary_series: points = self._rescale(serie.points) else: points = serie.points for i, (x, y) in enumerate(points): if None in (x, y) or (self.logarithmic and y <= 0): continue metadata = serie.metadata.get(i) val = self._format(serie, i) bar = decorate( self.svg, self.svg.node(bars, class_='bar'), metadata ) x_, y_, width, height = self._bar( serie, bar, x, y, i, self.zero, secondary=rescale ) self._confidence_interval( serie_node['overlay'], x_ + width / 2, y_, serie.values[i], metadata ) self._tooltip_and_print_values( serie_node, serie, bar, i, val, metadata, x_, y_, width, height )
0.001845
def shape(self):
    """Copy the shape from TCMPS as a new tuple."""

    # Create C variables that will serve as out parameters for TCMPS.
    shape_ptr = _ctypes.POINTER(_ctypes.c_size_t)()  # size_t* shape_ptr
    dim = _ctypes.c_size_t()                         # size_t dim

    # Obtain pointer into memory owned by the C++ object self.handle.
    status_code = self._LIB.TCMPSGetFloatArrayShape(
        self.handle, _ctypes.byref(shape_ptr), _ctypes.byref(dim))
    assert status_code == 0, "Error calling TCMPSGetFloatArrayShape"

    return _shape_tuple_from_ctypes(shape_ptr, dim)
0.00315
def _win32_junction(path, link, verbose=0):
    """
    On older (pre 10) versions of windows we need admin privileges to make
    symlinks, however junctions seem to work.

    For directories we create a junction (soft link) and for files we use
    a hard link.

    CommandLine:
        python -m ubelt._win32_links _win32_junction

    Example:
        >>> # xdoc: +REQUIRES(WIN32)
        >>> import ubelt as ub
        >>> root = ub.ensure_app_cache_dir('ubelt', 'win32_junction')
        >>> ub.delete(root)
        >>> ub.ensuredir(root)
        >>> fpath = join(root, 'fpath.txt')
        >>> dpath = join(root, 'dpath')
        >>> fjunc = join(root, 'fjunc.txt')
        >>> djunc = join(root, 'djunc')
        >>> ub.touch(fpath)
        >>> ub.ensuredir(dpath)
        >>> ub.ensuredir(join(root, 'djunc_fake'))
        >>> ub.ensuredir(join(root, 'djunc_fake with space'))
        >>> ub.touch(join(root, 'djunc_fake with space file'))
        >>> _win32_junction(fpath, fjunc)
        >>> _win32_junction(dpath, djunc)
        >>> # thank god colons are not allowed
        >>> djunc2 = join(root, 'djunc2 [with pathological attrs]')
        >>> _win32_junction(dpath, djunc2)
        >>> _win32_is_junction(djunc)
        >>> ub.writeto(join(djunc, 'afile.txt'), 'foo')
        >>> assert ub.readfrom(join(dpath, 'afile.txt')) == 'foo'
        >>> ub.writeto(fjunc, 'foo')
    """
    # junctions store absolute paths
    path = os.path.abspath(path)
    link = os.path.abspath(link)

    from ubelt import util_cmd
    if os.path.isdir(path):
        # try using a junction (soft link)
        if verbose:
            print('... as soft link')
        # TODO: what is the windows api for this?
        command = 'mklink /J "{}" "{}"'.format(link, path)
    else:
        # try using a hard link
        if verbose:
            print('... as hard link')
        # command = 'mklink /H "{}" "{}"'.format(link, path)
        try:
            jwfs.link(path, link)  # this seems to be allowed
        except Exception:
            print('Failed to hardlink link={} to path={}'.format(link, path))
            raise
        command = None

    if command is not None:
        info = util_cmd.cmd(command, shell=True)
        if info['ret'] != 0:
            from ubelt import util_format
            print('Failed command:')
            print(info['command'])
            print(util_format.repr2(info, nl=1))
            raise OSError(str(info))
    return link
0.000407
def get_secret(secret_name, default=None): """ Gets contents of secret file :param secret_name: The name of the secret present in BANANAS_SECRETS_DIR :param default: Default value to return if no secret was found :return: The secret or default if not found """ secrets_dir = get_secrets_dir() secret_path = os.path.join(secrets_dir, secret_name) try: with open(secret_path, "r") as secret_file: return secret_file.read() except OSError: return default
0.001923
def WSGIHandler(self): """Returns GRR's WSGI handler.""" sdm = werkzeug_wsgi.SharedDataMiddleware(self, { "/": config.CONFIG["AdminUI.document_root"], }) # Use DispatcherMiddleware to make sure that SharedDataMiddleware is not # used at all if the URL path doesn't start with "/static". This is a # workaround for cases when unicode URLs are used on systems with # non-unicode filesystems (as detected by Werkzeug). In this case # SharedDataMiddleware may fail early while trying to convert the # URL into the file path and not dispatch the call further to our own # WSGI handler. return werkzeug_wsgi.DispatcherMiddleware(self, { "/static": sdm, })
0.001403
def post(self, request, *args, **kwargs): """ Do the login and password protection. """ self.object = self.get_object() self.login() if self.object.password: entry_password = self.request.POST.get('entry_password') if entry_password: if entry_password == self.object.password: self.request.session[self.session_key % self.object.pk] = self.object.password return self.get(request, *args, **kwargs) else: self.error = True return self.password() return self.get(request, *args, **kwargs)
0.002813
def query(self, *args): """ Send a query to the watchman service and return the response This call will block until the response is returned. If any unilateral responses are sent by the service in between the request-response they will be buffered up in the client object and NOT returned via this method. """ log("calling client.query") self._connect() try: self.sendConn.send(args) res = self.receive() while self.isUnilateralResponse(res): res = self.receive() return res except EnvironmentError as ee: # When we can depend on Python 3, we can use PEP 3134 # exception chaining here. raise WatchmanEnvironmentError( "I/O error communicating with watchman daemon", ee.errno, ee.strerror, args, ) except WatchmanError as ex: ex.setCommand(args) raise
0.001918
def update(self, *args, **kwargs):
    """Calls update on each of the systems in self.systems."""
    for system in self.systems:
        system.update(self, *args, **kwargs)
0.010929
def parseFragment(doc, container="div", treebuilder="etree",
                  namespaceHTMLElements=True, **kwargs):
    """Parse an HTML fragment as a string or file-like object into a tree

    :arg doc: the fragment to parse as a string or file-like object

    :arg container: the container context to parse the fragment in

    :arg treebuilder: the treebuilder to use when parsing

    :arg namespaceHTMLElements: whether or not to namespace HTML elements

    :returns: parsed tree

    Example:

    >>> from html5lib.html5parser import parseFragment
    >>> parseFragment('<b>this is a fragment</b>')
    <Element u'DOCUMENT_FRAGMENT' at 0x7feac484b090>

    """
    tb = treebuilders.getTreeBuilder(treebuilder)
    p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
    return p.parseFragment(doc, container=container, **kwargs)
0.002387
def _re_establish_use_watch(self): """Call after a close/re-connect. Automatically re-establishes the USE and WATCH configs previously setup. """ if self.current_tube != self.desired_tube: self.use(self.desired_tube) if self._watchlist != self.desired_watchlist: self.watchlist = self.desired_watchlist
0.008174
def get_enthalpy(self, temperature, electronic_energy='Default'):
    """Returns the enthalpy of a gas-phase molecule.

    Parameters
    ----------
    temperature : numeric
        temperature in K
    electronic_energy : numeric
        energy in eV

    Returns
    -------
    enthalpy : numeric
        Enthalpy in eV
    """
    if not temperature:  # either None or 0
        return 0.0

    if electronic_energy == 'Default':
        electronic_energy = molecule_dict[self.name]['electronic_energy']

        if overbinding:
            electronic_energy += molecule_dict[self.name]['overbinding']

    ideal_gas_object = IdealGasThermo(vib_energies=self.get_vib_energies(),
                                      potentialenergy=electronic_energy,
                                      atoms=self.atom_object,
                                      geometry=molecule_dict[self.name]['geometry'],
                                      symmetrynumber=molecule_dict[self.name]['symmetrynumber'],
                                      spin=molecule_dict[self.name]['spin'])

    energy = ideal_gas_object.get_enthalpy(temperature=temperature,
                                           verbose=False)

    self.enthalpy = energy
    return self.enthalpy
0.008615
def breadth_first_search(graph, root_node=None): """Searches through the tree in a breadth-first fashion. If root_node is None, an arbitrary node will be used as the root. If root_node is not None, it will be used as the root for the search tree. Returns a list of nodes, in the order that they were reached. """ ordering = [] all_nodes = graph.get_all_node_ids() if not all_nodes: return ordering queue = deque() discovered = defaultdict(lambda: False) to_visit = set(all_nodes) if root_node is None: root_node = all_nodes[0] discovered[root_node] = True queue.appendleft(root_node) # We need to make sure we visit all the nodes, including disconnected ones while True: # BFS Main Loop while len(queue) > 0: current_node = queue.pop() ordering.append(current_node) to_visit.remove(current_node) for n in graph.neighbors(current_node): if not discovered[n]: discovered[n] = True queue.appendleft(n) # New root node if we still have more nodes if len(to_visit) > 0: node = to_visit.pop() to_visit.add(node) # --We need this here because we remove the node as part of the BFS algorithm discovered[node] = True queue.appendleft(node) else: break return ordering
0.002045
def qs_from_dict(qsdict, prefix=""): ''' Same as dict_from_qs, but in reverse i.e. {"period": {"di": {}, "fhr": {}}} => "period.di,period.fhr" ''' prefix = prefix + '.' if prefix else "" def descend(qsd): for key, val in sorted(qsd.items()): if val: yield qs_from_dict(val, prefix + key) else: yield prefix + key return ",".join(descend(qsdict))
0.002283
def _get_converter_type(identifier): """Return the converter type for `identifier`.""" if isinstance(identifier, str): return ConverterType[identifier] if isinstance(identifier, ConverterType): return identifier return ConverterType(identifier)
0.003623
def postinit(self, test=None, fail=None): """Do some setup after initialisation. :param test: The test that passes or fails the assertion. :type test: NodeNG or None :param fail: The message shown when the assertion fails. :type fail: NodeNG or None """ self.fail = fail self.test = test
0.005666
def get_subhash(hash):
    """Get a second hash based on napiprojekt's hash.

    :param str hash: napiprojekt's hash.
    :return: the subhash.
    :rtype: str

    """
    idx = [0xe, 0x3, 0x6, 0x8, 0x2]
    mul = [2, 2, 5, 4, 3]
    add = [0, 0xd, 0x10, 0xb, 0x5]

    b = []
    for i, m, a in zip(idx, mul, add):
        t = a + int(hash[i], 16)
        v = int(hash[t:t + 2], 16)
        b.append(('%x' % (v * m))[-1])

    return ''.join(b)
0.00202
def visitFunctionCall(self, ctx): """ expression : fnname LPAREN parameters? RPAREN """ func_name = ctx.fnname().getText() if ctx.parameters() is not None: parameters = self.visit(ctx.parameters()) else: parameters = [] return self._functions.invoke_function(self._eval_context, func_name, parameters)
0.007833
def info(self):
    """
    Show expiration dates, equity price, quote time.

    Returns
    -------
    self : :class:`~pynance.opt.core.Options`
        Returns a reference to the calling object to allow chaining.

    expiries : :class:`pandas.tseries.index.DatetimeIndex`
        Index of expiration dates.

    Examples
    --------
    >>> fopt, fexp = pn.opt.get('f').info()
    Expirations:
    ...
    Stock: 16.25
    Quote time: 2015-03-01 16:00
    """
    print("Expirations:")
    for _i, _datetime in enumerate(self.data.index.levels[1].to_pydatetime()):
        print("{:2d} {}".format(_i, _datetime.strftime('%Y-%m-%d')))
    print("Stock: {:.2f}".format(self.data.iloc[0].loc['Underlying_Price']))
    print("Quote time: {}".format(self.quotetime().strftime('%Y-%m-%d %H:%M%z')))
    return self, self.exps()
0.004405
def string_to_double_precision_float(s: str) -> float:
    """
    Double precision floats in a Fortran file have the form 'x.ydz' or 'x.yDz',
    which cannot be converted directly by Python's ``float`` function, so this
    function helps with the conversion.

    :param s: a string denoting a double precision number
    :return: a Python floating point number

    .. doctest::

        >>> string_to_double_precision_float('1d-82')
        1e-82
        >>> string_to_double_precision_float('1.0D-82')
        1e-82
        >>> string_to_double_precision_float('0.8D234')
        8e+233
        >>> string_to_double_precision_float('.8d234')
        8e+233
    """
    first, second, exponential = re.match(
        r"(-?\d*)\.?(-?\d*)d(-?\d+)", s, re.IGNORECASE).groups()
    return float(first + '.' + second + 'e' + exponential)
0.008255
def yield_figs(self, **kwargs): # pragma: no cover """ This function *generates* a predefined list of matplotlib figures with minimal input from the user. """ yield self.plot_densities(title="PAW densities", show=False) yield self.plot_waves(title="PAW waves", show=False) yield self.plot_projectors(title="PAW projectors", show=False)
0.007813
def SetProperties(has_props_cls, input_dict, include_immutable=True):
    """A helper method to set an ``HasProperties`` object's properties from a dictionary"""
    props = has_props_cls()
    if not isinstance(input_dict, (dict, collections.OrderedDict)):
        raise RuntimeError('input_dict invalid: ', input_dict)
    for k, v in iter(input_dict.items()):
        if (k in has_props_cls._props and (
                include_immutable or
                any(hasattr(has_props_cls._props[k], att) for att in ('required', 'new_name'))
           )
           ):
            p = props._props.get(k)
            if isinstance(p, properties.HasProperties):
                props._set(k, SetProperties(p, v, include_immutable=include_immutable))
            elif isinstance(p, properties.Instance):
                props._set(k, SetProperties(p.instance_class, v, include_immutable=include_immutable))
            elif isinstance(p, properties.List):
                if not isinstance(v, list):
                    raise RuntimeError('property value mismatch', p, v)
                # Guard against empty lists before inspecting the first item.
                if v and not isinstance(v[0], properties.HasProperties):
                    prop = p.prop.instance_class
                    newlist = []
                    for i in v:
                        value = SetProperties(prop, i, include_immutable=include_immutable)
                        newlist.append(value)
                    props._set(k, newlist)
                else:
                    props._set(k, v)
            else:
                props._set(k, p.from_json(v))
    # Return others as well
    # others_dict = {k: v for k, v in iter(input_dict.items())
    #                if k not in has_props_cls._props}
    return props
0.00407
def phase_by(val: Any, phase_turns: float, qubit_index: int, default: TDefault = RaiseTypeErrorIfNotProvided): """Returns a phased version of the effect. For example, an X gate phased by 90 degrees would be a Y gate. This works by calling `val`'s _phase_by_ method and returning the result. Args: val: The value to describe with a unitary matrix. phase_turns: The amount to phase the gate, in fractions of a whole turn. Divide by 2pi to get radians. qubit_index: The index of the target qubit the phasing applies to. For operations this is the index of the qubit within the operation's qubit list. For gates it's the index of the qubit within the tuple of qubits taken by the gate's `on` method. default: The default value to return if `val` can't be phased. If not specified, an error is raised when `val` can't be phased. Returns: If `val` has a _phase_by_ method and its result is not NotImplemented, that result is returned. Otherwise, the function will return the default value provided or raise a TypeError if none was provided. Raises: TypeError: `val` doesn't have a _phase_by_ method (or that method returned NotImplemented) and no `default` was specified. """ getter = getattr(val, '_phase_by_', None) result = NotImplemented if getter is None else getter( phase_turns, qubit_index) if result is not NotImplemented: return result if default is not RaiseTypeErrorIfNotProvided: return default if getter is None: raise TypeError("object of type '{}' " "has no _phase_by_ method.".format(type(val))) raise TypeError("object of type '{}' does have a _phase_by_ method, " "but it returned NotImplemented.".format(type(val)))
0.000518
def check_composite_tokens(self, name, tokens): """ Return the key and contents of a KEY..END block for PATTERN, POINTS, and PROJECTION """ assert len(tokens) >= 2 key = tokens[0] assert key.value.lower() == name assert tokens[-1].value.lower() == "end" if len(tokens) == 2: body = [] # empty TYPE..END block else: body = tokens[1:-1] body_tokens = [] for t in body: if isinstance(t, dict): body_tokens.append(t["__tokens__"]) else: body_tokens.append(t) return key, body_tokens
0.002999
def SensorShare(self, sensor_id, parameters):
    """
    Share a sensor with a user

    @param sensor_id (int) - Id of sensor to be shared
    @param parameters (dictionary) - Additional parameters for the call

    @return (bool) - Boolean indicating whether the SensorShare call was successful
    """
    if not parameters['user']['id']:
        parameters['user'].pop('id')
    if not parameters['user']['username']:
        parameters['user'].pop('username')

    if self.__SenseApiCall__("/sensors/{0}/users".format(sensor_id), "POST", parameters=parameters):
        return True
    else:
        self.__error__ = "api call unsuccessful"
        return False
0.009056
def batch_query_state_changes(
        self,
        batch_size: int,
        filters: Optional[List[Tuple[str, Any]]] = None,
        logical_and: bool = True,
) -> Iterator[List[StateChangeRecord]]:
    """Batch query state change records with a given batch size and an optional filter

    This is a generator function returning each batch to the caller to work with.
    """
    limit = batch_size
    offset = 0
    result_length = 1

    while result_length != 0:
        result = self._get_state_changes(
            limit=limit,
            offset=offset,
            filters=filters,
            logical_and=logical_and,
        )
        result_length = len(result)
        offset += result_length
        yield result
0.006234
def split_name(name): """Splits a (possibly versioned) name into unversioned name and version. Returns a tuple ``(unversioned_name, version)``, where ``version`` may be ``None``. """ s = name.rsplit('@', 1) if len(s) == 1: return s[0], None else: try: return s[0], int(s[1]) except ValueError: raise ValueError("Invalid Filetracker filename: version must " "be int, not %r" % (s[1],))
0.002016
def _estimate_label_shape(self):
    """Helper function to estimate label shape"""
    max_count = 0
    label_width = 0
    self.reset()
    try:
        while True:
            label, _ = self.next_sample()
            label = self._parse_label(label)
            max_count = max(max_count, label.shape[0])
            label_width = label.shape[1]
    except StopIteration:
        pass
    self.reset()
    return (max_count, label_width)
0.00464