text: string, lengths 78 to 104k
score: float64, range 0 to 0.18
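Each row below pairs a Python snippet (text) with a quality score (score). As a minimal, hypothetical sketch of how such a dump can be consumed (the file name and score threshold are assumptions, not part of this data), records with these two columns can be loaded and filtered with pandas:

import pandas as pd

# Hypothetical file name; any JSONL export with "text" and "score" columns works.
df = pd.read_json("code_score_dump.jsonl", lines=True)

# Keep the higher-scoring snippets (scores in this dump range from 0 to 0.18).
top = df[df["score"] >= 0.01].sort_values("score", ascending=False)

for _, row in top.head(3).iterrows():
    print(f"score={row['score']:.4f}  length={len(row['text'])}")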
def _CreateExpandedDSA(client, ad_group_id):
    """Creates the expanded Dynamic Search Ad.

    Args:
        client: an AdwordsClient instance.
        ad_group_id: an integer ID of the ad group in which the DSA is added.
    """
    # Get the AdGroupAdService.
    ad_group_ad_service = client.GetService('AdGroupAdService')

    # Create the operation
    operations = [{
        'operator': 'ADD',
        'operand': {
            'xsi_type': 'AdGroupAd',
            'adGroupId': ad_group_id,
            # Create the expanded dynamic search ad. This ad will have its
            # headline and final URL auto-generated at serving time according to
            # domain name specific information provided by DynamicSearchAdsSetting
            # at the campaign level.
            'ad': {
                'xsi_type': 'ExpandedDynamicSearchAd',
                # Set the ad description.
                'description': 'Buy your tickets now!',
                'description2': 'Discount ends soon'
            },
            # Optional: Set the status.
            'status': 'PAUSED',
        }
    }]

    # Create the ad.
    ad = ad_group_ad_service.mutate(operations)['value'][0]['ad']

    # Display the results.
    print('Expanded dynamic search ad with ID "%d", description "%s", and '
          'description 2 "%s" was added' % (ad['id'], ad['description'],
                                            ad['description2']))
0.009002
def base_station(self):
    """Return the base_station assigned for the given camera."""
    try:
        return list(filter(lambda x: x.device_id == self.parent_id,
                           self._session.base_stations))[0]
    except (IndexError, AttributeError):
        return None
0.006452
def run(self, task, **kwargs):
    """
    This is a utility method to call a task from within a task. For instance:

        def grouped_tasks(task):
            task.run(my_first_task)
            task.run(my_second_task)

        nornir.run(grouped_tasks)

    This method will ensure the subtask is run only for the host in the
    current thread.
    """
    if not self.host or not self.nornir:
        msg = (
            "You have to call this after setting host and nornir attributes. "
            "You probably called this from outside a nested task"
        )
        raise Exception(msg)

    if "severity_level" not in kwargs:
        kwargs["severity_level"] = self.severity_level

    task = Task(task, **kwargs)
    r = task.start(self.host, self.nornir)
    self.results.append(r[0] if len(r) == 1 else r)

    if r.failed:
        # Without this we will keep running the grouped task
        raise NornirSubTaskError(task=task, result=r)

    return r
0.004721
def pipool(name, ivals):
    """
    This entry point provides toolkit programmers a method for
    programmatically inserting integer data into the kernel pool.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/pipool_c.html

    :param name: The kernel pool name to associate with values.
    :type name: str
    :param ivals: An array of integers to insert into the pool.
    :type ivals: Array of ints
    """
    name = stypes.stringToCharP(name)
    n = ctypes.c_int(len(ivals))
    ivals = stypes.toIntVector(ivals)
    libspice.pipool_c(name, n, ivals)
0.001751
def ip_hide_community_list_holder_community_list_extended_ip_action(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") ip = ET.SubElement(config, "ip", xmlns="urn:brocade.com:mgmt:brocade-common-def") hide_community_list_holder = ET.SubElement(ip, "hide-community-list-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy") community_list = ET.SubElement(hide_community_list_holder, "community-list") extended = ET.SubElement(community_list, "extended") name_key = ET.SubElement(extended, "name") name_key.text = kwargs.pop('name') seq_keyword_key = ET.SubElement(extended, "seq-keyword") seq_keyword_key.text = kwargs.pop('seq_keyword') instance_key = ET.SubElement(extended, "instance") instance_key.text = kwargs.pop('instance') ip_action = ET.SubElement(extended, "ip-action") ip_action.text = kwargs.pop('ip_action') callback = kwargs.pop('callback', self._callback) return callback(config)
0.005676
def validate_parsed_json(obj_json, options=None): """ Validate objects from parsed JSON. This supports a single object, or a list of objects. If a single object is given, a single result is returned. Otherwise, a list of results is returned. If an error occurs, a ValidationErrorResults instance or list which includes one of these instances, is returned. :param obj_json: The parsed json :param options: Validation options :return: An ObjectValidationResults instance, or a list of such. """ validating_list = isinstance(obj_json, list) if not options: options = ValidationOptions() if not options.no_cache: init_requests_cache(options.refresh_cache) results = None if validating_list: results = [] for obj in obj_json: try: results.append(validate_instance(obj, options)) except SchemaInvalidError as ex: error_result = ObjectValidationResults(is_valid=False, object_id=obj.get('id', ''), errors=[str(ex)]) results.append(error_result) else: try: results = validate_instance(obj_json, options) except SchemaInvalidError as ex: error_result = ObjectValidationResults(is_valid=False, object_id=obj_json.get('id', ''), errors=[str(ex)]) results = error_result if not options.no_cache and options.clear_cache: clear_requests_cache() return results
0.001761
async def update_offer(self, **params): """Updates offer after transaction confirmation Accepts: - transaction id - coinid - confirmed (boolean flag) """ logging.debug("\n\n -- Update offer. ") if params.get("message"): params = json.loads(params.get("message", "{}")) if not params: return {"error":400, "reason":"Missed required fields"} # Check if required fields exists txid = params.get("txid") coinid = params.get("coinid").upper() try: coinid = coinid.replace("TEST", "") except: pass # Try to find offer with account id and cid database = client[coinid] offer_db = database[settings.OFFER] offer = await offer_db.find_one({"txid":txid}) logging.debug("\n\n -- Try to get offer. ") logging.debug(offer) if not offer: return {"error":404, "reason":"Offer with txid %s not found" % txid } # Update offer await offer_db.find_one_and_update( {"txid":txid}, {"$set":{"confirmed":1}}) # Get updated offer updated = await offer_db.find_one({"txid":txid}) return {i:updated[i] for i in updated if i != "_id"}
0.046406
def SetLowerTimestamp(cls, timestamp):
    """Sets the lower bound timestamp."""
    if not hasattr(cls, '_lower'):
        cls._lower = timestamp
        return

    if timestamp < cls._lower:
        cls._lower = timestamp
0.018349
def replace_vcf_info(keyword, annotation, variant_line=None, variant_dict=None): """Replace the information of a info field of a vcf variant line or a variant dict. Arguments: variant_line (str): A vcf formatted variant line variant_dict (dict): A variant dictionary keyword (str): The info field key annotation (str): If the annotation is a key, value pair this is the string that represents the value Returns: variant_line (str): A annotated variant line """ new_info = '{0}={1}'.format(keyword, annotation) logger.debug("Replacing the variant information {0}".format(new_info)) fixed_variant = None new_info_list = [] if variant_line: logger.debug("Adding information to a variant line") splitted_variant = variant_line.rstrip('\n').split('\t') logger.debug("Adding information to splitted variant line") old_info = splitted_variant[7] if old_info == '.': new_info_string = new_info else: splitted_info_string = old_info.split(';') for info in splitted_info_string: splitted_info_entry = info.split('=') if splitted_info_entry[0] == keyword: new_info_list.append(new_info) else: new_info_list.append(info) new_info_string = ';'.join(new_info_list) splitted_variant[7] = new_info_string fixed_variant = '\t'.join(splitted_variant) elif variant_dict: logger.debug("Adding information to a variant dict") old_info = variant_dict['INFO'] if old_info == '.': variant_dict['INFO'] = new_info else: for info in old_info.split(';'): splitted_info_entry = info.split('=') if splitted_info_entry[0] == keyword: new_info_list.append(new_info) else: new_info_list.append(info) new_info_string = ';'.join(new_info_list) variant_dict['INFO'] = new_info_string fixed_variant = variant_dict return fixed_variant
0.006195
def add_contact(self, segment_id, contact_id):
    """
    Add a contact to the segment

    :param segment_id: int Segment ID
    :param contact_id: int Contact ID

    :return: dict|str
    """
    response = self._client.session.post(
        '{url}/{segment_id}/contact/add/{contact_id}'.format(
            url=self.endpoint_url, segment_id=segment_id, contact_id=contact_id
        )
    )
    return self.process_response(response)
0.003861
def find_user_by_username(self, username):
    """Find a User object by username."""
    return self.db_adapter.ifind_first_object(self.UserClass, username=username)
0.017341
def create_tensorprod_function(funcs):
    """Combine 1-D rules into multivariate rule using tensor product."""
    dim = len(funcs)

    def tensprod_rule(order, part=None):
        """Tensor product rule."""
        order = order*numpy.ones(dim, int)
        values = [funcs[idx](order[idx]) for idx in range(dim)]

        abscissas = [numpy.array(_[0]).flatten() for _ in values]
        abscissas = chaospy.quad.combine(abscissas, part=part).T

        weights = [numpy.array(_[1]).flatten() for _ in values]
        weights = numpy.prod(chaospy.quad.combine(weights, part=part), -1)

        return abscissas, weights

    return tensprod_rule
0.001541
def post_mortem(trace_back=None, exc_info=None): """ Breaks on a traceback and send all execution information to the debugger client. If the interpreter is handling an exception at this traceback, exception information is sent to _line_tracer() which will transmit it to the debugging client. Caller can also pass an *exc_info* that will be used to extract exception information. If passed exc_info has precedence over traceback. This method is useful for integrating with systems that manage exceptions. Using it, you can setup a developer mode where unhandled exceptions are sent to the developer. Once user resumes execution, control is returned to caller. IKP3db is just used to "pretty" display the execution environment. To call post_mortem() use: .. code-block:: python import ikp3db ... ikp3db.postmortem(any_traceback) :param trace_back: The traceback at which to break on. :type trace_back: traceback :param exc_info: Complete description of the raised Exception as returned by sys.exc_info. :type exc_info: tuple :return: An error message or None is everything went fine. :rtype: str or None """ if not ikpdb: return "Error: IKP3db must be launched before calling ikpd.post_mortem()." if exc_info: trace_back = exc_info[2] elif trace_back and not exc_info: if sys.exc_info()[2] == trace_back: exc_info = sys.exc_info() else: return "missing parameter trace_back or exc_info" pm_traceback = trace_back while pm_traceback.tb_next: pm_traceback = pm_traceback.tb_next ikpdb._line_tracer(pm_traceback.tb_frame, exc_info=exc_info) _logger.g_info("Post mortem processing finished.") return None
0.010701
def sina_download(url, output_dir='.', merge=True, info_only=False, **kwargs): """Downloads Sina videos by URL. """ if 'news.sina.com.cn/zxt' in url: sina_zxt(url, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs) return vid = match1(url, r'vid=(\d+)') if vid is None: video_page = get_content(url) vid = hd_vid = match1(video_page, r'hd_vid\s*:\s*\'([^\']+)\'') if hd_vid == '0': vids = match1(video_page, r'[^\w]vid\s*:\s*\'([^\']+)\'').split('|') vid = vids[-1] if vid is None: vid = match1(video_page, r'vid:"?(\d+)"?') if vid: #title = match1(video_page, r'title\s*:\s*\'([^\']+)\'') sina_download_by_vid(vid, output_dir=output_dir, merge=merge, info_only=info_only) else: vkey = match1(video_page, r'vkey\s*:\s*"([^"]+)"') if vkey is None: vid = match1(url, r'#(\d+)') sina_download_by_vid(vid, output_dir=output_dir, merge=merge, info_only=info_only) return title = match1(video_page, r'title\s*:\s*"([^"]+)"') sina_download_by_vkey(vkey, title=title, output_dir=output_dir, merge=merge, info_only=info_only)
0.005728
def break_array(a, threshold=numpy.pi, other=None): """Create a array which masks jumps >= threshold. Extra points are inserted between two subsequent values whose absolute difference differs by more than threshold (default is pi). Other can be a secondary array which is also masked according to *a*. Returns (*a_masked*, *other_masked*) (where *other_masked* can be ``None``) """ assert len(a.shape) == 1, "Only 1D arrays supported" if other is not None and a.shape != other.shape: raise ValueError("arrays must be of identical shape") # jump occurs after the index in break breaks = numpy.where(numpy.abs(numpy.diff(a)) >= threshold)[0] # insert a blank after breaks += 1 # is this needed?? -- no, but leave it here as a reminder #f2 = numpy.diff(a, 2) #up = (f2[breaks - 1] >= 0) # >0: up, <0: down # sort into up and down breaks: #breaks_up = breaks[up] #breaks_down = breaks[~up] # new array b including insertions for all the breaks m = len(breaks) b = numpy.empty((len(a) + m)) # calculate new indices for breaks in b, taking previous insertions into account b_breaks = breaks + numpy.arange(m) mask = numpy.zeros_like(b, dtype=numpy.bool) mask[b_breaks] = True b[~mask] = a b[mask] = numpy.NAN if other is not None: c = numpy.empty_like(b) c[~mask] = other c[mask] = numpy.NAN ma_c = numpy.ma.array(c, mask=mask) else: ma_c = None return numpy.ma.array(b, mask=mask), ma_c
0.004453
def remove_root(self, model, setter=None):
    ''' Remove a model as root model from this Document.

    Changes to this model may still trigger ``on_change`` callbacks
    on this document, if the model is still referred to by other
    root models.

    Args:
        model (Model) :
            The model to remove as a root of this document.

        setter (ClientSession or ServerSession or None, optional) :
            This is used to prevent "boomerang" updates to Bokeh apps.
            (default: None)

            In the context of a Bokeh server application, incoming updates
            to properties will be annotated with the session that is
            doing the updating. This value is propagated through any
            subsequent change notifications that the update triggers.
            The session can compare the event setter to itself, and
            suppress any updates that originate from itself.

    '''
    if model not in self._roots:
        return  # TODO (bev) ValueError?
    self._push_all_models_freeze()
    try:
        self._roots.remove(model)
    finally:
        self._pop_all_models_freeze()
    self._trigger_on_change(RootRemovedEvent(self, model, setter))
0.002306
def _apply_data(self, f, ts, reverse=False):
    """
    Convenience function for all of the math stuff.
    """
    # TODO: needs to catch np numeric types?
    if isinstance(ts, (int, float)):
        d = ts * np.ones(self.shape[0])
    elif ts is None:
        d = None
    elif np.array_equal(ts.index, self.index):
        d = ts.values
    else:
        d = ts._retime(self.index)

    if not reverse:
        new_data = np.apply_along_axis(f, 0, self.values, d)
    else:
        new_data = np.apply_along_axis(f, 0, d, self.values)

    return Trace(new_data, self.index, name=self.name)
0.003021
def get_pkg_version(pkg_name, parse=False):
    """
    Verify and get installed python package version.

    :param pkg_name: python package name
    :param parse: parse version number with the pkg_resources.parse_version function
    :return: None if pkg is not installed, otherwise version as a string or
        parsed version when parse=True
    """
    import pkg_resources  # part of setuptools
    try:
        version = pkg_resources.require(pkg_name)[0].version
        return pkg_resources.parse_version(version) if parse else version
    except pkg_resources.DistributionNotFound:
        return None
0.00165
def close(args): """ %prog close scaffolds.fasta PE*.fastq Run GapFiller to fill gaps. """ p = OptionParser(close.__doc__) p.set_home("gapfiller") p.set_cpus() opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) scaffolds = args[0] libtxt = write_libraries(args[1:], aligner="bwa") cmd = "perl " + op.join(opts.gapfiller_home, "GapFiller.pl") cmd += " -l {0} -s {1} -T {2}".format(libtxt, scaffolds, opts.cpus) runsh = "run.sh" write_file(runsh, cmd)
0.001821
def _setup_profiles(self, conversion_profiles): ''' Add given conversion profiles checking for invalid profiles ''' # Check for invalid profiles for key, path in conversion_profiles.items(): if isinstance(path, str): path = (path, ) for left, right in pair_looper(path): pair = (_format(left), _format(right)) if pair not in self.converters: msg = 'Invalid conversion profile %s, unknown step %s' log.warning(msg % (repr(key), repr(pair))) break else: # If it did not break, then add to conversion profiles self.conversion_profiles[key] = path
0.002635
def configure(ctx, helper, edit): ''' Update configuration ''' ctx.obj.config = ConfigFile(ctx.obj.config_file) if edit: ctx.obj.config.edit_config_file() return if os.path.isfile(ctx.obj.config.config_file): ctx.obj.config.read_config() if ctx.obj.profile is None: ctx.obj.profile = ctx.obj.config.default_profile args, kwargs = _parse_args_and_kwargs(ctx.args) assert len(args) == 0, 'Unrecognized arguments: "{}"'.format(args) if ctx.obj.profile not in ctx.obj.config.config['profiles']: ctx.obj.config.config['profiles'][ctx.obj.profile] = { 'api': {'user_config': {}}, 'manager': {}, 'authorities': {}} profile_config = ctx.obj.config.config['profiles'][ctx.obj.profile] profile_config['api']['user_config'].update(kwargs) ctx.obj.config.write_config(ctx.obj.config_file) _generate_api(ctx) if ctx.obj.api.manager is not None: check_requirements( to_populate=profile_config['api']['user_config'], prompts=ctx.obj.api.manager.required_user_config, helper=helper) ctx.obj.config.write_config(ctx.obj.config_file)
0.000843
def render(self, surf):
    """Render the button"""
    if self.clicked:
        icon = self.icon_pressed
    else:
        icon = self.icon

    surf.blit(icon, self)
0.010417
def list_user_participants(self, appointment_group, **kwargs):
    """
    List user participants in this appointment group.

    .. warning::
        .. deprecated:: 0.10.0
            Use :func:`canvasapi.canvas.Canvas.get_user_participants` instead.

    :calls: `GET /api/v1/appointment_groups/:id/users \
        <https://canvas.instructure.com/doc/api/appointment_groups.html#method.appointment_groups.users>`_

    :param appointment_group: The object or ID of the appointment group.
    :type appointment_group: :class:`canvasapi.appointment_group.AppointmentGroup` or int

    :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
        :class:`canvasapi.user.User`
    """
    warnings.warn(
        "`list_user_participants` is being deprecated and will be removed in a "
        "future version. Use `get_user_participants` instead",
        DeprecationWarning
    )

    return self.get_user_participants(appointment_group, **kwargs)
0.005941
def setvar(parser, token):
    """
    {% setvar <var_name> to <var_value> %}
    """
    try:
        setvar, var_name, to_, var_value = token.split_contents()
    except ValueError:
        raise template.TemplateSyntaxError(
            'Invalid arguments for %r' % token.split_contents()[0])
    return SetVarNode(var_name, var_value)
0.00627
def open(self, inp, opts=None):
    """Use this to set what file to read from."""
    if isinstance(inp, io.TextIOWrapper):
        self.input = inp
    elif isinstance(inp, str):
        self.name = inp
        self.input = open(inp, 'r')
    else:
        raise IOError("Invalid input type (%s) for %s" %
                      (inp.__class__.__name__, inp))
    return
0.006881
def offset(self, offset):
    """Fetch results after `offset` value"""
    clone = self._clone()
    if isinstance(offset, int):
        clone._offset = offset
    return clone
0.010101
def get_serializer_in(self, *args, **kwargs):
    """
    Return the serializer instance that should be used for validating and
    deserializing input, and for serializing output.
    """
    serializer_class = self.get_serializer_class_in()
    kwargs['context'] = self.get_serializer_context()
    return serializer_class(*args, **kwargs)
0.00542
def _remove_old_stderr_files(self):
    """
    Remove stderr files left by previous Spyder instances.

    This is only required on Windows because we can't
    clean up stderr files while Spyder is running on it.
    """
    if os.name == 'nt':
        tmpdir = get_temp_dir()
        for fname in os.listdir(tmpdir):
            if osp.splitext(fname)[1] == '.stderr':
                try:
                    os.remove(osp.join(tmpdir, fname))
                except Exception:
                    pass
0.003497
def getdata(self, blc=(), trc=(), inc=()):
    """Get image data.

    Using the arguments blc (bottom left corner), trc (top right corner),
    and inc (stride) it is possible to get a data slice.

    The data is returned as a numpy array. Its dimensionality is the same
    as the dimensionality of the image, even if an axis has length 1.
    """
    return self._getdata(self._adjustBlc(blc),
                         self._adjustTrc(trc),
                         self._adjustInc(inc))
0.003788
def swap(self):
    '''Swap stereo channels. If the input is not stereo, pairs of channels
    are swapped, and a possible odd last channel passed through.

    E.g., for seven channels, the output order will be 2, 1, 4, 3, 6, 5, 7.

    See Also
    --------
    remix

    '''
    effect_args = ['swap']
    self.effects.extend(effect_args)
    self.effects_log.append('swap')

    return self
0.004535
def max_height(self):
    """
    :return: The max height of the rendered text (across all images if an
        animated renderer).
    """
    if len(self._plain_images) <= 0:
        self._convert_images()
    if self._max_height == 0:
        for image in self._plain_images:
            self._max_height = max(len(image), self._max_height)
    return self._max_height
0.004854
def ProcessXMLAnnotation(xml_file): """Process a single XML file containing a bounding box.""" # pylint: disable=broad-except try: tree = ET.parse(xml_file) except Exception: print('Failed to parse: ' + xml_file, file=sys.stderr) return None # pylint: enable=broad-except root = tree.getroot() num_boxes = FindNumberBoundingBoxes(root) boxes = [] for index in range(num_boxes): box = BoundingBox() # Grab the 'index' annotation. box.xmin = GetInt('xmin', root, index) box.ymin = GetInt('ymin', root, index) box.xmax = GetInt('xmax', root, index) box.ymax = GetInt('ymax', root, index) box.width = GetInt('width', root) box.height = GetInt('height', root) box.filename = GetItem('filename', root) + '.JPEG' box.label = GetItem('name', root) xmin = float(box.xmin) / float(box.width) xmax = float(box.xmax) / float(box.width) ymin = float(box.ymin) / float(box.height) ymax = float(box.ymax) / float(box.height) # Some images contain bounding box annotations that # extend outside of the supplied image. See, e.g. # n03127925/n03127925_147.xml # Additionally, for some bounding boxes, the min > max # or the box is entirely outside of the image. min_x = min(xmin, xmax) max_x = max(xmin, xmax) box.xmin_scaled = min(max(min_x, 0.0), 1.0) box.xmax_scaled = min(max(max_x, 0.0), 1.0) min_y = min(ymin, ymax) max_y = max(ymin, ymax) box.ymin_scaled = min(max(min_y, 0.0), 1.0) box.ymax_scaled = min(max(max_y, 0.0), 1.0) boxes.append(box) return boxes
0.006884
def process_lists(self):
    """Do any preprocessing of the lists."""
    for l1_idx, obj1 in enumerate(self.l1):
        for l2_idx, obj2 in enumerate(self.l2):
            if self.equal(obj1, obj2):
                self.matches.add((l1_idx, l2_idx))
0.00738
def get_segments(self, addr, size): """ Get a segmented memory region based on AbstractLocation information available from VSA. Here are some assumptions to make this method fast: - The entire memory region [addr, addr + size] is located within the same MemoryRegion - The address 'addr' has only one concrete value. It cannot be concretized to multiple values. :param addr: An address :param size: Size of the memory area in bytes :return: An ordered list of sizes each segment in the requested memory region """ address_wrappers = self.normalize_address(addr, is_write=False) # assert len(address_wrappers) > 0 aw = address_wrappers[0] region_id = aw.region if region_id in self.regions: region = self.regions[region_id] alocs = region.get_abstract_locations(aw.address, size) # Collect all segments and sort them segments = [ ] for aloc in alocs: segments.extend(aloc.segments) segments = sorted(segments, key=lambda x: x.offset) # Remove all overlapping segments processed_segments = [ ] last_seg = None for seg in segments: if last_seg is None: last_seg = seg processed_segments.append(seg) else: # Are they overlapping? if seg.offset >= last_seg.offset and seg.offset <= last_seg.offset + size: continue processed_segments.append(seg) # Make it a list of sizes sizes = [ ] next_pos = aw.address for seg in processed_segments: if seg.offset > next_pos: gap = seg.offset - next_pos assert gap > 0 sizes.append(gap) next_pos += gap if seg.size + next_pos > aw.address + size: sizes.append(aw.address + size - next_pos) next_pos += aw.address + size - next_pos else: sizes.append(seg.size) next_pos += seg.size if not sizes: return [ size ] return sizes else: # The region doesn't exist. Then there is only one segment! return [ size ]
0.006031
def FileEntryExistsByPathSpec(self, path_spec):
    """Determines if a file entry for a path specification exists.

    Args:
        path_spec (PathSpec): path specification.

    Returns:
        bool: True if the file entry exists.
    """
    # All checks for a correct path spec are done in SQLiteBlobFile.
    # Therefore, attempt to open the path specification and
    # check if errors occurred.
    try:
        file_object = resolver.Resolver.OpenFileObject(
            path_spec, resolver_context=self._resolver_context)
    except (IOError, ValueError, errors.AccessError, errors.PathSpecError):
        return False

    file_object.close()
    return True
0.004566
def set_description(self, description, lang=None): """Sets the `description` metadata property on your Thing/Point. Only one description is allowed per language, so any other descriptions in this language are removed before adding this one Raises `ValueError` containing an error message if the parameters fail validation `description` (mandatory) (string) the new text of the description `lang` (optional) (string) The two-character ISO 639-1 language code to use for your label. None means use the default language for your agent. See [Config](./Config.m.html#IoticAgent.IOT.Config.Config.__init__) """ description = Validation.description_check_convert(description) lang = Validation.lang_check_convert(lang, default=self._default_lang) # remove any other descriptions with this language before adding self.delete_description(lang) subj = self._get_uuid_uriref() self._graph.add((subj, self._commentPredicate, Literal(description, lang)))
0.006623
def build(self, X, Y, w=None, edges=None): """ Assigns data to this object and builds the Morse-Smale Complex @ In, X, an m-by-n array of values specifying m n-dimensional samples @ In, Y, a m vector of values specifying the output responses corresponding to the m samples specified by X @ In, w, an optional m vector of values specifying the weights associated to each of the m samples used. Default of None means all points will be equally weighted @ In, edges, an optional list of custom edges to use as a starting point for pruning, or in place of a computed graph. """ super(MorseComplex, self).build(X, Y, w, edges) if self.debug: sys.stdout.write("Decomposition: ") start = time.clock() morse_complex = MorseComplexFloat( vectorFloat(self.Xnorm.flatten()), vectorFloat(self.Y), str(self.gradient), str(self.simplification), vectorFloat(self.w), self.graph_rep.full_graph(), self.debug, ) self.__amc = morse_complex self.persistences = [] self.merge_sequence = {} morse_complex_json = json.loads(morse_complex.to_json()) hierarchy = morse_complex_json["Hierarchy"] for merge in hierarchy: self.persistences.append(merge["Persistence"]) self.merge_sequence[merge["Dying"]] = ( merge["Persistence"], merge["Surviving"], merge["Saddle"], ) self.persistences = sorted(list(set(self.persistences))) partitions = morse_complex_json["Partitions"] self.base_partitions = {} for i, label in enumerate(partitions): if label not in self.base_partitions: self.base_partitions[label] = [] self.base_partitions[label].append(i) self.max_indices = list(self.base_partitions.keys()) if self.debug: end = time.clock() sys.stdout.write("%f s\n" % (end - start))
0.00092
def process_summary(summaryfile, **kwargs): """Extracting information from an albacore summary file. Only reads which have a >0 length are returned. The fields below may or may not exist, depending on the type of sequencing performed. Fields 1-14 are for 1D sequencing. Fields 1-23 for 2D sequencing. Fields 24-27, 2-5, 22-23 for 1D^2 (1D2) sequencing Fields 28-38 for barcoded workflows 1 filename 2 read_id 3 run_id 4 channel 5 start_time 6 duration 7 num_events 8 template_start 9 num_events_template 10 template_duration 11 num_called_template 12 sequence_length_template 13 mean_qscore_template 14 strand_score_template 15 complement_start 16 num_events_complement 17 complement_duration 18 num_called_complement 19 sequence_length_complement 20 mean_qscore_complement 21 strand_score_complement 22 sequence_length_2d 23 mean_qscore_2d 24 filename1 25 filename2 26 read_id1 27 read_id2 28 barcode_arrangement 29 barcode_score 30 barcode_full_arrangement 31 front_score 32 rear_score 33 front_begin_index 34 front_foundseq_length 35 rear_end_index 36 rear_foundseq_length 37 kit 38 variant """ logging.info("Nanoget: Collecting metrics from summary file {} for {} sequencing".format( summaryfile, kwargs["readtype"])) ut.check_existance(summaryfile) if kwargs["readtype"] == "1D": cols = ["read_id", "run_id", "channel", "start_time", "duration", "sequence_length_template", "mean_qscore_template"] elif kwargs["readtype"] in ["2D", "1D2"]: cols = ["read_id", "run_id", "channel", "start_time", "duration", "sequence_length_2d", "mean_qscore_2d"] if kwargs["barcoded"]: cols.append("barcode_arrangement") logging.info("Nanoget: Extracting metrics per barcode.") try: datadf = pd.read_csv( filepath_or_buffer=summaryfile, sep="\t", usecols=cols, ) except ValueError: logging.error("Nanoget: did not find expected columns in summary file {}:\n {}".format( summaryfile, ', '.join(cols))) sys.exit("ERROR: expected columns in summary file {} not found:\n {}".format( summaryfile, ', '.join(cols))) if kwargs["barcoded"]: datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration", "lengths", "quals", "barcode"] else: datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration", "lengths", "quals"] logging.info("Nanoget: Finished collecting statistics from summary file {}".format(summaryfile)) return ut.reduce_memory_usage(datadf.loc[datadf["lengths"] != 0].copy())
0.002738
def get_intervals(self, sort=False):
    """Give all the intervals or points.

    :param bool sort: Flag for yielding the intervals or points sorted.
    :yields: All the intervals
    """
    for i in sorted(self.intervals) if sort else self.intervals:
        yield i
0.006803
def _populate(cls, as_of=None, delete=False): """Populate the table with billing cycles starting from `as_of` Args: as_of (date): The date at which to begin the populating delete (bool): Should future billing cycles be deleted? """ billing_cycle_helper = get_billing_cycle() billing_cycles_exist = BillingCycle.objects.exists() try: current_billing_cycle = BillingCycle.objects.as_of(date=as_of) except BillingCycle.DoesNotExist: current_billing_cycle = None # If no cycles exist then disable the deletion logic if not billing_cycles_exist: delete = False # Cycles exist, but a date has been specified outside of them if billing_cycles_exist and not current_billing_cycle: raise CannotPopulateForDateOutsideExistingCycles() # Omit the current billing cycle if we are deleting (as # deleting the current billing cycle will be a Bad Idea) omit_current = (current_billing_cycle and delete) stop_date = as_of + relativedelta(years=settings.SWIFTWIND_BILLING_CYCLE_YEARS) date_ranges = billing_cycle_helper.generate_date_ranges(as_of, stop_date=stop_date, omit_current=omit_current) date_ranges = list(date_ranges) beginning_date = date_ranges[0][0] with db_transaction.atomic(): if delete: # Delete all the future unused transactions cls.objects.filter(start_date__gte=beginning_date).delete() for start_date, end_date in date_ranges: exists = BillingCycle.objects.filter(date_range=(start_date, end_date)).exists() if exists: if delete: raise Exception( 'It should not be possible to get here as future billing cycles have just been deleted' ) else: # We're updating, so we can just ignore cycles that already exist pass else: BillingCycle.objects.create( date_range=(start_date, end_date), )
0.003101
def load_commodities(self): """ Load the commodities for Amounts in this object. """ base, quote = self.market.split("_") if isinstance(self.price, Amount): self.price = Amount("{0:.8f} {1}".format(self.price.to_double(), quote)) else: self.price = Amount("{0:.8f} {1}".format(float(self.price), quote)) if isinstance(self.amount, Amount): self.amount = Amount("{0:.8f} {1}".format(self.amount.to_double(), base)) else: self.amount = Amount("{0:.8f} {1}".format(float(self.amount), base)) fee_currency = base if self.fee_side == 'base' else quote if isinstance(self.fee, Amount): self.fee = Amount("{0:.8f} {1}".format(float(self.fee.to_double()), fee_currency)) else: self.fee = Amount("{0:.8f} {1}".format(float(self.fee), fee_currency))
0.007786
def copy_to(self, new_key, bucket=None):
    """Copies this item to the specified new key.

    Args:
        new_key: the new key to copy this item to.
        bucket: the bucket of the new item; if None (the default) use the same bucket.
    Returns:
        An Item corresponding to new key.
    Raises:
        Exception if there was an error copying the item.
    """
    if bucket is None:
        bucket = self._bucket
    try:
        new_info = self._api.objects_copy(self._bucket, self._key, bucket, new_key)
    except Exception as e:
        raise e
    return Item(bucket, new_key, new_info, context=self._context)
0.009788
def Lewis(D=None, alpha=None, Cp=None, k=None, rho=None): r'''Calculates Lewis number or `Le` for a fluid with the given parameters. .. math:: Le = \frac{k}{\rho C_p D} = \frac{\alpha}{D} Inputs can be either of the following sets: * Diffusivity and Thermal diffusivity * Diffusivity, heat capacity, thermal conductivity, and density Parameters ---------- D : float Diffusivity of a species, [m^2/s] alpha : float, optional Thermal diffusivity, [m^2/s] Cp : float, optional Heat capacity, [J/kg/K] k : float, optional Thermal conductivity, [W/m/K] rho : float, optional Density, [kg/m^3] Returns ------- Le : float Lewis number [] Notes ----- .. math:: Le=\frac{\text{Thermal diffusivity}}{\text{Mass diffusivity}} = \frac{Sc}{Pr} An error is raised if none of the required input sets are provided. Examples -------- >>> Lewis(D=22.6E-6, alpha=19.1E-6) 0.8451327433628318 >>> Lewis(D=22.6E-6, rho=800., k=.2, Cp=2200) 0.00502815768302494 References ---------- .. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook, Eighth Edition. McGraw-Hill Professional, 2007. .. [2] Cengel, Yunus, and John Cimbala. Fluid Mechanics: Fundamentals and Applications. Boston: McGraw Hill Higher Education, 2006. .. [3] Gesellschaft, V. D. I., ed. VDI Heat Atlas. 2nd edition. Berlin; New York:: Springer, 2010. ''' if k and Cp and rho: alpha = k/(rho*Cp) elif alpha: pass else: raise Exception('Insufficient information provided for Le calculation') return alpha/D
0.000577
def top(**kwargs): ''' Look up top data in Cobbler for a minion. ''' url = __opts__['cobbler.url'] user = __opts__['cobbler.user'] password = __opts__['cobbler.password'] minion_id = kwargs['opts']['id'] log.info("Querying cobbler for information for %r", minion_id) try: server = salt.ext.six.moves.xmlrpc_client.Server(url, allow_none=True) if user: server.login(user, password) data = server.get_blended_data(None, minion_id) except Exception: log.exception( 'Could not connect to cobbler.' ) return {} return {data['status']: data['mgmt_classes']}
0.001493
def query(self, tablename, attributes=None, consistent=False, count=False, index=None, limit=None, desc=False, return_capacity=None, filter=None, filter_or=False, exclusive_start_key=None, **kwargs): """ Perform an index query on a table This uses the older version of the DynamoDB API. See also: :meth:`~.query2`. Parameters ---------- tablename : str Name of the table to query attributes : list If present, only fetch these attributes from the item consistent : bool, optional Perform a strongly consistent read of the data (default False) count : bool, optional If True, return a count of matched items instead of the items themselves (default False) index : str, optional The name of the index to query limit : int, optional Maximum number of items to return desc : bool, optional If True, return items in descending order (default False) return_capacity : {NONE, INDEXES, TOTAL}, optional INDEXES will return the consumed capacity for indexes, TOTAL will return the consumed capacity for the table and the indexes. (default NONE) filter : dict, optional Query arguments. Same format as **kwargs, but these arguments filter the results on the server before they are returned. They will NOT use an index, as that is what the **kwargs are for. filter_or : bool, optional If True, multiple filter args will be OR'd together. If False, they will be AND'd together. (default False) exclusive_start_key : dict, optional The ExclusiveStartKey to resume a previous query **kwargs : dict, optional Query arguments (examples below) Examples -------- You may pass in constraints using the Django-style '__' syntax. For example: .. code-block:: python connection.query('mytable', foo__eq=5) connection.query('mytable', foo__eq=5, bar__lt=22) connection.query('mytable', foo__eq=5, bar__between=(1, 10)) """ keywords = { 'TableName': tablename, 'ReturnConsumedCapacity': self._default_capacity(return_capacity), 'ConsistentRead': consistent, 'ScanIndexForward': not desc, 'KeyConditions': encode_query_kwargs(self.dynamizer, kwargs), } if attributes is not None: keywords['AttributesToGet'] = attributes if index is not None: keywords['IndexName'] = index if filter is not None: if len(filter) > 1: keywords['ConditionalOperator'] = 'OR' if filter_or else 'AND' keywords['QueryFilter'] = encode_query_kwargs(self.dynamizer, filter) if exclusive_start_key is not None: keywords['ExclusiveStartKey'] = \ self.dynamizer.maybe_encode_keys(exclusive_start_key) if not isinstance(limit, Limit): limit = Limit(limit) if count: keywords['Select'] = COUNT return self._count('query', limit, keywords) else: return ResultSet(self, limit, 'query', **keywords)
0.00145
def kernel_pull(self, user_name, kernel_slug, **kwargs): # noqa: E501 """Pull the latest code from a kernel # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.kernel_pull(user_name, kernel_slug, async_req=True) >>> result = thread.get() :param async_req bool :param str user_name: Kernel owner (required) :param str kernel_slug: Kernel name (required) :return: Result If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.kernel_pull_with_http_info(user_name, kernel_slug, **kwargs) # noqa: E501 else: (data) = self.kernel_pull_with_http_info(user_name, kernel_slug, **kwargs) # noqa: E501 return data
0.002041
def discretize_arc(points, close=False, scale=1.0): """ Returns a version of a three point arc consisting of line segments. Parameters --------- points : (3, d) float Points on the arc where d in [2,3] close : boolean If True close the arc into a circle scale : float What is the approximate overall drawing scale Used to establish order of magnitude for precision Returns --------- discrete : (m, d) float Connected points in space """ # make sure points are (n, 3) points, is_2D = util.stack_3D(points, return_2D=True) # find the center of the points center_info = arc_center(points) center, R, N, angle = (center_info['center'], center_info['radius'], center_info['normal'], center_info['span']) # if requested, close arc into a circle if close: angle = np.pi * 2 # the number of facets, based on the angle criteria count_a = angle / res.seg_angle count_l = ((R * angle)) / (res.seg_frac * scale) # figure out the number of line segments count = np.max([count_a, count_l]) # force at LEAST 4 points for the arc # otherwise the endpoints will diverge count = np.clip(count, 4, np.inf) count = int(np.ceil(count)) V1 = util.unitize(points[0] - center) V2 = util.unitize(np.cross(-N, V1)) t = np.linspace(0, angle, count) discrete = np.tile(center, (count, 1)) discrete += R * np.cos(t).reshape((-1, 1)) * V1 discrete += R * np.sin(t).reshape((-1, 1)) * V2 # do an in-process check to make sure result endpoints # match the endpoints of the source arc if not close: arc_dist = np.linalg.norm(points[[0, -1]] - discrete[[0, -1]], axis=1) arc_ok = (arc_dist < tol.merge).all() if not arc_ok: log.warn( 'failed to discretize arc (endpoint distance %s)', str(arc_dist)) log.warn('Failed arc points: %s', str(points)) raise ValueError('Arc endpoints diverging!') discrete = discrete[:, :(3 - is_2D)] return discrete
0.000443
def _read_mode_ts(self, size, kind): """Read Time Stamp option. Positional arguments: * size - int, length of option * kind - int, 68 (TS) Returns: * dict -- extracted Time Stamp (TS) option Structure of Timestamp (TS) option [RFC 791]: +--------+--------+--------+--------+ |01000100| length | pointer|oflw|flg| +--------+--------+--------+--------+ | internet address | +--------+--------+--------+--------+ | timestamp | +--------+--------+--------+--------+ | . | . . Octets Bits Name Description 0 0 ip.ts.kind Kind (25) 0 0 ip.ts.type.copy Copied Flag (0) 0 1 ip.ts.type.class Option Class (0) 0 3 ip.ts.type.number Option Number (25) 1 8 ip.ts.length Length (≤40) 2 16 ip.ts.pointer Pointer (≥5) 3 24 ip.ts.overflow Overflow Octets 3 28 ip.ts.flag Flag 4 32 ip.ts.ip Internet Address 8 64 ip.ts.timestamp Timestamp """ if size > 40 or size < 4: raise ProtocolError(f'{self.alias}: [Optno {kind}] invalid format') _tptr = self._read_unpack(1) _oflg = self._read_binary(1) _oflw = int(_oflg[:4], base=2) _flag = int(_oflg[4:], base=2) if _tptr < 5: raise ProtocolError(f'{self.alias}: [Optno {kind}] invalid format') data = dict( kind=kind, type=self._read_opt_type(kind), length=size, pointer=_tptr, overflow=_oflw, flag=_flag, ) endpoint = min(_tptr, size) if _flag == 0: if (size - 4) % 4 != 0: raise ProtocolError(f'{self.alias}: [Optno {kind}] invalid format') counter = 5 timestamp = list() while counter < endpoint: counter += 4 time = self._read_unpack(4, lilendian=True) timestamp.append(datetime.datetime.fromtimestamp(time)) data['timestamp'] = timestamp or None elif _flag == 1 or _flag == 3: if (size - 4) % 8 != 0: raise ProtocolError(f'{self.alias}: [Optno {kind}] invalid format') counter = 5 ipaddress = list() timestamp = list() while counter < endpoint: counter += 8 ipaddress.append(self._read_ipv4_addr()) time = self._read_unpack(4, lilendian=True) timestamp.append(datetime.datetime.fromtimestamp(time)) data['ip'] = ipaddress or None data['timestamp'] = timestamp or None else: data['data'] = self._read_fileng(size - 4) or None return data
0.001211
def create_parser(subparsers):
    """ create argument parser """
    parser = subparsers.add_parser(
        'clusters',
        help='Display existing clusters',
        usage="%(prog)s [options]",
        add_help=True)
    args.add_verbose(parser)
    args.add_tracker_url(parser)
    parser.set_defaults(subcommand='clusters')
    return subparsers
0.021021
def simple_paginate(self, per_page=15, current_page=None, columns=None):
    """
    Paginate the given query.

    :param per_page: The number of records per page
    :type per_page: int
    :param current_page: The current page of results
    :type current_page: int
    :param columns: The columns to return
    :type columns: list

    :return: The paginator
    :rtype: Paginator
    """
    if columns is None:
        columns = ["*"]

    page = current_page or Paginator.resolve_current_page()

    self.skip((page - 1) * per_page).take(per_page + 1)

    return Paginator(self.get(columns), per_page, page)
0.002937
def _severity_by_name(name):
    """
    Return the severity integer value by its name. If not found, return
    'information'.

    :rtype: int
    """
    for intvalue, sevname in SEVERITY.items():
        if name.lower() == sevname:
            return intvalue
    return 1
0.007117
def jsonAsCti(dct):
    """ Config tree item JSON decoding function.

        Returns a CTI given a dictionary of attributes.
        The full class name of the desired CTI class should be in dct['_class_'].
    """
    if '_class_' in dct:
        full_class_name = dct['_class_']  # TODO: how to handle the full_class_name?
        cls = import_symbol(full_class_name)
        return cls.createFromJsonDict(dct)
    else:
        return dct
0.011682
def get_output(db, output_id):
    """
    :param db: a :class:`openquake.server.dbapi.Db` instance
    :param output_id: ID of an Output object
    :returns: (ds_key, calc_id, dirname)
    """
    out = db('SELECT output.*, ds_calc_dir FROM output, job '
             'WHERE oq_job_id=job.id AND output.id=?x', output_id, one=True)
    return out.ds_key, out.oq_job_id, os.path.dirname(out.ds_calc_dir)
0.002481
def connectShell(connection, protocol):
    """Connect a Protocol to a ssh shell session
    """
    deferred = connectSession(connection, protocol)

    @deferred.addCallback
    def requestSubsystem(session):
        return session.requestShell()

    return deferred
0.007491
def wait_for_ribcl_firmware_update_to_complete(ribcl_object): """Continuously checks for iLO firmware update to complete.""" def is_ilo_reset_initiated(): """Checks for initiation of iLO reset Invokes the ``get_product_name`` api and returns i) True, if exception gets raised as that marks the iLO reset initiation. ii) False, if the call gets through without any failure, marking that iLO is yet to be reset. """ try: LOG.debug(ribcl_object._('Checking for iLO reset...')) ribcl_object.get_product_name() return False except exception.IloError: LOG.debug(ribcl_object._('iLO is being reset...')) return True # Note(deray): wait for 5 secs, before checking if iLO reset got triggered # at every interval of 6 secs. This looping call happens for 10 times. # Once it comes out of the wait of iLO reset trigger, then it starts # waiting for iLO to be up again after reset. wait_for_operation_to_complete( is_ilo_reset_initiated, delay_bw_retries=6, delay_before_attempts=5, is_silent_loop_exit=True ) wait_for_ilo_after_reset(ribcl_object)
0.000792
def split_fragment(cls, fragment): """A heuristic used to split a string into version name/fragment: >>> SourcePackage.split_fragment('pysolr-2.1.0-beta') ('pysolr', '2.1.0-beta') >>> SourcePackage.split_fragment('cElementTree-1.0.5-20051216') ('cElementTree', '1.0.5-20051216') >>> SourcePackage.split_fragment('pil-1.1.7b1-20090412') ('pil', '1.1.7b1-20090412') >>> SourcePackage.split_fragment('django-plugin-2-2.3') ('django-plugin-2', '2.3') """ def likely_version_component(enumerated_fragment): return sum(bool(v and v[0].isdigit()) for v in enumerated_fragment[1].split('.')) fragments = fragment.split('-') if len(fragments) == 1: return fragment, '' max_index, _ = max(enumerate(fragments), key=likely_version_component) return '-'.join(fragments[0:max_index]), '-'.join(fragments[max_index:])
0.004444
def repeat_op(repetitions, inputs, op, *args, **kwargs): """Build a sequential Tower starting from inputs by using an op repeatedly. It creates new scopes for each operation by increasing the counter. Example: given repeat_op(3, _, ops.conv2d, 64, [3, 3], scope='conv1') it will repeat the given op under the following variable_scopes: conv1/Conv conv1/Conv_1 conv1/Conv_2 Args: repetitions: number or repetitions. inputs: a tensor of size [batch_size, height, width, channels]. op: an operation. *args: args for the op. **kwargs: kwargs for the op. Returns: a tensor result of applying the operation op, num times. Raises: ValueError: if the op is unknown or wrong. """ scope = kwargs.pop('scope', None) with tf.variable_scope(scope, 'RepeatOp', [inputs]): tower = inputs for _ in range(repetitions): tower = op(tower, *args, **kwargs) return tower
0.00533
def set_unobserved_before(self, tlen, qlen, nt, p):
    """Set the unobservable sequence data before this base

    :param tlen: target homopolymer length
    :param qlen: query homopolymer length
    :param nt: nucleotide
    :param p: the probability of attributing this base to the unobserved error
    :type tlen: int
    :type qlen: int
    :type nt: char
    :type p: float
    """
    self._unobservable.set_before(tlen, qlen, nt, p)
0.020501
def _createAction(self, widget, iconFileName, text, shortcut, slot):
    """Create QAction with given parameters and add to the widget
    """
    icon = qutepart.getIcon(iconFileName)
    action = QAction(icon, text, widget)
    action.setShortcut(QKeySequence(shortcut))
    action.setShortcutContext(Qt.WidgetShortcut)
    action.triggered.connect(slot)

    widget.addAction(action)

    return action
0.004535
async def vcx_init(config_path: str) -> None:
    """
    Initializes VCX with config file.

    :param config_path: String

    Example:
        await vcx_init('/home/username/vcxconfig.json')

    :return:
    """
    logger = logging.getLogger(__name__)

    if not hasattr(vcx_init, "cb"):
        logger.debug("vcx_init: Creating callback")
        vcx_init.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32))

    c_config_path = c_char_p(config_path.encode('utf-8'))

    result = await do_call('vcx_init',
                           c_config_path,
                           vcx_init.cb)

    logger.debug("vcx_init completed")
    return result
0.001548
def update_dns_ha_resource_params(resources, resource_params, relation_id=None, crm_ocf='ocf:maas:dns'): """ Configure DNS-HA resources based on provided configuration and update resource dictionaries for the HA relation. @param resources: Pointer to dictionary of resources. Usually instantiated in ha_joined(). @param resource_params: Pointer to dictionary of resource parameters. Usually instantiated in ha_joined() @param relation_id: Relation ID of the ha relation @param crm_ocf: Corosync Open Cluster Framework resource agent to use for DNS HA """ _relation_data = {'resources': {}, 'resource_params': {}} update_hacluster_dns_ha(charm_name(), _relation_data, crm_ocf) resources.update(_relation_data['resources']) resource_params.update(_relation_data['resource_params']) relation_set(relation_id=relation_id, groups=_relation_data['groups'])
0.000915
def scatter(self, x, y, xerr=[], yerr=[], mark='o', markstyle=None): """Plot a series of points. Plot a series of points (marks) that are not connected by a line. Shortcut for plot with linestyle=None. :param x: array containing x-values. :param y: array containing y-values. :param xerr: array containing errors on the x-values. :param yerr: array containing errors on the y-values. :param mark: the symbol used to mark the data points. May be any plot mark accepted by TikZ (e.g. ``*, x, +, o, square, triangle``). :param markstyle: the style of the plot marks (e.g. 'mark size=.75pt') Example:: >>> plot = artist.Plot() >>> x = np.random.normal(size=20) >>> y = np.random.normal(size=20) >>> plot.scatter(x, y, mark='*') """ self.plot(x, y, xerr=xerr, yerr=yerr, mark=mark, linestyle=None, markstyle=markstyle)
0.001969
def _find_experiment_tag(self):
    """Finds the experiment associated with the metadata.EXPERIMENT_TAG tag.

    Caches the experiment if it was found.

    Returns:
        The experiment or None if no such experiment is found.
    """
    with self._experiment_from_tag_lock:
        if self._experiment_from_tag is None:
            mapping = self.multiplexer.PluginRunToTagToContent(
                metadata.PLUGIN_NAME)
            for tag_to_content in mapping.values():
                if metadata.EXPERIMENT_TAG in tag_to_content:
                    self._experiment_from_tag = metadata.parse_experiment_plugin_data(
                        tag_to_content[metadata.EXPERIMENT_TAG])
                    break
    return self._experiment_from_tag
0.004225
def replace_namespaced_resource_quota(self, name, namespace, body, **kwargs): # noqa: E501 """replace_namespaced_resource_quota # noqa: E501 replace the specified ResourceQuota # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_resource_quota(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the ResourceQuota (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1ResourceQuota body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1ResourceQuota If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_resource_quota_with_http_info(name, namespace, body, **kwargs) # noqa: E501 else: (data) = self.replace_namespaced_resource_quota_with_http_info(name, namespace, body, **kwargs) # noqa: E501 return data
0.001265
def make_fig(self):
    """
    Figure constructor, called before `self.plot()`
    """
    self.fig = plt.figure(figsize=(8, 4))
    self._all_figures.append(self.fig)
0.010638
def is_user(self, organisation_id):
    """Is the user valid and approved in this organisation"""
    return (self._has_role(organisation_id, self.roles.user) or
            self.is_org_admin(organisation_id))
0.00905
def getThirdPartyLibLinkerFlags(self, libs): """ Retrieves the linker flags for building against the Unreal-bundled versions of the specified third-party libraries """ fmt = PrintingFormat.singleLine() if libs[0] == '--multiline': fmt = PrintingFormat.multiLine() libs = libs[1:] includeLibs = True if (libs[0] == '--flagsonly'): includeLibs = False libs = libs[1:] platformDefaults = True if libs[0] == '--nodefaults': platformDefaults = False libs = libs[1:] details = self.getThirdpartyLibs(libs, includePlatformDefaults=platformDefaults) return details.getLinkerFlags(self.getEngineRoot(), fmt, includeLibs)
0.040909
def delete(cls, filename, offset=None):
    """Delete ID3v1 tag from a file (if present)."""
    with fileutil.opened(filename, "rb+") as file:
        if offset is None:
            file.seek(-128, 2)
        else:
            file.seek(offset)
        offset = file.tell()
        data = file.read(128)
        if data[:3] == b"TAG":
            fileutil.replace_chunk(file, offset, 128, b"", in_place=True)
0.004454
def protocol_tree_items(self):
    """
    :rtype: dict[int, list of ProtocolTreeItem]
    """
    result = {}
    for i, group in enumerate(self.rootItem.children):
        result[i] = [child for child in group.children]
    return result
0.007463
def getQRArray(text, errorCorrection):
    """Takes in text and errorCorrection (letter), returns 2D array of the QR code"""
    # White is True (1)
    # Black is False (0)
    # ECC: L7, M15, Q25, H30

    # Create the object
    qr = pyqrcode.create(text, error=errorCorrection)

    # Get the terminal representation and split by lines (get rid of top and bottom white spaces)
    plainOut = qr.terminal().split("\n")[5:-5]

    # Initialize the output 2D list
    out = []
    for line in plainOut:
        thisOut = []
        for char in line:
            if char == u'7':
                # This is white
                thisOut.append(1)
            elif char == u'4':
                # This is black, it's part of the u'49'
                thisOut.append(0)
        # Finally add everything to the output, stripping whitespaces at start and end
        out.append(thisOut[4:-4])

    # Everything is done, return the qr code list
    return out
0.031515
def _concrete_acl(self, acl_doc): """Concretize an ACL document. :param dict acl_doc: A document describing an ACL entry. Should come from the API. :returns: An :py:class:`Acl`, or None. :rtype: :py:class:`bases.BaseInstance` """ if not isinstance(acl_doc, dict): return None # Attempt to instantiate an Acl object with the given dict. try: return Acl(document=acl_doc, acls=self) # If construction fails, log the exception and return None. except Exception as ex: logger.exception(ex) logger.error('Could not instantiate ACL document. You probably need to upgrade to a ' 'recent version of the client. Document which caused this error: {}' .format(acl_doc)) return None
0.005794
def start_roles(self, service_name, deployment_name, role_names): ''' Starts the specified virtual machines. service_name: The name of the service. deployment_name: The name of the deployment. role_names: The names of the roles, as an enumerable of strings. ''' _validate_not_none('service_name', service_name) _validate_not_none('deployment_name', deployment_name) _validate_not_none('role_names', role_names) return self._perform_post( self._get_roles_operations_path(service_name, deployment_name), _XmlSerializer.start_roles_operation_to_xml(role_names), as_async=True)
0.002755
def _render_image(self, spec, container_args, alt_text=None): """ Render an image specification into an <img> tag """ try: path, image_args, title = image.parse_image_spec(spec) except Exception as err: # pylint: disable=broad-except logger.exception("Got error on spec %s: %s", spec, err) return ('<span class="error">Couldn\'t parse image spec: ' + '<code>{}</code> {}</span>'.format(flask.escape(spec), flask.escape(str(err)))) composite_args = {**container_args, **image_args} try: img = image.get_image(path, self._search_path) except Exception as err: # pylint: disable=broad-except logger.exception("Got error on image %s: %s", path, err) return ('<span class="error">Error loading image {}: {}</span>'.format( flask.escape(spec), flask.escape(str(err)))) return img.get_img_tag(title, alt_text, **composite_args)
0.002871
def is_available(self, fname):
    """
    Check availability of a remote file without downloading it.

    Use this method when working with large files to check if they are
    available for download.

    Parameters
    ----------
    fname : str
        The file name (relative to the *base_url* of the remote data
        storage) to fetch from the local storage.

    Returns
    -------
    status : bool
        True if the file is available for download. False otherwise.

    """
    self._assert_file_in_registry(fname)
    source = self.get_url(fname)
    response = requests.head(source, allow_redirects=True)
    return bool(response.status_code == 200)
0.005413
def get_list_from_file(file_name): """read the lines from a file into a list""" with open(file_name, mode='r', encoding='utf-8') as f1: lst = f1.readlines() return lst
0.005348
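A short usage sketch for get_list_from_file above; the file name is a hypothetical placeholder. Note that readlines() keeps trailing newlines, so callers typically strip them.

lines = get_list_from_file("notes.txt")              # hypothetical file
stripped = [line.rstrip("\n") for line in lines]     # drop trailing newlines
print(len(stripped), "lines read")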
def get_raw_path(self): """Returns the raw path to the mounted disk image, i.e. the raw :file:`.dd`, :file:`.raw` or :file:`ewf1` file. :rtype: str """ if self.disk_mounter == 'dummy': return self.paths[0] else: if self.disk_mounter == 'avfs' and os.path.isdir(os.path.join(self.mountpoint, 'avfs')): logger.debug("AVFS mounted as a directory, will look in directory for (random) file.") # there is no support for disks inside disks, so this will fail to work for zips containing # E01 files or so. searchdirs = (os.path.join(self.mountpoint, 'avfs'), self.mountpoint) else: searchdirs = (self.mountpoint, ) raw_path = [] if self._paths.get('nbd'): raw_path.append(self._paths['nbd']) for searchdir in searchdirs: # avfs: apparently it is not a dir for pattern in ['*.dd', '*.iso', '*.raw', '*.dmg', 'ewf1', 'flat', 'avfs']: raw_path.extend(glob.glob(os.path.join(searchdir, pattern))) if not raw_path: logger.warning("No viable mount file found in {}.".format(searchdirs)) return None return raw_path[0]
0.007502
def sectorPerformanceDF(token='', version=''): '''This returns an array of each sector and performance for the current trading day. Performance is based on each sector ETF. https://iexcloud.io/docs/api/#sector-performance 8am-5pm ET Mon-Fri Args: token (string); Access token version (string); API version Returns: DataFrame: result ''' df = pd.DataFrame(sectorPerformance(token, version)) _toDatetime(df) _reindex(df, 'name') return df
0.003976
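An illustrative call to sectorPerformanceDF above; the token value is a placeholder rather than a real IEX Cloud credential, and the call only returns data with valid API access.

df = sectorPerformanceDF(token="YOUR_IEX_TOKEN")     # placeholder token
print(df.head())                                     # one row per sector, indexed by name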
def generate_ecc_signing_key(algorithm): """Returns an ECC signing key. :param algorithm: Algorithm object which determines what signature to generate :type algorithm: aws_encryption_sdk.identifiers.Algorithm :returns: Generated signing key :raises NotSupportedError: if signing algorithm is not supported on this platform """ try: verify_interface(ec.EllipticCurve, algorithm.signing_algorithm_info) return ec.generate_private_key(curve=algorithm.signing_algorithm_info(), backend=default_backend()) except InterfaceNotImplemented: raise NotSupportedError("Unsupported signing algorithm info")
0.006144
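A sketch of calling generate_ecc_signing_key above without the full AWS Encryption SDK Algorithm object; the SimpleNamespace below is a stand-in whose signing_algorithm_info is an elliptic-curve class from the cryptography package. This is an assumption for illustration, not the SDK's own API, and exact behavior depends on the installed cryptography version.

from types import SimpleNamespace
from cryptography.hazmat.primitives.asymmetric import ec

fake_algorithm = SimpleNamespace(signing_algorithm_info=ec.SECP384R1)   # stand-in object
private_key = generate_ecc_signing_key(fake_algorithm)
print(private_key.curve.name)                                           # "secp384r1"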
def calculate_checksum(self): """Calculate ISBN checksum. Returns: ``str``: ISBN checksum value """ if len(self.isbn) in (9, 12): return calculate_checksum(self.isbn) else: return calculate_checksum(self.isbn[:-1])
0.006849
def _call(self, x): """Return ``self(x)``.""" x_norm = self.pointwise_norm(x).ufuncs.max() if x_norm > 1: return np.inf else: return 0
0.010471
def writeAnnotation(self, onset_in_seconds, duration_in_seconds, description, str_format='utf-8'): """ Writes an annotation/event to the file """ if str_format == 'utf-8': if duration_in_seconds >= 0: return write_annotation_utf8(self.handle, np.round(onset_in_seconds*10000).astype(int), np.round(duration_in_seconds*10000).astype(int), du(description)) else: return write_annotation_utf8(self.handle, np.round(onset_in_seconds*10000).astype(int), -1, du(description)) else: if duration_in_seconds >= 0: return write_annotation_latin1(self.handle, np.round(onset_in_seconds*10000).astype(int), np.round(duration_in_seconds*10000).astype(int), u(description).encode('latin1')) else: return write_annotation_latin1(self.handle, np.round(onset_in_seconds*10000).astype(int), -1, u(description).encode('latin1'))
0.007284
def execute_lines(self, lines):
    """
    Execute a block of text as multiple command lines: each line is sent
    and executed as a single command; lines starting with '#' are skipped
    """
    for line in lines.splitlines():
        stripped_line = line.strip()
        if stripped_line.startswith('#'):
            continue
        self.write(line+os.linesep, flush=True)
        self.execute_command(line+"\n")
    self.flush()
0.004329
def find_entry_point(site_packages: Path, console_script: str) -> str: """Find a console_script in a site-packages directory. Console script metadata is stored in entry_points.txt per setuptools convention. This function searches all entry_points.txt files and returns the import string for a given console_script argument. :param site_packages: A path to a site-packages directory on disk. :param console_script: A console_script string. """ config_parser = ConfigParser() config_parser.read(site_packages.rglob("entry_points.txt")) return config_parser["console_scripts"][console_script]
0.001582
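A usage sketch for find_entry_point above; the site-packages path and console-script name are hypothetical and depend on the environment being inspected.

from pathlib import Path

site_pkgs = Path(".venv/lib/python3.11/site-packages")   # hypothetical venv layout
print(find_entry_point(site_pkgs, "black"))              # import string such as "pkg.module:func"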
def set_config_token_from_env(section, token, config): '''Given a config section and token, checks for an appropriate environment variable. If the variable exists, sets the config entry to its value. The environment variable checked is of the form SECTION_TOKEN, all upper case, with any dots replaced by underscores. Returns True if the environment variable exists and was used, or False otherwise. ''' env_var_name = ''.join([section.upper(), '_', token.upper().replace('.', '_')]) env_var = os.environ.get(env_var_name) if env_var is None: return False config.set(section, token, env_var) return True
0.001441
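A self-contained sketch exercising set_config_token_from_env above; the section and token names are made up for illustration. The environment variable name follows the SECTION_TOKEN convention described in the docstring.

import os
from configparser import ConfigParser

config = ConfigParser()
config.add_section("storage")
config.set("storage", "api.key", "default-value")

os.environ["STORAGE_API_KEY"] = "from-environment"       # SECTION_TOKEN, dots become underscores
if set_config_token_from_env("storage", "api.key", config):
    print(config.get("storage", "api.key"))              # "from-environment"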
def precision(y_true, y_score, k=None, return_bounds=False):
    """
    If return_bounds is False then returns precision on the labeled examples
    in the top k. If return_bounds is True then returns a tuple containing:
        - precision on the labeled examples in the top k
        - number of labeled examples in the top k
        - lower bound of precision in the top k, assuming all unlabeled
          examples are False
        - upper bound of precision in the top k, assuming all unlabeled
          examples are True
    """
    y_true, y_score = to_float(y_true, y_score)

    top = _argtop(y_score, k)

    n = np.nan_to_num(y_true[top]).sum()   # fill missing labels with 0
    d = (~np.isnan(y_true[top])).sum()     # count number of labels
    p = n/d

    if return_bounds:
        k = len(y_true) if k is None else k
        bounds = (n/k, (n+k-d)/k) if k != 0 else (np.nan, np.nan)
        return p, d, bounds[0], bounds[1]
    else:
        return p
0.001017
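A small worked example for precision above, assuming numpy and the module's helpers (to_float, _argtop) are importable alongside it; np.nan marks unlabeled examples.

import numpy as np

y_true = np.array([1, 0, np.nan, 1, 0])          # nan = unlabeled
y_score = np.array([0.9, 0.8, 0.7, 0.6, 0.1])
p, labeled, lower, upper = precision(y_true, y_score, k=3, return_bounds=True)
print(p, labeled, lower, upper)                  # 0.5, 2 labeled in top 3, bounds 1/3 and 2/3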
def matches_section(section_name): """Decorator for SectionSchema classes to define the mapping between a config section schema class and one or more config sections with matching name(s). .. sourcecode:: @matches_section("foo") class FooSchema(SectionSchema): pass @matches_section(["bar", "baz.*"]) class BarAndBazSchema(SectionSchema): pass .. sourcecode:: ini # -- FILE: *.ini [foo] # USE: FooSchema ... [bar] # USE: BarAndBazSchema ... [baz.alice] # USE: BarAndBazSchema ... """ section_names = section_name if isinstance(section_name, six.string_types): section_names = [section_name] elif not isinstance(section_name, (list, tuple)): raise ValueError("%r (expected: string, strings)" % section_name) def decorator(cls): class_section_names = getattr(cls, "section_names", None) if class_section_names is None: cls.section_names = list(section_names) else: # -- BETTER SUPPORT: For multiple decorators # @matches_section("foo") # @matches_section("bar.*") # class Example(SectionSchema): # pass # assert Example.section_names == ["foo", "bar.*"] approved = [name for name in section_names if name not in cls.section_names] cls.section_names = approved + cls.section_names return cls return decorator
0.000635
def delete(self, delete_contents=False):
    """Issues a request to delete the dataset.

    Args:
      delete_contents: if True, any tables and views in the dataset will be deleted. If False
          and the dataset is non-empty an exception will be raised.
    Returns:
      None on success.
    Raises:
      Exception if the delete fails (including if the dataset was nonexistent).
    """
    if not self.exists():
      raise Exception('Cannot delete non-existent dataset %s' % self._full_name)
    try:
      self._api.datasets_delete(self._name_parts, delete_contents=delete_contents)
    except Exception as e:
      raise e
    self._info = None
    return None
0.010479
def do_handshake(self, timeout):
    "Perform an SSL/TLS handshake"
    tout = _timeout(timeout)

    if not self._blocking:
        return self._sslobj.do_handshake()

    while 1:
        try:
            return self._sslobj.do_handshake()
        except ssl.SSLError as exc:
            if exc.args[0] == ssl.SSL_ERROR_WANT_READ:
                self._wait_event(tout.now)
                continue
            elif exc.args[0] == ssl.SSL_ERROR_WANT_WRITE:
                self._wait_event(tout.now, write=True)
                continue
            raise

        self._wait_event(timeout)
        self._sslobj.do_handshake()
0.002928
def str_replace(x, pat, repl, n=-1, flags=0, regex=False):
    """Replace occurrences of a pattern/regex in a column with some other string.

    :param str pat: string or a regex pattern
    :param str repl: a replacement string
    :param int n: number of replacements to be made from the start. If -1 make all replacements.
    :param int flags: optional regex flags (e.g. from the ``re`` module), passed through to the
        underlying replace and only relevant when ``regex=True``.
    :param bool regex: If True, interpret ``pat`` as a regular expression; otherwise match it literally.
    :returns: an expression containing the string replacements.

    Example:

    >>> import vaex
    >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']
    >>> df = vaex.from_arrays(text=text)
    >>> df
      #  text
      0  Something
      1  very pretty
      2  is coming
      3  our
      4  way.

    >>> df.text.str.replace(pat='et', repl='__')
    Expression = str_replace(text, pat='et', repl='__')
    Length: 5 dtype: str (expression)
    ---------------------------------
    0    Som__hing
    1  very pr__ty
    2    is coming
    3          our
    4         way.
    """
    sl = _to_string_sequence(x).replace(pat, repl, n, flags, regex)
    return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)
0.003419
def ConsultarTiposCategoriaReceptor(self, sep="||"):
    "Get the code and description for each receiver category type"
    ret = self.client.consultarTiposCategoriaReceptor(
                        authRequest={
                            'token': self.Token, 'sign': self.Sign,
                            'cuitRepresentada': self.Cuit, },
                        )['consultarCategoriasReceptorReturn']
    self.__analizar_errores(ret)
    array = ret.get('arrayCategoriasReceptor', [])
    lista = [it['codigoDescripcionString'] for it in array]
    return [(u"%s {codigo} %s {descripcion} %s" % (sep, sep, sep)).format(**it) if sep else it for it in lista]
0.005571
def _neg_bounded_fun(fun, bounds, x, args=()): """ Wrapper for bounding and taking the negative of `fun` for the Nelder-Mead algorithm. JIT-compiled in `nopython` mode using Numba. Parameters ---------- fun : callable The objective function to be minimized. `fun(x, *args) -> float` where x is an 1-D array with shape (n,) and args is a tuple of the fixed parameters needed to completely specify the function. This function must be JIT-compiled in `nopython` mode using Numba. bounds: ndarray(float, ndim=2) Sequence of (min, max) pairs for each element in x. x : ndarray(float, ndim=1) 1-D array with shape (n,) of independent variables at which `fun` is to be evaluated. args : tuple, optional Extra arguments passed to the objective function. Returns ---------- scalar `-fun(x, *args)` if x is within `bounds`, `np.inf` otherwise. """ if _check_bounds(x, bounds): return -fun(x, *args) else: return np.inf
0.000931
def iso_mesh_line(vertices, tris, vertex_data, levels): """Generate an isocurve from vertex data in a surface mesh. Parameters ---------- vertices : ndarray, shape (Nv, 3) Vertex coordinates. tris : ndarray, shape (Nf, 3) Indices of triangular element into the vertices array. vertex_data : ndarray, shape (Nv,) data at vertex. levels : ndarray, shape (Nl,) Levels at which to generate an isocurve Returns ------- lines : ndarray, shape (Nvout, 3) Vertex coordinates for lines points connects : ndarray, shape (Ne, 2) Indices of line element into the vertex array. vertex_level: ndarray, shape (Nvout,) level for vertex in lines Notes ----- Uses a marching squares algorithm to generate the isolines. """ lines = None connects = None vertex_level = None level_index = None if not all([isinstance(x, np.ndarray) for x in (vertices, tris, vertex_data, levels)]): raise ValueError('all inputs must be numpy arrays') if vertices.shape[1] <= 3: verts = vertices elif vertices.shape[1] == 4: verts = vertices[:, :-1] else: verts = None if (verts is not None and tris.shape[1] == 3 and vertex_data.shape[0] == verts.shape[0]): edges = np.vstack((tris.reshape((-1)), np.roll(tris, -1, axis=1).reshape((-1)))).T edge_datas = vertex_data[edges] edge_coors = verts[edges].reshape(tris.shape[0]*3, 2, 3) for lev in levels: # index for select edges with vertices have only False - True # or True - False at extremity index = (edge_datas >= lev) index = index[:, 0] ^ index[:, 1] # xor calculation # Selectect edge edge_datas_Ok = edge_datas[index, :] xyz = edge_coors[index] # Linear interpolation ratio = np.array([(lev - edge_datas_Ok[:, 0]) / (edge_datas_Ok[:, 1] - edge_datas_Ok[:, 0])]) point = xyz[:, 0, :] + ratio.T * (xyz[:, 1, :] - xyz[:, 0, :]) nbr = point.shape[0]//2 if connects is not None: connect = np.arange(0, nbr*2).reshape((nbr, 2)) + \ len(lines) connects = np.append(connects, connect, axis=0) lines = np.append(lines, point, axis=0) vertex_level = np.append(vertex_level, np.zeros(len(point)) + lev) level_index = np.append(level_index, np.array(len(point))) else: lines = point connects = np.arange(0, nbr*2).reshape((nbr, 2)) vertex_level = np.zeros(len(point)) + lev level_index = np.array(len(point)) vertex_level = vertex_level.reshape((vertex_level.size, 1)) return lines, connects, vertex_level, level_index
0.000328
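A minimal sketch of calling iso_mesh_line above on a two-triangle mesh; the scalar field is simply the x coordinate, so the 0.5 isocurve is a vertical segment across the square.

import numpy as np

verts = np.array([[0., 0., 0.], [1., 0., 0.], [1., 1., 0.], [0., 1., 0.]])
tris = np.array([[0, 1, 2], [0, 2, 3]])
data = verts[:, 0]                                     # value = x coordinate
lines, connects, vlevel, lidx = iso_mesh_line(verts, tris, data, np.array([0.5]))
print(lines)                                           # interpolated points with x == 0.5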
def recap(self, nc): # type: (int) -> None """recap changes the maximum size limit of the dynamic table. It also proceeds to a resize(), if the new size is lower than the previous one. @param int nc: the new cap of the dynamic table (that is the maximum-maximum size) # noqa: E501 @raise AssertionError """ assert(nc >= 0) t = self._dynamic_table_cap_size > nc self._dynamic_table_cap_size = nc if t: # The RFC is not clear about whether this resize should happen; # we do it anyway self.resize(nc)
0.00487
def create_normal_logq(self,z): """ Create logq components for mean-field normal family (the entropy estimate) """ means, scale = self.get_means_and_scales() return ss.norm.logpdf(z,loc=means,scale=scale).sum()
0.024
def _audio_response_for_run(self, tensor_events, run, tag, sample):
    """Builds a JSON-serializable object with information about audio.

    Args:
      tensor_events: A list of audio event_accumulator.TensorEvent objects.
      run: The name of the run.
      tag: The name of the tag the audio entries all belong to.
      sample: The zero-indexed sample index for which to retrieve information.
        For instance, setting `sample` to `2` will fetch information about
        only the third audio clip of each batch, and steps with fewer than
        three audio clips will be omitted from the results.

    Returns:
      A list of dictionaries containing the wall time, step, label, content
      type, and query string for each audio entry.
    """
    response = []
    index = 0
    filtered_events = self._filter_by_sample(tensor_events, sample)
    content_type = self._get_mime_type(run, tag)
    for (index, tensor_event) in enumerate(filtered_events):
      data = tensor_util.make_ndarray(tensor_event.tensor_proto)
      label = data[sample, 1]
      response.append({
          'wall_time': tensor_event.wall_time,
          'step': tensor_event.step,
          'label': plugin_util.markdown_to_safe_html(label),
          'contentType': content_type,
          'query': self._query_for_individual_audio(run, tag, sample, index)
      })
    return response
0.002892
def addUsersToGroup(self, user_ids, thread_id=None): """ Adds users to a group. :param user_ids: One or more user IDs to add :param thread_id: Group ID to add people to. See :ref:`intro_threads` :type user_ids: list :raises: FBchatException if request failed """ thread_id, thread_type = self._getThread(thread_id, None) data = self._getSendData(thread_id=thread_id, thread_type=ThreadType.GROUP) data["action_type"] = "ma-type:log-message" data["log_message_type"] = "log:subscribe" user_ids = require_list(user_ids) for i, user_id in enumerate(user_ids): if user_id == self._uid: raise FBchatUserError( "Error when adding users: Cannot add self to group thread" ) else: data[ "log_message_data[added_participants][{}]".format(i) ] = "fbid:{}".format(user_id) return self._doSendRequest(data)
0.00289
def get_compliance_expansion(self): """ Gets a compliance tensor expansion from the elastic tensor expansion. """ # TODO: this might have a general form if not self.order <= 4: raise ValueError("Compliance tensor expansion only " "supported for fourth-order and lower") ce_exp = [ElasticTensor(self[0]).compliance_tensor] einstring = "ijpq,pqrsuv,rskl,uvmn->ijklmn" ce_exp.append(np.einsum(einstring, -ce_exp[-1], self[1], ce_exp[-1], ce_exp[-1])) if self.order == 4: # Four terms in the Fourth-Order compliance tensor einstring_1 = "pqab,cdij,efkl,ghmn,abcdefgh" tensors_1 = [ce_exp[0]]*4 + [self[-1]] temp = -np.einsum(einstring_1, *tensors_1) einstring_2 = "pqab,abcdef,cdijmn,efkl" einstring_3 = "pqab,abcdef,efklmn,cdij" einstring_4 = "pqab,abcdef,cdijkl,efmn" for es in [einstring_2, einstring_3, einstring_4]: temp -= np.einsum(es, ce_exp[0], self[-2], ce_exp[1], ce_exp[0]) ce_exp.append(temp) return TensorCollection(ce_exp)
0.002463
def add_collaborator(self, login): """Add ``login`` as a collaborator to a repository. :param str login: (required), login of the user :returns: bool -- True if successful, False otherwise """ resp = False if login: url = self._build_url('collaborators', login, base_url=self._api) resp = self._boolean(self._put(url), 204, 404) return resp
0.004751
def detect_keep_boundary(start, end, namespaces): """a helper to inspect a link and see if we should keep the link boundary """ result_start, result_end = False, False parent_start = start.getparent() parent_end = end.getparent() if parent_start.tag == "{%s}p" % namespaces['text']: # more than one child in the containing paragraph ? # we keep the boundary result_start = len(parent_start.getchildren()) > 1 if parent_end.tag == "{%s}p" % namespaces['text']: # more than one child in the containing paragraph ? # we keep the boundary result_end = len(parent_end.getchildren()) > 1 return result_start, result_end
0.001437
def fetch_all_objects_from_db(self, cls: Type[T], table: str, fieldlist: Sequence[str], construct_with_pk: bool, *args) -> List[T]: """Fetches all objects from a table, returning an array of objects of class cls.""" return self.fetch_all_objects_from_db_where( cls, table, fieldlist, construct_with_pk, None, *args)
0.013436