text: string (lengths 78 to 104k) | score: float64 (0 to 0.18)
def exists(self, path):
    """
    Use ``hadoop fs -stat`` to check file existence.
    """
    cmd = load_hadoop_cmd() + ['fs', '-stat', path]
    logger.debug('Running file existence check: %s', subprocess.list2cmdline(cmd))
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         close_fds=True, universal_newlines=True)
    stdout, stderr = p.communicate()
    if p.returncode == 0:
        return True
    else:
        not_found_pattern = "^.*No such file or directory$"
        not_found_re = re.compile(not_found_pattern)
        for line in stderr.split('\n'):
            if not_found_re.match(line):
                return False
        raise hdfs_error.HDFSCliError(cmd, p.returncode, stdout, stderr)
0.005
def unpack_fraction(num: str) -> str:
    """
    Returns unpacked fraction string 5/2 -> 2 1/2
    """
    nums = [int(n) for n in num.split('/') if n]
    if len(nums) == 2 and nums[0] > nums[1]:
        over = nums[0] // nums[1]
        rem = nums[0] % nums[1]
        return f'{over} {rem}/{nums[1]}'
    return num
0.003135
def from_wire(rdclass, rdtype, wire, current, rdlen, origin=None):
    """Build an rdata object from wire format

    This function attempts to dynamically load a class which
    implements the specified rdata class and type.  If there is no
    class-and-type-specific implementation, the GenericRdata class
    is used.

    Once a class is chosen, its from_wire() class method is called
    with the parameters to this function.

    @param rdclass: The rdata class
    @type rdclass: int
    @param rdtype: The rdata type
    @type rdtype: int
    @param wire: The wire-format message
    @type wire: string
    @param current: The offset in wire of the beginning of the rdata.
    @type current: int
    @param rdlen: The length of the wire-format rdata
    @type rdlen: int
    @param origin: The origin to use for relative names
    @type origin: dns.name.Name
    @rtype: dns.rdata.Rdata instance"""

    wire = dns.wiredata.maybe_wrap(wire)
    cls = get_rdata_class(rdclass, rdtype)
    return cls.from_wire(rdclass, rdtype, wire, current, rdlen, origin)
0.002817
def terminate(self, signal_chain=KILL_CHAIN, kill_wait=KILL_WAIT_SEC, purge=True):
    """Ensure a process is terminated by sending a chain of kill signals (SIGTERM, SIGKILL)."""
    alive = self.is_alive()
    if alive:
        logger.debug('terminating {}'.format(self._name))
        for signal_type in signal_chain:
            pid = self.pid
            try:
                logger.debug('sending signal {} to pid {}'.format(signal_type, pid))
                self._kill(signal_type)
            except OSError as e:
                logger.warning('caught OSError({e!s}) during attempt to kill -{signal} {pid}!'
                               .format(e=e, signal=signal_type, pid=pid))

            # Wait up to kill_wait seconds to terminate or move onto the next signal.
            try:
                if self._deadline_until(self.is_dead, 'daemon to exit', timeout=kill_wait):
                    alive = False
                    logger.debug('successfully terminated pid {}'.format(pid))
                    break
            except self.Timeout:
                # Loop to the next kill signal on timeout.
                pass

    if alive:
        raise ProcessManager.NonResponsiveProcess('failed to kill pid {pid} with signals {chain}'
                                                  .format(pid=self.pid, chain=signal_chain))

    if purge:
        self.purge_metadata(force=True)
0.01474
def normalRDD(sc, size, numPartitions=None, seed=None):
    """
    Generates an RDD comprised of i.i.d. samples from the standard normal
    distribution.

    To transform the distribution in the generated RDD from standard normal
    to some other normal N(mean, sigma^2), use
    C{RandomRDDs.normal(sc, n, p, seed)\
      .map(lambda v: mean + sigma * v)}

    :param sc: SparkContext used to create the RDD.
    :param size: Size of the RDD.
    :param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`).
    :param seed: Random seed (default: a random long integer).
    :return: RDD of float comprised of i.i.d. samples ~ N(0.0, 1.0).

    >>> x = RandomRDDs.normalRDD(sc, 1000, seed=1)
    >>> stats = x.stats()
    >>> stats.count()
    1000
    >>> abs(stats.mean() - 0.0) < 0.1
    True
    >>> abs(stats.stdev() - 1.0) < 0.1
    True
    """
    return callMLlibFunc("normalRDD", sc._jsc, size, numPartitions, seed)
0.002865
def remove(self, experiment):
    """Remove the configuration of an experiment"""
    try:
        project_path = self.projects[self[experiment]['project']]['root']
    except KeyError:
        return
    config_path = osp.join(project_path, '.project', experiment + '.yml')
    for f in [config_path, config_path + '~', config_path + '.lck']:
        if os.path.exists(f):
            os.remove(f)
    del self[experiment]
0.00432
def generate(env):
    """Add Builders and construction variables for masm to an Environment."""
    static_obj, shared_obj = SCons.Tool.createObjBuilders(env)

    for suffix in ASSuffixes:
        static_obj.add_action(suffix, SCons.Defaults.ASAction)
        shared_obj.add_action(suffix, SCons.Defaults.ASAction)
        static_obj.add_emitter(suffix, SCons.Defaults.StaticObjectEmitter)
        shared_obj.add_emitter(suffix, SCons.Defaults.SharedObjectEmitter)

    for suffix in ASPPSuffixes:
        static_obj.add_action(suffix, SCons.Defaults.ASPPAction)
        shared_obj.add_action(suffix, SCons.Defaults.ASPPAction)
        static_obj.add_emitter(suffix, SCons.Defaults.StaticObjectEmitter)
        shared_obj.add_emitter(suffix, SCons.Defaults.SharedObjectEmitter)

    env['AS'] = 'ml'
    env['ASFLAGS'] = SCons.Util.CLVar('/nologo')
    env['ASPPFLAGS'] = '$ASFLAGS'
    env['ASCOM'] = '$AS $ASFLAGS /c /Fo$TARGET $SOURCES'
    env['ASPPCOM'] = '$CC $ASPPFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS /c /Fo$TARGET $SOURCES'
    env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 1
0.00541
def unique_id(self):
    """Creates a unique ID for the `Atom` based on its parents.

    Returns
    -------
    unique_id : (str, str, str)
        (polymer.id, residue.id, atom.id)
    """
    chain = self.parent.parent.id
    residue = self.parent.id
    return chain, residue, self.id
0.006154
def print_packet_count():
    """Print the number of packets grouped by packet name."""
    for name in archive.list_packet_names():
        packet_count = 0
        for group in archive.list_packet_histogram(name):
            for rec in group.records:
                packet_count += rec.count
        print(' {: <40} {: >20}'.format(name, packet_count))
0.002801
def replace_one(self, filter, replacement, upsert=False, bypass_document_validation=False, collation=None, session=None): """Replace a single document matching the filter. >>> for doc in db.test.find({}): ... print(doc) ... {u'x': 1, u'_id': ObjectId('54f4c5befba5220aa4d6dee7')} >>> result = db.test.replace_one({'x': 1}, {'y': 1}) >>> result.matched_count 1 >>> result.modified_count 1 >>> for doc in db.test.find({}): ... print(doc) ... {u'y': 1, u'_id': ObjectId('54f4c5befba5220aa4d6dee7')} The *upsert* option can be used to insert a new document if a matching document does not exist. >>> result = db.test.replace_one({'x': 1}, {'x': 1}, True) >>> result.matched_count 0 >>> result.modified_count 0 >>> result.upserted_id ObjectId('54f11e5c8891e756a6e1abd4') >>> db.test.find_one({'x': 1}) {u'x': 1, u'_id': ObjectId('54f11e5c8891e756a6e1abd4')} :Parameters: - `filter`: A query that matches the document to replace. - `replacement`: The new document. - `upsert` (optional): If ``True``, perform an insert if no documents match the filter. - `bypass_document_validation`: (optional) If ``True``, allows the write to opt-out of document level validation. Default is ``False``. - `collation` (optional): An instance of :class:`~pymongo.collation.Collation`. This option is only supported on MongoDB 3.4 and above. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. :Returns: - An instance of :class:`~pymongo.results.UpdateResult`. .. note:: `bypass_document_validation` requires server version **>= 3.2** .. versionchanged:: 3.6 Added ``session`` parameter. .. versionchanged:: 3.4 Added the `collation` option. .. versionchanged:: 3.2 Added bypass_document_validation support .. versionadded:: 3.0 """ common.validate_is_mapping("filter", filter) common.validate_ok_for_replace(replacement) write_concern = self._write_concern_for(session) return UpdateResult( self._update_retryable( filter, replacement, upsert, write_concern=write_concern, bypass_doc_val=bypass_document_validation, collation=collation, session=session), write_concern.acknowledged)
0.001817
def render_image(self, rgbobj, dst_x, dst_y): """Render the image represented by (rgbobj) at dst_x, dst_y in the pixel space. *** internal method-- do not use *** """ self.logger.debug("redraw surface=%s" % (self.surface)) if self.surface is None: return self.logger.debug("drawing to surface") # Prepare array for rendering # TODO: what are options for high bit depth under Qt? data = rgbobj.get_array(self.rgb_order, dtype=np.uint8) (height, width) = data.shape[:2] daht, dawd, depth = data.shape self.logger.debug("data shape is %dx%dx%d" % (dawd, daht, depth)) # Get qimage for copying pixel data qimage = self._get_qimage(data) drawable = self.surface painter = QPainter(drawable) #painter.setWorldMatrixEnabled(True) # fill surface with background color #imgwin_wd, imgwin_ht = self.viewer.get_window_size() size = drawable.size() sf_wd, sf_ht = size.width(), size.height() bg = self.viewer.img_bg bgclr = self._get_color(*bg) painter.fillRect(QtCore.QRect(0, 0, sf_wd, sf_ht), bgclr) # draw image data from buffer to offscreen pixmap painter.drawImage(QtCore.QRect(dst_x, dst_y, width, height), qimage, QtCore.QRect(0, 0, width, height))
0.002797
def get_user_modified_lines(self): """ Output: {file_path: [(line_a_start, line_a_end), (line_b_start, line_b_end)]} Lines ranges are sorted and not overlapping """ # I assume that git diff: # - doesn't mix diffs from different files, # - diffs are not overlapping # - diffs are sorted based on line numbers output = {} FILE_NAME_RE = r'^\+\+\+ (.+)$' CHANGED_LINES_RE = r'^@@ -[0-9,]+ \+([0-9]+)(?:,([0-9]+))? @@' current_file_name = None for line in self.git_wrapper.get_min_diff(self.remote_sha1, self.local_sha1).split('\n'): file_name_match = re.match(FILE_NAME_RE, line) if file_name_match: current_file_name, = file_name_match.groups() output[current_file_name] = [] continue line_number_match = re.match(CHANGED_LINES_RE, line) if line_number_match: assert current_file_name if current_file_name == '/dev/null': continue line_start, diff_len = line_number_match.groups() line_start, diff_len = int(line_start), int(diff_len or 0) output[current_file_name].append(LinesRange(line_start, line_start + diff_len)) continue return output
0.003652
def byName(cls, name, recurse=True, default=None):
    """
    Returns the addon whose name matches the inputted name.  If
    the optional recurse flag is set to True, then all the base classes
    will be searched for the given addon as well.  If no addon is found,
    the default is returned.

    :param      name    | <str>
                recurse | <bool>
                default | <variant>
    """
    cls.initAddons()
    prop = '_{0}__addons'.format(cls.__name__)
    try:
        return getattr(cls, prop, {})[name]
    except KeyError:
        if recurse:
            for base in cls.__bases__:
                if issubclass(base, AddonManager):
                    return base.byName(name, recurse)
    return default
0.003686
def resolve_one(self, correlation_id, key):
    """
    Resolves a single connection parameters by its key.

    :param correlation_id: (optional) transaction id to trace execution through call chain.
    :param key: a key to uniquely identify the connection.
    :return: a resolved connection.
    """
    connection = None

    for item in self._items:
        if item.key == key and item.connection != None:
            connection = item.connection
            break

    return connection
0.007394
def find_blocked_biomass_precursors(reaction, model):
    """
    Return a list of all biomass precursors that cannot be produced.

    Parameters
    ----------
    reaction : cobra.core.reaction.Reaction
        The biomass reaction of the model under investigation.
    model : cobra.Model
        The metabolic model under investigation.

    Returns
    -------
    list
        Metabolite objects that are reactants of the biomass reaction excluding
        ATP and H2O that cannot be produced by flux balance analysis.
    """
    LOGGER.debug("Finding blocked biomass precursors")
    precursors = find_biomass_precursors(model, reaction)
    blocked_precursors = list()
    _, ub = helpers.find_bounds(model)
    for precursor in precursors:
        with model:
            dm_rxn = model.add_boundary(
                precursor,
                type="safe-demand",
                reaction_id="safe_demand",
                lb=0,
                ub=ub
            )
            flux = helpers.run_fba(model, dm_rxn.id, direction='max')
            if np.isnan(flux) or abs(flux) < 1E-08:
                blocked_precursors.append(precursor)
    return blocked_precursors
0.000847
def _match_operator(self, p, value):
    """
    Returns True or False if the operator (&, |, or ! with filters, or ^ with
    filters) matches the value dictionary
    """
    if p[0] == '!':
        return self._OPERATOR_MAP[p[0]](self._match(p[1], value))
    elif p[0] == '^':
        return self._OPERATOR_MAP[p[0]](self._match(p[1][0], value),
                                        self._match(p[1][1], value))
    else:
        return self._OPERATOR_MAP[p[0]]([self._match(operator_or_filter, value)
                                         for operator_or_filter in p[1]])
0.009381
def get_unique_reads(self, ignore_haplotype=False, shallow=False):
    """
    Pull out alignments of uniquely-aligning reads

    :param ignore_haplotype: whether to regard allelic multiread as uniquely-aligning read
    :param shallow: whether to copy sparse 3D matrix only or not
    :return: a new AlignmentPropertyMatrix object that particular reads are
    """
    if self.finalized:
        if ignore_haplotype:
            summat = self.sum(axis=self.Axis.HAPLOTYPE)
            nnz_per_read = np.diff(summat.tocsr().indptr)
            unique_reads = np.logical_and(nnz_per_read > 0, nnz_per_read < 2)
        else:  # allelic multireads should be removed
            alncnt_per_read = self.sum(axis=self.Axis.LOCUS).sum(axis=self.Axis.HAPLOTYPE)
            unique_reads = np.logical_and(alncnt_per_read > 0, alncnt_per_read < 2)
        return self.pull_alignments_from(unique_reads, shallow=shallow)
    else:
        raise RuntimeError('The matrix is not finalized.')
0.005629
def getAnalysisServiceSettings(self, uid):
    """Returns a dictionary with the settings for the analysis service that
    match with the uid provided.

    If there are no settings for the analysis service and analysis requests:

    1. looks for settings in AR's ARTemplate. If found, returns the
       settings for the AnalysisService set in the Template
    2. If no settings found, looks in AR's ARProfile. If found, returns the
       settings for the AnalysisService from the AR Profile. Otherwise,
       returns a one entry dictionary with only the key 'uid'
    """
    sets = [s for s in self.getAnalysisServicesSettings()
            if s.get('uid', '') == uid]

    # Created by using an ARTemplate?
    if not sets and self.getTemplate():
        adv = self.getTemplate().getAnalysisServiceSettings(uid)
        sets = [adv] if 'hidden' in adv else []

    # Created by using an AR Profile?
    if not sets and self.getProfiles():
        adv = []
        adv += [profile.getAnalysisServiceSettings(uid) for profile in
                self.getProfiles()]
        sets = adv if 'hidden' in adv[0] else []

    return sets[0] if sets else {'uid': uid}
0.001587
def num_samples(self, sr=None):
    """
    Return the number of samples.

    Args:
        sr (int): Calculate the number of samples with the given
                  sampling-rate. If None use the native sampling-rate.

    Returns:
        int: Number of samples
    """
    native_sr = self.sampling_rate
    num_samples = units.seconds_to_sample(self.duration, native_sr)

    if sr is not None:
        ratio = float(sr) / native_sr
        num_samples = int(np.ceil(num_samples * ratio))

    return num_samples
0.003484
def request_access_token(self, access_code):
    "Request access token from GitHub"
    token_response = request_session.post(
        "https://github.com/login/oauth/access_token",
        data={
            "client_id": self.oauth_client_id,
            "client_secret": self.oauth_client_secret,
            "code": access_code
        },
        headers={"Accept": "application/json"},
    )
    return helper_request_access_token(token_response.json())
0.003992
def items(self, *args, **kwargs):
    """ Return the list of items within this tag. This function is only applicable
        in search results from PlexServer :func:`~plexapi.server.PlexServer.search()`.
    """
    if not self.key:
        raise BadRequest('Key is not defined for this tag: %s' % self.tag)
    return self.fetchItems(self.key)
0.01087
def _check_operations(ctx, need_ops, arg):
    ''' Checks an allow or a deny caveat. The need_ops parameter specifies
    whether we require all the operations in the caveat to be declared in
    the context.
    '''
    ctx_ops = ctx.get(OP_KEY, [])
    if len(ctx_ops) == 0:
        if need_ops:
            f = arg.split()
            if len(f) == 0:
                return 'no operations allowed'
            return '{} not allowed'.format(f[0])
        return None

    fields = arg.split()
    for op in ctx_ops:
        err = _check_op(op, need_ops, fields)
        if err is not None:
            return err
    return None
0.001585
def get_texture(self, label: str) -> Union[moderngl.Texture, moderngl.TextureArray,
                                           moderngl.Texture3D, moderngl.TextureCube]:
    """
    Get a texture by label

    Args:
        label (str): The label for the texture to fetch

    Returns:
        Texture instance
    """
    return self._get_resource(label, self._textures, "texture")
0.011655
def disable_tracing(self):
    """
    Disable tracing if it is enabled and the debugged program is running,
    else do nothing.
    :return: False if tracing has been disabled, True otherwise.
    """
    _logger.x_debug("disable_tracing()")
    #self.dump_tracing_state("before disable_tracing()")
    if self.tracing_enabled and self.execution_started:
        threading.settrace(None)   # don't trace threads to come
        iksettrace3._set_trace_off()
        self.tracing_enabled = False
    #self.dump_tracing_state("after disable_tracing()")
    return self.tracing_enabled
0.008065
def quickinfo(self):
    """
    Returns a short string describing some of the options of the actor.

    :return: the info, None if not available
    :rtype: str
    """
    return "incremental: " + str(self.config["incremental"]) \
           + ", custom: " + str(self.config["use_custom_loader"]) \
           + ", loader: " + base.to_commandline(self.config["custom_loader"])
0.007299
def get_zone(server, token, domain, keyword='', raw_flag=False):
    """Retrieve zone records.

    Argument:
        server:  TonicDNS API server
        token:   TonicDNS API authentication token
        domain:  Specify domain name
        keyword: Search keyword

    x-authentication-token: token
    """
    method = 'GET'
    uri = 'https://' + server + '/zone/' + domain
    data = connect.tonicdns_client(uri, method, token, data=False,
                                   keyword=keyword, raw_flag=raw_flag)
    return data
0.001876
def get_profile(self):
    """ Get my own profile """
    r = self._session.get(API_URL + "/logins/me")
    r.raise_for_status()
    return r.json()
0.01105
def fdfilter(data, *filt, **kwargs):
    """Filter a frequency-domain data object

    See Also
    --------
    gwpy.frequencyseries.FrequencySeries.filter
    gwpy.spectrogram.Spectrogram.filter
    """
    # parse keyword args
    inplace = kwargs.pop('inplace', False)
    analog = kwargs.pop('analog', False)
    fs = kwargs.pop('sample_rate', None)
    if kwargs:
        raise TypeError("filter() got an unexpected keyword argument '%s'"
                        % list(kwargs.keys())[0])

    # parse filter
    if fs is None:
        fs = 2 * (data.shape[-1] * data.df).to('Hz').value
    form, filt = parse_filter(filt, analog=analog, sample_rate=fs)
    lti = signal.lti(*filt)

    # generate frequency response
    freqs = data.frequencies.value.copy()
    fresp = numpy.nan_to_num(abs(lti.freqresp(w=freqs)[1]))

    # apply to array
    if inplace:
        data *= fresp
        return data
    new = data * fresp
    return new
0.001062
def init_registry_from_json(mongo, filename, clear_collection=False):
    """Initialize a model registry with a list of model definitions that are
    stored in a given file in Json format.

    Parameters
    ----------
    mongo : scodata.MongoDBFactory
        Connector for MongoDB
    filename : string
        Path to file containing model definitions
    clear_collection : boolean
        If true, collection will be dropped before models are created
    """
    # Read model definition file (JSON)
    with open(filename, 'r') as f:
        models = json.load(f)
    init_registry(mongo, models, clear_collection)
0.001608
def badge_width(self):
    """The total width of badge.

    >>> badge = Badge('pylint', '5', font_name='DejaVu Sans,Verdana,Geneva,sans-serif',
    ...               font_size=11)
    >>> badge.badge_width
    91
    """
    return self.get_text_width(' ' + ' ' * int(float(self.num_padding_chars) * 2.0)) \
        + self.label_width + self.value_width
0.010309
def create_NT_hashed_password_v1(passwd, user=None, domain=None):
    "create NT hashed password"
    # if the passwd provided is already a hash, we just return the second half
    if re.match(r'^[\w]{32}:[\w]{32}$', passwd):
        return binascii.unhexlify(passwd.split(':')[1])

    digest = hashlib.new('md4', passwd.encode('utf-16le')).digest()
    return digest
0.002717
def sra_download_paired_end(credentials, instance_config, instance_name,
                            script_dir, sra_run_acc, output_dir, **kwargs):
    """Download paired-end reads from SRA and convert to gzip'ed FASTQ files.

    TODO: docstring"""

    template = _TEMPLATE_ENV.get_template('sra_download_paired-end.sh')

    startup_script = template.render(
        script_dir=script_dir,
        sra_run_acc=sra_run_acc,
        output_dir=output_dir)

    if len(startup_script) > 32768:
        raise ValueError('Startup script larger than 32,768 bytes!')

    #print(startup_script)

    instance_config.create_instance(
        credentials, instance_name, startup_script=startup_script, **kwargs)
0.004231
def get_uids(self):
    """Returns a uids list of the objects this action must be performed
    against. If no values for uids param found in the request, returns the
    uid of the current context
    """
    uids = self.get_uids_from_request()
    if not uids and api.is_object(self.context):
        uids = [api.get_uid(self.context)]
    return uids
0.005155
def value_get(method_name):
    """
    Creates a getter that will call value's method with specified
    name using the context's key as first argument.

    @param method_name: the name of a method belonging to the value.
    @type method_name: str
    """
    def value_get(value, context, **_params):
        method = getattr(value, method_name)
        return _get(method, context["key"], (), {})
    return value_get
0.002364
def nl_list_for_each_entry(pos, head, member):
    """https://github.com/thom311/libnl/blob/libnl3_2_25/include/netlink/list.h#L79.

    Positional arguments:
    pos -- class instance holding an nl_list_head instance.
    head -- nl_list_head class instance.
    member -- attribute (string).

    Returns:
    Generator yielding a class instances.
    """
    pos = nl_list_entry(head.next_, type(pos), member)
    while True:
        yield pos
        if getattr(pos, member) != head:
            pos = nl_list_entry(getattr(pos, member).next_, type(pos), member)
            continue
        break
0.001664
def create_user(self, claims):
    """Return object for a newly created user account."""
    email = claims.get('email')
    username = self.get_username(claims)
    return self.UserModel.objects.create_user(username, email)
0.008333
def _rest_make_phenotypes(): #phenotype sources neuroner = Path(devconfig.git_local_base, 'neuroNER/resources/bluima/neuroner/hbp_morphology_ontology.obo').as_posix() neuroner1 = Path(devconfig.git_local_base, 'neuroNER/resources/bluima/neuroner/hbp_electrophysiology_ontology.obo').as_posix() neuroner2 = Path(devconfig.git_local_base, 'neuroNER/resources/bluima/neuroner/hbp_electrophysiology-triggers_ontology.obo').as_posix() nif_qual = Path(devconfig.ontology_local_repo, 'ttl/NIF-Quality.ttl').as_posix() mo = OboFile(os.path.expanduser(neuroner)) mo1 = OboFile(os.path.expanduser(neuroner1)) mo2 = OboFile(os.path.expanduser(neuroner2)) mo_ttl = mo.__ttl__() + mo1.__ttl__() + mo2.__ttl__() mo_ttl = """\ @prefix : <http://FIXME.org/> . @prefix nsu: <http://www.FIXME.org/nsupper#> . @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> . @prefix owl: <http://www.w3.org/2002/07/owl#> . @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> . """ + mo_ttl #sio = io.StringIO() #sio.write(mo_ttl) ng = rdflib.Graph() ng.parse(data=mo_ttl, format='turtle') ng.parse(os.path.expanduser(nif_qual), format='turtle') #ng.namespace_manager.bind('default1', None, override=False, replace=True) ng.remove((None, rdflib.OWL.imports, None)) bad_match = { 'http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#nlx_qual_20090505', 'http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#sao1693353776', 'http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#sao1288413465', 'http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#sao4459136323', 'http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#nlx_qual_20090507', } exact = [] similar = [] quals = [] s2 = {} for subject, label in sorted(ng.subject_objects(rdflib.RDFS.label)): syns = set([a for a in ng.objects(subject, rdflib.URIRef('http://www.FIXME.org/nsupper#synonym'))]) syns.update(set([a for a in ng.objects(subject, rdflib.URIRef('http://ontology.neuinfo.org/NIF/Backend/OBO_annotation_properties.owl#synonym'))])) #if syns: #print(syns) #print(subject) #print(label.lower()) if 'quality' in label.lower(): quals.append((subject, label)) subpre = ng.namespace_manager.compute_qname(subject)[1] llower = rdflib.Literal(label.lower(), lang='en') for s in ng.subjects(rdflib.RDFS.label, llower): if s != subject: exact.append((subject, s, label, llower)) for s, p, o in sorted(ng.triples((None, rdflib.RDFS.label, None))): spre = ng.namespace_manager.compute_qname(s)[1] if subject != s and label.lower() in o.lower().split(' ') and spre != subpre: if s.toPython() in bad_match or subject.toPython() in bad_match: continue #print() #print(spre, subpre) similar.append((subject, s, label, o)) if subpre.toPython() == 'http://FIXME.org/': print('YAY') print(label, ',', o) print(subject, s) subject, s = s, subject label, o = o, label if subject in s2: #print('YES IT EXISTS') #print(syns, label, [subject, s]) s2[subject]['syns'].update(syns) s2[subject]['syns'].add(label) s2[subject]['xrefs'] += [subject, s] else: s2[subject] = {'label': label.toPython(), 'o': o.toPython(), 'xrefs':[subject, s], 'syns':syns} # FIXME overwrites pprint(quals) """ print stuff print('matches') pprint(exact) pprint(similar) #print('EXACT', exact) print() for k, v in s2.items(): print(k) for k, v2 in sorted(v.items()): print(' ', k, ':', v2) #""" desired_nif_terms = set() #{ #'NIFQUAL:sao1959705051', # dendrite #'NIFQUAL:sao2088691397', # axon #'NIFQUAL:sao1057800815', # morphological #'NIFQUAL:sao-1126011106', # soma #'NIFQUAL:', 
#'NIFQUAL:', #} starts = [ #"NIFQUAL:sao2088691397", #"NIFQUAL:sao1278200674", #"NIFQUAL:sao2088691397", #"NIFQUAL:sao-1126011106", # FIXME WTF IS THIS NONSENSE (scigraph bug?) quote("http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#sao1959705051").replace('/','%2F'), quote("http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#sao2088691397").replace('/','%2F'), quote("http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#sao1278200674").replace('/','%2F'), quote("http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#sao2088691397").replace('/','%2F'), quote("http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#sao-1126011106").replace('/','%2F'), ] for id_ in starts: want = sgg.getNeighbors(id_, relationshipType='subClassOf', direction='INCOMING', depth=5) #print(id_, want) desired_nif_terms.update([n['id'] for n in want['nodes']]) print(desired_nif_terms) ilx_start = 50114 print(ilx_base.format(ilx_start)) new_terms = {} dg = makeGraph('uwotm8', prefixes=PREFIXES) xr = makeGraph('xrefs', prefixes=PREFIXES) for s, o in sorted(ng.subject_objects(rdflib.RDFS.label))[::-1]: spre = ng.namespace_manager.compute_qname(s)[1] #if spre.toPython() == g.namespaces['NIFQUAL']: #print('skipping', s) #continue # TODO if s in new_terms: print(s, 'already in as xref probably') continue #elif spre.toPython() != 'http://uri.interlex.org/base/ilx_' or spre.toPython() != 'http://FIXME.org/' and s.toPython() not in desired_nif_terms: #elif spre.toPython() != 'http://FIXME.org/' and s.toPython() not in desired_nif_terms: #print('DO NOT WANT', s, spre) #continue syns = set([s for s in ng.objects(s, dg.namespaces['nsu']['synonym'])]) #data['syns'] += syns data = {} id_ = ilx_base.format(ilx_start) ilx_start += 1 if s in s2: d = s2[s] syns.update(d['syns']) new_terms[d['xrefs'][0]] = {'replaced_by':id_} xr.add_trip(d['xrefs'][0], 'oboInOwl:replacedBy', id_) #dg.add_trip(d['xrefs'][0], 'oboInOwl:replacedBy', id_) new_terms[d['xrefs'][1]] = {'replaced_by':id_} xr.add_trip(d['xrefs'][1], 'oboInOwl:replacedBy', id_) #dg.add_trip(d['xrefs'][1], 'oboInOwl:replacedBy', id_) data['labels'] = [d['label'], d['o']] #dg.add_trip(id_, rdflib.RDFS.label, d['label']) dg.add_trip(id_, rdflib.RDFS.label, d['o']) data['xrefs'] = d['xrefs'] for x in d['xrefs']: # FIXME... expecting order of evaluation errors here... 
dg.add_trip(id_, 'oboInOwl:hasDbXref', x) # xr xr.add_trip(id_, 'oboInOwl:hasDbXref', x) # x elif spre.toPython() != 'http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#' or ng.namespace_manager.qname(s).replace('default1','NIFQUAL') in desired_nif_terms: # skip non-xref quals #print(ng.namespace_manager.qname(s).replace('default1','NIFQUAL')) new_terms[s] = {'replaced_by':id_} xr.add_trip(s, 'oboInOwl:replacedBy', id_) data['labels'] = [o.toPython()] dg.add_trip(id_, rdflib.RDFS.label, o.toPython()) data['xrefs'] = [s] dg.add_trip(id_, 'oboInOwl:hasDbXref', s) # xr xr.add_trip(id_, 'oboInOwl:hasDbXref', s) # xr else: ilx_start -= 1 continue new_terms[id_] = data dg.add_trip(id_, rdflib.RDF.type, rdflib.OWL.Class) xr.add_trip(id_, rdflib.RDF.type, rdflib.OWL.Class) for syn in syns: if syn.toPython() not in data['labels']: if len(syn) > 3: dg.add_trip(id_, 'NIFRID:synonym', syn) elif syn: dg.add_trip(id_, 'NIFRID:abbrev', syn) if 'EPHYS' in s or any(['EPHYS' in x for x in data['xrefs']]): dg.add_trip(id_, rdflib.RDFS.subClassOf, ephys_phenotype) elif 'MORPHOLOGY' in s or any(['MORPHOLOGY' in x for x in data['xrefs']]): dg.add_trip(id_, rdflib.RDFS.subClassOf, morpho_phenotype) #dg.write(convert=False) xr.write(convert=False) #skip this for now, we can use DG to do lookups later #for t in dg.g.triples((None, None, None)): #g.add_trip(*t) # only way to clean prefixes :/ add_phenotypes(g) g.write(convert=False) g2 = makeGraph('pheno-comp', PREFIXES) for t in ng.triples((None, None, None)): g2.add_trip(*t) # only way to clean prefixes :/ g2.write(convert=False) syn_mappings = {} for sub, syn in [_ for _ in g.g.subject_objects(g.expand('NIFRID:synonym'))] + [_ for _ in g.g.subject_objects(rdflib.RDFS.label)]: syn = syn.toPython() if syn in syn_mappings: log.error(f'duplicate synonym! {syn} {sub}') syn_mappings[syn] = sub #embed() return syn_mappings, pedges, ilx_start
0.009853
def _upsample(self, method, limit=None, fill_value=None):
    """
    Parameters
    ----------
    method : string {'backfill', 'bfill', 'pad', 'ffill'}
        method for upsampling
    limit : int, default None
        Maximum size gap to fill when reindexing
    fill_value : scalar, default None
        Value to use for missing values

    See Also
    --------
    .fillna
    """
    # we may need to actually resample as if we are timestamps
    if self.kind == 'timestamp':
        return super()._upsample(method, limit=limit, fill_value=fill_value)

    self._set_binner()
    ax = self.ax
    obj = self.obj
    new_index = self.binner

    # Start vs. end of period
    memb = ax.asfreq(self.freq, how=self.convention)

    # Get the fill indexer
    indexer = memb.get_indexer(new_index, method=method, limit=limit)
    return self._wrap_result(_take_new_index(
        obj, indexer, new_index, axis=self.axis))
0.001871
def clean_file_name(filename, unique=True, replace="_", force_nt=False): """ Return a filename version, which has no characters in it which are forbidden. On Windows these are for example <, /, ?, ... The intention of this function is to allow distribution of files to different OSes. :param filename: string to clean :param unique: check if the filename is already taken and append an integer to be unique (default: True) :param replace: replacement character. (default: '_') :param force_nt: Force shortening of paths like on NT systems (default: False) :return: clean string """ if re.match(r'[<>:"/\\|?* .\x00-\x1f]', replace): raise ValueError("replacement character is not allowed!") path, fname = os.path.split(filename) # For Windows see: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx # Other operating systems seems to be more tolerant... # Not allowed filenames, attach replace character if necessary if re.match(r'(CON|PRN|AUX|NUL|COM[1-9]|LPT[1-9])', fname): fname += replace # reserved characters fname = re.sub(r'[<>:"/\\|?*\x00-\x1f]', replace, fname) # Do not end with dot or space fname = re.sub(r'[ .]$', replace, fname) if force_nt or os.name == 'nt': PATH_MAX_LENGTH = 230 # give extra space for other stuff... # Check filename length limit, usually a problem on older Windows versions if len(fname) > PATH_MAX_LENGTH: if "." in fname: f, ext = fname.rsplit(".", 1) fname = "{}.{}".format(f[:PATH_MAX_LENGTH-(len(ext)+1)], ext) else: fname = fname[:PATH_MAX_LENGTH] # Special behaviour... On Windows, there is also a problem with the maximum path length in explorer.exe # maximum length is limited to 260 chars, so use 250 to have room for other stuff if len(os.path.abspath(os.path.join(path, fname))) > 250: fname = fname[:250 - (len(os.path.abspath(path)) + 1)] if unique: counter = 0 origname = fname while os.path.isfile(os.path.join(path, fname)): if "." in fname: # assume extension f, ext = origname.rsplit(".", 1) fname = "{}_{}.{}".format(f, counter, ext) else: fname = "{}_{}".format(origname, counter) counter += 1 return os.path.join(path, fname)
0.003617
def get_periodicfeatures( pfpickle, lcbasedir, outdir, fourierorder=5, # these are depth, duration, ingress duration transitparams=(-0.01,0.1,0.1), # these are depth, duration, depth ratio, secphase ebparams=(-0.2,0.3,0.7,0.5), pdiff_threshold=1.0e-4, sidereal_threshold=1.0e-4, sampling_peak_multiplier=5.0, sampling_startp=None, sampling_endp=None, starfeatures=None, timecols=None, magcols=None, errcols=None, lcformat='hat-sql', lcformatdir=None, sigclip=10.0, verbose=True, raiseonfail=False ): '''This gets all periodic features for the object. Parameters ---------- pfpickle : str The period-finding result pickle containing period-finder results to use for the calculation of LC fit, periodogram, and phased LC features. lcbasedir : str The base directory where the light curve for the current object is located. outdir : str The output directory where the results will be written. fourierorder : int The Fourier order to use to generate sinusoidal function and fit that to the phased light curve. transitparams : list of floats The transit depth, duration, and ingress duration to use to generate a trapezoid planet transit model fit to the phased light curve. The period used is the one provided in `period`, while the epoch is automatically obtained from a spline fit to the phased light curve. ebparams : list of floats The primary eclipse depth, eclipse duration, the primary-secondary depth ratio, and the phase of the secondary eclipse to use to generate an eclipsing binary model fit to the phased light curve. The period used is the one provided in `period`, while the epoch is automatically obtained from a spline fit to the phased light curve. pdiff_threshold : float This is the max difference between periods to consider them the same. sidereal_threshold : float This is the max difference between any of the 'best' periods and the sidereal day periods to consider them the same. sampling_peak_multiplier : float This is the minimum multiplicative factor of a 'best' period's normalized periodogram peak over the sampling periodogram peak at the same period required to accept the 'best' period as possibly real. sampling_startp, sampling_endp : float If the `pgramlist` doesn't have a time-sampling Lomb-Scargle periodogram, it will be obtained automatically. Use these kwargs to control the minimum and maximum period interval to be searched when generating this periodogram. starfeatures : str or None If not None, this should be the filename of the `starfeatures-<objectid>.pkl` created by :py:func:`astrobase.lcproc.lcsfeatures.get_starfeatures` for this object. This is used to get the neighbor's light curve and phase it with this object's period to see if this object is blended. timecols : list of str or None The timecol keys to use from the lcdict in calculating the features. magcols : list of str or None The magcol keys to use from the lcdict in calculating the features. errcols : list of str or None The errcol keys to use from the lcdict in calculating the features. lcformat : str This is the `formatkey` associated with your light curve format, which you previously passed in to the `lcproc.register_lcformat` function. This will be used to look up how to find and read the light curves specified in `basedir` or `use_list_of_filenames`. lcformatdir : str or None If this is provided, gives the path to a directory when you've stored your lcformat description JSONs, other than the usual directories lcproc knows to search for them in. 
Use this along with `lcformat` to specify an LC format JSON file that's not currently registered with lcproc. sigclip : float or int or sequence of two floats/ints or None If a single float or int, a symmetric sigma-clip will be performed using the number provided as the sigma-multiplier to cut out from the input time-series. If a list of two ints/floats is provided, the function will perform an 'asymmetric' sigma-clip. The first element in this list is the sigma value to use for fainter flux/mag values; the second element in this list is the sigma value to use for brighter flux/mag values. For example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma dimmings and greater than 3-sigma brightenings. Here the meaning of "dimming" and "brightening" is set by *physics* (not the magnitude system), which is why the `magsarefluxes` kwarg must be correctly set. If `sigclip` is None, no sigma-clipping will be performed, and the time-series (with non-finite elems removed) will be passed through to the output. verbose : bool If True, will indicate progress while working. raiseonfail : bool If True, will raise an Exception if something goes wrong. Returns ------- str Returns a filename for the output pickle containing all of the periodic features for the input object's LC. ''' try: formatinfo = get_lcformat(lcformat, use_lcformat_dir=lcformatdir) if formatinfo: (fileglob, readerfunc, dtimecols, dmagcols, derrcols, magsarefluxes, normfunc) = formatinfo else: LOGERROR("can't figure out the light curve format") return None except Exception as e: LOGEXCEPTION("can't figure out the light curve format") return None # open the pfpickle if pfpickle.endswith('.gz'): infd = gzip.open(pfpickle) else: infd = open(pfpickle) pf = pickle.load(infd) infd.close() lcfile = os.path.join(lcbasedir, pf['lcfbasename']) objectid = pf['objectid'] if 'kwargs' in pf: kwargs = pf['kwargs'] else: kwargs = None # override the default timecols, magcols, and errcols # using the ones provided to the periodfinder # if those don't exist, use the defaults from the lcformat def if kwargs and 'timecols' in kwargs and timecols is None: timecols = kwargs['timecols'] elif not kwargs and not timecols: timecols = dtimecols if kwargs and 'magcols' in kwargs and magcols is None: magcols = kwargs['magcols'] elif not kwargs and not magcols: magcols = dmagcols if kwargs and 'errcols' in kwargs and errcols is None: errcols = kwargs['errcols'] elif not kwargs and not errcols: errcols = derrcols # check if the light curve file exists if not os.path.exists(lcfile): LOGERROR("can't find LC %s for object %s" % (lcfile, objectid)) return None # check if we have neighbors we can get the LCs for if starfeatures is not None and os.path.exists(starfeatures): with open(starfeatures,'rb') as infd: starfeat = pickle.load(infd) if starfeat['closestnbrlcfname'].size > 0: nbr_full_lcf = starfeat['closestnbrlcfname'][0] # check for this LC in the lcbasedir if os.path.exists(os.path.join(lcbasedir, os.path.basename(nbr_full_lcf))): nbrlcf = os.path.join(lcbasedir, os.path.basename(nbr_full_lcf)) # if it's not there, check for this file at the full LC location elif os.path.exists(nbr_full_lcf): nbrlcf = nbr_full_lcf # otherwise, we can't find it, so complain else: LOGWARNING("can't find neighbor light curve file: %s in " "its original directory: %s, or in this object's " "lcbasedir: %s, skipping neighbor processing..." 
% (os.path.basename(nbr_full_lcf), os.path.dirname(nbr_full_lcf), lcbasedir)) nbrlcf = None else: nbrlcf = None else: nbrlcf = None # now, start processing for periodic feature extraction try: # get the object LC into a dict lcdict = readerfunc(lcfile) # this should handle lists/tuples being returned by readerfunc # we assume that the first element is the actual lcdict # FIXME: figure out how to not need this assumption if ( (isinstance(lcdict, (list, tuple))) and (isinstance(lcdict[0], dict)) ): lcdict = lcdict[0] # get the nbr object LC into a dict if there is one if nbrlcf is not None: nbrlcdict = readerfunc(nbrlcf) # this should handle lists/tuples being returned by readerfunc # we assume that the first element is the actual lcdict # FIXME: figure out how to not need this assumption if ( (isinstance(nbrlcdict, (list, tuple))) and (isinstance(nbrlcdict[0], dict)) ): nbrlcdict = nbrlcdict[0] # this will be the output file outfile = os.path.join(outdir, 'periodicfeatures-%s.pkl' % squeeze(objectid).replace(' ','-')) # normalize using the special function if specified if normfunc is not None: lcdict = normfunc(lcdict) if nbrlcf: nbrlcdict = normfunc(nbrlcdict) resultdict = {} for tcol, mcol, ecol in zip(timecols, magcols, errcols): # dereference the columns and get them from the lcdict if '.' in tcol: tcolget = tcol.split('.') else: tcolget = [tcol] times = _dict_get(lcdict, tcolget) if nbrlcf: nbrtimes = _dict_get(nbrlcdict, tcolget) else: nbrtimes = None if '.' in mcol: mcolget = mcol.split('.') else: mcolget = [mcol] mags = _dict_get(lcdict, mcolget) if nbrlcf: nbrmags = _dict_get(nbrlcdict, mcolget) else: nbrmags = None if '.' in ecol: ecolget = ecol.split('.') else: ecolget = [ecol] errs = _dict_get(lcdict, ecolget) if nbrlcf: nbrerrs = _dict_get(nbrlcdict, ecolget) else: nbrerrs = None # # filter out nans, etc. 
from the object and any neighbor LC # # get the finite values finind = np.isfinite(times) & np.isfinite(mags) & np.isfinite(errs) ftimes, fmags, ferrs = times[finind], mags[finind], errs[finind] if nbrlcf: nfinind = (np.isfinite(nbrtimes) & np.isfinite(nbrmags) & np.isfinite(nbrerrs)) nbrftimes, nbrfmags, nbrferrs = (nbrtimes[nfinind], nbrmags[nfinind], nbrerrs[nfinind]) # get nonzero errors nzind = np.nonzero(ferrs) ftimes, fmags, ferrs = ftimes[nzind], fmags[nzind], ferrs[nzind] if nbrlcf: nnzind = np.nonzero(nbrferrs) nbrftimes, nbrfmags, nbrferrs = (nbrftimes[nnzind], nbrfmags[nnzind], nbrferrs[nnzind]) # normalize here if not using special normalization if normfunc is None: ntimes, nmags = normalize_magseries( ftimes, fmags, magsarefluxes=magsarefluxes ) times, mags, errs = ntimes, nmags, ferrs if nbrlcf: nbrntimes, nbrnmags = normalize_magseries( nbrftimes, nbrfmags, magsarefluxes=magsarefluxes ) nbrtimes, nbrmags, nbrerrs = nbrntimes, nbrnmags, nbrferrs else: nbrtimes, nbrmags, nbrerrs = None, None, None else: times, mags, errs = ftimes, fmags, ferrs if times.size > 999: # # now we have times, mags, errs (and nbrtimes, nbrmags, nbrerrs) # available_pfmethods = [] available_pgrams = [] available_bestperiods = [] for k in pf[mcol].keys(): if k in PFMETHODS: available_pgrams.append(pf[mcol][k]) if k != 'win': available_pfmethods.append( pf[mcol][k]['method'] ) available_bestperiods.append( pf[mcol][k]['bestperiod'] ) # # process periodic features for this magcol # featkey = 'periodicfeatures-%s' % mcol resultdict[featkey] = {} # first, handle the periodogram features pgramfeat = periodicfeatures.periodogram_features( available_pgrams, times, mags, errs, sigclip=sigclip, pdiff_threshold=pdiff_threshold, sidereal_threshold=sidereal_threshold, sampling_peak_multiplier=sampling_peak_multiplier, sampling_startp=sampling_startp, sampling_endp=sampling_endp, verbose=verbose ) resultdict[featkey].update(pgramfeat) resultdict[featkey]['pfmethods'] = available_pfmethods # then for each bestperiod, get phasedlc and lcfit features for _ind, pfm, bp in zip(range(len(available_bestperiods)), available_pfmethods, available_bestperiods): resultdict[featkey][pfm] = periodicfeatures.lcfit_features( times, mags, errs, bp, fourierorder=fourierorder, transitparams=transitparams, ebparams=ebparams, sigclip=sigclip, magsarefluxes=magsarefluxes, verbose=verbose ) phasedlcfeat = periodicfeatures.phasedlc_features( times, mags, errs, bp, nbrtimes=nbrtimes, nbrmags=nbrmags, nbrerrs=nbrerrs ) resultdict[featkey][pfm].update(phasedlcfeat) else: LOGERROR('not enough finite measurements in magcol: %s, for ' 'pfpickle: %s, skipping this magcol' % (mcol, pfpickle)) featkey = 'periodicfeatures-%s' % mcol resultdict[featkey] = None # # end of per magcol processing # # write resultdict to pickle outfile = os.path.join(outdir, 'periodicfeatures-%s.pkl' % squeeze(objectid).replace(' ','-')) with open(outfile,'wb') as outfd: pickle.dump(resultdict, outfd, pickle.HIGHEST_PROTOCOL) return outfile except Exception as e: LOGEXCEPTION('failed to run for pf: %s, lcfile: %s' % (pfpickle, lcfile)) if raiseonfail: raise else: return None
0.00183
def _data(self, copy=False):
    """
    Get all data associated with the container as key value pairs.
    """
    data = {}
    for key, obj in self.__dict__.items():
        if isinstance(obj, (pd.Series, pd.DataFrame, pd.SparseSeries,
                            pd.SparseDataFrame)):
            if copy:
                data[key] = obj.copy()
            else:
                data[key] = obj
    return data
0.006977
def get_account_funds(self, wallet=None, session=None, lightweight=None):
    """
    Get available to bet amount.

    :param str wallet: Name of the wallet in question
    :param requests.session session: Requests session object
    :param bool lightweight: If True will return dict not a resource

    :rtype: resources.AccountFunds
    """
    params = clean_locals(locals())
    method = '%s%s' % (self.URI, 'getAccountFunds')
    (response, elapsed_time) = self.request(method, params, session)
    return self.process_response(response, resources.AccountFunds, elapsed_time, lightweight)
0.004702
def program_binary_data(cls, session, address, data):
    """! @brief Helper routine to write a single chunk of data.

    The session options for chip_erase and trust_crc are used.

    @param cls
    @param session The session instance.
    @param address Start address of the data to program.
    @param data A list of byte values that will be programmed starting at _address_.
    """
    mgr = cls(session)
    mgr.add_data(address, data)
    mgr.commit()
0.00969
def update_field(self, f, obj):
    """
    update a field

    :param str f: name of field to be updated.
    :param obj: value of field to be updated.
    """
    n = self.get_private_name(f)
    if not hasattr(self, n):
        raise AttributeError('{0} is not in {1}'.format(n, self.__class__.__name__))

    setattr(self, n, obj)
    self.__origin_keys.add(f)
0.007576
def find_transition(self, gene: Gene, multiplexes: Tuple[Multiplex, ...]) -> Transition:
    """
    Find and return a transition in the model for the given gene and multiplexes.
    Raise an AttributeError if there is no multiplex in the graph with the given name.
    """
    multiplexes = tuple(multiplex for multiplex in multiplexes if gene in multiplex.genes)
    for transition in self.transitions:
        if transition.gene == gene and set(transition.multiplexes) == set(multiplexes):
            return transition
    raise AttributeError(f'transition K_{gene.name}'
                         + ''.join(f"+{multiplex!r}" for multiplex in multiplexes)
                         + ' does not exist')
0.013025
def copy(self):
    """
    Copy this object into a new object of the same type.
    The returned object will not have a parent object.
    """
    copyClass = self.copyClass
    if copyClass is None:
        copyClass = self.__class__
    copied = copyClass()
    copied.copyData(self)
    return copied
0.005814
def _save_config(self, filename=None):
    """
    Save the given user configuration.
    """
    if filename is None:
        filename = self._config_filename
    parent_path = os.path.dirname(filename)
    if not os.path.isdir(parent_path):
        os.makedirs(parent_path)
    with open(filename, "w") as configfile:
        self._config.write(configfile)
0.005025
def sync_media(self, sync_set=None, clean=0, iter_local_paths=0): """ Uploads select media to an Apache accessible directory. """ # Ensure a site is selected. self.genv.SITE = self.genv.SITE or self.genv.default_site r = self.local_renderer clean = int(clean) self.vprint('Getting site data for %s...' % self.genv.SITE) self.set_site_specifics(self.genv.SITE) sync_sets = r.env.sync_sets if sync_set: sync_sets = [sync_set] ret_paths = [] for _sync_set in sync_sets: for paths in r.env.sync_sets[_sync_set]: r.env.sync_local_path = os.path.abspath(paths['local_path'] % self.genv) if paths['local_path'].endswith('/') and not r.env.sync_local_path.endswith('/'): r.env.sync_local_path += '/' if iter_local_paths: ret_paths.append(r.env.sync_local_path) continue r.env.sync_remote_path = paths['remote_path'] % self.genv if clean: r.sudo('rm -Rf {apache_sync_remote_path}') print('Syncing %s to %s...' % (r.env.sync_local_path, r.env.sync_remote_path)) r.env.tmp_chmod = paths.get('chmod', r.env.chmod) r.sudo('mkdir -p {apache_sync_remote_path}') r.sudo('chmod -R {apache_tmp_chmod} {apache_sync_remote_path}') r.local('rsync -rvz --progress --recursive --no-p --no-g ' '--rsh "ssh -o StrictHostKeyChecking=no -i {key_filename}" {apache_sync_local_path} {user}@{host_string}:{apache_sync_remote_path}') r.sudo('chown -R {apache_web_user}:{apache_web_group} {apache_sync_remote_path}') if iter_local_paths: return ret_paths
0.004306
def validate(self, data):
    """Apply a JSON schema to an object"""
    try:
        schema_path = os.path.normpath(SCHEMA_ROOT)
        location = u'file://%s' % (schema_path)
        fs_resolver = resolver.LocalRefResolver(location, self.schema)
        jsonschema.Draft3Validator(self.schema, resolver=fs_resolver).validate(data)
    except jsonschema.ValidationError as exc:
        # print "data %s" % (data)
        raise jsonschema.exceptions.ValidationError(str(exc))
0.005848
def _get_demand_array_construct(self): """ Returns a construct for an array of power demand data. """ bus_no = integer.setResultsName("bus_no") s_rating = real.setResultsName("s_rating") # MVA p_direction = real.setResultsName("p_direction") # p.u. q_direction = real.setResultsName("q_direction") # p.u. p_bid_max = real.setResultsName("p_bid_max") # p.u. p_bid_min = real.setResultsName("p_bid_min") # p.u. p_optimal_bid = Optional(real).setResultsName("p_optimal_bid") p_fixed = real.setResultsName("p_fixed") # $/hr p_proportional = real.setResultsName("p_proportional") # $/MWh p_quadratic = real.setResultsName("p_quadratic") # $/MW^2h q_fixed = real.setResultsName("q_fixed") # $/hr q_proportional = real.setResultsName("q_proportional") # $/MVArh q_quadratic = real.setResultsName("q_quadratic") # $/MVAr^2h commitment = boolean.setResultsName("commitment") cost_tie_break = real.setResultsName("cost_tie_break") # $/MWh cost_cong_up = real.setResultsName("cost_cong_up") # $/h cost_cong_down = real.setResultsName("cost_cong_down") # $/h status = Optional(boolean).setResultsName("status") demand_data = bus_no + s_rating + p_direction + q_direction + \ p_bid_max + p_bid_min + p_optimal_bid + p_fixed + \ p_proportional + p_quadratic + q_fixed + q_proportional + \ q_quadratic + commitment + cost_tie_break + cost_cong_up + \ cost_cong_down + status + scolon demand_data.setParseAction(self.push_demand) demand_array = Literal("Demand.con") + "=" + "[" + "..." + \ ZeroOrMore(demand_data + Optional("]" + scolon)) return demand_array
0.008894
def modify_mempool(mempool, remove=0, add=0, verbose=False):
    """
    Given a list of txids (mempool), add and remove some items to
    simulate an out of sync mempool.
    """
    for i in range(remove):
        popped = mempool.pop()
        if verbose:
            print("removed:", popped)

    for i in range(add):
        new_txid = _make_txid()
        mempool.append(new_txid)
        if verbose:
            print("added:", new_txid)

    return mempool
0.006803
def execute(self, processProtocol, command, env={}, path=None, uid=None,
            gid=None, usePTY=0, childFDs=None):
    """Execute a process on the remote machine using SSH

    @param processProtocol: the ProcessProtocol instance to connect
    @param executable: the executable program to run
    @param args: the arguments to pass to the process
    @param env: environment variables to request the remote ssh server to set
    @param path: the remote path to start the remote process on
    @param uid: user id or username to connect to the ssh server with
    @param gid: this is not used for remote ssh processes
    @param usePTY: whether to request a pty for the process
    @param childFDs: file descriptors to use for stdin, stdout and stderr
    """
    sshCommand = (command if isinstance(command, SSHCommand)
                  else SSHCommand(command, self.precursor, path))
    commandLine = sshCommand.getCommandLine()

    # Get connection to ssh server
    connectionDeferred = self.getConnection(uid)
    # spawn the remote process
    connectionDeferred.addCallback(connectProcess, processProtocol,
                                   commandLine, env, usePTY, childFDs)
    return connectionDeferred
0.00306
def ULE(a: BitVec, b: BitVec) -> Bool:
    """Create an unsigned less-than-or-equal expression.

    :param a:
    :param b:
    :return:
    """
    return Or(ULT(a, b), a == b)
0.005917
def execute_command_in_message(controller, cliargs, clioptions, message):
    """
    Runs the command in message['command'], which is one of: 'start' / 'stop'.
    Updates the chef's initial command line args and options with args and
    options provided in message['args'] and message['options'].
    """
    SUPPORTED_COMMANDS = ['start']  # , 'stop']  # TODO
    print(message)
    # args and options from SushiBar override command line args and options
    args = cliargs
    options = clioptions
    if 'args' in message:
        args.update(message['args'])
    if 'options' in message:
        options.update(message['options'])

    if message['command'] == 'start':
        if not controller.thread or not controller.thread.isAlive():
            controller.thread = threading.Thread(
                target=controller.chef.run,
                args=(args, options),
            )
            controller.thread.start()
        else:
            config.LOGGER.info('Not starting because chef is already running.')
    else:
        config.LOGGER.info('Command not supported: %s' % message['command'])
0.002703
def range_minmax(ranges):
    """
    Returns the span of a collection of ranges where start is the smallest of
    all starts, and end is the largest of all ends.

    >>> ranges = [(30, 45), (40, 50), (10, 100)]
    >>> range_minmax(ranges)
    (10, 100)
    """
    rmin = min(ranges)[0]
    rmax = max(ranges, key=lambda x: x[1])[1]
    return rmin, rmax
0.002793
def must_stop(self):
    """
    Return True if the worker must stop when the current loop is over.
    """
    return bool(self.terminate_gracefuly and self.end_signal_caught
                or self.num_loops >= self.max_loops
                or self.end_forced
                or self.wanted_end_date and datetime.utcnow() >= self.wanted_end_date)
0.014245
def solvePerfForesight(solution_next,DiscFac,LivPrb,CRRA,Rfree,PermGroFac):
    '''
    Solves a single period consumption-saving problem for a consumer with
    perfect foresight.

    Parameters
    ----------
    solution_next : ConsumerSolution
        The solution to next period's one period problem.
    DiscFac : float
        Intertemporal discount factor for future utility.
    LivPrb : float
        Survival probability; likelihood of being alive at the beginning of
        the succeeding period.
    CRRA : float
        Coefficient of relative risk aversion.
    Rfree : float
        Risk free interest factor on end-of-period assets.
    PermGroFac : float
        Expected permanent income growth factor at the end of this period.

    Returns
    -------
    solution : ConsumerSolution
        The solution to this period's problem.
    '''
    solver = ConsPerfForesightSolver(solution_next,DiscFac,LivPrb,CRRA,Rfree,PermGroFac)
    solution = solver.solve()
    return solution
0.013
def emulate_until(self, target: int):
    """
    Tells the CPU to set up a concrete unicorn emulator and use it to execute
    instructions until target is reached.

    :param target: Where Unicorn should hand control back to Manticore.
        Set to 0 for all instructions.
    """
    self._concrete = True
    self._break_unicorn_at = target
    if self.emu:
        self.emu._stop_at = target
0.009368
def get_sql(self, with_default_expression=True):
    '''
    Returns an SQL expression describing the field (e.g. for CREATE TABLE).

    :param with_default_expression: If True, adds default value to sql.
        It doesn't affect fields with alias and materialized values.
    '''
    if with_default_expression:
        if self.alias:
            return '%s ALIAS %s' % (self.db_type, self.alias)
        elif self.materialized:
            return '%s MATERIALIZED %s' % (self.db_type, self.materialized)
        else:
            default = self.to_db_string(self.default)
            return '%s DEFAULT %s' % (self.db_type, default)
    else:
        return self.db_type
0.002729
def save_history(self, f):
    """Saves the history of ``NeuralNet`` as a json file. In order
    to use this feature, the history must only contain JSON encodable
    Python data structures. Numpy and PyTorch types should not be in
    the history.

    Parameters
    ----------
    f : file-like object or str

    Examples
    --------
    >>> before = NeuralNetClassifier(mymodule)
    >>> before.fit(X, y, epoch=2)  # Train for 2 epochs
    >>> before.save_params('path/to/params')
    >>> before.save_history('path/to/history.json')
    >>> after = NeuralNetClassifier(mymodule).initialize()
    >>> after.load_params('path/to/params')
    >>> after.load_history('path/to/history.json')
    >>> after.fit(X, y, epoch=2)  # Train for another 2 epochs

    """
    # TODO: Remove warning in a future release
    warnings.warn(
        "save_history is deprecated and will be removed in the next "
        "release, please use save_params with the f_history keyword",
        DeprecationWarning)

    self.history.to_file(f)
0.001781
def apmAggregate(self, **criteria):
    """collect all match history's apm data to report player's calculated MMR"""
    apms = [m.apm(self) for m in self.matchSubset(**criteria)]
    if not apms:
        return 0  # no apm information without match history
    return sum(apms) / len(apms)
0.016835
def download_object(container_name, object_name, destination_path, profile, overwrite_existing=False, delete_on_failure=True, **libcloud_kwargs): ''' Download an object to the specified destination path. :param container_name: Container name :type container_name: ``str`` :param object_name: Object name :type object_name: ``str`` :param destination_path: Full path to a file or a directory where the incoming file will be saved. :type destination_path: ``str`` :param profile: The profile key :type profile: ``str`` :param overwrite_existing: True to overwrite an existing file, defaults to False. :type overwrite_existing: ``bool`` :param delete_on_failure: True to delete a partially downloaded file if the download was not successful (hash mismatch / file size). :type delete_on_failure: ``bool`` :param libcloud_kwargs: Extra arguments for the driver's download_object method :type libcloud_kwargs: ``dict`` :return: True if an object has been successfully downloaded, False otherwise. :rtype: ``bool`` CLI Example: .. code-block:: bash salt myminion libcloud_storage.download_object MyFolder me.jpg /tmp/me.jpg profile1 ''' conn = _get_driver(profile=profile) obj = conn.get_object(container_name, object_name) libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) return conn.download_object(obj, destination_path, overwrite_existing, delete_on_failure, **libcloud_kwargs)
0.002978
def find_worst(rho, pval, m=1, rlim=.10, plim=.35):
    """Find the N "worst", i.e. insignificant/random and low, correlations

    Parameters
    ----------
    rho : ndarray, list
        1D array with correlation coefficients

    pval : ndarray, list
        1D array with p-values

    m : int
        The desired number of indices to return
        (How many "worst" correlations to find?)

    rlim : float
        Desired maximum absolute correlation coefficient (Default: 0.10)

    plim : float
        Desired minimum p-value (Default: 0.35)

    Returns
    -------
    selected : list
        Indices of rho and pval of the "worst" correlations.
    """
    # convert to lists
    n = len(rho)
    r = list(np.abs(rho))
    p = list(pval)
    i = list(range(n))

    # check m
    if m > n:
        warnings.warn(
            'm is bigger than the available correlations in rho and pval.')
        m = n

    # selected indices
    selected = list()

    # (1) pick the highest/worst p-value
    #     |r| <= r_lim
    #     p > p_lim
    it = 0
    while (len(selected) < m) and (it < n):
        temp = p.index(max(p))  # temporary index of the remaining values
        worst = i[temp]  # store original index as 'worst' before popping
        # check
        if (r[temp] <= rlim) and (p[temp] > plim):
            # delete from lists
            r.pop(temp)
            p.pop(temp)
            i.pop(temp)
            # append to selected
            selected.append(worst)
        # next step
        it = it + 1

    # print(selected, i)

    # (2) Just pick the highest/worst p-value of the remaining
    #     with bad correlations
    #     |r| <= r_lim
    it = 0
    n2 = len(i)
    while (len(selected) < m) and (it < n2):
        temp = p.index(max(p))  # temporary index of the remaining values
        worst = i[temp]  # store original index as 'worst' before popping
        # check
        if (r[temp] <= rlim):
            # delete from lists
            r.pop(temp)
            p.pop(temp)
            i.pop(temp)
            # append to selected
            selected.append(worst)
        # next step
        it = it + 1

    # (3) Pick the lowest correlations
    it = 0
    n3 = len(i)
    while (len(selected) < m) and (it < n3):
        # find the smallest correlation coefficient
        temp = r.index(min(r))
        worst = i[temp]
        # delete from lists
        r.pop(temp)
        p.pop(temp)
        i.pop(temp)
        # append to selected
        selected.append(worst)
        # next step
        it = it + 1

    return selected
0.000389
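A small usage sketch for find_worst; the rho/pval arrays below are made up for illustration, and numpy/warnings are assumed to be importable as in the function body:

import numpy as np

rho = np.array([0.90, 0.05, -0.30, 0.02])
pval = np.array([0.001, 0.60, 0.04, 0.50])

# Ask for the two "worst" correlations; with the data above this should
# return [1, 3]: both are weak (|r| <= 0.10) and insignificant (p > 0.35),
# picked in order of decreasing p-value.
worst = find_worst(rho, pval, m=2)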
def extract_hook_names(ent): """Extract hook names from the given entity""" hnames = [] for hook in ent["hooks"]["enter"] + ent["hooks"]["exit"]: hname = os.path.basename(hook["fpath_orig"]) hname = os.path.splitext(hname)[0] hname = hname.strip() hname = hname.replace("_enter", "") hname = hname.replace("_exit", "") if hname in hnames: continue hnames.append(hname) hnames.sort() return hnames
0.002049
def dangling(self): """ List of entities that aren't included in a closed path Returns ---------- dangling: (n,) int, index of self.entities """ if len(self.paths) == 0: return np.arange(len(self.entities)) else: included = np.hstack(self.paths) dangling = np.setdiff1d(np.arange(len(self.entities)), included) return dangling
0.00432
def _create_raw_data(self):
    """
    Gathers the different section ids and builds the initial cookie data.
    :return: A dictionary like:
        {'analyses': 'all', 'analysisrequest': 'all', 'worksheets': 'all'}
    """
    result = {}
    for section in self.get_sections():
        result[section.get('id')] = 'all'
    return result
0.005155
def validate_user(user, device, token): ''' Send a message to a Pushover user or group. :param user: The user or group name, either will work. :param device: The device for the user. :param token: The PushOver token. ''' res = { 'message': 'User key is invalid', 'result': False } parameters = dict() parameters['user'] = user parameters['token'] = token if device: parameters['device'] = device response = query(function='validate_user', method='POST', header_dict={'Content-Type': 'application/x-www-form-urlencoded'}, data=_urlencode(parameters)) if response['res']: if 'message' in response: _message = response.get('message', '') if 'status' in _message: if _message.get('dict', {}).get('status', None) == 1: res['result'] = True res['message'] = 'User key is valid.' else: res['result'] = False res['message'] = ''.join(_message.get('dict', {}).get('errors')) return res
0.002421
def get_basic_functional_groups(self, func_groups=None): """ Identify functional groups that cannot be identified by the Ertl method of get_special_carbon and get_heteroatoms, such as benzene rings, methyl groups, and ethyl groups. TODO: Think of other functional groups that are important enough to be added (ex: do we need ethyl, butyl, propyl?) :param func_groups: List of strs representing the functional groups of interest. Default to None, meaning that all of the functional groups defined in this function will be sought. :return: list of sets of ints, representing groups of connected atoms """ strat = OpenBabelNN() hydrogens = {n for n in self.molgraph.graph.nodes if str(self.species[n]) == "H"} carbons = [n for n in self.molgraph.graph.nodes if str(self.species[n]) == "C"] if func_groups is None: func_groups = ["methyl", "phenyl"] results = [] if "methyl" in func_groups: for node in carbons: neighbors = strat.get_nn_info(self.molecule, node) hs = {n["site_index"] for n in neighbors if n["site_index"] in hydrogens} # Methyl group is CH3, but this will also catch methane if len(hs) >= 3: hs.add(node) results.append(hs) if "phenyl" in func_groups: rings_indices = [set(sum(ring, ())) for ring in self.molgraph.find_rings()] possible_phenyl = [r for r in rings_indices if len(r) == 6] for ring in possible_phenyl: # Phenyl group should have only one (0 for benzene) member whose # neighbors are not two carbons and one hydrogen num_deviants = 0 for node in ring: neighbors = strat.get_nn_info(self.molecule, node) neighbor_spec = sorted([str(self.species[n["site_index"]]) for n in neighbors]) if neighbor_spec != ["C", "C", "H"]: num_deviants += 1 if num_deviants <= 1: for node in ring: ring_group = copy.deepcopy(ring) neighbors = self.molgraph.graph[node] # Add hydrogens to the functional group for neighbor in neighbors.keys(): if neighbor in hydrogens: ring_group.add(neighbor) results.append(ring_group) return results
0.002178
def get_session(region, profile=None): """Creates a boto3 session with a cache Args: region (str): The region for the session profile (str): The profile for the session Returns: :class:`boto3.session.Session`: A boto3 session with credential caching """ if profile is None: logger.debug("No AWS profile explicitly provided. " "Falling back to default.") profile = default_profile logger.debug("Building session using profile \"%s\" in region \"%s\"" % (profile, region)) session = boto3.Session(region_name=region, profile_name=profile) c = session._session.get_component('credential_provider') provider = c.get_provider('assume-role') provider.cache = credential_cache provider._prompter = ui.getpass return session
0.001166
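A minimal usage sketch for get_session; the region, profile name, and S3 call are illustrative only, not taken from the source:

# Build a cached-credential session and use it like any other boto3 session.
session = get_session("us-east-1", profile="deploy")
s3 = session.client("s3")
for bucket in s3.list_buckets().get("Buckets", []):
    print(bucket["Name"])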
def encode(input, output_filename): """Encodes the input data with reed-solomon error correction in 223 byte blocks, and outputs each block along with 32 parity bytes to a new file by the given filename. input is a file-like object The outputted image will be in png format, and will be 255 by x pixels with one color channel. X is the number of 255 byte blocks from the input. Each block of data will be one row, therefore, the data can be recovered if no more than 16 pixels per row are altered. """ coder = rs.RSCoder(255,223) output = [] while True: block = input.read(223) if not block: break code = coder.encode_fast(block) output.append(code) sys.stderr.write(".") sys.stderr.write("\n") out = Image.new("L", (rowstride,len(output))) out.putdata("".join(output)) out.save(output_filename)
0.00442
def get_success_url(self):
    """
    Returns the url to redirect to after a successful update.

    If `self.redirect_to_view` is None, the current url will be used.
    Otherwise get_view_url will be called on the current bundle using
    `self.redirect_to_view` as the view name. If the name is "main" or
    "main_list" no object will be passed; otherwise `self.object` will
    be passed as a kwarg.
    """
    if self.redirect_to_view:
        kwargs = {}
        if self.redirect_to_view != 'main' and \
                self.redirect_to_view != 'main_list':
            kwargs['object'] = self.object
        return self.bundle.get_view_url(self.redirect_to_view,
                                        self.request.user, kwargs, self.kwargs)
    else:
        return self.request.build_absolute_uri()
0.003195
def diff(name_a, name_b=None, **kwargs): ''' Display the difference between a snapshot of a given filesystem and another snapshot of that filesystem from a later time or the current contents of the filesystem. name_a : string name of snapshot name_b : string (optional) name of snapshot or filesystem show_changetime : boolean display the path's inode change time as the first column of output. (default = True) show_indication : boolean display an indication of the type of file. (default = True) parsable : boolean if true we don't parse the timestamp to a more readable date (default = True) .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' zfs.diff myzpool/mydataset@yesterday myzpool/mydataset ''' ## Configure command # NOTE: initialize the defaults flags = ['-H'] target = [] # NOTE: set extra config from kwargs if kwargs.get('show_changetime', True): flags.append('-t') if kwargs.get('show_indication', True): flags.append('-F') # NOTE: update target target.append(name_a) if name_b: target.append(name_b) ## Diff filesystem/snapshot res = __salt__['cmd.run_all']( __utils__['zfs.zfs_command']( command='diff', flags=flags, target=target, ), python_shell=False, ) if res['retcode'] != 0: return __utils__['zfs.parse_command_result'](res) else: if not kwargs.get('parsable', True) and kwargs.get('show_changetime', True): ret = OrderedDict() for entry in res['stdout'].splitlines(): entry = entry.split() entry_timestamp = __utils__['dateutils.strftime'](entry[0], '%Y-%m-%d.%H:%M:%S.%f') entry_data = "\t\t".join(entry[1:]) ret[entry_timestamp] = entry_data else: ret = res['stdout'].splitlines() return ret
0.00346
def add_gene_info(self, variant_obj, gene_panels=None): """Add extra information about genes from gene panels Args: variant_obj(dict): A variant from the database gene_panels(list(dict)): List of panels from database """ gene_panels = gene_panels or [] # Add a variable that checks if there are any refseq transcripts variant_obj['has_refseq'] = False # We need to check if there are any additional information in the gene panels # extra_info will hold information from gene panels # Collect all extra info from the panels in a dictionary with hgnc_id as keys extra_info = {} for panel_obj in gene_panels: for gene_info in panel_obj['genes']: hgnc_id = gene_info['hgnc_id'] if hgnc_id not in extra_info: extra_info[hgnc_id] = [] extra_info[hgnc_id].append(gene_info) # Loop over the genes in the variant object to add information # from hgnc_genes and panel genes to the variant object for variant_gene in variant_obj.get('genes', []): hgnc_id = variant_gene['hgnc_id'] # Get the hgnc_gene hgnc_gene = self.hgnc_gene(hgnc_id) if not hgnc_gene: continue # Create a dictionary with transcripts information # Use ensembl transcript id as keys transcripts_dict = {} # Add transcript information from the hgnc gene for transcript in hgnc_gene.get('transcripts', []): tx_id = transcript['ensembl_transcript_id'] transcripts_dict[tx_id] = transcript # Add the transcripts to the gene object hgnc_gene['transcripts_dict'] = transcripts_dict if hgnc_gene.get('incomplete_penetrance'): variant_gene['omim_penetrance'] = True ############# PANEL SPECIFIC INFORMATION ############# # Panels can have extra information about genes and transcripts panel_info = extra_info.get(hgnc_id, []) # Manually annotated disease associated transcripts disease_associated = set() # We need to strip the version to compare against others disease_associated_no_version = set() manual_penetrance = False mosaicism = False manual_inheritance = set() # We need to loop since there can be information from multiple panels for gene_info in panel_info: # Check if there are manually annotated disease transcripts for tx in gene_info.get('disease_associated_transcripts', []): # We remove the version of transcript at this stage stripped = re.sub(r'\.[0-9]', '', tx) disease_associated_no_version.add(stripped) disease_associated.add(tx) if gene_info.get('reduced_penetrance'): manual_penetrance = True if gene_info.get('mosaicism'): mosaicism = True manual_inheritance.update(gene_info.get('inheritance_models', [])) variant_gene['disease_associated_transcripts'] = list(disease_associated) variant_gene['manual_penetrance'] = manual_penetrance variant_gene['mosaicism'] = mosaicism variant_gene['manual_inheritance'] = list(manual_inheritance) # Now add the information from hgnc and panels # to the transcripts on the variant # First loop over the variants transcripts for transcript in variant_gene.get('transcripts', []): tx_id = transcript['transcript_id'] if not tx_id in transcripts_dict: continue # This is the common information about the transcript hgnc_transcript = transcripts_dict[tx_id] # Check in the common information if it is a primary transcript if hgnc_transcript.get('is_primary'): transcript['is_primary'] = True # If the transcript has a ref seq identifier we add that # to the variants transcript if not hgnc_transcript.get('refseq_id'): continue refseq_id = hgnc_transcript['refseq_id'] transcript['refseq_id'] = refseq_id variant_obj['has_refseq'] = True # Check if the refseq id are disease associated if refseq_id in disease_associated_no_version: 
transcript['is_disease_associated'] = True # Since a ensemble transcript can have multiple refseq identifiers we add all of # those transcript['refseq_identifiers'] = hgnc_transcript.get('refseq_identifiers',[]) variant_gene['common'] = hgnc_gene # Add the associated disease terms variant_gene['disease_terms'] = self.disease_terms(hgnc_id) return variant_obj
0.002313
def redistribute_duplicates(data):
    """Given a dictionary of photo sets, compare lat/lon between sets; if two
    sets share coordinates, randomly nudge them apart so the google map
    markers do not overlap
    """
    coordinate_list = []
    # Build a list of coordinates
    for myset in data['sets']:
        coordinate_list.append((myset['latitude'], myset['longitude']))
    for myset in data['sets']:
        lat = myset['latitude']
        lon = myset['longitude']
        item = (lat, lon)
        if coordinate_list.count(item) > 1:
            print("moving %s" % (myset['set_title']))
            random_number = random.random()
            myset['latitude'] = str(random_number * POS_MOVE_DEG
                                    + float(myset['latitude']))
            myset['longitude'] = str(random_number * POS_MOVE_DEG
                                     + float(myset['longitude']))
0.021251
def disconnect(self): """disconnect events""" self.canvas.mpl_disconnect(self._cidmotion) self.canvas.mpl_disconnect(self._ciddraw)
0.012903
def acked_tuple(self, stream_id, complete_latency_ns): """Apply updates to the ack metrics""" self.update_count(self.ACK_COUNT, key=stream_id) self.update_reduced_metric(self.COMPLETE_LATENCY, complete_latency_ns, key=stream_id)
0.008333
def get_asset_composition_design_session(self, proxy): """Gets the session for creating asset compositions. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.repository.AssetCompositionDesignSession) - an ``AssetCompositionDesignSession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_asset_composition_design()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_asset_composition_design()`` is ``true``.* """ if not self.supports_asset_composition_design(): raise errors.Unimplemented() # pylint: disable=no-member return sessions.AssetCompositionDesignSession(proxy=proxy, runtime=self._runtime)
0.00346
def cache_affected_objects_review_history(portal): """Fills the review_history_cache dict. The keys are the uids of the objects to be bound to new workflow and the values are their current review_history """ logger.info("Caching review_history ...") query = dict(portal_type=NEW_SENAITE_WORKFLOW_BINDINGS) brains = api.search(query, UID_CATALOG) total = len(brains) for num, brain in enumerate(brains): if num % 100 == 0: logger.info("Caching review_history: {}/{}" .format(num, total)) review_history = get_review_history_for(brain) review_history_cache[api.get_uid(brain)] = review_history
0.002928
def from_taxtable(cls, taxtable_fp): """ Generate a node from an open handle to a taxtable, as generated by ``taxit taxtable`` """ r = csv.reader(taxtable_fp) headers = next(r) rows = (collections.OrderedDict(list(zip(headers, i))) for i in r) row = next(rows) root = cls(rank=row['rank'], tax_id=row[ 'tax_id'], name=row['tax_name']) path_root = headers.index('root') root.ranks = headers[path_root:] for row in rows: rank, tax_id, name = [ row[i] for i in ('rank', 'tax_id', 'tax_name')] path = [_f for _f in list(row.values())[path_root:] if _f] parent = root.path(path[:-1]) parent.add_child(cls(rank, tax_id, name=name)) return root
0.002421
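For illustration only: the kind of taxtable layout the parser above expects (a header with rank, tax_id, tax_name and a root column, then one lineage path per row). The class name TaxNode is an assumption, standing in for whatever class defines from_taxtable:

import io

taxtable = io.StringIO(
    "tax_id,rank,tax_name,root,superkingdom,phylum\n"
    "1,root,root,1,,\n"
    "2,superkingdom,Bacteria,1,2,\n"
    "1224,phylum,Proteobacteria,1,2,1224\n"
)
root = TaxNode.from_taxtable(taxtable)  # TaxNode is an assumed class name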
def add(self, *tasks): """ Interfaces the GraphNode `add` method """ nodes = [x.node for x in tasks] self.node.add(*nodes) return self
0.011494
def _sign_of(money): """Determines the amount sign of a money instance Args: money (:class:`endpoints_management.gen.servicecontrol_v1_messages.Money`): the instance to test Return: int: 1, 0 or -1 """ units = money.units nanos = money.nanos if units: if units > 0: return 1 elif units < 0: return -1 if nanos: if nanos > 0: return 1 elif nanos < 0: return -1 return 0
0.003945
def configure_retrievefor(self, ns, definition): """ Register a relation endpoint. The definition's func should be a retrieve function, which must: - accept kwargs for path data and optional request data - return an item The definition's request_schema will be used to process query string arguments, if any. :param ns: the namespace :param definition: the endpoint definition """ request_schema = definition.request_schema or Schema() @self.add_route(ns.relation_path, Operation.RetrieveFor, ns) @qs(request_schema) @response(definition.response_schema) @wraps(definition.func) def retrieve(**path_data): headers = dict() request_data = load_query_string_data(request_schema) response_data = require_response_data(definition.func(**merge_data(path_data, request_data))) definition.header_func(headers, response_data) response_format = self.negotiate_response_content(definition.response_formats) return dump_response_data( definition.response_schema, response_data, headers=headers, response_format=response_format, ) retrieve.__doc__ = "Retrieve {} relative to a {}".format(pluralize(ns.object_name), ns.subject_name)
0.004283
def has_types(self, types, all_=True): ''' Check whether the current component list matches all Stim types in the types argument. Args: types (Stim, list): a Stim class or iterable of Stim classes. all_ (bool): if True, all input types must match; if False, at least one input type must match. Returns: True if all passed types match at least one Stim in the component list, otherwise False. ''' func = all if all_ else any return func([self.get_stim(t) for t in listify(types)])
0.003328
def match(self, url): """ Return a list of all active Messages which match the given URL. """ return list({ message for message in self.active() if message.is_global or message.match(url) })
0.007605
def _PromptUserForInput(self, input_text): """Prompts user for an input. Args: input_text (str): text used for prompting the user for input. Returns: str: input read from the user. """ self._output_writer.Write('{0:s}: '.format(input_text)) return self._input_reader.Read()
0.003215
def transfer_command( batch, sync_level, recursive, destination, source, label, preserve_mtime, verify_checksum, encrypt, submission_id, dry_run, delete, deadline, skip_activation_check, notify, perf_cc, perf_p, perf_pp, perf_udt, ): """ Executor for `globus transfer` """ source_endpoint, cmd_source_path = source dest_endpoint, cmd_dest_path = destination if recursive and batch: raise click.UsageError( ( "You cannot use --recursive in addition to --batch. " "Instead, use --recursive on lines of --batch input " "which need it" ) ) if (cmd_source_path is None or cmd_dest_path is None) and (not batch): raise click.UsageError( ("transfer requires either SOURCE_PATH and DEST_PATH or " "--batch") ) # because python can't handle multiple **kwargs expansions in a single # call, we need to get a little bit clever # both the performance options (of which there are a few), and the # notification options (also there are a few) have elements which should be # omitted in some cases # notify comes to us clean, perf opts need more care # put them together into a dict before passing to TransferData kwargs = {} perf_opts = dict( (k, v) for (k, v) in dict( perf_cc=perf_cc, perf_p=perf_p, perf_pp=perf_pp, perf_udt=perf_udt ).items() if v is not None ) kwargs.update(perf_opts) kwargs.update(notify) client = get_client() transfer_data = TransferData( client, source_endpoint, dest_endpoint, label=label, sync_level=sync_level, verify_checksum=verify_checksum, preserve_timestamp=preserve_mtime, encrypt_data=encrypt, submission_id=submission_id, delete_destination_extra=delete, deadline=deadline, skip_activation_check=skip_activation_check, **kwargs ) if batch: @click.command() @click.option("--recursive", "-r", is_flag=True) @click.argument("source_path", type=TaskPath(base_dir=cmd_source_path)) @click.argument("dest_path", type=TaskPath(base_dir=cmd_dest_path)) def process_batch_line(dest_path, source_path, recursive): """ Parse a line of batch input and turn it into a transfer submission item. """ transfer_data.add_item( str(source_path), str(dest_path), recursive=recursive ) shlex_process_stdin( process_batch_line, ( "Enter transfers, line by line, as\n\n" " [--recursive] SOURCE_PATH DEST_PATH\n" ), ) else: transfer_data.add_item(cmd_source_path, cmd_dest_path, recursive=recursive) if dry_run: formatted_print( transfer_data, response_key="DATA", fields=( ("Source Path", "source_path"), ("Dest Path", "destination_path"), ("Recursive", "recursive"), ), ) # exit safely return # autoactivate after parsing all args and putting things together # skip this if skip-activation-check is given if not skip_activation_check: autoactivate(client, source_endpoint, if_expires_in=60) autoactivate(client, dest_endpoint, if_expires_in=60) res = client.submit_transfer(transfer_data) formatted_print( res, text_format=FORMAT_TEXT_RECORD, fields=(("Message", "message"), ("Task ID", "task_id")), )
0.000796
def accept_publication_license(cursor, publication_id, user_id, document_ids, is_accepted=False): """Accept or deny the document license for the publication (``publication_id``) and user (at ``user_id``) for the documents (listed by id as ``document_ids``). """ cursor.execute("""\ UPDATE license_acceptances AS la SET accepted = %s FROM pending_documents AS pd WHERE pd.publication_id = %s AND la.user_id = %s AND pd.uuid = ANY(%s::UUID[]) AND pd.uuid = la.uuid""", (is_accepted, publication_id, user_id, document_ids,))
0.00165
def fan_speed(self, speed: int = None) -> bool: """Adjust Fan Speed by Specifying 1,2,3 as argument or cycle through speeds increasing by one""" body = helpers.req_body(self.manager, 'devicestatus') body['uuid'] = self.uuid head = helpers.req_headers(self.manager) if self.details.get('mode') != 'manual': self.mode_toggle('manual') else: if speed is not None: level = int(self.details.get('level')) if speed == level: return False elif speed in [1, 2, 3]: body['level'] = speed else: if (level + 1) > 3: body['level'] = 1 else: body['level'] = int(level + 1) r, _ = helpers.call_api('/131airPurifier/v1/device/updateSpeed', 'put', json=body, headers=head) if r is not None and helpers.check_response(r, 'airpur_status'): self.details['level'] = body['level'] return True else: return False
0.001712
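A short usage sketch; `purifier` is an assumed, already-initialized device object exposing the fan_speed method above. With no argument the level cycles 1 -> 2 -> 3 -> 1; note that if the device is not in manual mode the call only switches the mode:

purifier.fan_speed(2)   # set level 2 explicitly; returns False if already at 2
purifier.fan_speed()    # no argument: step to the next level, wrapping 3 -> 1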
def verify(token, key, algorithms, verify=True): """Verifies a JWS string's signature. Args: token (str): A signed JWS to be verified. key (str or dict): A key to attempt to verify the payload with. Can be individual JWK or JWK set. algorithms (str or list): Valid algorithms that should be used to verify the JWS. Returns: str: The str representation of the payload, assuming the signature is valid. Raises: JWSError: If there is an exception verifying a token. Examples: >>> token = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhIjoiYiJ9.jiMyrsmD8AoHWeQgmxZ5yq8z0lXS67_QGs52AzC8Ru8' >>> jws.verify(token, 'secret', algorithms='HS256') """ header, payload, signing_input, signature = _load(token) if verify: _verify_signature(signing_input, header, signature, key, algorithms) return payload
0.0044
def delete(self, database, key, callback=None): """ Delete an item from the given database. :param database: The database from which to delete the value. :type database: .BlobDatabaseID :param key: The key to delete. :type key: uuid.UUID :param callback: A callback to be called on success or failure. """ token = self._get_token() self._enqueue(self._PendingItem(token, BlobCommand(token=token, database=database, content=DeleteCommand(key=key.bytes)), callback))
0.006211
def create_new_space(self, space_definition, callback=None): """ Creates a new Space. The incoming Space does not include an id, but must include a Key and Name, and should include a Description. :param space_definition (dict): The dictionary describing the new space. Must include keys "key", "name", and "description". :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns. Default: None (no callback, raw data returned). :return: The JSON data returned from the space endpoint, or the results of the callback. Will raise requests.HTTPError on bad input, potentially. Example space data: { "key": "TST", "name": "Example space", "description": { "plain": { "value": "This is an example space", "representation": "plain" } } } """ assert isinstance(space_definition, dict) and {"key", "name", "description"} <= set(space_definition.keys()) return self._service_post_request("rest/api/space", data=json.dumps(space_definition), headers={"Content-Type": "application/json"}, callback=callback)
0.006522
def get_pages(self, url, page=1, page_size=100, yield_pages=False, **filters): """ Get all pages at url, yielding individual results :param url: the url to fetch :param page: start from this page :param page_size: results per page :param yield_pages: yield whole pages rather than individual results :param filters: additional filters :return: a generator of objects (dicts) from the API """ n = 0 for page in itertools.count(page): r = self.request(url, page=page, page_size=page_size, **filters) n += len(r['results']) log.debug("Got {url} page {page} / {pages}".format(url=url, **r)) if yield_pages: yield r else: for row in r['results']: yield row if r['next'] is None: break
0.002205
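A usage sketch for get_pages; the endpoint path, the `project` filter, and the `client` object are made up for illustration:

# Iterate over individual results across all pages:
for article in client.get_pages("search/", project=12, page_size=50):
    print(article["id"])

# Or inspect page-level metadata instead:
for page in client.get_pages("search/", yield_pages=True):
    print(len(page["results"]), "results, next:", page["next"])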
def load_config(filename=None, text=None, test=False, commit=True, debug=False, replace=False, commit_in=None, commit_at=None, revert_in=None, revert_at=None, commit_jid=None, inherit_napalm_device=None, saltenv='base', **kwargs): # pylint: disable=unused-argument ''' Applies configuration changes on the device. It can be loaded from a file or from inline string. If you send both a filename and a string containing the configuration, the file has higher precedence. By default this function will commit the changes. If there are no changes, it does not commit and the flag ``already_configured`` will be set as ``True`` to point this out. To avoid committing the configuration, set the argument ``test`` to ``True`` and will discard (dry run). To keep the changes but not commit, set ``commit`` to ``False``. To replace the config, set ``replace`` to ``True``. filename Path to the file containing the desired configuration. This can be specified using the absolute path to the file, or using one of the following URL schemes: - ``salt://``, to fetch the template from the Salt fileserver. - ``http://`` or ``https://`` - ``ftp://`` - ``s3://`` - ``swift://`` .. versionchanged:: 2018.3.0 text String containing the desired configuration. This argument is ignored when ``filename`` is specified. test: False Dry run? If set as ``True``, will apply the config, discard and return the changes. Default: ``False`` and will commit the changes on the device. commit: True Commit? Default: ``True``. debug: False Debug mode. Will insert a new key under the output dictionary, as ``loaded_config`` containing the raw configuration loaded on the device. .. versionadded:: 2016.11.2 replace: False Load and replace the configuration. Default: ``False``. .. versionadded:: 2016.11.2 commit_in: ``None`` Commit the changes in a specific number of minutes / hours. Example of accepted formats: ``5`` (commit in 5 minutes), ``2m`` (commit in 2 minutes), ``1h`` (commit the changes in 1 hour)`, ``5h30m`` (commit the changes in 5 hours and 30 minutes). .. note:: This feature works on any platforms, as it does not rely on the native features of the network operating system. .. note:: After the command is executed and the ``diff`` is not satisfactory, or for any other reasons you have to discard the commit, you are able to do so using the :py:func:`net.cancel_commit <salt.modules.napalm_network.cancel_commit>` execution function, using the commit ID returned by this function. .. warning:: Using this feature, Salt will load the exact configuration you expect, however the diff may change in time (i.e., if an user applies a manual configuration change, or a different process or command changes the configuration in the meanwhile). .. versionadded:: 2019.2.0 commit_at: ``None`` Commit the changes at a specific time. Example of accepted formats: ``1am`` (will commit the changes at the next 1AM), ``13:20`` (will commit at 13:20), ``1:20am``, etc. .. note:: This feature works on any platforms, as it does not rely on the native features of the network operating system. .. note:: After the command is executed and the ``diff`` is not satisfactory, or for any other reasons you have to discard the commit, you are able to do so using the :py:func:`net.cancel_commit <salt.modules.napalm_network.cancel_commit>` execution function, using the commit ID returned by this function. .. 
warning:: Using this feature, Salt will load the exact configuration you expect, however the diff may change in time (i.e., if an user applies a manual configuration change, or a different process or command changes the configuration in the meanwhile). .. versionadded:: 2019.2.0 revert_in: ``None`` Commit and revert the changes in a specific number of minutes / hours. Example of accepted formats: ``5`` (revert in 5 minutes), ``2m`` (revert in 2 minutes), ``1h`` (revert the changes in 1 hour)`, ``5h30m`` (revert the changes in 5 hours and 30 minutes). .. note:: To confirm the commit, and prevent reverting the changes, you will have to execute the :mod:`net.confirm_commit <salt.modules.napalm_network.confirm_commit>` function, using the commit ID returned by this function. .. warning:: This works on any platform, regardless if they have or don't have native capabilities to confirming a commit. However, please be *very* cautious when using this feature: on Junos (as it is the only NAPALM core platform supporting this natively) it executes a commit confirmed as you would do from the command line. All the other platforms don't have this capability natively, therefore the revert is done via Salt. That means, your device needs to be reachable at the moment when Salt will attempt to revert your changes. Be cautious when pushing configuration changes that would prevent you reach the device. Similarly, if an user or a different process apply other configuration changes in the meanwhile (between the moment you commit and till the changes are reverted), these changes would be equally reverted, as Salt cannot be aware of them. .. versionadded:: 2019.2.0 revert_at: ``None`` Commit and revert the changes at a specific time. Example of accepted formats: ``1am`` (will commit and revert the changes at the next 1AM), ``13:20`` (will commit and revert at 13:20), ``1:20am``, etc. .. note:: To confirm the commit, and prevent reverting the changes, you will have to execute the :mod:`net.confirm_commit <salt.modules.napalm_network.confirm_commit>` function, using the commit ID returned by this function. .. warning:: This works on any platform, regardless if they have or don't have native capabilities to confirming a commit. However, please be *very* cautious when using this feature: on Junos (as it is the only NAPALM core platform supporting this natively) it executes a commit confirmed as you would do from the command line. All the other platforms don't have this capability natively, therefore the revert is done via Salt. That means, your device needs to be reachable at the moment when Salt will attempt to revert your changes. Be cautious when pushing configuration changes that would prevent you reach the device. Similarly, if an user or a different process apply other configuration changes in the meanwhile (between the moment you commit and till the changes are reverted), these changes would be equally reverted, as Salt cannot be aware of them. .. versionadded:: 2019.2.0 saltenv: ``base`` Specifies the Salt environment name. .. versionadded:: 2018.3.0 :return: a dictionary having the following keys: * result (bool): if the config was applied successfully. It is ``False`` only in case of failure. 
In case \ there are no changes to be applied and successfully performs all operations it is still ``True`` and so will be \ the ``already_configured`` flag (example below) * comment (str): a message for the user * already_configured (bool): flag to check if there were no changes applied * loaded_config (str): the configuration loaded on the device. Requires ``debug`` to be set as ``True`` * diff (str): returns the config changes applied CLI Example: .. code-block:: bash salt '*' net.load_config text='ntp peer 192.168.0.1' salt '*' net.load_config filename='/absolute/path/to/your/file' salt '*' net.load_config filename='/absolute/path/to/your/file' test=True salt '*' net.load_config filename='/absolute/path/to/your/file' commit=False Example output: .. code-block:: python { 'comment': 'Configuration discarded.', 'already_configured': False, 'result': True, 'diff': '[edit interfaces xe-0/0/5]+ description "Adding a description";' } ''' fun = 'load_merge_candidate' if replace: fun = 'load_replace_candidate' if salt.utils.napalm.not_always_alive(__opts__): # if a not-always-alive proxy # or regular minion # do not close the connection after loading the config # this will be handled in _config_logic # after running the other features: # compare_config, discard / commit # which have to be over the same session napalm_device['CLOSE'] = False # pylint: disable=undefined-variable if filename: text = __salt__['cp.get_file_str'](filename, saltenv=saltenv) if text is False: # When using salt:// or https://, if the resource is not available, # it will either raise an exception, or return False. ret = { 'result': False, 'out': None } ret['comment'] = 'Unable to read from {}. Please specify a valid file or text.'.format(filename) log.error(ret['comment']) return ret if commit_jid: # When the commit_jid argument is passed, it probably is a scheduled # commit to be executed, and filename is a temporary file which # can be removed after reading it. salt.utils.files.safe_rm(filename) _loaded = salt.utils.napalm.call( napalm_device, # pylint: disable=undefined-variable fun, **{ 'config': text } ) return _config_logic(napalm_device, # pylint: disable=undefined-variable _loaded, test=test, debug=debug, replace=replace, commit_config=commit, loaded_config=text, commit_at=commit_at, commit_in=commit_in, revert_in=revert_in, revert_at=revert_at, commit_jid=commit_jid, **kwargs)
0.00223
def set_default_option(cls, key, value): """Class method. Set the default value of the option `key` (string) to `value` for all future instances of the class. Note that this does not affect existing instances or the instance called from.""" cls._default_options.update(cls._option_schema({key: value}))
0.005831
def _validate_checksum(self, buffer):
    """Validate the buffer response against its checksum.

    When reading the serial interface, data comes back as a raw frame that
    carries a fixed header and a trailing checksum byte.

    :returns: bool
    """
    self._log.debug("Validating the buffer")
    if len(buffer) == 0:
        self._log.debug("Buffer was empty")
        if self._conn.isOpen():
            self._log.debug('Closing connection')
            self._conn.close()
        return False
    p0 = hex2int(buffer[0])
    p1 = hex2int(buffer[1])
    checksum = sum([hex2int(c) for c in buffer[:35]]) & 0xFF
    p35 = hex2int(buffer[35])
    if p0 != 165 or p1 != 150 or p35 != checksum:
        self._log.debug("Buffer checksum was not valid")
        return False
    return True
0.002334
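For reference, a sketch of a frame that satisfies the checks above, treating the buffer as 36 raw byte values (the real buffer apparently carries hex strings that hex2int converts): bytes 0 and 1 are the 165/150 header and byte 35 is the low byte of the sum of the first 35 bytes:

frame = [0xA5, 0x96] + [0x00] * 33   # header (165, 150) plus 33 payload bytes
frame.append(sum(frame) & 0xFF)      # byte 35: checksum over bytes 0..34
assert len(frame) == 36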
def _example_rt_data(quote_ctx):
    """
    Fetch real-time (intraday) data and print: time, data status, minutes since
    open, current price, previous close, average price, volume and turnover.
    """
    stock_code_list = ["US.AAPL", "HK.00700"]

    ret_status, ret_data = quote_ctx.subscribe(stock_code_list, ft.SubType.RT_DATA)
    if ret_status != ft.RET_OK:
        print(ret_data)
        exit()

    for stk_code in stock_code_list:
        ret_status, ret_data = quote_ctx.get_rt_data(stk_code)
        if ret_status != ft.RET_OK:
            print(stk_code, ret_data)
            exit()
        print("%s RT_DATA" % stk_code)
        print(ret_data)
        print("\n\n")
0.003454
def _call_zincrby(self, command, value, *args, **kwargs):
    """
    This command updates the score of a given value. The value may be new to
    the sorted set, so we index it.
    """
    if self.indexable:
        self.index([value])
    return self._traverse_command(command, value, *args, **kwargs)
0.005988
def Ergun(dp, voidage, vs, rho, mu, L=1):
    r'''Calculates pressure drop across a packed bed of spheres using a
    correlation developed in [1]_, as shown in [2]_ and [3]_. Eighteenth most
    accurate correlation overall in the review of [2]_.

    Most often presented in the following form:

    .. math::
        \Delta P = \frac{150\mu (1-\epsilon)^2 v_s L}{\epsilon^3 d_p^2}
        + \frac{1.75 (1-\epsilon) \rho v_s^2 L}{\epsilon^3 d_p}

    It is also often presented with a term for sphericity, which is multiplied
    by particle diameter everywhere in the equation. However, this is highly
    empirical and better correlations for beds of differently-shaped particles
    exist. To use sphericity in this model, multiply the input particle
    diameter by the sphericity separately.

    In the review of [2]_, it is expressed in terms of a parameter `fp`, shown
    below. This is a convenient means of expressing all forms of pressure drop
    in packed beds correlations in a way that allows for easy comparison.

    .. math::
        f_p = \left(150 + 1.75\left(\frac{Re}{1-\epsilon}\right)\right)
        \frac{(1-\epsilon)^2}{\epsilon^3 Re}

    .. math::
        f_p = \frac{\Delta P d_p}{\rho v_s^2 L}

    .. math::
        Re = \frac{\rho v_s d_p}{\mu}

    Parameters
    ----------
    dp : float
        Particle diameter of spheres [m]
    voidage : float
        Void fraction of bed packing [-]
    vs : float
        Superficial velocity of the fluid (volumetric flow rate/cross-sectional
        area) [m/s]
    rho : float
        Density of the fluid [kg/m^3]
    mu : float
        Viscosity of the fluid, [Pa*s]
    L : float, optional
        Length the fluid flows in the packed bed [m]

    Returns
    -------
    dP : float
        Pressure drop across the bed [Pa]

    Notes
    -----
    The first term in this equation represents laminar losses, and the second,
    turbulent losses. Developed with data from spheres, sand, and pulverized
    coke. Fluids tested were carbon dioxide, nitrogen, methane, and hydrogen.

    Validity range shown in [3]_ is :math:`1 < Re_{Erg} < 2300`.
    Overpredicts pressure drop for :math:`Re_{Erg} > 700`.

    Examples
    --------
    >>> Ergun(dp=8E-4, voidage=0.4, vs=1E-3, rho=1E3, mu=1E-3)
    1338.8671874999995

    References
    ----------
    .. [1] Ergun, S. (1952) "Fluid flow through packed columns",
       Chem. Eng. Prog., 48, 89-94.
    .. [2] Erdim, Esra, Ömer Akgiray, and İbrahim Demir. "A Revisit of Pressure
       Drop-Flow Rate Correlations for Packed Beds of Spheres." Powder
       Technology 283 (October 2015): 488-504. doi:10.1016/j.powtec.2015.06.017.
    .. [3] Jones, D. P., and H. Krier. "Gas Flow Resistance Measurements
       Through Packed Beds at High Reynolds Numbers." Journal of Fluids
       Engineering 105, no. 2 (June 1, 1983): 168-172. doi:10.1115/1.3240959.
    '''
    Re = dp*rho*vs/mu
    fp = (150 + 1.75*(Re/(1-voidage)))*(1-voidage)**2/(voidage**3*Re)
    return fp*rho*vs**2*L/dp
0.000994
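As the Notes section says, non-spherical packings can be approximated by scaling the particle diameter by the sphericity before calling Ergun; a quick sketch with an assumed sphericity of 0.9:

phi = 0.9                      # assumed sphericity of the packing
dp_eff = phi * 8E-4            # effective particle diameter, m
dP = Ergun(dp=dp_eff, voidage=0.4, vs=1E-3, rho=1E3, mu=1E-3)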
def _calc_T_var(self,X) -> int: """Calculate the number of samples, T, from the shape of X""" shape = X.shape tensor_rank: int = len(shape) if tensor_rank == 0: return 1 if tensor_rank == 1: return shape[0] if tensor_rank == 2: if shape[1] > 1: raise ValueError('Initial value of a variable must have dimension T*1.') return shape[0]
0.008989
def bio_write(self, buf): """ If the Connection was created with a memory BIO, this method can be used to add bytes to the read end of that memory BIO. The Connection can then read the bytes (for example, in response to a call to :meth:`recv`). :param buf: The string to put into the memory BIO. :return: The number of bytes written """ buf = _text_to_bytes_and_warn("buf", buf) if self._into_ssl is None: raise TypeError("Connection sock was not None") result = _lib.BIO_write(self._into_ssl, buf, len(buf)) if result <= 0: self._handle_bio_errors(self._into_ssl, result) return result
0.002786
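A minimal sketch of the memory-BIO driving pattern described above; the TLS method and buffer size are illustrative, and the transport that carries `outgoing` is left to the application:

from OpenSSL import SSL

ctx = SSL.Context(SSL.TLSv1_2_METHOD)
conn = SSL.Connection(ctx, None)        # sock=None -> memory BIOs
conn.set_connect_state()
try:
    conn.do_handshake()                 # raises WantReadError until fed data
except SSL.WantReadError:
    outgoing = conn.bio_read(4096)      # e.g. the ClientHello to ship out
    # ...send `outgoing` over your own transport, then feed the reply back:
    # conn.bio_write(reply_from_peer)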