text: string, lengths 78 to 104k
score: float64, values 0 to 0.18
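The two columns describe each record: a Python snippet in text and a quality score in score. A minimal sketch of reading rows of this shape, assuming the records have been exported to a JSON-lines file named snippets.jsonl (the file name and export format are assumptions, not given here):

import json

# Load one {"text": ..., "score": ...} object per line from the assumed export.
rows = []
with open("snippets.jsonl", encoding="utf-8") as fh:
    for line in fh:
        if line.strip():
            rows.append(json.loads(line))

# Scores fall in the 0 to 0.18 range noted above; inspect the highest-scoring snippets.
top = sorted(rows, key=lambda r: r["score"], reverse=True)[:10]
for r in top:
    print(round(r["score"], 4), r["text"][:60])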
def boundingRect( self ): """ Determines the bounding rectangle for this node. :return <QRectF> """ rect = super(XNode, self).boundingRect() pad = self.highlightPadding() x = rect.x() - pad / 2.0 y = rect.y() - pad / 2.0 w = rect.width() + pad h = rect.height() + pad return QRectF( x, y, w, h )
0.021739
def get_language_from_abbr(self, abbr): """Get language full name from abbreviation.""" for language in self.user_data.languages: if language['language'] == abbr: return language['language_string'] return None
0.007663
def clean_path(p): """ Clean a path by expanding user and environment variables and ensuring absolute path. """ p = os.path.expanduser(p) p = os.path.expandvars(p) p = os.path.abspath(p) return p
0.004405
def fetch(self, callback): """ Perform a full synchronization flow. .. code-block:: python :linenos: >>> client = basecrm.Client(access_token='<YOUR_PERSONAL_ACCESS_TOKEN>') >>> sync = basecrm.Sync(client=client, device_uuid='<YOUR_DEVICES_UUID>') >>> sync.fetch(lambda meta, data: basecrm.Sync.ACK) :param callback: Callback that will be called for every item in a queue. Takes two input arguments: synchronization meta data and associated data. It must return either ack or nack. """ # Set up a new synchronization session for a given device's UUID session = self.client.sync.start(self.device_uuid) # Check if there is anything to synchronize if session is None or 'id' not in session: return # Drain the main queue until there is no more data (empty array) while True: # Fetch the main queue queue_items = self.client.sync.fetch(self.device_uuid, session['id']) # nothing more to synchronize ? if not queue_items: break # let client know about both data and meta ack_keys = [] for item in queue_items: if callback(item['meta'], item['data']): ack_keys.append(item['meta']['sync']['ack_key']) # As we fetch new data, we need to send acknowledgement keys # if any .. if ack_keys: self.client.sync.ack(self.device_uuid, ack_keys)
0.004372
def sphere_volume(R, n): """Return the volume of a sphere in an arbitrary number of dimensions. Parameters ---------- R: array-like Radius. n: array-like The number of dimensions of the space in which the sphere lives. Returns ------- V: array-like Volume. """ return ((np.pi ** (n / 2.0)) / scipy.special.gamma(n / 2.0 + 1)) * R ** n
0.0025
def generate_menu(): """Generate a new list of menus.""" root_menu = Menu(list(copy.deepcopy(settings.WAFER_MENUS))) for dynamic_menu_func in settings.WAFER_DYNAMIC_MENUS: dynamic_menu_func = maybe_obj(dynamic_menu_func) dynamic_menu_func(root_menu) return root_menu
0.003356
def get_graph(graph=None, *, _limit=(), _print=()): """ Extracts a list of cafes with one euro in Paris, renames the name, address and zipcode fields, reorders the fields and writes them to json and csv files. """ graph = graph or bonobo.Graph() producer = ( graph.get_cursor() >> ODSReader(dataset="liste-des-cafes-a-un-euro", netloc="opendata.paris.fr") >> PartialGraph(*_limit) >> bonobo.UnpackItems(0) >> bonobo.Rename(name="nom_du_cafe", address="adresse", zipcode="arrondissement") >> bonobo.Format(city="Paris", country="France") >> bonobo.OrderFields(["name", "address", "zipcode", "city", "country", "geometry", "geoloc"]) >> PartialGraph(*_print) ) # Comma separated values. graph.get_cursor(producer.output) >> bonobo.CsvWriter( "coffeeshops.csv", fields=["name", "address", "zipcode", "city"], delimiter="," ) # Standard JSON graph.get_cursor(producer.output) >> bonobo.JsonWriter(path="coffeeshops.json") # Line-delimited JSON graph.get_cursor(producer.output) >> bonobo.LdjsonWriter(path="coffeeshops.ldjson") return graph
0.006843
def _refresh_state(self): """ Refresh the job info. """ self._info = self._api.projects().jobs().get(name=self._name).execute() self._fatal_error = self._info.get('errorMessage', None) state = str(self._info.get('state')) self._is_complete = (state == 'SUCCEEDED' or state == 'FAILED')
0.003279
def _validate_fromutc_inputs(f): """ The CPython version of ``fromutc`` checks that the input is a ``datetime`` object and that ``self`` is attached as its ``tzinfo``. """ @wraps(f) def fromutc(self, dt): if not isinstance(dt, datetime): raise TypeError("fromutc() requires a datetime argument") if dt.tzinfo is not self: raise ValueError("dt.tzinfo is not self") return f(self, dt) return fromutc
0.002105
def mod_run_check(cmd_kwargs, onlyif, unless, creates): ''' Execute the onlyif and unless logic. Return a result dict if: * onlyif failed (onlyif != 0) * unless succeeded (unless == 0) else return True ''' # never use VT for onlyif/unless executions because this will lead # to quote problems cmd_kwargs = copy.deepcopy(cmd_kwargs) cmd_kwargs['use_vt'] = False cmd_kwargs['bg'] = False if onlyif is not None: if isinstance(onlyif, six.string_types): cmd = __salt__['cmd.retcode'](onlyif, ignore_retcode=True, python_shell=True, **cmd_kwargs) log.debug('Last command return code: %s', cmd) if cmd != 0: return {'comment': 'onlyif condition is false', 'skip_watch': True, 'result': True} elif isinstance(onlyif, list): for entry in onlyif: cmd = __salt__['cmd.retcode'](entry, ignore_retcode=True, python_shell=True, **cmd_kwargs) log.debug('Last command \'%s\' return code: %s', entry, cmd) if cmd != 0: return {'comment': 'onlyif condition is false: {0}'.format(entry), 'skip_watch': True, 'result': True} elif not isinstance(onlyif, six.string_types): if not onlyif: log.debug('Command not run: onlyif did not evaluate to string_type') return {'comment': 'onlyif condition is false', 'skip_watch': True, 'result': True} if unless is not None: if isinstance(unless, six.string_types): cmd = __salt__['cmd.retcode'](unless, ignore_retcode=True, python_shell=True, **cmd_kwargs) log.debug('Last command return code: %s', cmd) if cmd == 0: return {'comment': 'unless condition is true', 'skip_watch': True, 'result': True} elif isinstance(unless, list): cmd = [] for entry in unless: cmd.append(__salt__['cmd.retcode'](entry, ignore_retcode=True, python_shell=True, **cmd_kwargs)) log.debug('Last command return code: %s', cmd) if all([c == 0 for c in cmd]): return {'comment': 'unless condition is true', 'skip_watch': True, 'result': True} elif not isinstance(unless, six.string_types): if unless: log.debug('Command not run: unless did not evaluate to string_type') return {'comment': 'unless condition is true', 'skip_watch': True, 'result': True} if isinstance(creates, six.string_types) and os.path.exists(creates): return {'comment': '{0} exists'.format(creates), 'result': True} elif isinstance(creates, list) and all([ os.path.exists(path) for path in creates ]): return {'comment': 'All files in creates exist', 'result': True} # No reason to stop, return True return True
0.002488
def createMenuItem( self, title ): """ Creates a new menu item with the given title. :param title | <str> :return <QTreeWidgetItem> """ item = QTreeWidgetItem([title]) ico = projexui.resources.find('img/folder.png') item.setIcon(0, QIcon(ico)) item.setSizeHint(0, QSize(120, 20)) item.setData(0, Qt.UserRole, wrapVariant('menu')) return item
0.016878
def data(place): """get forecast data.""" lat, lon = place url = "https://api.forecast.io/forecast/%s/%s,%s?solar" % (APIKEY, lat, lon) w_data = json.loads(urllib2.urlopen(url).read()) return w_data
0.003559
def concatenate_attributes(attributes): '''Concatenate InstanceAttribute to return a bigger one.''' # We get a template/ tpl = attributes[0] attr = InstanceAttribute(tpl.name, tpl.shape, tpl.dtype, tpl.dim, alias=None) # Special case, not a single array has size bigger than 0 if all(a.size == 0 for a in attributes): return attr else: attr.value = np.concatenate([a.value for a in attributes if a.size > 0], axis=0) return attr
0.00969
def generate(env): """Add Builders and construction variables for Microsoft Visual Studio project files to an Environment.""" try: env['BUILDERS']['MSVSProject'] except KeyError: env['BUILDERS']['MSVSProject'] = projectBuilder try: env['BUILDERS']['MSVSSolution'] except KeyError: env['BUILDERS']['MSVSSolution'] = solutionBuilder env['MSVSPROJECTCOM'] = projectAction env['MSVSSOLUTIONCOM'] = solutionAction if SCons.Script.call_stack: # XXX Need to find a way to abstract this; the build engine # shouldn't depend on anything in SCons.Script. env['MSVSSCONSCRIPT'] = SCons.Script.call_stack[0].sconscript else: global default_MSVS_SConscript if default_MSVS_SConscript is None: default_MSVS_SConscript = env.File('SConstruct') env['MSVSSCONSCRIPT'] = default_MSVS_SConscript env['MSVSSCONS'] = '"%s" -c "%s"' % (python_executable, getExecScriptMain(env)) env['MSVSSCONSFLAGS'] = '-C "${MSVSSCONSCRIPT.dir.get_abspath()}" -f ${MSVSSCONSCRIPT.name}' env['MSVSSCONSCOM'] = '$MSVSSCONS $MSVSSCONSFLAGS' env['MSVSBUILDCOM'] = '$MSVSSCONSCOM "$MSVSBUILDTARGET"' env['MSVSREBUILDCOM'] = '$MSVSSCONSCOM "$MSVSBUILDTARGET"' env['MSVSCLEANCOM'] = '$MSVSSCONSCOM -c "$MSVSBUILDTARGET"' # Set-up ms tools paths for default version msvc_setup_env_once(env) if 'MSVS_VERSION' in env: version_num, suite = msvs_parse_version(env['MSVS_VERSION']) else: (version_num, suite) = (7.0, None) # guess at a default if 'MSVS' not in env: env['MSVS'] = {} if (version_num < 7.0): env['MSVS']['PROJECTSUFFIX'] = '.dsp' env['MSVS']['SOLUTIONSUFFIX'] = '.dsw' elif (version_num < 10.0): env['MSVS']['PROJECTSUFFIX'] = '.vcproj' env['MSVS']['SOLUTIONSUFFIX'] = '.sln' else: env['MSVS']['PROJECTSUFFIX'] = '.vcxproj' env['MSVS']['SOLUTIONSUFFIX'] = '.sln' if (version_num >= 10.0): env['MSVSENCODING'] = 'utf-8' else: env['MSVSENCODING'] = 'Windows-1252' env['GET_MSVSPROJECTSUFFIX'] = GetMSVSProjectSuffix env['GET_MSVSSOLUTIONSUFFIX'] = GetMSVSSolutionSuffix env['MSVSPROJECTSUFFIX'] = '${GET_MSVSPROJECTSUFFIX}' env['MSVSSOLUTIONSUFFIX'] = '${GET_MSVSSOLUTIONSUFFIX}' env['SCONS_HOME'] = os.environ.get('SCONS_HOME')
0.004562
def _compute_vectorized(self, *args): """Compare attributes (vectorized) Parameters ---------- *args : pandas.Series pandas.Series' as arguments. Returns ------- pandas.Series, pandas.DataFrame, numpy.ndarray The result of comparing record pairs (the features). Can be a tuple with multiple pandas.Series, pandas.DataFrame, numpy.ndarray objects. """ if self._f_compare_vectorized: return self._f_compare_vectorized( *(args + self.args), **self.kwargs) else: raise NotImplementedError()
0.003044
def network_pf(network, snapshots=None, skip_pre=False, x_tol=1e-6, use_seed=False): """ Full non-linear power flow for generic network. Parameters ---------- snapshots : list-like|single snapshot A subset or an element of network.snapshots on which to run the power flow, defaults to network.snapshots skip_pre: bool, default False Skip the preliminary steps of computing topology, calculating dependent values and finding bus controls. x_tol: float Tolerance for Newton-Raphson power flow. use_seed : bool, default False Use a seed for the initial guess for the Newton-Raphson algorithm. Returns ------- Dictionary with keys 'n_iter', 'converged', 'error' and dataframe values indicating number of iterations, convergence status, and iteration error for each snapshot (rows) and sub_network (columns) """ return _network_prepare_and_run_pf(network, snapshots, skip_pre, linear=False, x_tol=x_tol, use_seed=use_seed)
0.003918
def get_json_argument(self, name, default=None): """Find and return the argument with key 'name' from JSON request data. Similar to Tornado's get_argument() method. :param str name: The name of the json key you want to get the value for :param bool default: The default value if nothing is found :returns: value of the argument name request """ if default is None: default = self._ARG_DEFAULT if not self.request.arguments: self.load_json() if name not in self.request.arguments: if default is self._ARG_DEFAULT: msg = "Missing argument '%s'" % name self.logger.debug(msg) self.raise_error(400, msg) self.logger.debug("Returning default argument %s, as we couldn't " "find '%s' in %s" % (default, name, self.request.arguments)) return default arg = self.request.arguments[name] return arg
0.001874
def _to_dsn(hosts): """Convert a host URI into a dsn for aiopg. >>> _to_dsn('aiopg://myhostname:4242/mydb') 'postgres://crate@myhostname:4242/mydb' >>> _to_dsn('aiopg://myhostname:4242') 'postgres://crate@myhostname:4242/doc' >>> _to_dsn('aiopg://hoschi:pw@myhostname:4242/doc?sslmode=require') 'postgres://hoschi:pw@myhostname:4242/doc?sslmode=require' >>> _to_dsn('aiopg://myhostname') 'postgres://crate@myhostname:5432/doc' """ p = urlparse(hosts) try: user_and_pw, netloc = p.netloc.split('@', maxsplit=1) except ValueError: netloc = p.netloc user_and_pw = 'crate' try: host, port = netloc.split(':', maxsplit=1) except ValueError: host = netloc port = 5432 dbname = p.path[1:] if p.path else 'doc' dsn = f'postgres://{user_and_pw}@{host}:{port}/{dbname}' if p.query: dsn += '?' + '&'.join(k + '=' + v[0] for k, v in parse_qs(p.query).items()) return dsn
0.002012
def mim2reg(mimfile, regfile): """ Convert a MIMAS region (.mim) file into a DS9 region (.reg) file. Parameters ---------- mimfile : str Input file in MIMAS format. regfile : str Output file. """ region = Region.load(mimfile) region.write_reg(regfile) logging.info("Converted {0} -> {1}".format(mimfile, regfile)) return
0.002611
def load_extra_data(cls, data): """Loads extra JSON configuration parameters from a data buffer. The data buffer must represent a JSON object. Args: data: str, the buffer to load the JSON data from. """ try: cls._extra_config.update(json.loads(data)) except ValueError as exception: sys.stderr.write('Could not convert to JSON. {0:s}'.format(exception)) exit(-1)
0.009804
def search_authors(self, query): query = query.replace(" ", "+") """ FIXME: Don't create a process to do this! """ p = subprocess.Popen("curl -H 'Accept: application/orcid+json' \ 'http://pub.sandbox-1.orcid.org/search/orcid-bio?q=" + query + "&start=0&rows=10'", shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) jsonResults = "" for line in p.stdout.readlines(): jsonResults = line self.authorsDict = json.loads(jsonResults)
0.004545
def _retry_storage_check(exception): """Return True if we should retry, False otherwise.""" now = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f') print_error( '%s: Exception %s: %s' % (now, type(exception).__name__, str(exception))) return isinstance(exception, oauth2client.client.AccessTokenRefreshError)
0.015625
def make_meta_dict_consistent(self): """ Remove the possibility of the main keys being undefined. """ if self.meta_dict is None: self.meta_dict = {} if "galaxy_info" not in self.meta_dict: self.meta_dict["galaxy_info"] = {} if "dependencies" not in self.meta_dict: self.meta_dict["dependencies"] = [] if "ansigenome_info" not in self.meta_dict: self.meta_dict["ansigenome_info"] = {}
0.004082
def append(self,text): """Add a text (or speech) to the document: Example 1:: doc.append(folia.Text) Example 2:: doc.append( folia.Text(doc, id='example.text') ) Example 3:: doc.append(folia.Speech) """ if text is Text: text = Text(self, id=self.id + '.text.' + str(len(self.data)+1) ) elif text is Speech: text = Speech(self, id=self.id + '.speech.' + str(len(self.data)+1) ) #pylint: disable=redefined-variable-type else: assert isinstance(text, Text) or isinstance(text, Speech) self.data.append(text) return text
0.011869
def parse(self): ''' read and parse a set of data read from the console. after the data is parsed it is available in the fields variable. ''' fields = self._get_loop_fields() fields['Archive'] = self._get_new_archive_fields() self._calc_derived_fields(fields) # set the fields variable to the values in the dict self.fields = fields
0.004902
def int_check(*args, func=None): """Check if arguments are integral numbers.""" func = func or inspect.stack()[2][3] for var in args: if not isinstance(var, numbers.Integral): name = type(var).__name__ raise ComplexError( f'Function {func} expected integral number, got {name} instead.')
0.005882
def is_numeric(value, minimum = None, maximum = None, **kwargs): """Indicate whether ``value`` is a numeric value. :param value: The value to evaluate. :param minimum: If supplied, will make sure that ``value`` is greater than or equal to this value. :type minimum: numeric :param maximum: If supplied, will make sure that ``value`` is less than or equal to this value. :type maximum: numeric :returns: ``True`` if ``value`` is valid, ``False`` if it is not. :rtype: :class:`bool <python:bool>` :raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates keyword parameters passed to the underlying validator """ try: value = validators.numeric(value, minimum = minimum, maximum = maximum, **kwargs) except SyntaxError as error: raise error except Exception: return False return True
0.010329
def find_peaks(sig): """ Find hard peaks and soft peaks in a signal, defined as follows: - Hard peak: a peak that is either /\ or \/ - Soft peak: a peak that is either /-*\ or \-*/ In this case we define the middle as the peak Parameters ---------- sig : np array The 1d signal array Returns ------- hard_peaks : numpy array Array containing the indices of the hard peaks: soft_peaks : numpy array Array containing the indices of the soft peaks """ if len(sig) == 0: return np.empty([0]), np.empty([0]) tmp = sig[1:] tmp = np.append(tmp, [sig[-1]]) tmp = sig - tmp tmp[np.where(tmp>0)] = 1 tmp[np.where(tmp==0)] = 0 tmp[np.where(tmp<0)] = -1 tmp2 = tmp[1:] tmp2 = np.append(tmp2, [0]) tmp = tmp-tmp2 hard_peaks = np.where(np.logical_or(tmp==-2, tmp==+2))[0] + 1 soft_peaks = [] for iv in np.where(np.logical_or(tmp==-1,tmp==+1))[0]: t = tmp[iv] i = iv+1 while True: if i==len(tmp) or tmp[i] == -t or tmp[i] == -2 or tmp[i] == 2: break if tmp[i] == t: soft_peaks.append(int(iv + (i - iv)/2)) break i += 1 soft_peaks = np.array(soft_peaks, dtype='int') + 1 return hard_peaks, soft_peaks
0.010401
def get_batch(self, batch_size, next_states=False): """ Samples a batch of the specified size according to priority. Args: batch_size: The batch size next_states: A boolean flag indicating whether 'next_states' values should be included Returns: A dict containing states, actions, rewards, terminals, internal states (and next states) """ if batch_size > len(self.observations): raise TensorForceError( "Requested batch size is larger than observations in memory: increase config.first_update.") # Init empty states states = {name: np.zeros((batch_size,) + tuple(state['shape']), dtype=util.np_dtype( state['type'])) for name, state in self.states_spec.items()} internals = [np.zeros((batch_size,) + shape, dtype) for shape, dtype in self.internals_spec] actions = {name: np.zeros((batch_size,) + tuple(action['shape']), dtype=util.np_dtype(action['type'])) for name, action in self.actions_spec.items()} terminal = np.zeros((batch_size,), dtype=util.np_dtype('bool')) reward = np.zeros((batch_size,), dtype=util.np_dtype('float')) if next_states: next_states = {name: np.zeros((batch_size,) + tuple(state['shape']), dtype=util.np_dtype( state['type'])) for name, state in self.states_spec.items()} next_internals = [np.zeros((batch_size,) + shape, dtype) for shape, dtype in self.internals_spec] # Start with unseen observations unseen_indices = list(xrange( self.none_priority_index + self.observations._capacity - 1, len(self.observations) + self.observations._capacity - 1) ) self.batch_indices = unseen_indices[:batch_size] # Get remaining observations using weighted sampling remaining = batch_size - len(self.batch_indices) if remaining: samples = self.observations.sample_minibatch(remaining) sample_indices = [i for i, o in samples] self.batch_indices += sample_indices # Shuffle np.random.shuffle(self.batch_indices) # Collect observations for n, index in enumerate(self.batch_indices): observation, _ = self.observations._memory[index] for name, state in states.items(): state[n] = observation[0][name] for k, internal in enumerate(internals): internal[n] = observation[1][k] for name, action in actions.items(): action[n] = observation[2][name] terminal[n] = observation[3] reward[n] = observation[4] if next_states: for name, next_state in next_states.items(): next_state[n] = observation[5][name] for k, next_internal in enumerate(next_internals): next_internal[n] = observation[6][k] if next_states: return dict( states=states, internals=internals, actions=actions, terminal=terminal, reward=reward, next_states=next_states, next_internals=next_internals ) else: return dict( states=states, internals=internals, actions=actions, terminal=terminal, reward=reward )
0.00225
def get_ot_study_info_from_treebase_nexml(src=None, nexml_content=None, encoding=u'utf8', nexson_syntax_version=DEFAULT_NEXSON_VERSION, merge_blocks=True, sort_arbitrary=False): """Normalize treebase-specific metadata into the locations where open tree of life software that expects it. See get_ot_study_info_from_nexml for the explanation of the src, nexml_content, encoding, and nexson_syntax_version arguments If merge_blocks is True then peyotl.manip.merge_otus_and_trees Actions to "normalize" TreeBase objects to ot Nexson 1. the meta id for any meta item that has only a value and an id 2. throw away rdfs:isDefinedBy 3. otu @label -> otu ^ot:originalLabel 4. ^tb:indentifier.taxon, ^tb:indentifier.taxonVariant and some skos:closeMatch fields to ^ot:taxonLink 5. remove "@xml:base" 6. coerce edge lengths to native types """ # pylint: disable=R0915 raw = get_ot_study_info_from_nexml(src=src, nexml_content=nexml_content, encoding=encoding, nexson_syntax_version=BY_ID_HONEY_BADGERFISH) nexml = raw['nexml'] SKOS_ALT_LABEL = '^skos:altLabel' SKOS_CLOSE_MATCH = '^skos:closeMatch' strippable_pre = { 'http://www.ubio.org/authority/metadata.php?lsid=urn:lsid:ubio.org:namebank:': '@ubio', 'http://purl.uniprot.org/taxonomy/': '@uniprot', } moveable2taxon_link = {"^tb:identifier.taxon": '@tb:identifier.taxon', "^tb:identifier.taxonVariant": '@tb:identifier.taxonVariant', } to_del = ['^rdfs:isDefinedBy', '@xml:base'] for tag in to_del: if tag in nexml: del nexml[tag] _simplify_all_meta_by_id_del(nexml) _otu2label = {} prefix_map = {} # compose dataDeposit nexid = nexml['@id'] tb_url = 'http://purl.org/phylo/treebase/phylows/study/TB2:' + nexid nexml['^ot:dataDeposit'] = {'@href': tb_url} # compose dataDeposit bd = nexml.get("^dcterms:bibliographicCitation") if bd: nexml['^ot:studyPublicationReference'] = bd doi = nexml.get('^prism:doi') if doi: doi = doi2url(doi) nexml['^ot:studyPublication'] = {'@href': doi} year = nexml.get('^prism:publicationDate') if year: try: nexml['^ot:studyYear'] = int(year) except: pass # for otus in nexml['otusById'].values(): for tag in to_del: if tag in otus: del otus[tag] _simplify_all_meta_by_id_del(otus) for oid, otu in otus['otuById'].items(): for tag in to_del: if tag in otu: del otu[tag] _simplify_all_meta_by_id_del(otu) label = otu['@label'] _otu2label[oid] = label otu['^ot:originalLabel'] = label del otu['@label'] al = otu.get(SKOS_ALT_LABEL) if al is not None: if otu.get('^ot:altLabel') is None: otu['^ot:altLabel'] = al del otu[SKOS_ALT_LABEL] tl = {} scm = otu.get(SKOS_CLOSE_MATCH) # _LOG.debug('scm = ' + str(scm)) if scm: if isinstance(scm, dict): h = scm.get('@href') if h: try: for p, t in strippable_pre.items(): if h.startswith(p): ident = h[len(p):] tl[t] = ident del otu[SKOS_CLOSE_MATCH] prefix_map[t] = p except: pass else: nm = [] try: for el in scm: h = el.get('@href') if h: found = False for p, t in strippable_pre.items(): if h.startswith(p): ident = h[len(p):] tl[t] = ident found = True prefix_map[t] = p break if not found: nm.append(el) except: pass if len(nm) < len(scm): if len(nm) > 1: otu[SKOS_CLOSE_MATCH] = nm elif len(nm) == 1: otu[SKOS_CLOSE_MATCH] = nm[0] else: del otu[SKOS_CLOSE_MATCH] # _LOG.debug('tl =' + str(tl)) for k, t in moveable2taxon_link.items(): al = otu.get(k) if al: tl[t] = al del otu[k] if tl: otu['^ot:taxonLink'] = tl for trees in nexml['treesById'].values(): for tag in to_del: if tag in trees: del trees[tag] _simplify_all_meta_by_id_del(trees) for tree in trees['treeById'].values(): for tag in to_del: if tag in tree: del tree[tag] 
_simplify_all_meta_by_id_del(tree) tt = tree.get('@xsi:type', 'nex:FloatTree') if tt.lower() == 'nex:inttree': e_len_coerce = int else: e_len_coerce = float for edge_d in tree['edgeBySourceId'].values(): for edge in edge_d.values(): try: x = e_len_coerce(edge['@length']) edge['@length'] = x except: pass for node in tree['nodeById'].values(): nl = node.get('@label') if nl: no = node.get('@otu') if no and _otu2label[no] == nl: del node['@label'] if prefix_map: nexml['^ot:taxonLinkPrefixes'] = prefix_map if merge_blocks: from peyotl.manip import merge_otus_and_trees merge_otus_and_trees(raw) if nexson_syntax_version != BY_ID_HONEY_BADGERFISH: convert_nexson_format(raw, nexson_syntax_version, current_format=BY_ID_HONEY_BADGERFISH, sort_arbitrary=sort_arbitrary) elif sort_arbitrary: sort_arbitrarily_ordered_nexson(raw) return raw
0.001411
def to_array(self): """ Serializes this InlineQueryResultCachedPhoto to a dictionary. :return: dictionary representation of this object. :rtype: dict """ array = super(InlineQueryResultCachedPhoto, self).to_array() array['type'] = u(self.type) # py2: type unicode, py3: type str array['id'] = u(self.id) # py2: type unicode, py3: type str array['photo_file_id'] = u(self.photo_file_id) # py2: type unicode, py3: type str if self.title is not None: array['title'] = u(self.title) # py2: type unicode, py3: type str if self.description is not None: array['description'] = u(self.description) # py2: type unicode, py3: type str if self.caption is not None: array['caption'] = u(self.caption) # py2: type unicode, py3: type str if self.parse_mode is not None: array['parse_mode'] = u(self.parse_mode) # py2: type unicode, py3: type str if self.reply_markup is not None: array['reply_markup'] = self.reply_markup.to_array() # type InlineKeyboardMarkup if self.input_message_content is not None: array['input_message_content'] = self.input_message_content.to_array() # type InputMessageContent return array
0.006065
def create_loadfile(entities, f): """Create payload and save to file.""" with open(f, 'w') as out: out.write(Entity.create_payload(entities))
0.011834
def idxmin(self, axis=0, skipna=True): """ Return index of first occurrence of minimum over requested axis. NA/null values are excluded. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 0 or 'index' for row-wise, 1 or 'columns' for column-wise skipna : boolean, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. Returns ------- Series Indexes of minima along the specified axis. Raises ------ ValueError * If the row/column is empty See Also -------- Series.idxmin Notes ----- This method is the DataFrame version of ``ndarray.argmin``. """ axis = self._get_axis_number(axis) indices = nanops.nanargmin(self.values, axis=axis, skipna=skipna) index = self._get_axis(axis) result = [index[i] if i >= 0 else np.nan for i in indices] return Series(result, index=self._get_agg_axis(axis))
0.0018
def normalize_ip(ip): """ Transform the address into a standard, fixed-length form, such as: 1234:0:01:02:: -> 1234:0000:0001:0002:0000:0000:0000:0000 1234::A -> 1234:0000:0000:0000:0000:0000:0000:000a :type ip: string :param ip: An IP address. :rtype: string :return: The normalized IP. """ theip = ip if theip.startswith('::'): theip = '0' + theip if theip.endswith('::'): theip += '0' segments = theip.split(':') if len(segments) == 1: raise ValueError('no colons in ipv6 address: ' + repr(ip)) fill = 8 - len(segments) if fill < 0: raise ValueError('ipv6 address has too many segments: ' + repr(ip)) result = [] for segment in segments: if segment == '': if fill == 0: raise ValueError('unexpected double colon: ' + repr(ip)) for n in range(fill + 1): result.append('0000') fill = 0 else: try: int(segment, 16) except ValueError: raise ValueError('invalid hex value in ' + repr(ip)) result.append(segment.rjust(4, '0')) return ':'.join(result).lower()
0.000815
def read_from_memory_region(self, *, region_name: str): """ Reads from a memory region named region_name on the QAM. This is a shim over the eventual API and only can return memory from a region named "ro" of type ``BIT``. :param region_name: The string naming the declared memory region. :return: A list of values of the appropriate type. """ warnings.warn("pyquil.api._qam.QAM.read_from_memory_region is deprecated, please use " "pyquil.api._qam.QAM.read_memory instead.", DeprecationWarning) assert self.status == 'done' if region_name != "ro": raise QAMError("Currently only allowed to read measurement data from ro.") if self._bitstrings is None: raise QAMError("Bitstrings have not yet been populated. Something has gone wrong.") return self._bitstrings
0.006479
def fit(self, Xs=None, ys=None, Xt=None, yt=None): """Build a coupling matrix from source and target sets of samples (Xs, ys) and (Xt, yt) Parameters ---------- Xs : array-like, shape (n_source_samples, n_features) The training input samples. ys : array-like, shape (n_source_samples,) The class labels Xt : array-like, shape (n_target_samples, n_features) The training input samples. yt : array-like, shape (n_target_samples,) The class labels. If some target samples are unlabeled, fill the yt's elements with -1. Warning: Note that, due to this convention -1 cannot be used as a class label Returns ------- self : object Returns self. """ self.mu_s = self.distribution_estimation(Xs) self.mu_t = self.distribution_estimation(Xt) # coupling estimation returned_ = OT_mapping_linear(Xs, Xt, reg=self.reg, ws=self.mu_s.reshape((-1, 1)), wt=self.mu_t.reshape((-1, 1)), bias=self.bias, log=self.log) # deal with the value of log if self.log: self.A_, self.B_, self.log_ = returned_ else: self.A_, self.B_, = returned_ self.log_ = dict() # re compute inverse mapping self.A1_ = linalg.inv(self.A_) self.B1_ = -self.B_.dot(self.A1_) return self
0.001267
def main(): """Executed when script is run as-is.""" # magic_files = {} for filename in locate_files(ROOT_DIR): print("Processing %s" % filename) with open(filename, "rt") as f: tokens = list(tokenize.generate_tokens(f.readline)) text1 = tokenize.untokenize(tokens) ntokens = normalize_tokens(tokens) text2 = tokenize.untokenize(ntokens) assert text1 == text2
0.002237
def http_log_resp(resp, body): """ When pyrax.get_http_debug() is True, outputs the response received from the API request. """ if not pyrax.get_http_debug(): return log = logging.getLogger("pyrax") log.debug("RESP: %s\n%s", resp, resp.headers) if body: log.debug("RESP BODY: %s", body)
0.002994
def __getDependenciesRecursiveWithProvider(self, available_components = None, search_dirs = None, target = None, traverse_links = False, update_installed = False, provider = None, test = False, _processed = None ): ''' Get installed components using "provider" to find (and possibly install) components. This function is called with different provider functions in order to retrieve a list of all of the dependencies, or install all dependencies. Returns ======= (components, errors) components: dictionary of name:Component errors: sequence of errors Parameters ========== available_components: None (default) or a dictionary of name:component. This is searched before searching directories or fetching remote components search_dirs: None (default), or sequence of directories to search for already installed, (but not yet loaded) components. Used so that manually installed or linked components higher up the dependency tree are found by their users lower down. These directories are searched in order, and finally the current directory is checked. target: None (default), or a Target object. If specified the target name and it's similarTo list will be used in resolving dependencies. If None, then only target-independent dependencies will be installed traverse_links: False (default) or True: whether to recurse into linked dependencies. You normally want to set this to "True" when getting a list of dependencies, and False when installing them (unless the user has explicitly asked dependencies to be installed in linked components). provider: None (default) or function: provider( dependency_spec, available_components, search_dirs, working_directory, update_if_installed ) test: True, False, 'toplevel': should test-only dependencies be included (yes, no, or only at this level, not recursively) ''' def recursionFilter(c): if not c: logger.debug('do not recurse into failed component') # don't recurse into failed components return False if c.getName() in _processed: logger.debug('do not recurse into already processed component: %s' % c) return False if c.installedLinked() and not traverse_links: return False return True available_components = self.ensureOrderedDict(available_components) if search_dirs is None: search_dirs = [] if _processed is None: _processed = set() assert(test in [True, False, 'toplevel']) search_dirs.append(self.modulesPath()) logger.debug('process %s\nsearch dirs:%s' % (self.getName(), search_dirs)) if self.isTestDependency(): logger.debug("won't provide test dependencies recursively for test dependency %s", self.getName()) test = False components, errors = self.__getDependenciesWithProvider( available_components = available_components, search_dirs = search_dirs, update_installed = update_installed, target = target, provider = provider, test = test ) _processed.add(self.getName()) if errors: errors = ['Failed to satisfy dependencies of %s:' % self.path] + errors need_recursion = [x for x in filter(recursionFilter, components.values())] available_components.update(components) logger.debug('processed %s\nneed recursion: %s\navailable:%s\nsearch dirs:%s' % (self.getName(), need_recursion, available_components, search_dirs)) if test == 'toplevel': test = False # NB: can't perform this step in parallel, since the available # components list must be updated in order for c in need_recursion: dep_components, dep_errors = c.__getDependenciesRecursiveWithProvider( available_components = available_components, search_dirs = search_dirs, target = target, traverse_links = traverse_links, update_installed 
= update_installed, provider = provider, test = test, _processed = _processed ) available_components.update(dep_components) components.update(dep_components) errors += dep_errors return (components, errors)
0.012976
def decompress_decoder_1d(x, hparams, name=None): """Decoder that decompresses 1-D inputs by 2**num_compress_steps. Args: x: Tensor of shape [batch, compress_length, channels]. hparams: HParams. name: string, variable scope. Returns: Tensor of shape [batch, length, hparams.hidden_size]. """ x = tf.expand_dims(x, axis=2) output = decompress_decoder(x, hparams, strides=(2, 1), kernel=(hparams.kernel_size, 1), name=name) return tf.squeeze(output, axis=2)
0.008666
def get_active_subject_set(self, request): """Get a set containing all subjects for which the current connection has been successfully authenticated.""" # Handle complete certificate in vendor specific extension. if django.conf.settings.DEBUG_GMN: if 'HTTP_VENDOR_INCLUDE_CERTIFICATE' in request.META: request.META[ 'SSL_CLIENT_CERT' ] = self.pem_in_http_header_to_pem_in_string( request.META['HTTP_VENDOR_INCLUDE_CERTIFICATE'] ) # Add subjects from any provided certificate and JWT and store them in # the Django request obj. cert_primary_str, cert_equivalent_set = d1_gmn.app.middleware.session_cert.get_subjects( request ) jwt_subject_list = d1_gmn.app.middleware.session_jwt.validate_jwt_and_get_subject_list( request ) primary_subject_str = cert_primary_str all_subjects_set = ( cert_equivalent_set | {cert_primary_str} | set(jwt_subject_list) ) if len(jwt_subject_list) == 1: jwt_primary_str = jwt_subject_list[0] if jwt_primary_str != cert_primary_str: if cert_primary_str == d1_common.const.SUBJECT_PUBLIC: primary_subject_str = jwt_primary_str else: logging.warning( 'Both a certificate and a JWT were provided and the primary ' 'subjects differ. Using the certificate for primary subject and' 'the JWT as equivalent.' ) logging.info('Primary active subject: {}'.format(primary_subject_str)) logging.info( 'All active subjects: {}'.format(', '.join(sorted(all_subjects_set))) ) # Handle list of subjects in vendor specific extension: if django.conf.settings.DEBUG_GMN: # This is added to any subjects obtained from cert and/or JWT. if 'HTTP_VENDOR_INCLUDE_SUBJECTS' in request.META: request.all_subjects_set.update( request.META['HTTP_VENDOR_INCLUDE_SUBJECTS'].split('\t') ) return primary_subject_str, all_subjects_set
0.003472
def setup_package(): """ Setup the package. """ with open('requirements.txt', 'r') as req_file: install_reqs = req_file.read().split('\n') cmdclass_ = {'antlr': AntlrBuildCommand} cmdclass_.update(versioneer.get_cmdclass()) setup( version=versioneer.get_version(), name='pymoca', maintainer="James Goppert", maintainer_email="[email protected]", description=DOCLINES[0], long_description="\n".join(DOCLINES[2:]), url='https://github.com/pymoca/pymoca', author='James Goppert', author_email='[email protected]', download_url='https://github.com/pymoca/pymoca', license='BSD', classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f], platforms=["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"], install_requires=install_reqs, tests_require=['coverage >= 3.7.1', 'nose >= 1.3.1'], test_suite='nose.collector', python_requires='>=3.5', packages=find_packages("src"), package_dir={"": "src"}, include_package_data=True, cmdclass=cmdclass_ )
0.000862
def stream(self, id, task, type, follow=False, offset=0, origin="start", plain=False): """ This endpoint streams a task's stderr/stdout logs. https://www.nomadproject.io/api/client.html#stream-logs arguments: - id: (str) allocation_id required - task: (str) name of the task inside the allocation to stream logs from - type: (str) Specifies the stream to stream. Either "stderr|stdout" - follow: (bool) default false - offset: (int) default 0 - origin: (str) either start|end, default "start" - plain: (bool) Return just the plain text without framing. default False returns: (str) text raises: - nomad.api.exceptions.BaseNomadException - nomad.api.exceptions.BadRequestNomadException """ params = { "task": task, "type": type, "follow": follow, "offset": offset, "origin": origin, "plain": plain } return self.request(id, params=params, method="get").text
0.005245
def create_widgets(self): """Build basic components of dialog.""" self.bbox = QDialogButtonBox( QDialogButtonBox.Ok | QDialogButtonBox.Cancel) self.idx_ok = self.bbox.button(QDialogButtonBox.Ok) self.idx_cancel = self.bbox.button(QDialogButtonBox.Cancel) self.idx_group = FormMenu([gr['name'] for gr in self.groups]) chan_box = QListWidget() self.idx_chan = chan_box stage_box = QListWidget() stage_box.addItems(STAGE_NAME) stage_box.setSelectionMode(QAbstractItemView.ExtendedSelection) self.idx_stage = stage_box cycle_box = QListWidget() cycle_box.setSelectionMode(QAbstractItemView.ExtendedSelection) self.idx_cycle = cycle_box
0.002621
def agent_pools(self): """Instance depends on the API version: * 2019-02-01: :class:`AgentPoolsOperations<azure.mgmt.containerservice.v2019_02_01.operations.AgentPoolsOperations>` """ api_version = self._get_api_version('agent_pools') if api_version == '2019-02-01': from .v2019_02_01.operations import AgentPoolsOperations as OperationClass else: raise NotImplementedError("APIVersion {} is not available".format(api_version)) return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
0.009231
def post_structure(entry, site): """ A post structure with extensions. """ author = entry.authors.all()[0] return {'title': entry.title, 'description': six.text_type(entry.html_content), 'link': '%s://%s%s' % (PROTOCOL, site.domain, entry.get_absolute_url()), # Basic Extensions 'permaLink': '%s://%s%s' % (PROTOCOL, site.domain, entry.get_absolute_url()), 'categories': [cat.title for cat in entry.categories.all()], 'dateCreated': DateTime(entry.creation_date.isoformat()), 'postid': entry.pk, 'userid': author.get_username(), # Useful Movable Type Extensions 'mt_excerpt': entry.excerpt, 'mt_allow_comments': int(entry.comment_enabled), 'mt_allow_pings': (int(entry.pingback_enabled) or int(entry.trackback_enabled)), 'mt_keywords': entry.tags, # Useful Wordpress Extensions 'wp_author': author.get_username(), 'wp_author_id': author.pk, 'wp_author_display_name': author.__str__(), 'wp_password': entry.password, 'wp_slug': entry.slug, 'sticky': entry.featured}
0.000752
def _get_common_params(self, user_id, attributes): """ Get params which are used same in both conversion and impression events. Args: user_id: ID for user. attributes: Dict representing user attributes and values which need to be recorded. Returns: Dict consisting of parameters common to both impression and conversion events. """ commonParams = {} commonParams[self.EventParams.PROJECT_ID] = self._get_project_id() commonParams[self.EventParams.ACCOUNT_ID] = self._get_account_id() visitor = {} visitor[self.EventParams.END_USER_ID] = user_id visitor[self.EventParams.SNAPSHOTS] = [] commonParams[self.EventParams.USERS] = [] commonParams[self.EventParams.USERS].append(visitor) commonParams[self.EventParams.USERS][0][self.EventParams.ATTRIBUTES] = self._get_attributes(attributes) commonParams[self.EventParams.SOURCE_SDK_TYPE] = 'python-sdk' commonParams[self.EventParams.ENRICH_DECISIONS] = True commonParams[self.EventParams.SOURCE_SDK_VERSION] = version.__version__ commonParams[self.EventParams.ANONYMIZE_IP] = self._get_anonymize_ip() commonParams[self.EventParams.REVISION] = self._get_revision() return commonParams
0.004075
def from_fileobj(cls, fileobj): """A new _WavPackHeader or raises WavPackHeaderError""" header = fileobj.read(32) if len(header) != 32 or not header.startswith(b"wvpk"): raise WavPackHeaderError("not a WavPack header: %r" % header) block_size = cdata.uint_le(header[4:8]) version = cdata.ushort_le(header[8:10]) track_no = ord(header[10:11]) index_no = ord(header[11:12]) samples = cdata.uint_le(header[12:16]) if samples == 2 ** 32 - 1: samples = -1 block_index = cdata.uint_le(header[16:20]) block_samples = cdata.uint_le(header[20:24]) flags = cdata.uint_le(header[24:28]) crc = cdata.uint_le(header[28:32]) return _WavPackHeader(block_size, version, track_no, index_no, samples, block_index, block_samples, flags, crc)
0.002245
def _update_day_dict(self, data_point, day_dict): """ Helper method for :meth:`_print_daily_stats`. Given a data point and the correct day dict, update attribs on the dict with the contents of the data point. :param dict data_point: The data point to add to the day's stats dict. :param dict day_dict: A stats-tracking dict for a 24 hour period. """ for topic in ['Bounces', 'Complaints', 'DeliveryAttempts', 'Rejects']: day_dict[topic] = day_dict.get(topic, 0) + int(data_point[topic])
0.005254
async def list(cls, fields: Iterable[str] = None) -> Sequence[dict]: ''' Lists the keypair resource policies. You need an admin privilege for this operation. ''' if fields is None: fields = ( 'name', 'created_at', 'total_resource_slots', 'max_concurrent_sessions', 'max_vfolder_count', 'max_vfolder_size', 'idle_timeout', ) q = 'query {' \ ' keypair_resource_policies {' \ ' $fields' \ ' }' \ '}' q = q.replace('$fields', ' '.join(fields)) rqst = Request(cls.session, 'POST', '/admin/graphql') rqst.set_json({ 'query': q, }) async with rqst.fetch() as resp: data = await resp.json() return data['keypair_resource_policies']
0.002252
def letter_score(letter): """Returns the Scrabble score of a letter. Args: letter: a single character string Raises: TypeError if a non-Scrabble character is supplied """ score_map = { 1: ["a", "e", "i", "o", "u", "l", "n", "r", "s", "t"], 2: ["d", "g"], 3: ["b", "c", "m", "p"], 4: ["f", "h", "v", "w", "y"], 5: ["k"], 8: ["j", "x"], 10: ["q", "z"], } for score, letters in score_map.items(): if letter.lower() in letters: return score else: raise TypeError("Invalid letter: %s" % letter)
0.001603
def _run_rtg_eval(vrn_file, rm_file, rm_interval_file, base_dir, data, validate_method): """Run evaluation of a caller against the truth set using rtg vcfeval. """ out_dir = os.path.join(base_dir, "rtg") if not utils.file_exists(os.path.join(out_dir, "done")): if os.path.exists(out_dir): shutil.rmtree(out_dir) vrn_file, rm_file, interval_bed = _prepare_inputs(vrn_file, rm_file, rm_interval_file, base_dir, data) rtg_ref = tz.get_in(["reference", "rtg"], data) if isinstance(rtg_ref, dict) and "base" in rtg_ref: rtg_ref = os.path.dirname(rtg_ref["base"]) assert rtg_ref and os.path.exists(rtg_ref), ("Did not find rtg indexed reference file for validation:\n%s\n" "Run bcbio_nextgen.py upgrade --data --aligners rtg" % rtg_ref) # handle CWL where we have a reference to a single file in the RTG directory if os.path.isfile(rtg_ref): rtg_ref = os.path.dirname(rtg_ref) # get core and memory usage from standard configuration threads = min(dd.get_num_cores(data), 6) resources = config_utils.get_resources("rtg", data["config"]) memory = config_utils.adjust_opts(resources.get("jvm_opts", ["-Xms500m", "-Xmx1500m"]), {"algorithm": {"memory_adjust": {"magnitude": threads, "direction": "increase"}}}) jvm_stack = [x for x in memory if x.startswith("-Xms")] jvm_mem = [x for x in memory if x.startswith("-Xmx")] jvm_stack = jvm_stack[0] if len(jvm_stack) > 0 else "-Xms500m" jvm_mem = jvm_mem[0].replace("-Xmx", "") if len(jvm_mem) > 0 else "3g" cmd = ["rtg", "vcfeval", "--threads", str(threads), "-b", rm_file, "--bed-regions", interval_bed, "-c", vrn_file, "-t", rtg_ref, "-o", out_dir] if validate_method == "rtg-squash-ploidy": cmd += ["--squash-ploidy"] rm_samples = vcfutils.get_samples(rm_file) if len(rm_samples) > 1 and dd.get_sample_name(data) in rm_samples: cmd += ["--sample=%s" % dd.get_sample_name(data)] cmd += ["--vcf-score-field='%s'" % (_pick_best_quality_score(vrn_file))] mem_export = "%s export RTG_JAVA_OPTS='%s' && export RTG_MEM=%s" % (utils.local_path_export(), jvm_stack, jvm_mem) cmd = mem_export + " && " + " ".join(cmd) do.run(cmd, "Validate calls using rtg vcfeval", data) out = {"fp": os.path.join(out_dir, "fp.vcf.gz"), "fn": os.path.join(out_dir, "fn.vcf.gz")} tp_calls = os.path.join(out_dir, "tp.vcf.gz") tp_baseline = os.path.join(out_dir, "tp-baseline.vcf.gz") if os.path.exists(tp_baseline): out["tp"] = tp_baseline out["tp-calls"] = tp_calls else: out["tp"] = tp_calls return out
0.00398
def updateMesh(self, polydata): """ Overwrite the polygonal mesh of the actor with a new one. """ self.poly = polydata self.mapper.SetInputData(polydata) self.mapper.Modified() return self
0.008197
def zero_pad(m, n=1): """Pad a matrix with zeros, on all sides.""" return np.pad(m, (n, n), mode='constant', constant_values=[0])
0.007299
def spellCheckTextgrid(tg, targetTierName, newTierName, isleDict, printEntries=False): ''' Spell check words by using the praatio spellcheck function Incorrect items are noted in a new tier and optionally printed to the screen ''' def checkFunc(word): try: isleDict.lookup(word) except isletool.WordNotInISLE: returnVal = False else: returnVal = True return returnVal tg = praatio_scripts.spellCheckEntries(tg, targetTierName, newTierName, checkFunc, printEntries) return tg
0.008915
def create(self, deal_id, *args, **kwargs): """ Create an associated contact Creates a deal's associated contact and its role If the specified deal or contact does not exist, the request will return an error :calls: ``post /deals/{deal_id}/associated_contacts`` :param int deal_id: Unique identifier of a Deal. :param tuple *args: (optional) Single object representing AssociatedContact resource. :param dict **kwargs: (optional) AssociatedContact attributes. :return: Dictionary that supports attribute-style access and represents newly created AssociatedContact resource. :rtype: dict """ if not args and not kwargs: raise Exception('attributes for AssociatedContact are missing') attributes = args[0] if args else kwargs attributes = dict((k, v) for k, v in attributes.iteritems() if k in self.OPTS_KEYS_TO_PERSIST) _, _, associated_contact = self.http_client.post("/deals/{deal_id}/associated_contacts".format(deal_id=deal_id), body=attributes) return associated_contact
0.006256
def imsave(filename, data, photometric=None, planarconfig=None, resolution=None, description=None, software='tifffile.py', byteorder=None, bigtiff=False): """Write image data to TIFF file. Image data are written uncompressed in one stripe per plane. Dimensions larger than 2 or 3 (depending on photometric mode and planar configuration) are flattened and saved as separate pages. Parameters ---------- filename : str Name of file to write. data : array_like Input image. The last dimensions are assumed to be image height, width, and samples. photometric : {'minisblack', 'miniswhite', 'rgb'} The color space of the image data. By default this setting is inferred from the data shape. planarconfig : {'contig', 'planar'} Specifies if samples are stored contiguous or in separate planes. By default this setting is inferred from the data shape. 'contig': last dimension contains samples. 'planar': third last dimension contains samples. resolution : ((int, int), (int, int)) X and Y resolution in dots per inch as rational numbers. description : str The subject of the image. Saved with the first page only. software : str Name of the software used to create the image. Saved with the first page only. byteorder : {'<', '>'} The endianness of the data in the file. By default this is the system's native byte order. bigtiff : bool If True the BigTIFF format is used. By default the standard TIFF format is used for data less than 2040 MB. Examples -------- >>> data = numpy.random.rand(10, 3, 301, 219) >>> imsave('temp.tif', data) """ assert(photometric in (None, 'minisblack', 'miniswhite', 'rgb')) assert(planarconfig in (None, 'contig', 'planar')) assert(byteorder in (None, '<', '>')) if byteorder is None: byteorder = '<' if sys.byteorder == 'little' else '>' data = numpy.asarray(data, dtype=byteorder+data.dtype.char, order='C') data_shape = shape = data.shape data = numpy.atleast_2d(data) if not bigtiff and data.size * data.dtype.itemsize < 2040*2**20: bigtiff = False offset_size = 4 tag_size = 12 numtag_format = 'H' offset_format = 'I' val_format = '4s' else: bigtiff = True offset_size = 8 tag_size = 20 numtag_format = 'Q' offset_format = 'Q' val_format = '8s' # unify shape of data samplesperpixel = 1 extrasamples = 0 if photometric is None: if data.ndim > 2 and (shape[-3] in (3, 4) or shape[-1] in (3, 4)): photometric = 'rgb' else: photometric = 'minisblack' if photometric == 'rgb': if len(shape) < 3: raise ValueError("not a RGB(A) image") if planarconfig is None: planarconfig = 'planar' if shape[-3] in (3, 4) else 'contig' if planarconfig == 'contig': if shape[-1] not in (3, 4): raise ValueError("not a contiguous RGB(A) image") data = data.reshape((-1, 1) + shape[-3:]) samplesperpixel = shape[-1] else: if shape[-3] not in (3, 4): raise ValueError("not a planar RGB(A) image") data = data.reshape((-1, ) + shape[-3:] + (1, )) samplesperpixel = shape[-3] if samplesperpixel == 4: extrasamples = 1 elif planarconfig and len(shape) > 2: if planarconfig == 'contig': data = data.reshape((-1, 1) + shape[-3:]) samplesperpixel = shape[-1] else: data = data.reshape((-1, ) + shape[-3:] + (1, )) samplesperpixel = shape[-3] extrasamples = samplesperpixel - 1 else: planarconfig = None data = data.reshape((-1, 1) + shape[-2:] + (1, )) shape = data.shape # (pages, planes, height, width, contig samples) bytestr = bytes if sys.version[0] == '2' else lambda x: bytes(x, 'ascii') tifftypes = {'B': 1, 's': 2, 'H': 3, 'I': 4, '2I': 5, 'b': 6, 'h': 8, 'i': 9, 'f': 11, 'd': 12, 'Q': 16, 'q': 17} tifftags = {'new_subfile_type': 254, 
'subfile_type': 255, 'image_width': 256, 'image_length': 257, 'bits_per_sample': 258, 'compression': 259, 'photometric': 262, 'fill_order': 266, 'document_name': 269, 'image_description': 270, 'strip_offsets': 273, 'orientation': 274, 'samples_per_pixel': 277, 'rows_per_strip': 278, 'strip_byte_counts': 279, 'x_resolution': 282, 'y_resolution': 283, 'planar_configuration': 284, 'page_name': 285, 'resolution_unit': 296, 'software': 305, 'datetime': 306, 'predictor': 317, 'color_map': 320, 'extra_samples': 338, 'sample_format': 339} tags = [] tag_data = [] def pack(fmt, *val): return struct.pack(byteorder+fmt, *val) def tag(name, dtype, number, value, offset=[0]): # append tag binary string to tags list # append (offset, value as binary string) to tag_data list # increment offset by tag_size if dtype == 's': value = bytestr(value) + b'\0' number = len(value) value = (value, ) t = [pack('HH', tifftags[name], tifftypes[dtype]), pack(offset_format, number)] if len(dtype) > 1: number *= int(dtype[:-1]) dtype = dtype[-1] if number == 1: if isinstance(value, (tuple, list)): value = value[0] t.append(pack(val_format, pack(dtype, value))) elif struct.calcsize(dtype) * number <= offset_size: t.append(pack(val_format, pack(str(number)+dtype, *value))) else: t.append(pack(offset_format, 0)) tag_data.append((offset[0] + offset_size + 4, pack(str(number)+dtype, *value))) tags.append(b''.join(t)) offset[0] += tag_size if software: tag('software', 's', 0, software) if description: tag('image_description', 's', 0, description) elif shape != data_shape: tag('image_description', 's', 0, "shape=(%s)" % (",".join('%i' % i for i in data_shape))) tag('datetime', 's', 0, datetime.datetime.now().strftime("%Y:%m:%d %H:%M:%S")) # write previous tags only once writeonce = (len(tags), len(tag_data)) if shape[0] > 1 else None tag('compression', 'H', 1, 1) tag('orientation', 'H', 1, 1) tag('image_width', 'I', 1, shape[-2]) tag('image_length', 'I', 1, shape[-3]) tag('new_subfile_type', 'I', 1, 0 if shape[0] == 1 else 2) tag('sample_format', 'H', 1, {'u': 1, 'i': 2, 'f': 3, 'c': 6}[data.dtype.kind]) tag('photometric', 'H', 1, {'miniswhite': 0, 'minisblack': 1, 'rgb': 2}[photometric]) tag('samples_per_pixel', 'H', 1, samplesperpixel) if planarconfig: tag('planar_configuration', 'H', 1, 1 if planarconfig=='contig' else 2) tag('bits_per_sample', 'H', samplesperpixel, (data.dtype.itemsize * 8, ) * samplesperpixel) else: tag('bits_per_sample', 'H', 1, data.dtype.itemsize * 8) if extrasamples: if photometric == 'rgb': tag('extra_samples', 'H', 1, 1) # alpha channel else: tag('extra_samples', 'H', extrasamples, (0, ) * extrasamples) if resolution: tag('x_resolution', '2I', 1, resolution[0]) tag('y_resolution', '2I', 1, resolution[1]) tag('resolution_unit', 'H', 1, 2) tag('rows_per_strip', 'I', 1, shape[-3]) # use one strip per plane strip_byte_counts = (data[0, 0].size * data.dtype.itemsize, ) * shape[1] tag('strip_byte_counts', offset_format, shape[1], strip_byte_counts) # strip_offsets must be the last tag; will be updated later tag('strip_offsets', offset_format, shape[1], (0, ) * shape[1]) fd = open(filename, 'wb') seek = fd.seek tell = fd.tell def write(arg, *args): fd.write(pack(arg, *args) if args else arg) write({'<': b'II', '>': b'MM'}[byteorder]) if bigtiff: write('HHH', 43, 8, 0) else: write('H', 42) ifd_offset = tell() write(offset_format, 0) # first IFD for i in range(shape[0]): # update pointer at ifd_offset pos = tell() seek(ifd_offset) write(offset_format, pos) seek(pos) # write tags write(numtag_format, 
len(tags)) tag_offset = tell() write(b''.join(tags)) ifd_offset = tell() write(offset_format, 0) # offset to next ifd # write extra tag data and update pointers for off, dat in tag_data: pos = tell() seek(tag_offset + off) write(offset_format, pos) seek(pos) write(dat) # update strip_offsets pos = tell() if len(strip_byte_counts) == 1: seek(ifd_offset - offset_size) write(offset_format, pos) else: seek(pos - offset_size*shape[1]) strip_offset = pos for size in strip_byte_counts: write(offset_format, strip_offset) strip_offset += size seek(pos) # write data data[i].tofile(fd) # if this fails, try update Python and numpy fd.flush() # remove tags that should be written only once if writeonce: tags = tags[writeonce[0]:] d = writeonce[0] * tag_size tag_data = [(o-d, v) for (o, v) in tag_data[writeonce[1]:]] writeonce = None fd.close()
0.001024
def batch_star(self, path): """Evaluate the commands contained in the specific path. The Python equivalent of the CLIPS batch* command. """ if lib.EnvBatchStar(self._env, path.encode()) != 1: raise CLIPSError(self._env)
0.007547
def refresh(self): """ Keeps the lease alive by streaming keep alive requests from the client to the server and streaming keep alive responses from the server to the client. :returns: Response header. :rtype: instance of :class:`txaioetcd.Header` """ if self._expired: raise Expired() obj = { # ID is the lease ID for the lease to keep alive. u'ID': self.lease_id, } data = json.dumps(obj).encode('utf8') url = u'{}/v3alpha/lease/keepalive'.format(self._client._url).encode() response = yield treq.post(url, data, headers=self._client._REQ_HEADERS) obj = yield treq.json_content(response) if u'result' not in obj: raise Exception('bogus lease refresh response (missing "result") in {}'.format(obj)) ttl = obj[u'result'].get(u'TTL', None) if not ttl: self._expired = True raise Expired() header = Header._parse(obj[u'result'][u'header']) if u'header' in obj[u'result'] else None self._expired = False returnValue(header)
0.00431
def _openpyxl_read_xl(xl_path: str): """ Use openpyxl to read an Excel file. """ try: wb = load_workbook(filename=xl_path, read_only=True) except: raise else: return wb
0.009615
def _delocalize_logging_command(self, logging_path, user_project):
    """Returns a command to delocalize logs.

    Args:
      logging_path: location of log files.
      user_project: name of the project to be billed for the request.

    Returns:
      A bash fragment that creates the local logging directory (if needed) and
      defines the cp_cmd and prefix variables used to copy the log files; the
      prefix is the logging path without the ".log" extension, e.g.
      'gs://bucket/path/myfile' or 'gs://bucket/script-foobar-12'.
    """
    # Get the logging prefix (everything up to ".log")
    logging_prefix = os.path.splitext(logging_path.uri)[0]

    # Set the provider-specific mkdir and file copy commands
    if logging_path.file_provider == job_model.P_LOCAL:
        mkdir_cmd = 'mkdir -p "%s"\n' % os.path.dirname(logging_prefix)
        cp_cmd = 'cp'
    elif logging_path.file_provider == job_model.P_GCS:
        mkdir_cmd = ''
        if user_project:
            cp_cmd = 'gsutil -u {} -mq cp'.format(user_project)
        else:
            cp_cmd = 'gsutil -mq cp'
    else:
        assert False

    # Construct the copy command
    copy_logs_cmd = textwrap.dedent("""\
      local cp_cmd="{cp_cmd}"
      local prefix="{prefix}"
    """).format(
        cp_cmd=cp_cmd, prefix=logging_prefix)

    # Build up the command
    body = textwrap.dedent("""\
      {mkdir_cmd}
      {copy_logs_cmd}
    """).format(
        mkdir_cmd=mkdir_cmd, copy_logs_cmd=copy_logs_cmd)

    return body
0.005495
def do_timeout(self, args): """Get or set timeout (in ms) for resource in use. Get timeout: timeout Set timeout: timeout <mstimeout> """ if not self.current: print('There are no resources in use. Use the command "open".') return args = args.strip() if not args: try: print('Timeout: {}ms'.format(self.current.timeout)) except Exception as e: print(e) else: args = args.split(' ') try: self.current.timeout = float(args[0]) print('Done') except Exception as e: print(e)
0.004076
def ekf_ext_send(self, timestamp, Windspeed, WindDir, WindZ, Airspeed, beta, alpha, force_mavlink1=False): ''' Extended EKF state estimates for ASLUAVs timestamp : Time since system start [us] (uint64_t) Windspeed : Magnitude of wind velocity (in lateral inertial plane) [m/s] (float) WindDir : Wind heading angle from North [rad] (float) WindZ : Z (Down) component of inertial wind velocity [m/s] (float) Airspeed : Magnitude of air velocity [m/s] (float) beta : Sideslip angle [rad] (float) alpha : Angle of attack [rad] (float) ''' return self.send(self.ekf_ext_encode(timestamp, Windspeed, WindDir, WindZ, Airspeed, beta, alpha), force_mavlink1=force_mavlink1)
0.009278
def add_connection_throttle(self, maxConnectionRate=None, maxConnections=None, minConnections=None, rateInterval=None): """ Updates the connection throttling information for the load balancer with the supplied values. At least one of the parameters must be supplied. """ if not any((maxConnectionRate, maxConnections, minConnections, rateInterval)): # Pointless call return return self.manager.add_connection_throttle(self, maxConnectionRate=maxConnectionRate, maxConnections=maxConnections, minConnections=minConnections, rateInterval=rateInterval)
0.011747
def datafind_keep_unique_backups(backup_outs, orig_outs):
    """This function takes a list of backup datafind files, presumably
    obtained by querying a remote datafind server, e.g. CIT, and compares
    these against a list of original datafind files, presumably obtained by
    querying the local datafind server. Only the datafind files in the backup
    list that do not appear in the original list are returned. This allows us
    to use only files that are missing from the local cluster.

    Parameters
    -----------
    backup_outs : FileList
        List of datafind files from the remote datafind server.
    orig_outs : FileList
        List of datafind files from the local datafind server.

    Returns
    --------
    FileList
        List of datafind files in backup_outs and not in orig_outs.
    """
    # NOTE: This function is not optimized and could be made considerably
    #       quicker if speed becomes an issue. With 4s frame files this might
    #       be slow, but for >1000s files I don't foresee any issue, so I keep
    #       this simple.
    return_list = FileList([])
    # We compare the LFNs to determine uniqueness
    # Is there a way to associate two paths with one LFN??
    orig_names = [f.name for f in orig_outs]
    for file in backup_outs:
        if file.name not in orig_names:
            return_list.append(file)
        else:
            index_num = orig_names.index(file.name)
            orig_out = orig_outs[index_num]
            pfns = list(file.pfns)
            # This shouldn't happen, but catch if it does
            assert(len(pfns) == 1)
            orig_out.PFN(pfns[0].url, site='notlocal')

    return return_list
0.000591
def to_inference_data(self): """Convert all available data to an InferenceData object. Note that if groups can not be created (i.e., there is no `trace`, so the `posterior` and `sample_stats` can not be extracted), then the InferenceData will not have those groups. """ return InferenceData( **{ "posterior": self.posterior_to_xarray(), "sample_stats": self.sample_stats_to_xarray(), "posterior_predictive": self.posterior_predictive_to_xarray(), "observed_data": self.observed_data_to_xarray(), } )
0.004559
def sort_dict(d, desc=True): """ Sort an ordered dictionary by value, descending. Args: d (OrderedDict): An ordered dictionary. desc (bool): If true, sort desc. Returns: OrderedDict: The sorted dictionary. """ sort = sorted(d.items(), key=lambda x: x[1], reverse=desc) return OrderedDict(sort)
0.002865
def getProjectArea(self, projectarea_name, archived=False, returned_properties=None): """Get :class:`rtcclient.project_area.ProjectArea` object by its name :param projectarea_name: the project area name :param archived: (default is False) whether the project area is archived :param returned_properties: the returned properties that you want. Refer to :class:`rtcclient.client.RTCClient` for more explanations :return: the :class:`rtcclient.project_area.ProjectArea` object :rtype: rtcclient.project_area.ProjectArea """ if not isinstance(projectarea_name, six.string_types) or not projectarea_name: excp_msg = "Please specify a valid ProjectArea name" self.log.error(excp_msg) raise exception.BadValue(excp_msg) self.log.debug("Try to get <ProjectArea %s>", projectarea_name) rp = returned_properties proj_areas = self._getProjectAreas(archived=archived, returned_properties=rp, projectarea_name=projectarea_name) if proj_areas is not None: proj_area = proj_areas[0] self.log.info("Find <ProjectArea %s>", proj_area) return proj_area self.log.error("No ProjectArea named %s", projectarea_name) raise exception.NotFound("No ProjectArea named %s" % projectarea_name)
0.001983
def serialize(message): """ Serialize a message to a udp packet :type message: Message :param message: the message to be serialized :rtype: stream of byte :return: the message serialized """ fmt = "!BBH" if message.token is None or message.token == "": tkl = 0 else: tkl = len(message.token) tmp = (defines.VERSION << 2) tmp |= message.type tmp <<= 4 tmp |= tkl values = [tmp, message.code, message.mid] if message.token is not None and tkl > 0: for b in str(message.token): fmt += "c" values.append(bytes(b, "utf-8")) options = Serializer.as_sorted_list(message.options) # already sorted lastoptionnumber = 0 for option in options: # write 4-bit option delta optiondelta = option.number - lastoptionnumber optiondeltanibble = Serializer.get_option_nibble(optiondelta) tmp = (optiondeltanibble << defines.OPTION_DELTA_BITS) # write 4-bit option length optionlength = option.length optionlengthnibble = Serializer.get_option_nibble(optionlength) tmp |= optionlengthnibble fmt += "B" values.append(tmp) # write extended option delta field (0 - 2 bytes) if optiondeltanibble == 13: fmt += "B" values.append(optiondelta - 13) elif optiondeltanibble == 14: fmt += "H" values.append(optiondelta - 269) # write extended option length field (0 - 2 bytes) if optionlengthnibble == 13: fmt += "B" values.append(optionlength - 13) elif optionlengthnibble == 14: fmt += "H" values.append(optionlength - 269) # write option value if optionlength > 0: opt_type = defines.OptionRegistry.LIST[option.number].value_type if opt_type == defines.INTEGER: words = Serializer.int_to_words(option.value, optionlength, 8) for num in range(0, optionlength): fmt += "B" values.append(words[num]) elif opt_type == defines.STRING: fmt += str(len(bytes(option.value, "utf-8"))) + "s" values.append(bytes(option.value, "utf-8")) else: # OPAQUE for b in option.value: fmt += "B" values.append(b) # update last option number lastoptionnumber = option.number payload = message.payload if payload is not None and len(payload) > 0: # if payload is present and of non-zero length, it is prefixed by # an one-byte Payload Marker (0xFF) which indicates the end of # options and the start of the payload fmt += "B" values.append(defines.PAYLOAD_MARKER) if isinstance(payload, bytes): fmt += str(len(payload)) + "s" values.append(payload) else: fmt += str(len(bytes(payload, "utf-8"))) + "s" values.append(bytes(payload, "utf-8")) # for b in str(payload): # fmt += "c" # values.append(bytes(b, "utf-8")) datagram = None if values[1] is None: values[1] = 0 if values[2] is None: values[2] = 0 try: s = struct.Struct(fmt) datagram = ctypes.create_string_buffer(s.size) s.pack_into(datagram, 0, *values) except struct.error: # The .exception method will report on the exception encountered # and provide a traceback. logger.debug(fmt) logger.debug(values) logging.exception('Failed to pack structure') return datagram
0.000971
def read_10x_h5(filename, genome=None, gex_only=True) -> AnnData:
    """Read 10x-Genomics-formatted hdf5 file.

    Parameters
    ----------
    filename : `str` | :class:`~pathlib.Path`
        Filename.
    genome : `str`, optional (default: ``None``)
        Filter expression to genes within this genome. For legacy 10x h5
        files, this must be provided if the data contains more than one genome.
    gex_only : `bool`, optional (default: `True`)
        Only keep 'Gene Expression' data and ignore other feature types,
        e.g. 'Antibody Capture', 'CRISPR Guide Capture', or 'Custom'

    Returns
    -------
    Annotated data matrix, where observations/cells are named by their
    barcode and variables/genes by gene name. The data matrix is stored in
    `adata.X`, cell names in `adata.obs_names` and gene names in
    `adata.var_names`. The gene IDs are stored in `adata.var['gene_ids']`.
    The feature types are stored in `adata.var['feature_types']`
    """
    logg.info('reading', filename, r=True, end=' ')
    with tables.open_file(str(filename), 'r') as f:
        v3 = '/matrix' in f
    if v3:
        adata = _read_v3_10x_h5(filename)
        if genome:
            if genome not in adata.var['genome'].values:
                raise ValueError(
                    "Could not find data corresponding to genome '{genome}' in '{filename}'. "
                    "Available genomes are: {avail}."
                    .format(
                        genome=genome,
                        filename=filename,
                        avail=list(adata.var["genome"].unique()),
                    )
                )
            adata = adata[:, list(map(lambda x: x == str(genome), adata.var['genome']))]
        if gex_only:
            adata = adata[:, list(map(lambda x: x == 'Gene Expression', adata.var['feature_types']))]
        return adata
    else:
        return _read_legacy_10x_h5(filename, genome=genome)
0.002075
def get_job_logs(id): """Get the crawl logs from the job.""" crawler_job = models.CrawlerJob.query.filter_by(id=id).one_or_none() if crawler_job is None: click.secho( ( "CrawlJob %s was not found, maybe it's not a crawl job?" % id ), fg='yellow', ) sys.exit(1) if crawler_job.logs is None: click.secho( ( "CrawlJob %s has no log, it might be that it has not run " "yet, you can try again later." % id ), fg='yellow', ) sys.exit(1) _show_file( file_path=crawler_job.logs, header_name='Log', )
0.001364
def attrput(self, groupname, attrname, rownr, value, unit=[], meas=[]): """Put the value and optionally unit and measinfo of an attribute in a row in a group.""" return self._attrput(groupname, attrname, rownr, value, unit, meas)
0.007813
def from_sym_2_tri(symm): """convert a 2D symmetric matrix to an upper triangular matrix in 1D format Parameters ---------- symm : 2D array Symmetric matrix Returns ------- tri: 1D array Contains elements of upper triangular matrix """ inds = np.triu_indices_from(symm) tri = symm[inds] return tri
0.002674
def get_line_for_offset(self, code_offset): """ returns the line number given a code offset """ prev_line = 0 for (offset, line) in self.get_linenumbertable(): if offset < code_offset: prev_line = line elif offset == code_offset: return line else: return prev_line return prev_line
0.004808
def create_or_update_role(self, name, azure_roles, ttl="", max_ttl="", mount_point=DEFAULT_MOUNT_POINT): """Create or update a Vault role. The provided Azure roles must exist for this call to succeed. See the Azure secrets roles docs for more information about roles. Supported methods: POST: /{mount_point}/roles/{name}. Produces: 204 (empty body) :param name: Name of the role. :type name: str | unicode :param azure_roles: List of Azure roles to be assigned to the generated service principal. :type azure_roles: list(dict) :param ttl: Specifies the default TTL for service principals generated using this role. Accepts time suffixed strings ("1h") or an integer number of seconds. Defaults to the system/engine default TTL time. :type ttl: str | unicode :param max_ttl: Specifies the maximum TTL for service principals generated using this role. Accepts time suffixed strings ("1h") or an integer number of seconds. Defaults to the system/engine max TTL time. :type max_ttl: str | unicode :param mount_point: The "path" the method/backend was mounted on. :type mount_point: str | unicode :return: The response of the request. :rtype: requests.Response """ params = { 'azure_roles': json.dumps(azure_roles), 'ttl': ttl, 'max_ttl': max_ttl, } api_path = '/v1/{mount_point}/roles/{name}'.format( mount_point=mount_point, name=name ) return self._adapter.post( url=api_path, json=params, )
0.0053
def state(self, *args, **kwargs):
    """
    Get AWS State for a worker type

    Return the state of a given workertype as stored by the provisioner.
    This state is stored as lists of running instances and of pending
    requests.  The `summary` property contains an updated summary similar
    to that returned from `listWorkerTypeSummaries`.

    This method is ``stable``
    """

    return self._makeApiCall(self.funcinfo["state"], *args, **kwargs)
0.003968
def p_statement_try(p): 'statement : TRY LBRACE inner_statement_list RBRACE CATCH LPAREN fully_qualified_class_name VARIABLE RPAREN LBRACE inner_statement_list RBRACE additional_catches' p[0] = ast.Try(p[3], [ast.Catch(p[7], ast.Variable(p[8], lineno=p.lineno(8)), p[11], lineno=p.lineno(5))] + p[13], lineno=p.lineno(1))
0.007813
def average_temperature(self, unit='kelvin'): """Returns the average value in the temperature series :param unit: the unit of measure for the temperature values. May be among: '*kelvin*' (default), '*celsius*' or '*fahrenheit*' :type unit: str :returns: a float :raises: ValueError when invalid values are provided for the unit of measure or the measurement series is empty """ if unit not in ('kelvin', 'celsius', 'fahrenheit'): raise ValueError("Invalid value for parameter 'unit'") average = self._average(self._purge_none_samples( self.temperature_series())) if unit == 'kelvin': result = average if unit == 'celsius': result = temputils.kelvin_to_celsius(average) if unit == 'fahrenheit': result = temputils.kelvin_to_fahrenheit(average) return result
0.003052
def _get_stats_table(self, table_id, kind='R', summary=False): """Gets a stats table from the player page; helper function that does the work for per-game, per-100-poss, etc. stats. :table_id: the ID of the HTML table. :kind: specifies regular season, playoffs, or both. One of 'R', 'P', 'B'. Defaults to 'R'. :returns: A DataFrame of stats. """ doc = self.get_main_doc() table_id = 'table#{}{}'.format( 'playoffs_' if kind == 'P' else '', table_id) table = doc(table_id) df = sportsref.utils.parse_table(table, flatten=(not summary), footer=summary) return df
0.002801
def follow_cf(save, Uspan, target_cf, nup, n_tot=5.0, slsp=None):
    """Calculates the quasiparticle weight in a single-site spin hamiltonian
    with N degenerate half-filled orbitals"""
    if slsp is None:
        slsp = Spinon(slaves=6, orbitals=3, avg_particles=n_tot,
                      hopping=[0.5]*6, populations=np.asarray([n_tot]*6)/6)

    zet, lam, mu, mean_f = [], [], [], []
    for co in Uspan:
        print('U=', co, 'del=', target_cf)
        res = root(targetpop, nup[-1], (co, target_cf, slsp, n_tot))
        print(res.x)
        if res.x > nup[-1]:
            break
        nup.append(res.x)
        slsp.param['populations'] = population_distri(nup[-1])
        mean_f.append(slsp.mean_field())
        zet.append(slsp.quasiparticle_weight())
        lam.append(slsp.param['lambda'])
        mu.append(orbital_energies(slsp.param, zet[-1]))
    # plt.plot(np.asarray(zet)[:,0], label='d={}, zl'.format(str(target_cf)))
    # plt.plot(np.asarray(zet)[:,5], label='d={}, zh'.format(str(target_cf)))

    case = save.createGroup('cf={}'.format(target_cf))
    varis = st.setgroup(case)
    st.storegroup(varis, Uspan[:len(zet)], zet, lam, mu, nup[1:], target_cf, mean_f)
0.012648
def _boundsspec2slicespec(boundsspec,scs): """ Convert an iterable boundsspec (supplying l,b,r,t of a BoundingRegion) into a Slice specification. Includes all units whose centers are within the specified sheet-coordinate bounds specified by boundsspec. Exact inverse of _slicespec2boundsspec(). """ l,b,r,t = boundsspec t_m,l_m = scs.sheet2matrix(l,t) b_m,r_m = scs.sheet2matrix(r,b) l_idx = int(np.ceil(l_m-0.5)) t_idx = int(np.ceil(t_m-0.5)) # CBENHANCEMENT: Python 2.6's math.trunc()? r_idx = int(np.floor(r_m+0.5)) b_idx = int(np.floor(b_m+0.5)) return t_idx,b_idx,l_idx,r_idx
0.018233
def get_replication_group_imbalance_stats(rgs, partitions):
    """Calculate the extra replica count over each replication-group and the
    net extra-same-replica count.
    """
    tot_rgs = len(rgs)
    extra_replica_cnt_per_rg = defaultdict(int)
    for partition in partitions:
        # Get optimal replica-count for each partition
        opt_replica_cnt, extra_replicas_allowed = \
            compute_optimum(tot_rgs, partition.replication_factor)

        # Extra replica count for each rg
        for rg in rgs:
            replica_cnt_rg = rg.count_replica(partition)
            extra_replica_cnt, extra_replicas_allowed = \
                get_extra_element_count(
                    replica_cnt_rg,
                    opt_replica_cnt,
                    extra_replicas_allowed,
                )
            extra_replica_cnt_per_rg[rg.id] += extra_replica_cnt

    # Evaluate net imbalance across all replication-groups
    net_imbalance = sum(extra_replica_cnt_per_rg.values())
    return net_imbalance, extra_replica_cnt_per_rg
0.000949
def get_contract_from_name(self, contract_name): """ Return a contract from a name Args: contract_name (str): name of the contract Returns: Contract """ return next((c for c in self.contracts if c.name == contract_name), None)
0.009934
def getallstates(self, window_name, object_name): """ Get all states of given object @param window_name: Window name to look for, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to look for, either full name, LDTP's name convention, or a Unix glob. @type object_name: string @return: list of string on success. @rtype: list """ object_handle = self._get_object_handle(window_name, object_name) _obj_states = [] if object_handle.AXEnabled: _obj_states.append("enabled") if object_handle.AXFocused: _obj_states.append("focused") else: try: if object_handle.AXFocused: _obj_states.append("focusable") except: pass if re.match("AXCheckBox", object_handle.AXRole, re.M | re.U | re.L) or \ re.match("AXRadioButton", object_handle.AXRole, re.M | re.U | re.L): if object_handle.AXValue: _obj_states.append("checked") return _obj_states
0.003306
def hide_routemap_holder_route_map_content_continue_holder_cont(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy") route_map = ET.SubElement(hide_routemap_holder, "route-map") name_key = ET.SubElement(route_map, "name") name_key.text = kwargs.pop('name') action_rm_key = ET.SubElement(route_map, "action-rm") action_rm_key.text = kwargs.pop('action_rm') instance_key = ET.SubElement(route_map, "instance") instance_key.text = kwargs.pop('instance') content = ET.SubElement(route_map, "content") continue_holder = ET.SubElement(content, "continue-holder") cont = ET.SubElement(continue_holder, "continue") callback = kwargs.pop('callback', self._callback) return callback(config)
0.004224
def ndb_put(self, entity): """Like put(), but for NDB entities.""" assert ndb is not None and isinstance(entity, ndb.Model) self.ndb_puts.append(entity)
0.006098
def compile_msg_payload(self, invite): """ Determine recipient, message content, return it as a dict that can be Posted to the message sender """ self.l.info("Compiling the outbound message payload") update_invite = False # Determine the recipient address if "to_addr" in invite.invite: to_addr = invite.invite["to_addr"] else: update_invite = True to_addr = get_identity_address(invite.identity) # Determine the message content if "content" in invite.invite: content = invite.invite["content"] else: update_invite = True content = settings.INVITE_TEXT # Determine the metadata if "metadata" in invite.invite: metadata = invite.invite["metadata"] else: update_invite = True metadata = {} msg_payload = { "to_addr": to_addr, "content": content, "metadata": metadata } if update_invite is True: self.l.info("Updating the invite.invite field") invite.invite = msg_payload invite.save() self.l.info("Compiled the outbound message payload") return msg_payload
0.001548
def state_load(self, f, use_active_range=False): """Load a state previously stored by :meth:`DataFrame.state_store`, see also :meth:`DataFrame.state_set`.""" state = vaex.utils.read_json_or_yaml(f) self.state_set(state, use_active_range=use_active_range)
0.010791
def interpolator(self, x, values): """ Returns a polynomial with vector coefficients which interpolates the values at the Chebyshev points x """ # hacking the barycentric interpolator by computing the weights in advance p = Bary([0.]) N = len(values) weights = np.ones(N) weights[0] = .5 weights[1::2] = -1 weights[-1] *= .5 p.wi = weights p.xi = x p.set_yi(values) return p
0.00823
def is_rotation(self, other): '''Determine whether two sequences are the same, just at different rotations. :param other: The sequence to check for rotational equality. :type other: coral.sequence._sequence.Sequence ''' if len(self) != len(other): return False for i in range(len(self)): if self.rotate(i) == other: return True return False
0.004464
def get_template_names(self):
    """
    Default template for the datagrid.
    """
    names = super(EasyUIDeleteView, self).get_template_names()
    names.append('easyui/confirm_delete.html')
    return names
0.009346
def _make_query_from_terms(self, terms, limit=None): """ Creates a query for partition from decomposed search terms. Args: terms (dict or unicode or string): Returns: tuple of (TextClause, dict): First element is FTS query, second is parameters of the query. Element of the execution of the query is tuple of three elements: (vid, dataset_vid, score). """ expanded_terms = self._expand_terms(terms) terms_used = 0 if expanded_terms['doc']: # create query with real score. query_parts = ["SELECT vid, dataset_vid, ts_rank_cd(setweight(doc,'C'), to_tsquery(:doc)) as score"] if expanded_terms['doc'] and expanded_terms['keywords']: query_parts = ["SELECT vid, dataset_vid, ts_rank_cd(setweight(doc,'C'), to_tsquery(:doc)) " " + ts_rank_cd(setweight(to_tsvector(coalesce(keywords::text,'')),'B'), to_tsquery(:keywords))" ' as score'] else: # create query with score = 1 because query will not touch doc field. query_parts = ['SELECT vid, dataset_vid, 1 as score'] query_parts.append('FROM partition_index') query_params = {} where_count = 0 if expanded_terms['doc']: query_parts.append('WHERE doc @@ to_tsquery(:doc)') query_params['doc'] = self.backend._and_join(expanded_terms['doc']) where_count += 1 terms_used += 1 if expanded_terms['keywords']: query_params['keywords'] = self.backend._and_join(expanded_terms['keywords']) kw_q = "to_tsvector(coalesce(keywords::text,'')) @@ to_tsquery(:keywords)" query_parts.append(("AND " if where_count else "WHERE ") + kw_q) where_count += 1 terms_used += 1 if expanded_terms['from']: query_parts.append(("AND " if where_count else "WHERE ") + ' from_year >= :from_year') query_params['from_year'] = expanded_terms['from'] where_count += 1 terms_used += 1 if expanded_terms['to']: query_parts.append(("AND " if where_count else "WHERE ") + ' to_year <= :to_year') query_params['to_year'] = expanded_terms['to'] where_count += 1 terms_used += 1 query_parts.append('ORDER BY score DESC') if limit: query_parts.append('LIMIT :limit') query_params['limit'] = limit if not terms_used: logger.debug('No terms used; not creating query') return None, None query_parts.append(';') deb_msg = 'Dataset terms conversion: `{}` terms converted to `{}` with `{}` params query.'\ .format(terms, query_parts, query_params) logger.debug(deb_msg) return text('\n'.join(query_parts)), query_params
0.003729
def startLogin(): """ If we are not logged in, this generates the redirect URL to the OIDC provider and returns the redirect response :return: A redirect response to the OIDC provider """ flask.session["state"] = oic.oauth2.rndstr(SECRET_KEY_LENGTH) flask.session["nonce"] = oic.oauth2.rndstr(SECRET_KEY_LENGTH) args = { "client_id": app.oidcClient.client_id, "response_type": "code", "scope": ["openid", "profile"], "nonce": flask.session["nonce"], "redirect_uri": app.oidcClient.redirect_uris[0], "state": flask.session["state"] } result = app.oidcClient.do_authorization_request( request_args=args, state=flask.session["state"]) return flask.redirect(result.url)
0.001305
def _nose_tools_trivial_transform(): """Custom transform for the nose.tools module.""" stub = _BUILDER.string_build("""__all__ = []""") all_entries = ["ok_", "eq_"] for pep8_name, method in _nose_tools_functions(): all_entries.append(pep8_name) stub[pep8_name] = method # Update the __all__ variable, since nose.tools # does this manually with .append. all_assign = stub["__all__"].parent all_object = astroid.List(all_entries) all_object.parent = all_assign all_assign.value = all_object return stub
0.001779
def _from_any_pb(pb_type, any_pb): """Converts an Any protobuf to the specified message type Args: pb_type (type): the type of the message that any_pb stores an instance of. any_pb (google.protobuf.any_pb2.Any): the object to be converted. Returns: pb_type: An instance of the pb_type message. Raises: TypeError: if the message could not be converted. """ msg = pb_type() if not any_pb.Unpack(msg): raise TypeError( "Could not convert {} to {}".format( any_pb.__class__.__name__, pb_type.__name__ ) ) return msg
0.001546
def _cut_time(gammas): """Support function for iat(). Find cutting time, when gammas become negative.""" for i in range(len(gammas) - 1): if not ((gammas[i + 1] > 0.0) & (gammas[i + 1] < gammas[i])): return i return i
0.003906
def populate_schema_objects(self, schema, obj_type):
    """Returns a list of tables or functions for an (optional) schema."""
    metadata = self.dbmetadata[obj_type]
    schema = schema or self.dbname

    try:
        objects = metadata[schema].keys()
    except KeyError:
        # schema doesn't exist
        objects = []

    return objects
0.005277
def sample(self, bqm, **kwargs): """Sample from the problem provided by bqm and truncate output. Args: bqm (:obj:`dimod.BinaryQuadraticModel`): Binary quadratic model to be sampled from. **kwargs: Parameters for the sampling method, specified by the child sampler. Returns: :obj:`dimod.SampleSet` """ tkw = self._truncate_kwargs if self._aggregate: return self.child.sample(bqm, **kwargs).aggregate().truncate(**tkw) else: return self.child.sample(bqm, **kwargs).truncate(**tkw)
0.003096
def MLOAD(self, address): """Load word from memory""" self._allocate(address, 32) value = self._load(address, 32) return value
0.012658
def execute(self, args): """Execute show subcommand.""" if args.name is not None: self.show_workspace(slashes2dash(args.name)) elif args.all is not None: self.show_all()
0.009217