text: string (lengths 78–104k) | score: float64 (range 0–0.18)
def fcoe_get_login_input_fcoe_login_rbridge_id(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    fcoe_get_login = ET.Element("fcoe_get_login")
    config = fcoe_get_login
    input = ET.SubElement(fcoe_get_login, "input")
    fcoe_login_rbridge_id = ET.SubElement(input, "fcoe-login-rbridge-id")
    fcoe_login_rbridge_id.text = kwargs.pop('fcoe_login_rbridge_id')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
0.003795
def interval(self, T=[0, 1000]):
    """
    Get all spikes in a time interval T.

    Parameters
    ----------
    T : list
        Time interval.

    Returns
    -------
    s : list
        Nested list with spike times.

    See also
    --------
    sqlite3.connect.cursor
    """
    self.cursor.execute('SELECT * FROM spikes WHERE time BETWEEN %f AND %f'
                        % tuple(T))
    sel = self.cursor.fetchall()
    return sel
0.007952
def get_entry(self, entry_name, key):
    """Returns cache entry parameter value by its name.

    :param str|unicode entry_name:
    :param key:
    :return:
    """
    return self.cache[entry_name].get(key, False)
0.008333
def get_height(self, id=None, endpoint=None):
    """
    Get the current height of the blockchain

    Args:
        id: (int, optional) id to use for response tracking
        endpoint: (RPCEndpoint, optional) endpoint to specify to use

    Returns:
        json object of the result or the error encountered in the RPC call
    """
    return self._call_endpoint(GET_BLOCK_COUNT, id=id, endpoint=endpoint)
0.004505
def reload_core(host=None, core_name=None):
    '''
    MULTI-CORE HOSTS ONLY
    Load a new core from the same configuration as an existing registered
    core. While the "new" core is initializing, the "old" one will continue
    to accept requests. Once it has finished, all new requests will go to
    the "new" core, and the "old" core will be unloaded.

    host : str (None)
        The solr host to query. __opts__['host'] is default.
    core_name : str
        The name of the core to reload

    Return : dict<str,obj>::

        {'success':boolean, 'data':dict, 'errors':list, 'warnings':list}

    CLI Example:

    .. code-block:: bash

        salt '*' solr.reload_core None music

    Return data is in the following format::

        {'success':bool, 'data':dict, 'errors':list, 'warnings':list}
    '''
    ret = _get_return_dict()
    if not _check_for_cores():
        err = ['solr.reload_core can only be called by "multi-core" minions']
        return ret.update({'success': False, 'errors': err})

    if _get_none_or_value(core_name) is None and _check_for_cores():
        success = True
        for name in __opts__['solr.cores']:
            resp = reload_core(host, name)
            if not resp['success']:
                success = False
            data = {name: {'data': resp['data']}}
            ret = _update_return_dict(ret, success, data,
                                      resp['errors'], resp['warnings'])
        return ret

    extra = ['action=RELOAD', 'core={0}'.format(core_name)]
    url = _format_url('admin/cores', host=host, core_name=None, extra=extra)
    return _http_request(url)
0.001239
def get_initial(self):
    """
    Returns a copy of `initial` with empty initial data dictionaries for
    each form.
    """
    initial = super(MultiFormView, self).get_initial()
    for key in six.iterkeys(self.form_classes):
        initial[key] = {}
    return initial
0.010067
def do_diff(self, subcmd, opts, *args):
    """Display the differences between two paths.

    usage:
        1. diff [-r N[:M]] [TARGET[@REV]...]
        2. diff [-r N[:M]] --old=OLD-TGT[@OLDREV] [--new=NEW-TGT[@NEWREV]] \
               [PATH...]
        3. diff OLD-URL[@OLDREV] NEW-URL[@NEWREV]

    1. Display the changes made to TARGETs as they are seen in REV between
       two revisions. TARGETs may be working copy paths or URLs.

       N defaults to BASE if any TARGET is a working copy path, otherwise
       it must be specified. M defaults to the current working version if
       any TARGET is a working copy path, otherwise it defaults to HEAD.

    2. Display the differences between OLD-TGT as it was seen in OLDREV
       and NEW-TGT as it was seen in NEWREV. PATHs, if given, are relative
       to OLD-TGT and NEW-TGT and restrict the output to differences for
       those paths. OLD-TGT and NEW-TGT may be working copy paths or
       URL[@REV]. NEW-TGT defaults to OLD-TGT if not specified. -r N makes
       OLDREV default to N, -r N:M makes OLDREV default to N and NEWREV
       default to M.

    3. Shorthand for 'svn diff --old=OLD-URL[@OLDREV] --new=NEW-URL[@NEWREV]'

    Use just 'svn diff' to display local modifications in a working copy.

    ${cmd_option_list}
    """
    print "'svn %s' opts: %s" % (subcmd, opts)
    print "'svn %s' args: %s" % (subcmd, args)
0.007838
def load_writer(writer, ppp_config_dir=None, **writer_kwargs):
    """Find and load writer `writer` in the available configuration files."""
    if ppp_config_dir is None:
        ppp_config_dir = get_environ_config_dir()

    config_fn = writer + ".yaml" if "." not in writer else writer
    config_files = config_search_paths(
        os.path.join("writers", config_fn), ppp_config_dir)
    writer_kwargs.setdefault("config_files", config_files)
    if not writer_kwargs['config_files']:
        raise ValueError("Unknown writer '{}'".format(writer))

    try:
        return load_writer_configs(writer_kwargs['config_files'],
                                   ppp_config_dir=ppp_config_dir,
                                   **writer_kwargs)
    except ValueError:
        raise ValueError("Writer '{}' does not exist or could not be "
                         "loaded".format(writer))
0.001124
def _handleSelectAllAxes(self, evt):
    """Called when the 'select all axes' menu item is selected."""
    if len(self._axisId) == 0:
        return
    for i in range(len(self._axisId)):
        self._menu.Check(self._axisId[i], True)
    self._toolbar.set_active(self.getActiveAxes())
    evt.Skip()
0.006061
def to_index_variable(self):
    """Return this variable as an xarray.IndexVariable"""
    return IndexVariable(self.dims, self._data, self._attrs,
                         encoding=self._encoding, fastpath=True)
0.008929
def script_file(self):
    """
    Returns the startup script file for this VPCS VM.

    :returns: path to startup script file
    """
    # use the default VPCS file if it exists
    path = os.path.join(self.working_dir, 'startup.vpc')
    if os.path.exists(path):
        return path
    else:
        return None
0.005602
def list_all(course, single=None):
    """
    Lists all of the exercises in the current course.
    """
    def bs(val):
        return "●" if val else " "

    def bc(val):
        return as_success("✔") if val else as_error("✘")

    def format_line(exercise):
        return "{0} │ {1} │ {2} │ {3} │ {4}".format(exercise.tid,
                                                    bs(exercise.is_selected),
                                                    bc(exercise.is_downloaded),
                                                    bc(exercise.is_completed),
                                                    exercise.menuname())

    print("ID{0}│ S │ D │ C │ Name".format(
        (len(str(course.exercises[0].tid)) - 1) * " "
    ))
    if single:
        print(format_line(single))
        return
    for exercise in course.exercises:
        # ToDo: use a pager
        print(format_line(exercise))
0.001094
def after(self, context):
    "Invokes all after functions with context passed to them."
    self._invoke(self._after, context)
    run.after_each.execute(context)
0.011429
def get_dirname(self, index):
    """Return dirname associated with *index*"""
    fname = self.get_filename(index)
    if fname:
        if osp.isdir(fname):
            return fname
        else:
            return osp.dirname(fname)
0.007407
def _build_iterable_validator(iterable):
    """Build a validator from an iterable."""
    sub_schemas = [parse_schema(s) for s in iterable]

    def item_validator(value):
        """Validate items in an iterable."""
        for sub in sub_schemas:
            try:
                return sub(value)
            except NotValid:
                pass
        raise NotValid('%r invalidated by anything in %s.' % (value, iterable))

    def iterable_validator(data):
        """Validate an iterable."""
        if not type(data) is type(iterable):
            raise NotValid('%r is not of type %s' % (data, type(iterable)))
        return type(iterable)(item_validator(value) for value in data)

    return iterable_validator
0.001376
def _tupleCompare(tuple1, ineq, tuple2,
                  eq=lambda a, b: (a == b),
                  ander=AND,
                  orer=OR):
    """
    Compare two 'in-database tuples'. Useful when sorting by a compound key
    and slicing into the middle of that query.
    """
    orholder = []
    for limit in range(len(tuple1)):
        eqconstraint = [
            eq(elem1, elem2) for elem1, elem2 in zip(tuple1, tuple2)[:limit]]
        ineqconstraint = ineq(tuple1[limit], tuple2[limit])
        orholder.append(ander(*(eqconstraint + [ineqconstraint])))
    return orer(*orholder)
0.010221
def request_data_stream_encode(self, target_system, target_component,
                               req_stream_id, req_message_rate, start_stop):
    '''
    THIS INTERFACE IS DEPRECATED. USE SET_MESSAGE_INTERVAL INSTEAD.

    target_system    : The target requested to send the message stream. (uint8_t)
    target_component : The target requested to send the message stream. (uint8_t)
    req_stream_id    : The ID of the requested data stream (uint8_t)
    req_message_rate : The requested message rate (uint16_t)
    start_stop       : 1 to start sending, 0 to stop sending. (uint8_t)
    '''
    return MAVLink_request_data_stream_message(target_system, target_component,
                                               req_stream_id, req_message_rate,
                                               start_stop)
0.010651
def stop(self) -> None:
    """
    Stop the :class:`~lahja.endpoint.Endpoint` from receiving further events.
    """
    if not self._running:
        return
    self._running = False
    self._receiving_queue.put_nowait((TRANSPARENT_EVENT, None))
    self._internal_queue.put_nowait((TRANSPARENT_EVENT, None))
0.008721
def bind_socket(sock, channel='can0'):
    """
    Binds the given socket to the given interface.

    :param socket.socket sock: The socket to be bound
    :raises OSError: If the specified interface isn't found.
    """
    log.debug('Binding socket to channel=%s', channel)
    if HAS_NATIVE_SUPPORT:
        sock.bind((channel,))
    else:
        # For Python 2.7
        addr = get_addr(sock, channel)
        libc.bind(sock.fileno(), addr, len(addr))
    log.debug('Bound socket.')
0.001984
def run(self, *args, **kwargs):
    """Start genesis

    :returns: None
    :rtype: None
    :raises: None
    """
    if self.win:
        self.win.deleteLater()
    mayawin = maya_main_window()
    self.win = ReftrackWin(self.inter, parent=mayawin)
    self.win.destroyed.connect(self.win_destroyed)
    self.win.show()
    self.win.wrap_scene()
0.005051
def get_function_hash(self, func, args=None, kwargs=None, ttl=None,
                      key=None, noc=None):
    """Compute the hash of the function to be evaluated.
    """
    base_hash = settings.HASH_FUNCTION()
    if PY3:
        base_hash.update(func.__name__.encode(settings.DEFAULT_ENCODING))
    else:
        base_hash.update(func.__name__)
    if args:
        for a in args:
            if PY3:
                base_hash.update(repr(a).encode(settings.DEFAULT_ENCODING))
            else:
                base_hash.update(repr(a))
    if kwargs:
        for k in sorted(kwargs):
            if PY3:
                base_hash.update(
                    ("{}={}".format(k, repr(kwargs[k]))).encode(settings.DEFAULT_ENCODING))
            else:
                base_hash.update(("{}={}".format(k, repr(kwargs[k]))))
    if ttl:
        base_hash.update(str(ttl).encode(settings.DEFAULT_ENCODING))
    if key and can_encrypt:
        if PY3:
            base_hash.update(key.encode(settings.DEFAULT_ENCODING))
        else:
            base_hash.update(key)
    if noc:
        base_hash.update(str(noc).encode(settings.DEFAULT_ENCODING))
    base_hash_hex = base_hash.hexdigest()
    return base_hash_hex
0.003072
def set_mt_wcs(self, image):
    """ Reset the WCS for this image based on the WCS information from
        another imageObject.
    """
    for chip in range(1, self._numchips + 1, 1):
        sci_chip = self._image[self.scienceExt, chip]
        ref_chip = image._image[image.scienceExt, chip]
        # Do we want to keep track of original WCS or not? No reason now...
        sci_chip.wcs = ref_chip.wcs.copy()
0.013636
def request(self, hash_, quickkey, doc_type, page=None, output=None,
            size_id=None, metadata=None, request_conversion_only=None):
    """Query conversion server

    hash_: 4 characters of file hash
    quickkey: File quickkey
    doc_type: "i" for image, "d" for documents
    page: The page to convert. If page is set to 'initial', the first
          10 pages of the document will be provided. (document)
    output: "pdf", "img", or "swf" (document)
    size_id: 0,1,2 (document)
             0-9, a-f, z (image)
    metadata: Set to 1 to get metadata dict
    request_conversion_only: Request conversion w/o content
    """
    if len(hash_) > 4:
        hash_ = hash_[:4]

    query = QueryParams({
        'quickkey': quickkey,
        'doc_type': doc_type,
        'page': page,
        'output': output,
        'size_id': size_id,
        'metadata': metadata,
        'request_conversion_only': request_conversion_only
    })

    url = API_ENDPOINT + '?' + hash_ + '&' + urlencode(query)

    response = self.http.get(url, stream=True)

    if response.status_code == 204:
        raise ConversionServerError("Unable to fulfill request. "
                                    "The document will not be converted.",
                                    response.status_code)

    response.raise_for_status()

    if response.headers['content-type'] == 'application/json':
        return response.json()

    return response
0.002506
def parse_plist(self, preferences_file):
    """Try to reset preferences from preference_file."""
    preferences_file = os.path.expanduser(preferences_file)
    # Try to open using FoundationPlist. If it's not available,
    # fall back to plistlib and hope it's not binary encoded.
    try:
        prefs = FoundationPlist.readPlist(preferences_file)
    except NameError:
        try:
            prefs = plistlib.readPlist(preferences_file)
        except ExpatError:
            # If we're on OSX, try to convert using another tool.
            if is_osx():
                subprocess.call(["plutil", "-convert", "xml1",
                                 preferences_file])
                prefs = plistlib.readPlist(preferences_file)

    self.preferences_file = preferences_file
    self.user = prefs.get("jss_user")
    self.password = prefs.get("jss_pass")
    self.url = prefs.get("jss_url")
    if not all([self.user, self.password, self.url]):
        raise JSSPrefsMissingKeyError("Please provide all required "
                                      "preferences!")

    # Optional file repository array. Defaults to empty list.
    self.repos = []
    for repo in prefs.get("repos", []):
        self.repos.append(dict(repo))

    self.verify = prefs.get("verify", True)
    self.suppress_warnings = prefs.get("suppress_warnings", True)
0.001351
def save_named_query(self, alias, querystring, afterwards=None):
    """
    add an alias for a query string.

    These are stored in the notmuch database and can be used as part of
    more complex queries using the syntax "query:alias".
    See :manpage:`notmuch-search-terms(7)` for more info.

    :param alias: name of shortcut
    :type alias: str
    :param querystring: value, i.e., the full query string
    :type querystring: str
    :param afterwards: callback to trigger after adding the alias
    :type afterwards: callable or None
    """
    if self.ro:
        raise DatabaseROError()
    self.writequeue.append(('setconfig', afterwards, 'query.' + alias,
                            querystring))
0.002571
def _rnd_datetime(self, start, end):
    """Internal random datetime generator.
    """
    return self.from_utctimestamp(
        random.randint(
            int(self.to_utctimestamp(start)),
            int(self.to_utctimestamp(end)),
        )
    )
0.007042
def get_products(self, latitude, longitude):
    """Get information about the Uber products offered at a given location.

    Parameters
        latitude (float)
            The latitude component of a location.
        longitude (float)
            The longitude component of a location.

    Returns
        (Response)
            A Response object containing available products information.
    """
    args = OrderedDict([
        ('latitude', latitude),
        ('longitude', longitude),
    ])
    return self._api_call('GET', 'v1.2/products', args=args)
0.003221
def read(self, size=sys.maxsize):
    """Read at most size bytes from the file (less if the read hits EOF
    before obtaining size bytes).
    """
    blob_size = int(self.blob_properties.get('content-length'))
    if self._pointer < blob_size:
        chunk = self._download_chunk_with_retries(
            chunk_offset=self._pointer, chunk_size=size)
        self._pointer += size
        return chunk
0.004545
def _get_positions_to_highlight(self, document):
    """
    Return a list of (row, col) tuples that need to be highlighted.
    """
    # Try for the character under the cursor.
    if document.current_char and document.current_char in self.chars:
        pos = document.find_matching_bracket_position(
            start_pos=document.cursor_position - self.max_cursor_distance,
            end_pos=document.cursor_position + self.max_cursor_distance)

    # Try for the character before the cursor.
    elif (document.char_before_cursor and
          document.char_before_cursor in self._closing_braces and
          document.char_before_cursor in self.chars):
        document = Document(document.text, document.cursor_position - 1)
        pos = document.find_matching_bracket_position(
            start_pos=document.cursor_position - self.max_cursor_distance,
            end_pos=document.cursor_position + self.max_cursor_distance)
    else:
        pos = None

    # Return a list of (row, col) tuples that need to be highlighted.
    if pos:
        pos += document.cursor_position  # pos is relative.
        row, col = document.translate_index_to_position(pos)
        return [(row, col), (document.cursor_position_row,
                             document.cursor_position_col)]
    else:
        return []
0.005751
def _validate(self, data: Any, schema: AnyMapping) -> Any:
    """Validate data against given schema.

    :param data: Data to validate.
    :param schema: Schema to use for validation.
    """
    try:
        return self.validate_func(schema, self._pure_data(data))
    except self.validation_error_class as err:
        logger.error(
            'Schema validation error',
            exc_info=True,
            extra={'schema': schema, 'schema_module': self.module})
        if self.error_class is None:
            raise
        raise self.make_error('Validation Error', error=err) from err
0.003067
def _create(self, monomer, mon_vector):
    """
    create the polymer from the monomer

    Args:
        monomer (Molecule)
        mon_vector (numpy.array): molecule vector that starts from the
            start atom index to the end atom index
    """
    while self.length != (self.n_units - 1):
        if self.linear_chain:
            move_direction = np.array(mon_vector) / np.linalg.norm(mon_vector)
        else:
            move_direction = self._next_move_direction()
        self._add_monomer(monomer.copy(), mon_vector, move_direction)
0.005
def token(self):
    """
    Returns authorization token provided by Cocaine.

    The real meaning of the token is determined by its type. For example
    OAUTH2 token will have "bearer" type.

    :return: A tuple of token type and body.
    """
    if self._token is None:
        token_type = os.getenv(TOKEN_TYPE_KEY, '')
        token_body = os.getenv(TOKEN_BODY_KEY, '')
        self._token = _Token(token_type, token_body)
    return self._token
0.006036
def sort(self):
    """Sort by commit size, per author."""
    # First sort commits by author email
    users = []
    # Group commits by author email, so they can be merged
    for _, group in itertools.groupby(sorted(self.commits),
                                      operator.attrgetter('author_mail')):
        if group:
            users.append(self.merge_user_commits(group))
    # Finally sort by the (aggregated) commits' line counts
    self.sorted_commits = sorted(users,
                                 key=operator.attrgetter('line_count'),
                                 reverse=True)
    return self.sorted_commits
0.00292
def substitutes(self):
    """
    Return the substitutions for the templating replacements.
    """
    author_collector = AuthorCollector()
    substitute_dict = dict(
        project=self.project_name,
        date=date.today().isoformat(),
        author=author_collector.collect()
    )
    return substitute_dict
0.022989
def execute(self):
    """
    params = {
        "ApexCode"       : "None",
        "ApexProfiling"  : "01pd0000001yXtYAAU",
        "Callout"        : True,
        "Database"       : 1,
        "ExpirationDate" : 3,
        "ScopeId"        : "",
        "System"         : "",
        "TracedEntityId" : "",
        "Validation"     : "",
        "Visualforce"    : "",
        "Workflow"       : ""
    }
    """
    if 'type' not in self.params:
        raise MMException("Please include the type of log, 'user' or 'apex'")
    if 'debug_categories' not in self.params:
        raise MMException("Please include debug categories in dictionary format: "
                          "e.g.: {'ApexCode':'DEBUG', 'Visualforce':'INFO'}")

    request = {}
    if self.params['type'] == 'user':
        request['ScopeId'] = None
        request['TracedEntityId'] = self.params.get('user_id', config.sfdc_client.user_id)
    elif self.params['type'] == 'apex':
        # request['ScopeId'] = 'user'
        request['ScopeId'] = config.sfdc_client.user_id
        request['TracedEntityId'] = self.params['apex_id']

    for c in self.params['debug_categories']:
        if 'category' in c:
            request[c['category']] = c['level']
        else:
            request[c] = self.params['debug_categories'][c]

    request['ExpirationDate'] = util.get_iso_8601_timestamp(
        int(float(self.params.get('expiration', 30))))

    config.logger.debug(self.params['debug_categories'])
    config.logger.debug("Log creation request--->")
    config.logger.debug(request)

    create_result = config.sfdc_client.create_trace_flag(request)
    config.logger.debug("Log creation response--->")
    config.logger.debug(create_result)

    if type(create_result) is list:
        create_result = create_result[0]
    if type(create_result) is not str and type(create_result) is not unicode:
        return json.dumps(create_result)
    else:
        return create_result
0.003655
def inc_convert(self, value):
    """Default converter for the inc:// protocol."""
    if not os.path.isabs(value):
        value = os.path.join(self.base, value)
    with codecs.open(value, 'r', encoding='utf-8') as f:
        result = json.load(f)
    return result
0.006873
def _post_query(self, **query_dict):
    """Perform a POST query against Solr and return the response as a
    Python dict."""
    param_dict = query_dict.copy()
    return self._send_query(do_post=True, **param_dict)
0.012876
def linearize_aliases(self):
    # type: () -> typing.List[Alias]
    """
    Returns a list of all aliases used in the namespace. The aliases are
    ordered to ensure that if they reference other aliases those aliases
    come earlier in the list.
    """
    linearized_aliases = []
    seen_aliases = set()  # type: typing.Set[Alias]

    def add_alias(alias):
        # type: (Alias) -> None
        if alias in seen_aliases:
            return
        elif alias.namespace != self:
            return
        if is_alias(alias.data_type):
            add_alias(alias.data_type)
        linearized_aliases.append(alias)
        seen_aliases.add(alias)

    for alias in self.aliases:
        add_alias(alias)

    return linearized_aliases
0.003623
def get_reader_input_fn(train_config, preprocess_output_dir, model_type,
                        data_paths, batch_size, shuffle, num_epochs=None):
    """Builds input layer for training."""

    def get_input_features():
        """Read the input features from the given data paths."""
        _, examples = util.read_examples(
            input_files=data_paths,
            batch_size=batch_size,
            shuffle=shuffle,
            num_epochs=num_epochs)
        features = util.parse_example_tensor(examples=examples,
                                             train_config=train_config,
                                             keep_target=True)

        target_name = train_config['target_column']
        target = features.pop(target_name)
        features, target = util.preprocess_input(
            features=features,
            target=target,
            train_config=train_config,
            preprocess_output_dir=preprocess_output_dir,
            model_type=model_type)

        return features, target

    # Return a function to input the features into the model from a data path.
    return get_input_features
0.004721
def load(self, dbfile, password=None, keyfile=None, readonly=False):
    """
    Load the database from file/stream.

    :param dbfile: The database file path/stream.
    :type dbfile: str or file-like object
    :param password: The password for the database.
    :type password: str
    :param keyfile: Path to a keyfile (or a stream) that can be used instead
        of or in conjunction with password for database.
    :type keyfile: str or file-like object
    :param readonly: Whether to open the database read-only.
    :type readonly: bool
    """
    self._clear()

    buf = None
    is_stream = hasattr(dbfile, 'read')
    if is_stream:
        buf = dbfile.read()
    else:
        if not os.path.exists(dbfile):
            raise IOError("File does not exist: {0}".format(dbfile))

        with open(dbfile, 'rb') as fp:
            buf = fp.read()

    self.load_from_buffer(buf, password=password, keyfile=keyfile,
                          readonly=readonly)

    # Once we have successfully loaded the file, go ahead and set the
    # internal attribute (in the LockingDatabase subclass, this will
    # effectively take out the lock on the file)
    if not is_stream:
        self.filepath = dbfile
0.009002
def String(self, str):
    """Get an interned string from the reader, allows for example to
    speedup string name comparisons
    """
    ret = libxml2mod.xmlTextReaderConstString(self._o, str)
    return ret
0.008889
def semaphore(branch: str):
    """
    Performs necessary checks to ensure that the semaphore build is
    successful, on the correct branch and not a pull-request.

    :param branch: The branch the environment should be running against.
    """
    assert os.environ.get('BRANCH_NAME') == branch
    assert os.environ.get('PULL_REQUEST_NUMBER') is None
    assert os.environ.get('SEMAPHORE_THREAD_RESULT') != 'failed'
0.002375
def is_ancestor_of_family(self, id_, family_id):
    """Tests if an ``Id`` is an ancestor of a family.

    arg:    id (osid.id.Id): an ``Id``
    arg:    family_id (osid.id.Id): the ``Id`` of a family
    return: (boolean) - ``true`` if this ``id`` is an ancestor of
            ``family_id,`` ``false`` otherwise
    raise:  NotFound - ``family_id`` is not found
    raise:  NullArgument - ``id`` or ``family_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    *implementation notes*: If ``id`` not found return ``false``.
    """
    # Implemented from template for
    # osid.resource.BinHierarchySession.is_ancestor_of_bin
    if self._catalog_session is not None:
        return self._catalog_session.is_ancestor_of_catalog(
            id_=id_, catalog_id=family_id)
    return self._hierarchy_session.is_ancestor(id_=id_, ancestor_id=family_id)
0.003802
def get(self, key):
    """
    Retrieve the decrypted value of a key in a giraffez configuration file.

    :param str key: The key used to lookup the encrypted value
    """
    if not key.startswith("secure.") and not key.startswith("connections."):
        key = "secure.{0}".format(key)
    value = self.config.get_value(key)
    if not isinstance(value, basestring):
        value = None
    return value
0.00655
def run(self):
    """Run the sync.

    Confront the local and the remote directories and perform the needed
    changes."""
    # Check if remote path is present
    try:
        self.sftp.stat(self.remote_path)
    except FileNotFoundError as e:
        if self.create_remote_directory:
            self.sftp.mkdir(self.remote_path)
            self.logger.info(
                "Created missing remote dir: '" + self.remote_path + "'")
        else:
            self.logger.error(
                "Remote folder does not exist. "
                "Add '-r' to create it if missing.")
            sys.exit(1)

    try:
        if self.delete:
            # First check for items to be removed
            self.check_for_deletion()

        # Now scan local for items to upload/create
        self.check_for_upload_create()
    except FileNotFoundError:
        # If this happens, probably the remote folder doesn't exist.
        self.logger.error(
            "Error while opening remote folder. Are you sure it exists?")
        sys.exit(1)
0.003478
def get_analysis_config(self, group_name):
    """ Gets any config data saved for the analysis.

    :param group_name: The name of the analysis group.
    :returns: A dictionary of dictionaries. Each key represents an
        analysis step. Each value is a dictionary containing the analysis
        parameters as key/value pairs. Returns None if no configuration
        exists for the analysis.
    """
    self.assert_open()
    group = 'Analyses/{}/Configuration'.format(group_name)
    config = None
    if group in self.handle:
        config = self._parse_attribute_tree(group)
    return config
0.003053
def render_js_template(self, template_path, element_id, context=None):
    """
    Render a js template.
    """
    context = context or {}
    return u"<script type='text/template' id='{}'>\n{}\n</script>".format(
        element_id,
        self.render_template(template_path, context)
    )
0.006135
def _parse_nested(self, name, results):
    """
    Parse the nested relationship in a relation.

    :param name: The name of the relationship
    :type name: str

    :type results: dict

    :rtype: dict
    """
    progress = []

    for segment in name.split('.'):
        progress.append(segment)

        last = '.'.join(progress)
        if last not in results:
            results[last] = self.__class__(self.get_query().new_query())

    return results
0.003861
def init(self, key_value_pairs=None, **kwargs):
    """Initialize datastore.

    Only sets values for keys that are not in the datastore already.

    :param dict key_value_pairs:
        A set of key value pairs to use to initialize the datastore.

    :rtype: Iterable[tornado.concurrent.Future]
    """
    if key_value_pairs is None:
        key_value_pairs = kwargs
    return [self.set(k, v) for k, v in key_value_pairs.items()
            if k not in self]
0.003846
def unwrap_arguments(xml_response):
    """Extract arguments and their values from a SOAP response.

    Args:
        xml_response (str): SOAP/xml response text (unicode, not utf-8).

    Returns:
        dict: a dict of ``{argument_name: value}`` items.
    """
    # A UPnP SOAP response (including headers) looks like this:

    # HTTP/1.1 200 OK
    # CONTENT-LENGTH: bytes in body
    # CONTENT-TYPE: text/xml; charset="utf-8"
    # DATE: when response was generated
    # EXT:
    # SERVER: OS/version UPnP/1.0 product/version
    #
    # <?xml version="1.0"?>
    # <s:Envelope
    #   xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"
    #   s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
    #   <s:Body>
    #     <u:actionNameResponse
    #       xmlns:u="urn:schemas-upnp-org:service:serviceType:v">
    #       <argumentName>out arg value</argumentName>
    #       ... other out args and their values go here, if any
    #     </u:actionNameResponse>
    #   </s:Body>
    # </s:Envelope>

    # Get all tags in order. ElementTree (in python 2.x) seems to prefer to
    # be fed bytes, rather than unicode
    xml_response = xml_response.encode('utf-8')
    try:
        tree = XML.fromstring(xml_response)
    except XML.ParseError:
        # Try to filter illegal xml chars (as unicode), in case that is
        # the reason for the parse error
        filtered = illegal_xml_re.sub('', xml_response.decode('utf-8'))\
            .encode('utf-8')
        tree = XML.fromstring(filtered)

    # Get the first child of the <Body> tag which will be
    # <{actionNameResponse}> (depends on what actionName is). Turn the
    # children of this into a {tagname, content} dict. XML unescaping
    # is carried out for us by ElementTree.
    action_response = tree.find(
        "{http://schemas.xmlsoap.org/soap/envelope/}Body")[0]
    return dict((i.tag, i.text or "") for i in action_response)
0.000928
def wantFunction(self, function):
    """Is the function a test function?
    """
    try:
        if hasattr(function, 'compat_func_name'):
            funcname = function.compat_func_name
        else:
            funcname = function.__name__
    except AttributeError:
        # not a function
        return False
    declared = getattr(function, '__test__', None)
    if declared is not None:
        wanted = declared
    else:
        wanted = not funcname.startswith('_') and self.matches(funcname)
    plug_wants = self.plugins.wantFunction(function)
    if plug_wants is not None:
        wanted = plug_wants
    log.debug("wantFunction %s? %s", function, wanted)
    return wanted
0.002594
def getLogicalDisplayInfo(self):
    '''
    Gets C{mDefaultViewport} and then C{deviceWidth} and C{deviceHeight}
    values from dumpsys. This is a method to obtain display logical
    dimensions and density
    '''
    self.__checkTransport()
    logicalDisplayRE = re.compile(
        '.*DisplayViewport\{valid=true, .*orientation=(?P<orientation>\d+),'
        ' .*deviceWidth=(?P<width>\d+), deviceHeight=(?P<height>\d+).*')
    for line in self.shell('dumpsys display').splitlines():
        m = logicalDisplayRE.search(line, 0)
        if m:
            self.__displayInfo = {}
            for prop in ['width', 'height', 'orientation']:
                self.__displayInfo[prop] = int(m.group(prop))
            for prop in ['density']:
                d = self.__getDisplayDensity(
                    None, strip=True,
                    invokeGetPhysicalDisplayIfNotFound=True)
                if d:
                    self.__displayInfo[prop] = d
                else:
                    # No available density information
                    self.__displayInfo[prop] = -1.0
            return self.__displayInfo
    return None
0.00766
def to_sql(self, conn, table_name, dbapi_module=None,
           use_python_type_specifiers=False, use_exact_column_names=True):
    """
    Convert an SFrame to a single table in a SQL database.

    This function does not attempt to create the table or check if a table
    named `table_name` exists in the database. It simply assumes that
    `table_name` exists in the database and appends to it.

    `to_sql` can be thought of as a convenience wrapper around
    parameterized SQL insert statements.

    Parameters
    ----------
    conn : dbapi2.Connection
        A DBAPI2 connection object. Any connection object originating from
        the 'connect' method of a DBAPI2-compliant package can be used.

    table_name : str
        The name of the table to append the data in this SFrame.

    dbapi_module : module | package, optional
        The top-level DBAPI2 module/package that constructed the given
        connection object. By default, a best guess of which module the
        connection came from is made. In the event that this guess is wrong,
        this will need to be specified.

    use_python_type_specifiers : bool, optional
        If the DBAPI2 module's parameter marker style is 'format' or
        'pyformat', attempt to use accurate type specifiers for each value
        ('s' for string, 'd' for integer, etc.). Many DBAPI2 modules simply
        use 's' for all types if they use these parameter markers, so this
        is False by default.

    use_exact_column_names : bool, optional
        Specify the column names of the SFrame when inserting its contents
        into the DB. If the specified table does not have the exact same
        column names as the SFrame, inserting the data will fail. If False,
        the columns in the SFrame are inserted in order without care of the
        schema of the DB table. True by default.
    """
    mod_info = _get_global_dbapi_info(dbapi_module, conn)

    c = conn.cursor()

    col_info = list(zip(self.column_names(), self.column_types()))

    if not use_python_type_specifiers:
        _pytype_to_printf = lambda x: 's'

    # DBAPI2 standard allows for five different ways to specify parameters
    sql_param = {
        'qmark':    lambda name, col_num, col_type: '?',
        'numeric':  lambda name, col_num, col_type: ':' + str(col_num + 1),
        'named':    lambda name, col_num, col_type: ':' + str(name),
        'format':   lambda name, col_num, col_type: '%' + _pytype_to_printf(col_type),
        'pyformat': lambda name, col_num, col_type: '%(' + str(name) + ')' + _pytype_to_printf(col_type),
    }

    get_sql_param = sql_param[mod_info['paramstyle']]

    # form insert string
    ins_str = "INSERT INTO " + str(table_name)
    value_str = " VALUES ("
    col_str = " ("
    count = 0
    for i in col_info:
        col_str += i[0]
        value_str += get_sql_param(i[0], count, i[1])
        if count < len(col_info) - 1:
            col_str += ","
            value_str += ","
        count += 1
    col_str += ")"
    value_str += ")"

    if use_exact_column_names:
        ins_str += col_str

    ins_str += value_str

    # Some formats require values in an iterable, some a dictionary
    if (mod_info['paramstyle'] == 'named' or
            mod_info['paramstyle'] == 'pyformat'):
        prepare_sf_row = lambda x: x
    else:
        col_names = self.column_names()
        prepare_sf_row = lambda x: [x[i] for i in col_names]

    for i in self:
        try:
            c.execute(ins_str, prepare_sf_row(i))
        except mod_info['Error'] as e:
            if hasattr(conn, 'rollback'):
                conn.rollback()
            raise e

    conn.commit()
    c.close()
0.008627
def assemble_transcripts(run_parallel, samples):
    """
    assembly strategy rationale implemented as suggested in
    http://www.nature.com/nprot/journal/v7/n3/full/nprot.2012.016.html

    run Cufflinks without a reference GTF for each individual sample
    merge the assemblies with Cuffmerge using a reference GTF
    """
    assembler = dd.get_in_samples(samples, dd.get_transcript_assembler)
    data = samples[0][0]
    if assembler:
        if "cufflinks" in assembler:
            samples = run_parallel("cufflinks_assemble", samples)
        if "stringtie" in assembler:
            samples = run_parallel("run_stringtie_expression", samples)
        if "stringtie" in assembler and stringtie.supports_merge(data):
            samples = run_parallel("stringtie_merge", [samples])
        else:
            samples = run_parallel("cufflinks_merge", [samples])
    return samples
0.001121
def _setHeaders(self, call=None, **kwargs):
    '''
    Attach particular SOAP headers to the request depending on the method
    call made
    '''
    # All calls, including utility calls, set the session header
    headers = {'SessionHeader': self._sessionHeader}

    if 'debug_categories' in kwargs:
        # ERROR, WARN, INFO, DEBUG, FINE, FINER, FINEST
        # Db, Workflow, Validation, Callout, Apex Code, Apex Profiling, All
        debug_categories = kwargs['debug_categories']
        headers['DebuggingHeader'] = {
            'categories': debug_categories
        }
        # headers['DebuggingHeader'] = {
        #     'categories': {
        #         'category': kwargs['debug_category'],
        #         'level': kwargs['debug_level']
        #     }
        # }

    if call in ('convertLead', 'create', 'merge', 'process', 'undelete',
                'update', 'upsert'):
        if self._allowFieldTruncationHeader is not None:
            headers['AllowFieldTruncationHeader'] = self._allowFieldTruncationHeader

    if call in ('create', 'merge', 'update', 'upsert'):
        if self._assignmentRuleHeader is not None:
            headers['AssignmentRuleHeader'] = self._assignmentRuleHeader

    # CallOptions will only ever be set by the SforcePartnerClient
    if self._callOptions is not None:
        if call in ('create', 'merge', 'queryAll', 'query', 'queryMore',
                    'retrieve', 'search', 'update', 'upsert', 'convertLead',
                    'login', 'delete', 'describeGlobal', 'describeLayout',
                    'describeTabs', 'describeSObject', 'describeSObjects',
                    'getDeleted', 'getUpdated', 'process', 'undelete',
                    'getServerTimestamp', 'getUserInfo', 'setPassword',
                    'resetPassword'):
            headers['CallOptions'] = self._callOptions

    if call in ('create', 'delete', 'resetPassword', 'update', 'upsert'):
        if self._emailHeader is not None:
            headers['EmailHeader'] = self._emailHeader

    if call in ('describeSObject', 'describeSObjects'):
        if self._localeOptions is not None:
            headers['LocaleOptions'] = self._localeOptions

    if call == 'login':
        if self._loginScopeHeader is not None:
            headers['LoginScopeHeader'] = self._loginScopeHeader

    if call in ('create', 'merge', 'query', 'retrieve', 'update', 'upsert'):
        if self._mruHeader is not None:
            headers['MruHeader'] = self._mruHeader

    if call in ('convertLead', 'create', 'delete', 'describeGlobal',
                'describeLayout', 'describeSObject', 'describeSObjects',
                'describeTabs', 'merge', 'process', 'query', 'retrieve',
                'search', 'undelete', 'update', 'upsert'):
        if self._packageVersionHeader is not None:
            headers['PackageVersionHeader'] = self._packageVersionHeader

    if call in ('query', 'queryAll', 'queryMore', 'retrieve'):
        if self._queryOptions is not None:
            headers['QueryOptions'] = self._queryOptions

    if call == 'delete':
        if self._userTerritoryDeleteHeader is not None:
            headers['UserTerritoryDeleteHeader'] = self._userTerritoryDeleteHeader

    # print '\n\n>>>>>>>>>>>>>>>> setting header '
    # print headers
    self._sforce.set_options(soapheaders=headers)
0.008053
def _send_to_group(self, group, **kwargs):
    """ You shouldn't use this method directly.

    Send a single command to specific group.

    Handles automatically sending command to white or rgbw group.
    """
    retries = kwargs.get("retries", self.repeat_commands)
    for _ in range(retries):
        if kwargs.get("send_on", True):
            self.on(group)
        if group is None or group == 0:
            self._send_to_all_groups(**kwargs)
            continue
        if group < 1 or group > 4:
            raise AttributeError(
                "Group must be between 1 and 4 (was %s)" % group)
        if kwargs.get("per_group"):
            self._send_command(
                kwargs.get("%s_cmd" % self.get_group_type(group),
                           [None, None, None, None])[group - 1])
            continue

        if self.get_group_type(group) == "white":
            command = self.WHITE_COMMANDS.get(kwargs["command"])
        elif self.get_group_type(group) == "rgbw":
            if kwargs["command"] == "color_by_int":
                command = (self.RGBW_COMMANDS["color_by_int"],
                           struct.pack("B", kwargs["color"]))
            else:
                command = self.RGBW_COMMANDS.get(kwargs["command"])

        self._send_command(command)
0.003837
def normalize_name(name):
    '''
    Strips the architecture from the specified package name, if necessary.
    Circumstances where this would be done include:

    * If the arch is 32 bit and the package name ends in a 32-bit arch.
    * If the arch matches the OS arch, or is ``noarch``.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.normalize_name zsh.x86_64
    '''
    try:
        arch = name.rsplit('.', 1)[-1]
        if arch not in salt.utils.pkg.rpm.ARCHES + ('noarch',):
            return name
    except ValueError:
        return name
    if arch in (__grains__['osarch'], 'noarch') \
            or salt.utils.pkg.rpm.check_32(arch, osarch=__grains__['osarch']):
        return name[:-(len(arch) + 1)]
    return name
0.001328
def set_domain(clz, dag):
    """
    Sets the domain. Should only be called once per class instantiation.
    """
    logging.info("Setting domain for poset %s" % clz.__name__)
    if nx.number_of_nodes(dag) == 0:
        raise CellConstructionFailure("Empty DAG structure.")
    if not nx.is_directed_acyclic_graph(dag):
        raise CellConstructionFailure("Must be directed and acyclic")
    if not nx.is_weakly_connected(dag):
        raise CellConstructionFailure("Must be connected")
    clz.domain_map[clz] = dag
0.007092
def _get(self, k, obj=None):
    'Return Option object for k in context of obj. Cache result until any set().'
    opt = self._cache.get((k, obj), None)
    if opt is None:
        opt = self._opts._get(k, obj)
        self._cache[(k, obj or vd.sheet)] = opt
    return opt
0.010101
def outputSimple(self):
    """ Simple output mode """
    out = []
    errors = []

    successfulResponses = len(
        [True for rsp in self.results if rsp['success']])

    out.append("INFO QUERIED {0}".format(len(self.serverList)))
    out.append("INFO SUCCESS {0}".format(successfulResponses))
    out.append("INFO ERROR {0}".format(
        len(self.serverList) - successfulResponses))

    for rsp in self.resultsColated:
        if rsp['success']:
            out.append("RESULT {0} {1}".format(
                len(rsp['servers']),
                "|".join(rsp['results'])
            ))
        else:
            errors.append("ERROR {0} {1}".format(
                len(rsp['servers']),
                "|".join(rsp['results'])
            ))

    out += errors
    sys.stdout.write("\n".join(out))
    sys.stdout.write("\n")
0.002049
def hl_canvas2table_box(self, canvas, tag):
    """Highlight all masks inside user drawn box on table."""
    self.treeview.clear_selection()

    # Remove existing box
    cobj = canvas.get_object_by_tag(tag)
    if cobj.kind != 'rectangle':
        return
    canvas.delete_object_by_tag(tag, redraw=False)

    # Remove existing highlight
    if self.maskhltag:
        try:
            canvas.delete_object_by_tag(self.maskhltag, redraw=True)
        except Exception:
            pass

    # Nothing to do if no masks are displayed
    try:
        obj = canvas.get_object_by_tag(self.masktag)
    except Exception:
        return

    if obj.kind != 'compound':
        return

    # Nothing to do if table has no data
    if len(self._maskobjs) == 0:
        return

    # Find masks that intersect the rectangle
    for i, mobj in enumerate(self._maskobjs):
        # The actual mask
        mask1 = self._rgbtomask(mobj)

        # The selected area
        rgbimage = mobj.get_image()
        mask2 = rgbimage.get_shape_mask(cobj)

        # Highlight mask with intersect
        if np.any(mask1 & mask2):
            self._highlight_path(self._treepaths[i])
0.001538
def router_fabric_virtual_gateway_address_family_ipv4_accept_unicast_arp_request(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    router = ET.SubElement(config, "router",
                           xmlns="urn:brocade.com:mgmt:brocade-common-def")
    fabric_virtual_gateway = ET.SubElement(
        router, "fabric-virtual-gateway",
        xmlns="urn:brocade.com:mgmt:brocade-anycast-gateway")
    address_family = ET.SubElement(fabric_virtual_gateway, "address-family")
    ipv4 = ET.SubElement(address_family, "ipv4")
    accept_unicast_arp_request = ET.SubElement(ipv4,
                                               "accept-unicast-arp-request")

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
0.009682
def import_module_with_exceptions(name, package=None):
    """Wrapper around importlib.import_module to import TimeSide subpackage
    and ignoring ImportError if Aubio, Yaafe and Vamp Host are not available"""

    from timeside.core import _WITH_AUBIO, _WITH_YAAFE, _WITH_VAMP

    if name.count('.server.'):
        # TODO:
        # Temporary skip all timeside.server submodules before check dependencies
        return
    try:
        import_module(name, package)
    except VampImportError:
        # No Vamp Host
        if _WITH_VAMP:
            raise VampImportError
        else:
            # Ignore Vamp ImportError
            return
    except ImportError as e:
        if str(e).count('yaafelib') and not _WITH_YAAFE:
            # Ignore Yaafe ImportError
            return
        elif str(e).count('aubio') and not _WITH_AUBIO:
            # Ignore Aubio ImportError
            return
        elif str(e).count('DJANGO_SETTINGS_MODULE'):
            # Ignore module requiring DJANGO_SETTINGS_MODULE in environment
            return
        else:
            print(name, package)
            raise e
    return name
0.002632
def captchaform(field_name):
    """Decorator to add a simple captcha to a form

    To use this decorator, you must specify the captcha field's name as an
    argument to the decorator. For example:

        @captchaform('captcha')
        class MyForm(Form):
            pass

    This would add a new form field named 'captcha' to the Django form
    MyForm. Nothing else is needed; the captcha field and widget expect to
    be left fully to their own devices, and tinkering with them may produce
    the unexpected.

    It is also possible using this decorator to add multiple captchas to
    your forms:

        @captchaform('captchatwo')
        @captchaform('captchaone')
        class MyForm(Form):
            pass

    Note that the captchas are added to your fields in the inverse order
    that the decorators appear in your source; in this example, 'captchaone'
    appears first in the form, followed by 'captchatwo'.
    """
    def wrapper(orig_form):
        """The actual function that wraps and modifies the form"""
        # Get the original init method so we can call it later
        orig_init = orig_form.__init__

        def new_init(self, *args, **kwargs):
            """This is the captchaform replacement init method

            This method replaces a decorated form with one that properly
            handles ensuring that the captcha is always updated when the
            form is instantiated.
            """
            # Call the original init method so we have a proper form
            orig_init(self, *args, **kwargs)
            # Ensure a fresh captcha on each generation of the form
            self.fields[field_name].widget.generate_captcha()

        # Create a new captcha field to be used in our form
        captcha = CaptchaField()
        # Replace the form's init method with our own
        orig_form.__init__ = new_init
        # Add the captcha field to the form's base and declared fields
        orig_form.base_fields.update({field_name: captcha})
        orig_form.declared_fields.update({field_name: captcha})
        return orig_form
    return wrapper
0.001437
def plugin_is_enabled(name, runas=None):
    '''
    Return whether the plugin is enabled.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.plugin_is_enabled rabbitmq_plugin_name
    '''
    if runas is None and not salt.utils.platform.is_windows():
        runas = salt.utils.user.get_user()
    return name in list_enabled_plugins(runas)
0.00277
def layout(request, ident, stateless=False, cache_id=None, **kwargs):
    'Return the layout of the dash application'
    _, app = DashApp.locate_item(ident, stateless)
    view_func = app.locate_endpoint_function('dash-layout')
    resp = view_func()
    initial_arguments = get_initial_arguments(request, cache_id)
    response_data, mimetype = app.augment_initial_layout(resp,
                                                         initial_arguments)
    return HttpResponse(response_data, content_type=mimetype)
0.004107
def user(self):
    """
    Returns the current user set by current context
    """
    return self.users.get(
        self.contexts[self.current_context].get("user", ""), {})
0.016484
def get_template_names(self):
    """
    Default template for the datagrid
    """
    names = super(CommandDatagridView, self).get_template_names()
    names.append('easyui/command_datagrid.html')
    return names
0.009132
def resource_collection_create(self, data, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/core/resource_collections#create-a-resource-collection"
    api_path = "/api/v2/resource_collections.json"
    return self.call(api_path, method="POST", data=data, **kwargs)
0.010417
def paired_reader_from_bamfile(args, log, usage_logger, annotated_writer):
    '''Given a BAM file, return a generator that yields filtered, paired reads'''
    total_aligns = pysamwrapper.total_align_count(args.input_bam)
    bamfile_generator = _bamfile_generator(args.input_bam)
    return _paired_reader(args.umt_length, bamfile_generator, total_aligns,
                          log, usage_logger, annotated_writer)
0.004854
def get_results(self, hql, schema='default', fetch_size=None, hive_conf=None):
    """
    Get results of the provided hql in target schema.

    :param hql: hql to be executed.
    :type hql: str or list
    :param schema: target schema, default to 'default'.
    :type schema: str
    :param fetch_size: max size of result to fetch.
    :type fetch_size: int
    :param hive_conf: hive_conf to execute alone with the hql.
    :type hive_conf: dict
    :return: results of hql execution, dict with data (list of results) and header
    :rtype: dict
    """
    results_iter = self._get_results(hql, schema,
                                     fetch_size=fetch_size,
                                     hive_conf=hive_conf)
    header = next(results_iter)
    results = {
        'data': list(results_iter),
        'header': header
    }
    return results
0.004415
def valid_daily_max_temperature(comp, units='K'):
    r"""Decorator to check that a computation runs on a valid temperature dataset."""

    @wraps(comp)
    def func(tasmax, *args, **kwds):
        check_valid_temperature(tasmax, units)
        check_valid(tasmax, 'cell_methods', 'time: maximum within days')
        return comp(tasmax, *args, **kwds)

    return func
0.005405
def browse(self):
    '''
    Browse the history of a single file

    adds one commit that doesn't contain changes in test_file_1.
    there are four commits in summary, so the check for buffer line count
    compares with 3.
    at the end, a fifth commit must be present due to resetting the file
    contents.
    '''
    check = self._check
    marker_text = Random.string()
    self.vim.buffer.set_content([marker_text])
    self._save()
    self._write_file(1)
    self._save()
    self._write_file2(1)
    self._save()
    self._write_file(2)
    self._write_file2(1)
    self._save()
    self.vim.cmd('ProHistoryFileBrowse {}'.format('test_file_1'))
    check(0, '*')
    self.vim.vim.feedkeys('j')
    self.vim.vim.feedkeys('j')
    later(lambda: self.vim.buffer.content.length.should.equal(3))
    self.vim.vim.feedkeys('s')
    self._await_commit(0)
    self.vim.buffer.content.should.equal(List(marker_text))
0.001961
def get_temp_dimname(dims: Container[Hashable], new_dim: Hashable) -> Hashable:
    """ Get a new dimension name based on new_dim, that is not used in dims.
    If the same name exists, we add an underscore(s) in the head.

    Example1:
        dims: ['a', 'b', 'c']
        new_dim: ['_rolling']
        -> ['_rolling']
    Example2:
        dims: ['a', 'b', 'c', '_rolling']
        new_dim: ['_rolling']
        -> ['__rolling']
    """
    while new_dim in dims:
        new_dim = '_' + str(new_dim)
    return new_dim
0.001908
def getresponse(self):
    """Get the response from the server.

    If the HTTPConnection is in the correct state, returns an instance of
    HTTPResponse or of whatever object is returned by the response_class
    class variable.

    If a request has not been sent or if a previous response has not been
    handled, ResponseNotReady is raised.

    If the HTTP response indicates that the connection should be closed,
    then it will be closed before the response is returned. When the
    connection is closed, the underlying socket is closed.
    """
    # if a prior response has been completed, then forget about it.
    if self.__response and self.__response.isclosed():
        self.__response = None

    # if a prior response exists, then it must be completed (otherwise, we
    # cannot read this response's header to determine the connection-close
    # behavior)
    #
    # note: if a prior response existed, but was connection-close, then the
    # socket and response were made independent of this HTTPConnection
    # object since a new request requires that we open a whole new
    # connection
    #
    # this means the prior response had one of two states:
    #   1) will_close: this connection was reset and the prior socket and
    #                  response operate independently
    #   2) persistent: the response was retained and we await its
    #                  isclosed() status to become true.
    #
    if self.__state != _CS_REQ_SENT or self.__response:
        raise ResponseNotReady(self.__state)

    if self.debuglevel > 0:
        response = self.response_class(self.sock, self.debuglevel,
                                       method=self._method)
    else:
        response = self.response_class(self.sock, method=self._method)

    response.begin()
    assert response.will_close != _UNKNOWN
    self.__state = _CS_IDLE

    if response.will_close:
        # this effectively passes the connection to the response
        self.close()
    else:
        # remember this, so we can tell when it is complete
        self.__response = response

    return response
0.000874
def unparse(text, entities):
    """
    Performs the reverse operation to .parse(), effectively returning HTML
    given a normal text and its MessageEntity's.

    :param text: the text to be reconverted into HTML.
    :param entities: the MessageEntity's applied to the text.
    :return: a HTML representation of the combination of both inputs.
    """
    if not text or not entities:
        return text

    text = _add_surrogate(text)
    html = []
    last_offset = 0
    for entity in entities:
        if entity.offset > last_offset:
            html.append(escape(text[last_offset:entity.offset]))
        elif entity.offset < last_offset:
            continue

        skip_entity = False
        entity_text = escape(text[entity.offset:entity.offset + entity.length])
        entity_type = type(entity)

        if entity_type == MessageEntityBold:
            html.append('<strong>{}</strong>'.format(entity_text))
        elif entity_type == MessageEntityItalic:
            html.append('<em>{}</em>'.format(entity_text))
        elif entity_type == MessageEntityCode:
            html.append('<code>{}</code>'.format(entity_text))
        elif entity_type == MessageEntityPre:
            if entity.language:
                html.append(
                    "<pre>\n"
                    "    <code class='language-{}'>\n"
                    "        {}\n"
                    "    </code>\n"
                    "</pre>".format(entity.language, entity_text))
            else:
                html.append('<pre><code>{}</code></pre>'
                            .format(entity_text))
        elif entity_type == MessageEntityEmail:
            html.append('<a href="mailto:{0}">{0}</a>'.format(entity_text))
        elif entity_type == MessageEntityUrl:
            html.append('<a href="{0}">{0}</a>'.format(entity_text))
        elif entity_type == MessageEntityTextUrl:
            html.append('<a href="{}">{}</a>'
                        .format(escape(entity.url), entity_text))
        elif entity_type == MessageEntityMentionName:
            html.append('<a href="tg://user?id={}">{}</a>'
                        .format(entity.user_id, entity_text))
        else:
            skip_entity = True
        last_offset = entity.offset + (0 if skip_entity else entity.length)

    html.append(text[last_offset:])
    return _del_surrogate(''.join(html))
0.000421
def correct_bitstring_probs(p, assignment_probabilities):
    """
    Given a 2d array of corrupted bitstring probabilities (outer axis
    iterates over shots, inner axis over bits) and a list of assignment
    probability matrices (one for each bit in the readout) compute the
    corrected probabilities.

    :param np.array p: An array that enumerates bitstring probabilities.
        When flattened out ``p = [p_00...0, p_00...1, ...,p_11...1]``. The
        total number of elements must therefore be a power of 2. The
        canonical shape has a separate axis for each qubit, such that
        ``p[i,j,...,k]`` gives the estimated probability of bitstring
        ``ij...k``.
    :param List[np.array] assignment_probabilities: A list of assignment
        probability matrices per qubit. Each assignment probability matrix
        is expected to be of the form::

            [[p00 p01]
             [p10 p11]]

    :return: ``p_corrected`` an array with as many dimensions as there are
        qubits that contains the noisy-readout-corrected estimated
        probabilities for each measured bitstring, i.e., ``p[i,j,...,k]``
        gives the estimated probability of bitstring ``ij...k``.
    :rtype: np.array
    """
    return _apply_local_transforms(
        p, (np.linalg.inv(ap) for ap in assignment_probabilities))
0.009302
def _run_cell_text(self, text, line):
    """Run cell code in the console.

    Cell code is run in the console by copying it to the console if
    `self.run_cell_copy` is ``True`` otherwise by using the `run_cell`
    function.

    Parameters
    ----------
    text : str
        The code in the cell as a string.
    line : int
        The starting line number of the cell in the file.
    """
    finfo = self.get_current_finfo()
    editor = self.get_current_editor()
    oe_data = editor.highlighter.get_outlineexplorer_data()
    try:
        cell_name = oe_data.get(line - 1).def_name
    except AttributeError:
        cell_name = ''
    if finfo.editor.is_python() and text:
        self.run_cell_in_ipyclient.emit(text, cell_name,
                                        finfo.filename,
                                        self.run_cell_copy)
    editor.setFocus()
0.00201
def delete_user_entitlements(self, user_id):
    """DeleteUserEntitlements.

    [Preview API]
    :param str user_id:
    """
    route_values = {}
    if user_id is not None:
        route_values['userId'] = self._serialize.url('user_id', user_id, 'str')
    self._send(http_method='DELETE',
               location_id='6490e566-b299-49a7-a4e4-28749752581f',
               version='5.1-preview.1',
               route_values=route_values)
0.006186
def parse_traceback(msg, site_path="site-packages", in_app_prefix="c7n"):
    """Extract a sentry traceback structure,

    From a python formatted traceback string per python stdlib
    traceback.print_exc()
    """
    data = {}
    lines = list(filter(None, msg.split('\n')))
    data['frames'] = []
    err_ctx = None

    for l in lines[1:-1]:
        l = l.strip()  # noqa: E741
        if l.startswith('Traceback'):
            continue
        elif l.startswith('File'):
            abs_path, lineno, function = l.split(',', 3)
            abs_path = abs_path[abs_path.find('"'):-1]
            f_path = abs_path[abs_path.find(site_path) + len(site_path) + 1:]
            module = f_path[:f_path.find('.')].replace('/', '.').strip('.')
            lineno = int(lineno.strip().split()[1])
            function = function.strip().split()[-1]
            err_ctx = dict(lineno=lineno,
                           abs_path=abs_path,
                           function=function,
                           filename=f_path,
                           module=module)
            if module.startswith(in_app_prefix):
                err_ctx['in_app'] = True
        elif err_ctx is not None:
            err_ctx['context_line'] = l
            data['frames'].append(err_ctx)
            err_ctx = None

    return lines[0], {
        'type': lines[-1].strip().split(':')[0],
        'value': lines[-1].strip().split(':', 1)[1].strip(),
        'module': data['frames'][-1]['module'],
        'stacktrace': data}
0.002647
def movies_opening(self, **kwargs):
    """Gets the current opening movies from the API.

    Args:
        limit (optional): limits the number of movies returned, default=10
        country (optional): localized data for selected country, default="us"

    Returns:
        A dict representation of the JSON returned from the API.
    """
    path = self._get_path('movies_opening')
    response = self._GET(path, kwargs)
    self._set_attrs_to_values(response)
    return response
0.003831
def _change_volume(self, increase):
    """Change volume using amixer
    """
    sign = "+" if increase else "-"
    delta = "%d%%%s" % (self.volume_tick, sign)
    self._run(["amixer", "-q", "sset", "Master", delta])
0.008403
def assign_taxonomy(data, min_confidence=0.80, output_fp=None,
                    training_data_fp=None, fixrank=True, max_memory=None,
                    tmp_dir=tempfile.gettempdir()):
    """Assign taxonomy to each sequence in data with the RDP classifier

    data: open fasta file object or list of fasta lines
    confidence: minimum support threshold to assign taxonomy to a sequence
    output_fp: path to write output; if not provided, result will be
        returned in a dict of {seq_id:(taxonomy_assignment,confidence)}
    """
    # Going to iterate through this twice in succession, best to force
    # evaluation now
    data = list(data)

    # RDP classifier doesn't preserve identifiers with spaces
    # Use lookup table
    seq_id_lookup = {}
    for seq_id, seq in parse_fasta(data):
        seq_id_lookup[seq_id.split()[0]] = seq_id

    app_kwargs = {}
    if tmp_dir is not None:
        app_kwargs['TmpDir'] = tmp_dir
    app = RdpClassifier(**app_kwargs)

    if max_memory is not None:
        app.Parameters['-Xmx'].on(max_memory)

    temp_output_file = tempfile.NamedTemporaryFile(
        prefix='RdpAssignments_', suffix='.txt', dir=tmp_dir)
    app.Parameters['-o'].on(temp_output_file.name)
    if training_data_fp is not None:
        app.Parameters['-t'].on(training_data_fp)

    if fixrank:
        app.Parameters['-f'].on('fixrank')
    else:
        app.Parameters['-f'].on('allrank')

    app_result = app(data)

    assignments = {}

    # ShortSequenceException messages are written to stdout
    # Tag these ID's as unassignable
    for line in app_result['StdOut']:
        excep = parse_rdp_exception(line)
        if excep is not None:
            _, rdp_id = excep
            orig_id = seq_id_lookup[rdp_id]
            assignments[orig_id] = ('Unassignable', 1.0)

    for line in app_result['Assignments']:
        rdp_id, direction, taxa = parse_rdp_assignment(line)
        if taxa[0][0] == "Root":
            taxa = taxa[1:]
        orig_id = seq_id_lookup[rdp_id]
        lineage, confidence = get_rdp_lineage(taxa, min_confidence)
        if lineage:
            assignments[orig_id] = (';'.join(lineage), confidence)
        else:
            assignments[orig_id] = ('Unclassified', 1.0)

    if output_fp:
        try:
            output_file = open(output_fp, 'w')
        except OSError:
            raise OSError("Can't open output file for writing: %s" % output_fp)
        for seq_id, assignment in assignments.items():
            lineage, confidence = assignment
            output_file.write(
                '%s\t%s\t%1.3f\n' % (seq_id, lineage, confidence))
        output_file.close()
        return None
    else:
        return assignments
0.00037
def vcsNodeState_originator_switch_info_switchIpV6Address(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") vcsNodeState = ET.SubElement(config, "vcsNodeState", xmlns="urn:brocade.com:mgmt:brocade-vcs") originator_switch_info = ET.SubElement(vcsNodeState, "originator-switch-info") switchIpV6Address = ET.SubElement(originator_switch_info, "switchIpV6Address") switchIpV6Address.text = kwargs.pop('switchIpV6Address') callback = kwargs.pop('callback', self._callback) return callback(config)
0.008503
def get_num_shards(num_samples: int, samples_per_shard: int, min_num_shards: int) -> int: """ Returns the number of shards. :param num_samples: Number of training data samples. :param samples_per_shard: Samples per shard. :param min_num_shards: Minimum number of shards. :return: Number of shards. """ return max(int(math.ceil(num_samples / samples_per_shard)), min_num_shards)
0.004878
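A quick worked example (illustrative; assumes get_num_shards above is in scope): 1,050,000 samples at 100,000 per shard gives ceil(10.5) = 11 shards, which beats the minimum, while a tiny corpus falls back to min_num_shards:

print(get_num_shards(1050000, 100000, min_num_shards=8))  # -> 11
print(get_num_shards(100, 100000, min_num_shards=8))      # -> 8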
def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile): """ Generic Output Location Read from File Method """ # Assign file extension attribute to file object self.fileExtension = extension # Open file and parse into a data structure with open(path, 'r') as f: for line in f: sline = line.strip().split() if len(sline) == 1: self.numLocations = sline[0] else: # Create GSSHAPY OutputLocation object location = OutputLocation(linkOrCellI=sline[0], nodeOrCellJ=sline[1]) # Associate OutputLocation with OutputLocationFile location.outputLocationFile = self
0.003409
def paragraphs(self): """ Immutable sequence of |_Paragraph| instances corresponding to the paragraphs in this text frame. A text frame always contains at least one paragraph. """ return tuple([_Paragraph(p, self) for p in self._txBody.p_lst])
0.006873
def verify_sig(signed_request, secret, issuer=None, algorithms=None,
               expected_aud=None):
    """
    Verify the JWT signature.

    Given a raw JWT, this verifies it was signed with *secret*, decodes it,
    and returns the JSON dict.
    """
    if not issuer:
        issuer = _get_issuer(signed_request=signed_request)
    signed_request = _to_bytes(signed_request)
    app_req = _get_json(signed_request)

    # Check signature.
    try:
        jwt.decode(signed_request, secret, verify=True,
                   algorithms=algorithms, audience=expected_aud)
    except jwt.ExpiredSignatureError as exc:
        _re_raise_as(RequestExpired, '%s' % exc, issuer=issuer)
    except jwt.InvalidTokenError as exc:
        _re_raise_as(InvalidJWT,
                     'Signature verification failed: %s' % exc,
                     issuer=issuer)
    return app_req
0.001143
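A hypothetical call site (the token, secret, and audience are placeholders; RequestExpired and InvalidJWT are the module-level exceptions referenced above):

token = "eyJ..."  # placeholder, not a real JWT
try:
    payload = verify_sig(token, secret="shared-secret",
                         algorithms=["HS256"], expected_aud="my-app")
except (RequestExpired, InvalidJWT):
    payload = None  # reject the request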
def _Region2(T, P): """Basic equation for region 2 Parameters ---------- T : float Temperature, [K] P : float Pressure, [MPa] Returns ------- prop : dict Dict with calculated properties. The available properties are: * v: Specific volume, [m³/kg] * h: Specific enthalpy, [kJ/kg] * s: Specific entropy, [kJ/kgK] * cp: Specific isobaric heat capacity, [kJ/kgK] * cv: Specific isocoric heat capacity, [kJ/kgK] * w: Speed of sound, [m/s] * alfav: Cubic expansion coefficient, [1/K] * kt: Isothermal compressibility, [1/MPa] References ---------- IAPWS, Revised Release on the IAPWS Industrial Formulation 1997 for the Thermodynamic Properties of Water and Steam August 2007, http://www.iapws.org/relguide/IF97-Rev.html, Eq 15-17 Examples -------- >>> _Region2(700,30)["v"] 0.00542946619 >>> _Region2(700,30)["h"] 2631.49474 >>> _Region2(700,30)["h"]-30000*_Region2(700,30)["v"] 2468.61076 >>> _Region2(700,0.0035)["s"] 10.1749996 >>> _Region2(700,0.0035)["cp"] 2.08141274 >>> _Region2(700,0.0035)["cv"] 1.61978333 >>> _Region2(300,0.0035)["w"] 427.920172 >>> _Region2(300,0.0035)["alfav"] 0.00337578289 >>> _Region2(300,0.0035)["kt"] 286.239651 """ if P < 0: P = Pmin Tr = 540/T Pr = P/1 go, gop, gopp, got, gott, gopt = Region2_cp0(Tr, Pr) Ir = [1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 5, 6, 6, 6, 7, 7, 7, 8, 8, 9, 10, 10, 10, 16, 16, 18, 20, 20, 20, 21, 22, 23, 24, 24, 24] Jr = [0, 1, 2, 3, 6, 1, 2, 4, 7, 36, 0, 1, 3, 6, 35, 1, 2, 3, 7, 3, 16, 35, 0, 11, 25, 8, 36, 13, 4, 10, 14, 29, 50, 57, 20, 35, 48, 21, 53, 39, 26, 40, 58] nr = [-0.0017731742473212999, -0.017834862292357999, -0.045996013696365003, -0.057581259083432, -0.050325278727930002, -3.3032641670203e-05, -0.00018948987516315, -0.0039392777243355001, -0.043797295650572998, -2.6674547914087001e-05, 2.0481737692308999e-08, 4.3870667284435001e-07, -3.2277677238570002e-05, -0.0015033924542148, -0.040668253562648998, -7.8847309559367001e-10, 1.2790717852285001e-08, 4.8225372718507002e-07, 2.2922076337661001e-06, -1.6714766451061001e-11, -0.0021171472321354998, -23.895741934103999, -5.9059564324270004e-18, -1.2621808899101e-06, -0.038946842435739003, 1.1256211360459e-11, -8.2311340897998004, 1.9809712802088e-08, 1.0406965210174e-19, -1.0234747095929e-13, -1.0018179379511e-09, -8.0882908646984998e-11, 0.10693031879409, -0.33662250574170999, 8.9185845355420999e-25, 3.0629316876231997e-13, -4.2002467698208001e-06, -5.9056029685639003e-26, 3.7826947613457002e-06, -1.2768608934681e-15, 7.3087610595061e-29, 5.5414715350778001e-17, -9.4369707241209998e-07] gr = grp = grpp = grt = grtt = grpt = 0 for i, j, ni in zip(Ir, Jr, nr): gr += ni * Pr**i * (Tr-0.5)**j grp += ni*i * Pr**(i-1) * (Tr-0.5)**j grpp += ni*i*(i-1) * Pr**(i-2) * (Tr-0.5)**j grt += ni*j * Pr**i * (Tr-0.5)**(j-1) grtt += ni*j*(j-1) * Pr**i * (Tr-0.5)**(j-2) grpt += ni*i*j * Pr**(i-1) * (Tr-0.5)**(j-1) propiedades = {} propiedades["T"] = T propiedades["P"] = P propiedades["v"] = Pr*(gop+grp)*R*T/P/1000 propiedades["h"] = Tr*(got+grt)*R*T propiedades["s"] = R*(Tr*(got+grt)-(go+gr)) propiedades["cp"] = -R*Tr**2*(gott+grtt) propiedades["cv"] = R*(-Tr**2*(gott+grtt)-(1+Pr*grp-Tr*Pr*grpt)**2 / (1-Pr**2*grpp)) propiedades["w"] = (R*T*1000*(1+2*Pr*grp+Pr**2*grp**2)/(1-Pr**2*grpp+( 1+Pr*grp-Tr*Pr*grpt)**2/Tr**2/(gott+grtt)))**0.5 propiedades["alfav"] = (1+Pr*grp-Tr*Pr*grpt)/(1+Pr*grp)/T propiedades["kt"] = (1-Pr**2*grpp)/(1+Pr*grp)/P propiedades["region"] = 2 propiedades["x"] = 1 return propiedades
0.000244
def load_and_check(self, base_settings, prompt=None): """Load settings and check them. Loads the settings from ``base_settings``, then checks them. Returns: (merged settings, True) on success (None, False) on failure """ checker = Checker(self.file_name, self.section, self.registry, self.strategy_type, prompt) settings = self.load(base_settings) if checker.check(settings): return settings, True return None, False
0.00578
def _init_a(D): '''Initial guess for Dirichlet alpha parameters given data D''' E = D.mean(axis=0) E2 = (D**2).mean(axis=0) return ((E[0] - E2[0])/(E2[0]-E[0]**2)) * E
0.005464
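This is method-of-moments initialization: for a Dirichlet with mean vector E and per-coordinate second moment E2, the concentration sum s satisfies s = (E_k - E2_k) / (E2_k - E_k**2) for any coordinate k (the code uses k=0), and the initial alpha is s * E. A sanity check on synthetic data (illustrative; assumes _init_a above is in scope):

import numpy as np
import numpy.random as npr

alpha_true = np.array([2.0, 5.0, 3.0])
D = npr.dirichlet(alpha_true, size=100000)
print(_init_a(D))  # should land near [2, 5, 3]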
def _create_rubber_bands_action(self): """Create action for toggling rubber bands.""" icon = resources_path('img', 'icons', 'toggle-rubber-bands.svg') self.action_toggle_rubberbands = QAction( QIcon(icon), self.tr('Toggle Scenario Outlines'), self.iface.mainWindow()) message = self.tr('Toggle rubber bands showing scenario extents.') self.action_toggle_rubberbands.setStatusTip(message) self.action_toggle_rubberbands.setWhatsThis(message) # Set initial state self.action_toggle_rubberbands.setCheckable(True) flag = setting('showRubberBands', False, expected_type=bool) self.action_toggle_rubberbands.setChecked(flag) # noinspection PyUnresolvedReferences self.action_toggle_rubberbands.triggered.connect( self.dock_widget.toggle_rubber_bands) self.add_action(self.action_toggle_rubberbands)
0.002144
def random_tree(n_leaves):
    """
    Randomly partition the nodes
    """
    def _random_subtree(leaves):
        if len(leaves) == 1:
            return leaves[0]
        elif len(leaves) == 2:
            return (leaves[0], leaves[1])
        else:
            # randint's upper bound is exclusive, so this allows the split
            # point anywhere strictly inside the leaf list
            split = npr.randint(1, len(leaves))
            return (_random_subtree(leaves[:split]),
                    _random_subtree(leaves[split:]))
    return _random_subtree(np.arange(n_leaves))
0.002183
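Example call (illustrative; assumes numpy as np and numpy.random as npr, matching the snippet's aliases):

import numpy as np
import numpy.random as npr

npr.seed(0)
print(random_tree(5))
# one possible result: ((0, (1, 2)), (3, 4)) -- nested tuples of leaf indices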
def _try_assign_utc_time(self, raw_time, time_base):
        """Try to assign a UTC time to this reading."""

        # Check if the raw time is encoded UTC since y2k or just uptime
        if raw_time != IOTileEvent.InvalidRawTime and (raw_time & (1 << 31)):
            # clear the flag bit on the argument (not self.raw_time) to
            # recover seconds since 2000-01-01 UTC
            y2k_offset = raw_time ^ (1 << 31)
            return self._Y2KReference + datetime.timedelta(seconds=y2k_offset)

        if time_base is not None:
            return time_base + datetime.timedelta(seconds=raw_time)

        return None
0.003899
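How the high-bit encoding above works (sketch): bit 31 flags "seconds since 2000-01-01 UTC", and XOR-ing the flag away recovers the offset. The reference date here stands in for the class's _Y2KReference, which is not shown in the snippet:

import datetime

Y2K = datetime.datetime(2000, 1, 1)
raw = (1 << 31) | 86400  # flag bit set, one day past y2k
print(Y2K + datetime.timedelta(seconds=raw ^ (1 << 31)))  # 2000-01-02 00:00:00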
def search_users(self, username_keyword, limit=10): """ Searches for users whose username matches ``username_keyword``, and returns a list of matched users. :param str username_keyword: keyword to search with :param int limit: maximum number of returned users :return: a list of matched users :rtype: List[GogsUser] :raises NetworkFailure: if there is an error communicating with the server :raises ApiFailure: if the request cannot be serviced """ params = {"q": username_keyword, "limit": limit} response = self.get("/users/search", params=params) return [GogsUser.from_json(user_json) for user_json in response.json()["data"]]
0.006812
def is_course_run_upgradeable(course_run): """ Return true if the course run has a verified seat with an unexpired upgrade deadline, false otherwise. """ now = datetime.datetime.now(pytz.UTC) for seat in course_run.get('seats', []): if seat.get('type') == 'verified': upgrade_deadline = parse_datetime_handle_invalid(seat.get('upgrade_deadline')) return not upgrade_deadline or upgrade_deadline > now return False
0.006397
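A minimal sketch of the payload shape the check expects (fabricated data; parse_datetime_handle_invalid is assumed to accept ISO-8601 strings):

course_run = {
    'seats': [
        {'type': 'audit'},
        {'type': 'verified', 'upgrade_deadline': '2999-01-01T00:00:00Z'},
    ],
}
print(is_course_run_upgradeable(course_run))  # True: deadline far in the future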
def _parseSections(self, data, imageDosHeader, imageNtHeaders, parse_header_only=False):
        """Parses the sections in memory and returns a list of them"""
        sections = []
        optional_header_offset = imageDosHeader.header.e_lfanew + 4 + sizeof(IMAGE_FILE_HEADER)
        offset = optional_header_offset + imageNtHeaders.header.FileHeader.SizeOfOptionalHeader # start reading after the DOS and NT headers
        image_section_header_size = sizeof(IMAGE_SECTION_HEADER)
        for sectionNo in range(imageNtHeaders.header.FileHeader.NumberOfSections):
            ishdr = IMAGE_SECTION_HEADER.from_buffer(data, offset)
            if parse_header_only:
                raw = None
                bytes_ = bytearray()
            else:
                size = ishdr.SizeOfRawData
                raw = (c_ubyte * size).from_buffer(data, ishdr.PointerToRawData)
                bytes_ = bytearray(raw)
            sections.append(SectionData(header=ishdr, name=ishdr.Name.decode('ASCII', errors='ignore'), bytes=bytes_, raw=raw))
            offset += image_section_header_size

        return sections
0.007105
def _contains_policies(self, resource_properties): """ Is there policies data in this resource? :param dict resource_properties: Properties of the resource :return: True if we can process this resource. False, otherwise """ return resource_properties is not None \ and isinstance(resource_properties, dict) \ and self.POLICIES_PROPERTY_NAME in resource_properties
0.004587
def aggregate_weights(weights, drop_date=False):
    """
    Transforms list of tuples of weights into pandas.DataFrame of weights.

    Parameters:
    -----------
    weights: list
        A list of tuples consisting of the generic instrument name, the
        tradeable contract as a string, the weight on this contract as a
        float and the date as a pandas.Timestamp.
    drop_date: boolean
        Whether to drop the date level from the MultiIndex

    Returns
    -------
    A pandas.DataFrame of loadings of generic contracts on tradeable
    instruments for a given date. The columns are generic instrument names
    and the index is strings representing instrument names.
    """
    dwts = pd.DataFrame(weights,
                        columns=["generic", "contract", "weight", "date"])
    dwts = dwts.pivot_table(index=['date', 'contract'],
                            columns=['generic'], values='weight',
                            fill_value=0)
    dwts = dwts.astype(float)
    dwts = dwts.sort_index()
    if drop_date:
        # drop the date level while keeping row alignment (index.levels
        # would return de-duplicated level values, not per-row labels)
        dwts.index = dwts.index.droplevel('date')
    return dwts
0.000935
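A small worked example of the tuple-to-frame transformation (illustrative; assumes aggregate_weights above is in scope):

import pandas as pd

ts = pd.Timestamp('2020-01-02')
weights = [
    ('CL1', 'CLG20', 0.5, ts),
    ('CL1', 'CLH20', 0.5, ts),
    ('CL2', 'CLH20', 1.0, ts),
]
print(aggregate_weights(weights, drop_date=True))
# roughly:
#           CL1  CL2
# contract
# CLG20     0.5  0.0
# CLH20     0.5  1.0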
def almost_equal(f: DataFrame, g: DataFrame) -> bool:
    """
    Return ``True`` if and only if the given DataFrames are equal after
    sorting their column names, sorting their values, and resetting their
    indices.
    """
    if f.empty or g.empty:
        return f.equals(g)
    else:
        # Put in canonical order; sort values by the *sorted* column list so
        # both frames use the same sort keys regardless of original column
        # order
        F = (
            f.sort_index(axis=1)
            .sort_values(sorted(f.columns))
            .reset_index(drop=True)
        )
        G = (
            g.sort_index(axis=1)
            .sort_values(sorted(g.columns))
            .reset_index(drop=True)
        )
        return F.equals(G)
0.001608
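Illustrative check (assumes almost_equal above is in scope): two frames whose rows and columns are permutations of each other compare equal:

import pandas as pd

f = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
g = pd.DataFrame({'b': [4, 3], 'a': [2, 1]})
print(almost_equal(f, g))  # True -- same content up to row/column order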