Columns: text (string, lengths 78 to 104k), score (float64, range 0 to 0.18)
def get_force(self, component_info=None, data=None, component_position=None):
    """Get force data."""
    components = []
    append_components = components.append
    for _ in range(component_info.plate_count):
        component_position, plate = QRTPacket._get_exact(
            RTForcePlate, data, component_position
        )
        force_list = []
        for _ in range(plate.force_count):
            component_position, force = QRTPacket._get_exact(
                RTForce, data, component_position
            )
            force_list.append(force)
        append_components((plate, force_list))
    return components
0.002894
def project_onto_plane(strike, dip, plunge, bearing):
    """
    Projects a linear feature(s) onto the surface of a plane. Returns a rake
    angle(s) along the plane.

    This is also useful for finding the rake angle of a feature that already
    intersects the plane in question.

    Parameters
    ----------
    strike, dip : numbers or sequences of numbers
        The strike and dip (in degrees, following the right-hand-rule) of the
        plane(s).
    plunge, bearing : numbers or sequences of numbers
        The plunge and bearing (in degrees) of the linear feature(s) to be
        projected onto the plane.

    Returns
    -------
    rake : array
        A sequence of rake angles measured downwards from horizontal in
        degrees. Zero degrees corresponds to the "right-hand" direction
        indicated by the strike, while a negative angle corresponds to the
        opposite direction. Rakes returned by this function will always be
        between -90 and 90 (inclusive).
    """
    # Project the line onto the plane
    norm = sph2cart(*pole(strike, dip))
    feature = sph2cart(*line(plunge, bearing))
    norm, feature = np.array(norm), np.array(feature)
    perp = np.cross(norm, feature, axis=0)
    on_plane = np.cross(perp, norm, axis=0)
    on_plane /= np.sqrt(np.sum(on_plane**2, axis=0))

    # Calculate the angle between the projected feature and horizontal
    # This is just a dot product, but we need to work with multiple measurements
    # at once, so einsum is quicker than apply_along_axis.
    strike_vec = sph2cart(*line(0, strike))
    dot = np.einsum('ij,ij->j', on_plane, strike_vec)
    rake = np.degrees(np.arccos(dot))

    # Convert rakes over 90 to negative rakes...
    rake[rake > 90] -= 180
    rake[rake < -90] += 180
    return rake
0.00111
def __get_or_create(
    ns_cache: NamespaceMap,
    name: sym.Symbol,
    module: types.ModuleType = None,
    core_ns_name=CORE_NS,
) -> lmap.Map:
    """Private swap function used by `get_or_create` to atomically swap
    the new namespace map into the global cache."""
    ns = ns_cache.entry(name, None)
    if ns is not None:
        return ns_cache
    new_ns = Namespace(name, module=module)
    if name.name != core_ns_name:
        core_ns = ns_cache.entry(sym.symbol(core_ns_name), None)
        assert core_ns is not None, "Core namespace not loaded yet!"
        new_ns.refer_all(core_ns)
    return ns_cache.assoc(name, new_ns)
0.004255
def get_contained_labels(self, inplace=True): """ Get the set of unique labels contained in this annotation. Returns a pandas dataframe or sets the contained_labels attribute of the object. Requires the label_store field to be set. Function will try to use attributes contained in the order: 1. label_store 2. symbol 3. description This function should also be called to summarize information about an annotation after it has been read. Should not be a helper function to others except rdann. """ if self.custom_labels is not None: self.check_field('custom_labels') # Create the label map label_map = ann_label_table.copy() # Convert the tuple triplets into a pandas dataframe if needed if isinstance(self.custom_labels, (list, tuple)): custom_labels = label_triplets_to_df(self.custom_labels) elif isinstance(self.custom_labels, pd.DataFrame): # Set the index just in case it doesn't already match the label_store self.custom_labels.set_index( self.custom_labels['label_store'].values, inplace=True) custom_labels = self.custom_labels else: custom_labels = None # Merge the standard wfdb labels with the custom labels. # custom labels values overwrite standard wfdb if overlap. if custom_labels is not None: for i in custom_labels.index: label_map.loc[i] = custom_labels.loc[i] # This doesn't work... # label_map.loc[custom_labels.index] = custom_labels.loc[custom_labels.index] # Get the labels using one of the features if self.label_store is not None: index_vals = set(self.label_store) reset_index = False counts = np.unique(self.label_store, return_counts=True) elif self.symbol is not None: index_vals = set(self.symbol) label_map.set_index(label_map['symbol'].values, inplace=True) reset_index = True counts = np.unique(self.symbol, return_counts=True) elif self.description is not None: index_vals = set(self.description) label_map.set_index(label_map['description'].values, inplace=True) reset_index = True counts = np.unique(self.description, return_counts=True) else: raise Exception('No annotation labels contained in object') contained_labels = label_map.loc[index_vals, :] # Add the counts for i in range(len(counts[0])): contained_labels.loc[counts[0][i], 'n_occurrences'] = counts[1][i] contained_labels['n_occurrences'] = pd.to_numeric(contained_labels['n_occurrences'], downcast='integer') if reset_index: contained_labels.set_index(contained_labels['label_store'].values, inplace=True) if inplace: self.contained_labels = contained_labels return else: return contained_labels
0.001569
def predict(self, v=None, X=None):
    """In classification this returns the classes, in regression it is
    equivalent to the decision function"""
    if X is None:
        X = v
        v = None
    m = self.model(v=v)
    return m.predict(X)
0.007326
def cluster(self, method, **kwargs): """ Cluster the tribe. Cluster templates within a tribe: returns multiple tribes each of which could be stacked. :type method: str :param method: Method of stacking, see :mod:`eqcorrscan.utils.clustering` :return: List of tribes. .. rubric:: Example """ from eqcorrscan.utils import clustering tribes = [] func = getattr(clustering, method) if method in ['space_cluster', 'space_time_cluster']: cat = Catalog([t.event for t in self.templates]) groups = func(cat, **kwargs) for group in groups: new_tribe = Tribe() for event in group: new_tribe.templates.extend([t for t in self.templates if t.event == event]) tribes.append(new_tribe) return tribes
0.002068
def close(self): """Close the socket underlying this connection.""" self.rfile.close() if not self.linger: # Python's socket module does NOT call close on the kernel socket # when you call socket.close(). We do so manually here because we # want this server to send a FIN TCP segment immediately. Note this # must be called *before* calling socket.close(), because the latter # drops its reference to the kernel socket. if hasattr(self.socket, '_sock'): self.socket._sock.close() self.socket.close() else: # On the other hand, sometimes we want to hang around for a bit # to make sure the client has a chance to read our entire # response. Skipping the close() calls here delays the FIN # packet until the socket object is garbage-collected later. # Someday, perhaps, we'll do the full lingering_close that # Apache does, but not today. pass
0.003752
def disassociate_route_table(association_id, region=None, key=None, keyid=None, profile=None):
    '''
    Disassociates a route table.

    association_id
        The Route Table Association ID to disassociate

    CLI Example:

    .. code-block:: bash

        salt myminion boto_vpc.disassociate_route_table 'rtbassoc-d8ccddba'
    '''
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        if conn.disassociate_route_table(association_id):
            log.info('Route table with association id %s has been disassociated.',
                     association_id)
            return {'disassociated': True}
        else:
            log.warning('Route table with association id %s has not been disassociated.',
                        association_id)
            return {'disassociated': False}
    except BotoServerError as e:
        return {'disassociated': False, 'error': __utils__['boto.get_error'](e)}
0.005507
def ConsultarPuerto(self, sep="||"):
    "Query the list of enabled ports"
    ret = self.client.puertoConsultar(
        auth={
            'token': self.Token,
            'sign': self.Sign,
            'cuit': self.Cuit,
        },
    )['puertoReturn']
    self.__analizar_errores(ret)
    array = ret.get('puertos', [])
    return [("%s %%s %s %%s %s" % (sep, sep, sep)) %
            (it['codigoDescripcion']['codigo'],
             it['codigoDescripcion']['descripcion'])
            for it in array]
0.01
def get_model_fields(self):
    """ List of model fields to include (defaults to all) """
    model_fields = getattr(self.Meta, 'fields', None)
    if model_fields is not None:
        model_fields = set(model_fields)
    return model_fields
0.007576
def DeleteItem(self, item):
    "Remove the item from the list and unset the related data"
    wx_data = self.GetItemData(item)
    py_data = self._py_data_map[wx_data]
    del self._py_data_map[wx_data]
    del self._wx_data_map[py_data]
    wx.ListCtrl.DeleteItem(self, item)
0.006515
def lint(): """ run linter on our code base. """ path = os.path.realpath(os.getcwd()) cmd = 'flake8 %s' % path opt = '' print(">>> Linting codebase with the following command: %s %s" % (cmd, opt)) try: return_code = call([cmd, opt], shell=True) if return_code < 0: print(">>> Terminated by signal", -return_code, file=sys.stderr) elif return_code != 0: sys.exit('>>> Lint checks failed') else: print(">>> Lint checks passed", return_code, file=sys.stderr) except OSError as e: print(">>> Execution failed:", e, file=sys.stderr)
0.003125
def hello(self):
    """http://docs.fiesta.cc/index.html#getting-started"""
    path = 'hello'
    response = self.request(path, do_authentication=False)
    return response
0.010582
def injectAttribute(annotationName, depth, attr, value):
    """
    Inject an attribute in a class from its class frame.
    Use in class annotation to create methods/properties dynamically
    at class creation time without dealing with metaclass.

    The depth parameter specifies the stack depth from the class definition.
    """
    locals = reflect.class_locals(depth, annotationName)
    injections = locals.get(_ATTRIBUTE_INJECTIONS_ATTR, None)
    if injections is None:
        injections = list()
        locals[_ATTRIBUTE_INJECTIONS_ATTR] = injections
    injections.append((attr, value))
0.001672
def read_csv(filepath, sep=',', header='infer', names=None, usecols=None, dtype=None, converters=None, skiprows=None, nrows=None): """Read CSV into DataFrame. Eager implementation using pandas, i.e. entire file is read at this point. Only common/relevant parameters available at the moment; for full list, could use pandas directly and then convert to baloo. Parameters ---------- filepath : str sep : str, optional Separator used between values. header : 'infer' or None, optional Whether to infer the column names from the first row or not. names : list of str, optional List of column names to use. Overrides inferred header. usecols : list of (int or str), optional Which columns to parse. dtype : dict, optional Dict of column -> type to parse as. converters : dict, optional Dict of functions for converting values in certain columns. skiprows : int, optional Number of lines to skip at start of file. nrows : int, optional Number of rows to read. Returns ------- DataFrame See Also -------- pandas.read_csv : https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html """ pd_df = pd_read_csv(filepath, sep=sep, header=header, names=names, usecols=usecols, dtype=dtype, converters=converters, skiprows=skiprows, nrows=nrows) return DataFrame.from_pandas(pd_df)
0.00303
def build_helpers_egginfo_json( json_field, json_key_registry, json_filename=None): """ Return a tuple of functions that will provide the usage of the JSON egginfo based around the provided field. """ json_filename = ( json_field + '.json' if json_filename is None else json_filename) # Default calmjs core implementation specific functions, to be used by # integrators intended to use this as a distribution. def get_extras_json(pkg_names, working_set=None): """ Only extract the extras_json information for the given packages 'pkg_names'. """ working_set = working_set or default_working_set dep_keys = set(get(json_key_registry).iter_records()) dists = pkg_names_to_dists(pkg_names, working_set=working_set) return flatten_dist_egginfo_json( dists, filename=json_filename, dep_keys=dep_keys, working_set=working_set ) def _flatten_extras_json(pkg_names, find_dists, working_set): # registry key must be explicit here as it was designed for this. dep_keys = set(get(json_key_registry).iter_records()) dists = find_dists(pkg_names, working_set=working_set) return flatten_dist_egginfo_json( dists, filename=json_filename, dep_keys=dep_keys, working_set=working_set ) def flatten_extras_json(pkg_names, working_set=None): """ Traverses through the dependency graph of packages 'pkg_names' and flattens all the egg_info json information """ working_set = working_set or default_working_set return _flatten_extras_json( pkg_names, find_packages_requirements_dists, working_set) def flatten_parents_extras_json(pkg_names, working_set=None): """ Traverses through the dependency graph of packages 'pkg_names' and flattens all the egg_info json information for parents of the specified packages. """ working_set = working_set or default_working_set return _flatten_extras_json( pkg_names, find_packages_parents_requirements_dists, working_set) write_extras_json = partial(write_json_file, json_field) return ( get_extras_json, flatten_extras_json, flatten_parents_extras_json, write_extras_json, )
0.000418
def _filter_markdown(source, filters):
    """Only keep some Markdown headers from a Markdown string."""
    lines = source.splitlines()
    # Filters is a list of 'hN' strings where 1 <= N <= 6.
    headers = [_replace_header_filter(filter) for filter in filters]
    lines = [line for line in lines if line.startswith(tuple(headers))]
    return '\n'.join(lines)
0.002747
def get_userinfo(self, access_token, id_token, payload): """Return user details dictionary. The id_token and payload are not used in the default implementation, but may be used when overriding this method""" user_response = requests.get( self.OIDC_OP_USER_ENDPOINT, headers={ 'Authorization': 'Bearer {0}'.format(access_token) }, verify=self.get_settings('OIDC_VERIFY_SSL', True)) user_response.raise_for_status() return user_response.json()
0.007326
def to_map_with_default(value, default_value):
    """
    Converts JSON string into map object or returns default value
    when conversion is not possible.

    :param value: the JSON string to convert.
    :param default_value: the default value.
    :return: Map object value or default when conversion is not supported.
    """
    result = JsonConverter.to_nullable_map(value)
    return result if result is not None else default_value
0.008565
def _read_buckets_cache_file(cache_file):
    '''
    Return the contents of the buckets cache file
    '''
    log.debug('Reading buckets cache file')
    with salt.utils.files.fopen(cache_file, 'rb') as fp_:
        data = pickle.load(fp_)
    return data
0.003846
def change_quantiles(x, ql, qh, isabs, f_agg):
    """
    First fixes a corridor given by the quantiles ql and qh of the distribution of x.
    Then calculates the average, absolute value of consecutive changes of the series x
    inside this corridor.

    Think about selecting a corridor on the y-Axis and only calculating the mean of the
    absolute change of the time series inside this corridor.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param ql: the lower quantile of the corridor
    :type ql: float
    :param qh: the higher quantile of the corridor
    :type qh: float
    :param isabs: should the absolute differences be taken?
    :type isabs: bool
    :param f_agg: the aggregator function that is applied to the differences in the bin
    :type f_agg: str, name of a numpy function (e.g. mean, var, std, median)

    :return: the value of this feature
    :return type: float
    """
    if ql >= qh:
        raise ValueError("ql={} should be lower than qh={}".format(ql, qh))

    div = np.diff(x)
    if isabs:
        div = np.abs(div)
    # All values that originate from the corridor between the quantiles ql and qh will have
    # the category 0, others will be np.NaN
    try:
        bin_cat = pd.qcut(x, [ql, qh], labels=False)
        bin_cat_0 = bin_cat == 0
    except ValueError:
        # Occurs when ql and qh are effectively equal, e.g. x is not long enough or is too categorical
        return 0

    # We only count changes that start and end inside the corridor
    ind = (bin_cat_0 & _roll(bin_cat_0, 1))[1:]
    if sum(ind) == 0:
        return 0
    else:
        ind_inside_corridor = np.where(ind == 1)
        aggregator = getattr(np, f_agg)
        return aggregator(div[ind_inside_corridor])
0.003975
async def _roundtrip(cls): """Testing helper: gets each value and sets it again.""" getters = { name[4:]: getattr(cls, name) for name in dir(cls) if name.startswith("get_") and name != "get_config" } setters = { name[4:]: getattr(cls, name) for name in dir(cls) if name.startswith("set_") and name != "set_config" } for name, getter in getters.items(): print(">>>", name) value = await getter() print(" ->", repr(value)) print(" ", type(value)) setter = setters[name] try: await setter(value) except CallError as error: print(error) print(error.content.decode("utf-8", "replace")) else: value2 = await getter() if value2 != value: print( "!!! Round-trip failed:", repr(value), "-->", repr(value2))
0.001923
def join( self, inner_enumerable, outer_key=lambda x: x, inner_key=lambda x: x, result_func=lambda x: x ): """ Return enumerable of inner equi-join between two enumerables :param inner_enumerable: inner enumerable to join to self :param outer_key: key selector of outer enumerable as lambda expression :param inner_key: key selector of inner enumerable as lambda expression :param result_func: lambda expression to transform result of join :return: new Enumerable object """ if not isinstance(inner_enumerable, Enumerable3): raise TypeError( u"inner_enumerable parameter must be an instance of Enumerable" ) return Enumerable3( itertools.product( filter( lambda x: outer_key(x) in map(inner_key, inner_enumerable), self ), filter( lambda y: inner_key(y) in map(outer_key, self), inner_enumerable ) ) )\ .where(lambda x: outer_key(x[0]) == inner_key(x[1]))\ .select(result_func)
0.002379
def ls_cmd(argv):
    """List available environments."""
    parser = argparse.ArgumentParser()
    p_group = parser.add_mutually_exclusive_group()
    p_group.add_argument('-b', '--brief', action='store_false')
    p_group.add_argument('-l', '--long', action='store_true')
    args = parser.parse_args(argv)
    lsvirtualenv(args.long)
0.002976
def _wait_for_consistency(checker):
    """Eventual consistency: wait until GCS reports something is true.

    This is necessary for e.g. create/delete where the operation might return,
    but won't be reflected for a bit.
    """
    for _ in xrange(EVENTUAL_CONSISTENCY_MAX_SLEEPS):
        if checker():
            return
        time.sleep(EVENTUAL_CONSISTENCY_SLEEP_INTERVAL)
    logger.warning('Exceeded wait for eventual GCS consistency - this may be a '
                   'bug in the library or something is terribly wrong.')
0.001859
def update_movie_ticket(self, code, ticket_class, show_time, duration,
                        screening_room, seat_number, card_id=None):
    """
    Update a movie ticket
    """
    ticket = {
        'code': code,
        'ticket_class': ticket_class,
        'show_time': show_time,
        'duration': duration,
        'screening_room': screening_room,
        'seat_number': seat_number
    }
    if card_id:
        ticket['card_id'] = card_id
    return self._post(
        'card/movieticket/updateuser',
        data=ticket
    )
0.005034
def status(self):
    """Return string indicating the error encountered on failure."""
    self._check_valid()
    if self._ret_val == swiglpk.GLP_ENOPFS:
        return 'No primal feasible solution'
    elif self._ret_val == swiglpk.GLP_ENODFS:
        return 'No dual feasible solution'
    return str(swiglpk.glp_get_status(self._problem._p))
0.005362
def _build_url(self, query_params): """Build the final URL to be passed to urllib :param query_params: A dictionary of all the query parameters :type query_params: dictionary :return: string """ url = '' count = 0 while count < len(self._url_path): url += '/{}'.format(self._url_path[count]) count += 1 # add slash if self.append_slash: url += '/' if query_params: url_values = urlencode(sorted(query_params.items()), True) url = '{}?{}'.format(url, url_values) if self._version: url = self._build_versioned_url(url) else: url = '{}{}'.format(self.host, url) return url
0.002601
def load_visible_suites(cls, paths=None):
    """Get a list of suites whose bin paths are visible on $PATH.

    Returns:
        List of `Suite` objects.
    """
    suite_paths = cls.visible_suite_paths(paths)
    suites = [cls.load(x) for x in suite_paths]
    return suites
0.006579
def handle_move(self, dest_path): """Change semantic of MOVE to change resource tags.""" # path and destPath must be '/by_tag/<tag>/<resname>' if "/by_tag/" not in self.path: raise DAVError(HTTP_FORBIDDEN) if "/by_tag/" not in dest_path: raise DAVError(HTTP_FORBIDDEN) catType, tag, _rest = util.save_split(self.path.strip("/"), "/", 2) assert catType == "by_tag" assert tag in self.data["tags"] self.data["tags"].remove(tag) catType, tag, _rest = util.save_split(dest_path.strip("/"), "/", 2) assert catType == "by_tag" if tag not in self.data["tags"]: self.data["tags"].append(tag) return True
0.002751
def match(self, rule):
    """
    Checks if the given rule matches with the filter.

    :param rule: The Flask rule to be matched.
    :return: True if the filter matches.
    """
    if self.methods:
        for method in self.methods:
            if method in rule.methods:
                return True
    if self.endpoints:
        for endpoint in self.endpoints:
            if endpoint == rule.endpoint:
                return True
    return False
0.003899
def get_all_label_algorithms(): """Gets all the possible label (structural grouping) algorithms in MSAF. Returns ------- algo_ids : list List of all the IDs of label algorithms (strings). """ algo_ids = [] for name in msaf.algorithms.__all__: module = eval(msaf.algorithms.__name__ + "." + name) if module.is_label_type: algo_ids.append(module.algo_id) return algo_ids
0.002288
def get_seconds_until_next_quarter(now=None): """ Returns the number of seconds until the next quarter of an hour. This is the short-term rate limit used by Strava. :param now: A (utc) timestamp :type now: arrow.arrow.Arrow :return: the number of seconds until the next quarter, as int """ if now is None: now = arrow.utcnow() return 899 - (now - now.replace(minute=(now.minute // 15) * 15, second=0, microsecond=0)).seconds
0.006466
def delete_items_by_index(list_, index_list, copy=False):
    """
    Remove items from ``list_`` at positions specified in ``index_list``
    The original ``list_`` is preserved if ``copy`` is True

    Args:
        list_ (list):
        index_list (list):
        copy (bool): preserves original list if True

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_list import *  # NOQA
        >>> list_ = [8, 1, 8, 1, 6, 6, 3, 4, 4, 5, 6]
        >>> index_list = [2, -1]
        >>> result = delete_items_by_index(list_, index_list)
        >>> print(result)
        [8, 1, 1, 6, 6, 3, 4, 4, 5]
    """
    if copy:
        list_ = list_[:]
    # Rectify negative indices
    index_list_ = [(len(list_) + x if x < 0 else x) for x in index_list]
    # Remove largest indices first
    index_list_ = sorted(index_list_, reverse=True)
    for index in index_list_:
        del list_[index]
    return list_
0.00108
def remove_link_button(self):
    """
    Function removes link button from Run Window
    """
    if self.link is not None:
        self.info_box.remove(self.link)
        self.link.destroy()
        self.link = None
0.008163
def sph_midpoint(coord1, coord2):
    """Compute the midpoint between two points on the sphere.

    Parameters
    ----------
    coord1 : `~astropy.coordinates.SkyCoord`
        Coordinate of one point on a great circle.
    coord2 : `~astropy.coordinates.SkyCoord`
        Coordinate of the other point on a great circle.

    Returns
    -------
    midpt : `~astropy.coordinates.SkyCoord`
        The coordinates of the spherical midpoint.
    """
    c1 = coord1.cartesian / coord1.cartesian.norm()
    coord2 = coord2.transform_to(coord1.frame)
    c2 = coord2.cartesian / coord2.cartesian.norm()

    midpt = 0.5 * (c1 + c2)
    usph = midpt.represent_as(coord.UnitSphericalRepresentation)

    return coord1.frame.realize_frame(usph)
0.001346
def _get_id_from_name(self, name):
    """List placement group ids which match the given name."""
    _filter = {
        'placementGroups': {
            'name': {'operation': name}
        }
    }
    mask = "mask[id, name]"
    results = self.client.call('Account', 'getPlacementGroups',
                               filter=_filter, mask=mask)
    return [result['id'] for result in results]
0.007481
def get_default_view_path(resource):
    "Returns the dotted path to the default view class."
    parts = [a.member_name for a in resource.ancestors] +\
        [resource.collection_name or resource.member_name]
    if resource.prefix:
        parts.insert(-1, resource.prefix)
    view_file = '%s' % '_'.join(parts)
    view = '%s:%sView' % (view_file, snake2camel(view_file))
    app_package_name = get_app_package_name(resource.config)
    return '%s.views.%s' % (app_package_name, view)
0.002008
def getSystemVariable(self, remote, name): """Get single system variable from CCU / Homegear""" var = None if self.remotes[remote]['username'] and self.remotes[remote]['password']: LOG.debug( "ServerThread.getSystemVariable: Getting System variable via JSON-RPC") session = self.jsonRpcLogin(remote) if not session: return try: params = {"_session_id_": session, "name": name} response = self._rpcfunctions.jsonRpcPost( self.remotes[remote]['ip'], self.remotes[remote].get('jsonport', DEFAULT_JSONPORT), "SysVar.getValueByName", params) if response['error'] is None and response['result']: try: var = float(response['result']) except Exception as err: var = response['result'] == 'true' self.jsonRpcLogout(remote, session) except Exception as err: self.jsonRpcLogout(remote, session) LOG.warning( "ServerThread.getSystemVariable: Exception: %s" % str(err)) else: try: var = self.proxies[ "%s-%s" % (self._interface_id, remote)].getSystemVariable(name) except Exception as err: LOG.debug( "ServerThread.getSystemVariable: Exception: %s" % str(err)) return var
0.003971
def _get_numbering(document, numid, ilvl):
    """Returns type for the list.

    :Returns:
      Returns type for the list. Returns "bullet" by default
      or in case of an error.
    """
    try:
        abs_num = document.numbering[numid]
        return document.abstruct_numbering[abs_num][ilvl]['numFmt']
    except:
        return 'bullet'
0.008772
def open(cls, path):
    """Load an image file into a PIX object.

    Leptonica can load TIFF, PNM (PBM, PGM, PPM), PNG, and JPEG.  If
    loading fails then the object will wrap a C null pointer.
    """
    filename = fspath(path)
    with _LeptonicaErrorTrap():
        return cls(lept.pixRead(os.fsencode(filename)))
0.005731
def _index_sub(self, uri_list, num, batch_num): """ Converts a list of uris to elasticsearch json objects args: uri_list: list of uris to convert num: the ending count within the batch batch_num: the batch number """ bname = '%s-%s' % (batch_num, num) log.debug("batch_num '%s' starting es_json conversion", bname) qry_data = get_all_item_data([item[0] for item in uri_list], self.tstore_conn, rdfclass=self.rdf_class) log.debug("batch_num '%s-%s' query_complete | count: %s", batch_num, num, len(qry_data)) # path = os.path.join(CFG.dirs.cache, "index_pre") # if not os.path.exists(path): # os.makedirs(path) # with open(os.path.join(path, bname + ".json"), "w") as fo: # fo.write(json.dumps(qry_data)) data = RdfDataset(qry_data) del qry_data log.debug("batch_num '%s-%s' RdfDataset Loaded", batch_num, num) for value in uri_list: try: self.batch_data[batch_num]['main'].append(\ data[value[0]].es_json()) self.count += 1 except KeyError: pass for name, indexer in self.other_indexers.items(): for item in data.json_qry("$.:%s" % name.pyuri): val = item.es_json() if val: self.batch_data[batch_num][name].append(val) self.batch_uris[batch_num].append(item.subject) del data del uri_list log.debug("batch_num '%s-%s' converted to es_json", batch_num, num)
0.00167
def DumpDirHashToStringIO(directory, stringio, base='', exclude=None, include=None): ''' Helper to iterate over the files in a directory putting those in the passed StringIO in ini format. :param unicode directory: The directory for which the hash should be done. :param StringIO stringio: The string to which the dump should be put. :param unicode base: If provided should be added (along with a '/') before the name=hash of file. :param unicode exclude: Pattern to match files to exclude from the hashing. E.g.: *.gz :param unicode include: Pattern to match files to include in the hashing. E.g.: *.zip ''' import fnmatch import os files = [(os.path.join(directory, i), i) for i in os.listdir(directory)] files = [i for i in files if os.path.isfile(i[0])] for fullname, filename in files: if include is not None: if not fnmatch.fnmatch(fullname, include): continue if exclude is not None: if fnmatch.fnmatch(fullname, exclude): continue md5 = Md5Hex(fullname) if base: stringio.write('%s/%s=%s\n' % (base, filename, md5)) else: stringio.write('%s=%s\n' % (filename, md5))
0.003084
def seat_slot(self): """The seat slot of the touch event. A seat slot is a non-negative seat wide unique identifier of an active touch point. Events from single touch devices will be represented as one individual touch point per device. For events not of type :attr:`~libinput.constant.EventType.TOUCH_DOWN`, :attr:`~libinput.constant.EventType.TOUCH_UP`, :attr:`~libinput.constant.EventType.TOUCH_MOTION` or :attr:`~libinput.constant.EventType.TOUCH_CANCEL`, this property raises :exc:`AssertionError`. Returns: int: The seat slot of the touch event. Raises: AssertionError """ if self.type == EventType.TOUCH_FRAME: raise AttributeError(_wrong_prop.format(self.type)) return self._libinput.libinput_event_touch_get_seat_slot(self._handle)
0.02551
def to_picard_basecalling_params( self, directory: Union[str, Path], bam_prefix: Union[str, Path], lanes: Union[int, List[int]], ) -> None: """Writes sample and library information to a set of files for a given set of lanes. **BARCODE PARAMETERS FILES**: Store information regarding the sample index sequences, sample index names, and, optionally, the library name. These files are used by Picard's `CollectIlluminaBasecallingMetrics` and Picard's `ExtractIlluminaBarcodes`. The output tab-seperated files are formatted as: ``<directory>/barcode_params.<lane>.txt`` **LIBRARY PARAMETERS FILES**: Store information regarding the sample index sequences, sample index names, and optionally sample library and descriptions. A path to the resulting demultiplexed BAM file is also stored which is used by Picard's `IlluminaBasecallsToSam`. The output tab-seperated files are formatted as: ``<directory>/library_params.<lane>.txt`` The format of the BAM file output paths in the library parameter files are formatted as: ``<bam_prefix>/<Sample_Name>.<Sample_Library>/<Sample_Name>.<index><index2>.<lane>.bam`` Two files will be written to ``directory`` for all ``lanes`` specified. If the path to ``directory`` does not exist, it will be created. Args: directory: File path to the directory to write the parameter files. bam_prefix: Where the demultiplexed BAMs should be written. lanes: The lanes to write basecalling parameters for. """ if len(self.samples) == 0: raise ValueError('No samples in sample sheet') if not ( isinstance(lanes, int) or isinstance(lanes, (list, tuple)) and len(lanes) > 0 and all(isinstance(lane, int) for lane in lanes) ): raise ValueError(f'Lanes must be an int or list of ints: {lanes}') if len(set(len(sample.index or '') for sample in self.samples)) != 1: raise ValueError('I7 indexes have differing lengths.') if len(set(len(sample.index2 or '') for sample in self.samples)) != 1: raise ValueError('I5 indexes have differing lengths.') for attr in ('Sample_Name', 'Library_ID', 'index'): if any(getattr(sample, attr) is None for sample in self.samples): raise ValueError( 'Samples must have at least `Sample_Name`, ' '`Sample_Library`, and `index` attributes' ) # Make lanes iterable if only an int was provided. lanes = [lanes] if isinstance(lanes, int) else lanes # Resolve path to basecalling parameter files. prefix = Path(directory).expanduser().resolve() prefix.mkdir(exist_ok=True, parents=True) # Promote bam_prefix to Path object. bam_prefix = Path(bam_prefix).expanduser().resolve() # Both headers are one column larger if an ``index2`` attribute is # present on all samples. Use list splatting to unpack the options. barcode_header = [ *( ['barcode_sequence_1'] if not self.samples_have_index2 else ['barcode_sequence_1', 'barcode_sequence_2'] ), 'barcode_name', 'library_name', ] # TODO: Remove description if none is provided on all samples. library_header = [ *( ['BARCODE_1'] if not self.samples_have_index2 else ['BARCODE_1', 'BARCODE_2'] ), 'OUTPUT', 'SAMPLE_ALIAS', 'LIBRARY_NAME', 'DS', ] for lane in lanes: barcode_out = prefix / f'barcode_params.{lane}.txt' library_out = prefix / f'library_params.{lane}.txt' # Enter into a writing context for both library and barcode params. 
with ExitStack() as stack: barcode_writer = csv.writer( stack.enter_context(barcode_out.open('w')), delimiter='\t' ) library_writer = csv.writer( stack.enter_context(library_out.open('w')), delimiter='\t' ) barcode_writer.writerow(barcode_header) library_writer.writerow(library_header) for sample in self.samples: # The long name of a sample is a combination of the sample # ID and the sample library. long_name = '.'.join( [sample.Sample_Name, sample.Library_ID] ) # The barcode name is all sample indexes concatenated. barcode_name = sample.index + (sample.index2 or '') library_name = sample.Library_ID or '' # Assemble the path to the future BAM file. bam_file = ( bam_prefix / long_name / f'{sample.Sample_Name}.{barcode_name}.{lane}.bam' ) # Use list splatting to build the contents of the library # and barcodes parameter files. barcode_line = [ *( [sample.index] if not self.samples_have_index2 else [sample.index, sample.index2] ), barcode_name, library_name, ] library_line = [ *( [sample.index] if not self.samples_have_index2 else [sample.index, sample.index2] ), bam_file, sample.Sample_Name, sample.Library_ID, sample.Description or '', ] barcode_writer.writerow(map(str, barcode_line)) library_writer.writerow(map(str, library_line)) # Dempultiplexing relys on an umatched file so append that, # but only to the library parameters file. unmatched_file = bam_prefix / f'unmatched.{lane}.bam' library_line = [ *(['N'] if not self.samples_have_index2 else ['N', 'N']), unmatched_file, 'unmatched', 'unmatchedunmatched', '', ] library_writer.writerow(map(str, library_line))
0.000434
def clone(cls, srcpath, destpath):
    """Clone an existing repository to a new bare repository."""
    # Mercurial will not create intermediate directories for clones.
    try:
        os.makedirs(destpath)
    except OSError as e:
        if not e.errno == errno.EEXIST:
            raise
    cmd = [HG, 'clone', '--quiet', '--noupdate', srcpath, destpath]
    subprocess.check_call(cmd)
    return cls(destpath)
0.004405
def cudnn_gru(units, n_hidden, n_layers=1, trainable_initial_states=False, seq_lengths=None, input_initial_h=None, name='cudnn_gru', reuse=False): """ Fast CuDNN GRU implementation Args: units: tf.Tensor with dimensions [B x T x F], where B - batch size T - number of tokens F - features n_hidden: dimensionality of hidden state trainable_initial_states: whether to create a special trainable variable to initialize the hidden states of the network or use just zeros seq_lengths: tensor of sequence lengths with dimension [B] n_layers: number of layers input_initial_h: initial hidden state, tensor name: name of the variable scope to use reuse:whether to reuse already initialized variable Returns: h - all hidden states along T dimension, tf.Tensor with dimensionality [B x T x F] h_last - last hidden state, tf.Tensor with dimensionality [B x H] """ with tf.variable_scope(name, reuse=reuse): gru = tf.contrib.cudnn_rnn.CudnnGRU(num_layers=n_layers, num_units=n_hidden) if trainable_initial_states: init_h = tf.get_variable('init_h', [n_layers, 1, n_hidden]) init_h = tf.tile(init_h, (1, tf.shape(units)[0], 1)) else: init_h = tf.zeros([n_layers, tf.shape(units)[0], n_hidden]) initial_h = input_initial_h or init_h h, h_last = gru(tf.transpose(units, (1, 0, 2)), (initial_h, )) h = tf.transpose(h, (1, 0, 2)) h_last = tf.squeeze(h_last, axis=0)[-1] # extract last layer state # Extract last states if they are provided if seq_lengths is not None: indices = tf.stack([tf.range(tf.shape(h)[0]), seq_lengths-1], axis=1) h_last = tf.gather_nd(h, indices) return h, h_last
0.002064
def power_off(self, interval=200):
    """230v power off"""
    if self.__power_off_port is None:
        cij.err("cij.usb.relay: Invalid USB_RELAY_POWER_OFF")
        return 1
    return self.__press(self.__power_off_port, interval=interval)
0.007605
def validate_version_argument(version, hint=4): """ validate the version argument against the supported MDF versions. The default version used depends on the hint MDF major revision Parameters ---------- version : str requested MDF version hint : int MDF revision hint Returns ------- valid_version : str valid version """ if version not in SUPPORTED_VERSIONS: if hint == 2: valid_version = "2.14" elif hint == 3: valid_version = "3.30" else: valid_version = "4.10" message = ( 'Unknown mdf version "{}".' " The available versions are {};" ' automatically using version "{}"' ) message = message.format(version, SUPPORTED_VERSIONS, valid_version) logger.warning(message) else: valid_version = version return valid_version
0.001068
def disconnect(self):
    """Disconnect from AWS IOT message broker
    """
    if self.client is None:
        return
    try:
        self.client.disconnect()
    except operationError as exc:
        raise InternalError("Could not disconnect from AWS IOT", message=exc.message)
0.009554
def set_pubkey(self, pkey):
    """
    Set the public key of the certificate signing request.

    :param pkey: The public key to use.
    :type pkey: :py:class:`PKey`

    :return: ``None``
    """
    set_result = _lib.X509_REQ_set_pubkey(self._req, pkey._pkey)
    _openssl_assert(set_result == 1)
0.006006
def help_completion_options(self):
    """ Return options of this command. """
    for opt in self.parser.option_list:
        for lopt in opt._long_opts:
            yield lopt
0.00995
def fit(self, trX, trY, batch_size=64, n_epochs=1, len_filter=LenFilter(), snapshot_freq=1, path=None): """Train model on given training examples and return the list of costs after each minibatch is processed. Args: trX (list) -- Inputs trY (list) -- Outputs batch_size (int, optional) -- number of examples in a minibatch (default 64) n_epochs (int, optional) -- number of epochs to train for (default 1) len_filter (object, optional) -- object to filter training example by length (default LenFilter()) snapshot_freq (int, optional) -- number of epochs between saving model snapshots (default 1) path (str, optional) -- prefix of path where model snapshots are saved. If None, no snapshots are saved (default None) Returns: list -- costs of model after processing each minibatch """ if len_filter is not None: trX, trY = len_filter.filter(trX, trY) trY = standardize_targets(trY, cost=self.cost) n = 0. t = time() costs = [] for e in range(n_epochs): epoch_costs = [] for xmb, ymb in self.iterator.iterXY(trX, trY): c = self._train(xmb, ymb) epoch_costs.append(c) n += len(ymb) if self.verbose >= 2: n_per_sec = n / (time() - t) n_left = len(trY) - n % len(trY) time_left = n_left/n_per_sec sys.stdout.write("\rEpoch %d Seen %d samples Avg cost %0.4f Time left %d seconds" % (e, n, np.mean(epoch_costs[-250:]), time_left)) sys.stdout.flush() costs.extend(epoch_costs) status = "Epoch %d Seen %d samples Avg cost %0.4f Time elapsed %d seconds" % (e, n, np.mean(epoch_costs[-250:]), time() - t) if self.verbose >= 2: sys.stdout.write("\r"+status) sys.stdout.flush() sys.stdout.write("\n") elif self.verbose == 1: print(status) if path and e % snapshot_freq == 0: save(self, "{0}.{1}".format(path, e)) return costs
0.004915
def write(self):
    """ Write contents of cache to disk. """
    io.debug("Storing cache '{0}'".format(self.path))
    with open(self.path, "w") as file:
        json.dump(self._data, file, sort_keys=True, indent=2,
                  separators=(',', ': '))
0.006803
def gen_mu(K, delta, c): """The Robust Soliton Distribution on the degree of transmitted blocks """ S = c * log(K/delta) * sqrt(K) tau = gen_tau(S, K, delta) rho = gen_rho(K) normalizer = sum(rho) + sum(tau) return [(rho[d] + tau[d])/normalizer for d in range(K)]
0.010067
def set_published(self, published): """Sets the published status. arg: published (boolean): the published status raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.resource.ResourceForm.set_group_template if self.get_published_metadata().is_read_only(): raise errors.NoAccess() if not self._is_valid_boolean(published): raise errors.InvalidArgument() self._my_map['published'] = published
0.005034
def alter_change_column(self, table, column, field):
    """Support change columns."""
    ctx = self.make_context()
    field_null, field.null = field.null, True
    ctx = self._alter_table(ctx, table).literal(' ALTER COLUMN ').sql(field.ddl(ctx))
    field.null = field_null
    return ctx
0.009524
def to_bigquery_field(self, name_case=DdlParseBase.NAME_CASE.original): """Generate BigQuery JSON field define""" col_name = self.get_name(name_case) mode = self.bigquery_mode if self.array_dimensional <= 1: # no or one dimensional array data type type = self.bigquery_legacy_data_type else: # multiple dimensional array data type type = "RECORD" fields = OrderedDict() fields_cur = fields for i in range(1, self.array_dimensional): is_last = True if i == self.array_dimensional - 1 else False fields_cur['fields'] = [OrderedDict()] fields_cur = fields_cur['fields'][0] fields_cur['name'] = "dimension_{}".format(i) fields_cur['type'] = self.bigquery_legacy_data_type if is_last else "RECORD" fields_cur['mode'] = self.bigquery_mode if is_last else "REPEATED" col = OrderedDict() col['name'] = col_name col['type'] = type col['mode'] = mode if self.array_dimensional > 1: col['fields'] = fields['fields'] return json.dumps(col)
0.003289
def __potential_connection_failure(self, e): """ OperationalError's are emitted by the _mysql library for almost every error code emitted by MySQL. Because of this we verify that the error is actually a connection error before terminating the connection and firing off a PoolConnectionException """ try: self._conn.query('SELECT 1') except (IOError, _mysql.OperationalError): # ok, it's actually an issue. self.__handle_connection_failure(e) else: # seems ok, probably programmer error raise _mysql.DatabaseError(*e.args)
0.003096
def save(self, *args, **kwargs):
    """If creating new instance, create profile on Authorize.NET also"""
    data = kwargs.pop('data', {})
    sync = kwargs.pop('sync', True)
    if not self.id and sync:
        self.push_to_server(data)
    super(CustomerProfile, self).save(*args, **kwargs)
0.006309
def build_signature_template(key_id, algorithm, headers): """ Build the Signature template for use with the Authorization header. key_id is the mandatory label indicating to the server which secret to use algorithm is one of the supported algorithms headers is a list of http headers to be included in the signing string. The signature must be interpolated into the template to get the final Authorization header value. """ param_map = {'keyId': key_id, 'algorithm': algorithm, 'signature': '%s'} if headers: headers = [h.lower() for h in headers] param_map['headers'] = ' '.join(headers) kv = map('{0[0]}="{0[1]}"'.format, param_map.items()) kv_string = ','.join(kv) sig_string = 'Signature {0}'.format(kv_string) return sig_string
0.00119
def variable(self, name: str, default_value=None):
    """
    Safely returns the value of the variable given in PUM

    Parameters
    ----------
    name
        the name of the variable
    default_value
        the default value for the variable if it does not exist
    """
    return self.__variables.get(name, default_value)
0.005376
def normalise_filled(self, meta, val):
    """Only care about valid image names"""
    available = list(meta.everything["images"].keys())
    val = sb.formatted(sb.string_spec(),
                       formatter=MergedOptionStringFormatter).normalise(meta, val)
    if val not in available:
        raise BadConfiguration("Specified image doesn't exist",
                               specified=val, available=available)
    return val
0.009852
def xross_listener(http_method=None, **xross_attrs): """Instructs xross to handle AJAX calls right from the moment it is called. This should be placed in a view decorated with `@xross_view()`. :param str http_method: GET or POST. To be used as a source of data for xross. :param dict xross_attrs: xross handler attributes. Those attributes will be available in operation functions in `xross` keyword argument. """ handler = currentframe().f_back.f_locals['request']._xross_handler handler.set_attrs(**xross_attrs) if http_method is not None: handler.http_method = http_method handler.dispatch()
0.004615
def population_chart_header_element(feature, parent):
    """Retrieve population chart header string from definitions."""
    _ = feature, parent  # NOQA
    header = population_chart_header['string_format']
    return header.capitalize()
0.004202
def hide_routemap_holder_route_map_content_set_weight_weight_value(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy") route_map = ET.SubElement(hide_routemap_holder, "route-map") name_key = ET.SubElement(route_map, "name") name_key.text = kwargs.pop('name') action_rm_key = ET.SubElement(route_map, "action-rm") action_rm_key.text = kwargs.pop('action_rm') instance_key = ET.SubElement(route_map, "instance") instance_key.text = kwargs.pop('instance') content = ET.SubElement(route_map, "content") set = ET.SubElement(content, "set") weight = ET.SubElement(set, "weight") weight_value = ET.SubElement(weight, "weight-value") weight_value.text = kwargs.pop('weight_value') callback = kwargs.pop('callback', self._callback) return callback(config)
0.003883
def load_remote_system(url, format=None): '''Load a system from the remote location specified by *url*. **Example** :: load_remote_system('https://raw.github.com/chemlab/chemlab-testdata/master/naclwater.gro') ''' filename, headers = urlretrieve(url) return load_system(filename, format=format)
0.00304
def get_input_peer(entity, allow_self=True, check_hash=True): """ Gets the input peer for the given "entity" (user, chat or channel). A ``TypeError`` is raised if the given entity isn't a supported type or if ``check_hash is True`` but the entity's ``access_hash is None``. Note that ``check_hash`` **is ignored** if an input peer is already passed since in that case we assume the user knows what they're doing. This is key to getting entities by explicitly passing ``hash = 0``. """ try: if entity.SUBCLASS_OF_ID == 0xc91c90b6: # crc32(b'InputPeer') return entity except AttributeError: # e.g. custom.Dialog (can't cyclic import). if allow_self and hasattr(entity, 'input_entity'): return entity.input_entity elif hasattr(entity, 'entity'): return get_input_peer(entity.entity) else: _raise_cast_fail(entity, 'InputPeer') if isinstance(entity, types.User): if entity.is_self and allow_self: return types.InputPeerSelf() elif entity.access_hash is not None or not check_hash: return types.InputPeerUser(entity.id, entity.access_hash) else: raise TypeError('User without access_hash cannot be input') if isinstance(entity, (types.Chat, types.ChatEmpty, types.ChatForbidden)): return types.InputPeerChat(entity.id) if isinstance(entity, (types.Channel, types.ChannelForbidden)): if entity.access_hash is not None or not check_hash: return types.InputPeerChannel(entity.id, entity.access_hash) else: raise TypeError('Channel without access_hash cannot be input') if isinstance(entity, types.InputUser): return types.InputPeerUser(entity.user_id, entity.access_hash) if isinstance(entity, types.InputChannel): return types.InputPeerChannel(entity.channel_id, entity.access_hash) if isinstance(entity, types.InputUserSelf): return types.InputPeerSelf() if isinstance(entity, types.UserEmpty): return types.InputPeerEmpty() if isinstance(entity, types.UserFull): return get_input_peer(entity.user) if isinstance(entity, types.ChatFull): return types.InputPeerChat(entity.id) if isinstance(entity, types.PeerChat): return types.InputPeerChat(entity.chat_id) _raise_cast_fail(entity, 'InputPeer')
0.00041
def plotRealImg(sim, cam, rawdata, t: int, odir: Path=None, fg=None): """ sim: histfeas/simclass.py cam: camclass.py rawdata: nframe x ny x nx ndarray t: integer index to read odir: output directory (where to write results) plots both cameras together, and magnetic zenith 1-D cut line and 1 degree radar beam red circle centered on magnetic zenith """ ncols = len(cam) # print('using {} cameras'.format(ncols)) T = nans(ncols, dtype=datetime) # if asi is not None: # ncols=3 # if isinstance(asi,(tuple,list)): # pass # elif isinstance(asi,(str,Path)): # asi = Path(asi).expanduser() # if asi.is_dir(): # asi=list(asi.glob('*.FITS')) if fg is None: doclose = True fg, axs = subplots(nrows=1, ncols=ncols, figsize=( 15, 12), dpi=DPI, facecolor='black') axs = atleast_1d(axs) # in case only 1 # fg.set_size_inches(15,5) #clips off else: # maintain original figure handle for anim.writer doclose = False fg.clf() axs = [fg.add_subplot(1, ncols, i + 1) for i in range(ncols)] for i, C in enumerate(cam): if C.usecam: # HiST cameras # print('frame {}'.format(t)) # hold times for all cameras at this time step T[i] = updateframe(t, rawdata[i], None, cam[i], axs[i], fg) elif C.name == 'asi': # ASI dasc = dio.load(C.fn, treq=T[sim.useCamBool][0]) C.tKeo = dasc.time updateframe(0, dasc.values, dasc.wavelength, C, axs[i], fg) # FIXME may need API update try: overlayrowcol(axs[i], C.hlrows, C.hlcols) except AttributeError: pass # az/el were not registered else: logging.error(f'unknown camera {C.name} index {i}') if i == 0: axs[0].set_ylabel(datetime.strftime( T[0], '%x')).set_color('limegreen') # NOTE: commented out due to Matplotlib 1.x bugs # fg.suptitle(datetime.strftime(T[0],'%x')) #makes giant margins that tight_layout doesn't help, bug # fg.text(0.5,0.15,datetime.strftime(T[0],'%x'))#, va='top',ha='center') #bug too # fg.tight_layout() # fg.subplots_adjust(top=0.95) # TODO: T[0] is fastest cam now, but needs generalization writeplots(fg, 'rawFrame', T[0], odir=odir, dpi=sim.dpi, facecolor='k', doclose=doclose)
0.001962
def write(self, pkt):
    """accepts either a single packet or a list of packets to be
    written to the dumpfile
    """
    if not self.header_present:
        self._write_header(pkt)
    if isinstance(pkt, BasePacket):
        self._write_packet(pkt)
    else:
        for p in pkt:
            self._write_packet(p)
0.008333
def _initialize_providers(self): """Read config file and initialize providers""" configured_providers = active_config.DATABASES provider_objects = {} if not isinstance(configured_providers, dict) or configured_providers == {}: raise ConfigurationError( "'DATABASES' config must be a dict and at least one " "provider must be defined") if 'default' not in configured_providers: raise ConfigurationError( "You must define a 'default' provider") for provider_name, conn_info in configured_providers.items(): provider_full_path = conn_info['PROVIDER'] provider_module, provider_class = provider_full_path.rsplit('.', maxsplit=1) provider_cls = getattr(importlib.import_module(provider_module), provider_class) provider_objects[provider_name] = provider_cls(conn_info) return provider_objects
0.005165
def _copy_stream_position(position):
    """Copy a StreamPosition.

    Args:
        position (Union[ \
            dict, \
            ~google.cloud.bigquery_storage_v1beta1.types.StreamPosition \
        ]):
            StreamPosition (or dictionary in StreamPosition format) to copy.

    Returns:
        ~google.cloud.bigquery_storage_v1beta1.types.StreamPosition:
            A copy of the input StreamPosition.
    """
    if isinstance(position, types.StreamPosition):
        output = types.StreamPosition()
        output.CopyFrom(position)
        return output

    return types.StreamPosition(**position)
0.001623
def _dqtoi(self, dq):
    """Convert dotquad or hextet to long."""
    # hex notation
    if dq.startswith('0x'):
        return self._dqtoi_hex(dq)

    # IPv6
    if ':' in dq:
        return self._dqtoi_ipv6(dq)
    elif len(dq) == 32:
        # Assume full heximal notation
        self.v = 6
        return int(dq, 16)

    # IPv4
    if '.' in dq:
        return self._dqtoi_ipv4(dq)

    raise ValueError('Invalid address input')
0.004032
def xlate(self, type): """ Get a (namespace) translated I{qualified} name for specified type. @param type: A schema type. @type type: I{suds.xsd.sxbasic.SchemaObject} @return: A translated I{qualified} name. @rtype: str """ resolved = type.resolve() name = resolved.name if type.multi_occurrence(): name += '[]' ns = resolved.namespace() if ns[1] == self.wsdl.tns[1]: return name prefix = self.getprefix(ns[1]) return ':'.join((prefix, name))
0.00346
def restore_saved_local_scope( self, saved_variables, args_mapping, line_number ): """Restore the previously saved variables to their original values. Args: saved_variables(list[SavedVariable]) args_mapping(dict): A mapping of call argument to definition argument. line_number(int): Of the def of the function call about to be entered into. Note: We do not need connect_if_allowed because of the preceding call to save_local_scope. """ restore_nodes = list() for var in saved_variables: # Is var.RHS a call argument? if var.RHS in args_mapping: # If so, use the corresponding definition argument for the RHS of the label. restore_nodes.append(RestoreNode( var.RHS + ' = ' + args_mapping[var.RHS], var.RHS, [var.LHS], line_number=line_number, path=self.filenames[-1] )) else: # Create a node for e.g. foo = save_1_foo restore_nodes.append(RestoreNode( var.RHS + ' = ' + var.LHS, var.RHS, [var.LHS], line_number=line_number, path=self.filenames[-1] )) # Chain the restore nodes for node, successor in zip(restore_nodes, restore_nodes[1:]): node.connect(successor) if restore_nodes: # Connect the last node to the first restore node self.nodes[-1].connect(restore_nodes[0]) self.nodes.extend(restore_nodes) return restore_nodes
0.003386
def add(self, path, compress=None): """Add `path` to the MAR file. If `path` is a file, it will be added directly. If `path` is a directory, it will be traversed recursively and all files inside will be added. Args: path (str): path to file or directory on disk to add to this MAR file compress (str): One of 'xz', 'bz2', or None. Defaults to None. """ if os.path.isdir(path): self.add_dir(path, compress) else: self.add_file(path, compress)
0.003503
def terminal(exec_='', background=False, shell_after_cmd_exec=False, keep_open_after_cmd_exec=False, return_cmd=False): '''Start the default terminal emulator. Start the user's preferred terminal emulator, optionally running a command in it. **Order of starting** Windows: Powershell Mac: - iTerm2 - Terminal.app Linux/Unix: - ``$TERMINAL`` - ``x-terminal-emulator`` - Terminator - Desktop environment's terminal - gnome-terminal - urxvt - rxvt - xterm Args: exec\_ (str) : An optional command to run in the opened terminal emulator. Defaults to empty (no command). background (bool): Run the terminal in the background, instead of waiting for completion. Defaults to ``False``. shell_after_cmd_exec (bool): Start the user's shell after running the command (see exec_). Defaults to `False`. return_cmd (bool): Returns the command used to start the terminal (str) instead of running it. Defaults to ``False``. Returns: str: Only if ``return_cmd``, returns the command to run the terminal instead of running it. Else returns nothing. ''' desktop_env = system.get_name() if not exec_: shell_after_cmd_exec = True if desktop_env == 'windows': terminal_cmd_str = 'start powershell.exe' if desktop_env == 'mac': # Try iTerm2 first, apparently most popular Mac Terminal if mac_app_exists('iTerm2'): terminal_cmd_str = 'open -a iTerm2' else: terminal_cmd_str = 'open -a Terminal' else: # sensible-terminal if os.getenv('TERMINAL'): # Not everywhere, but if user *really* has a preference, they will # set this terminal_cmd_str = os.getenv('TERMINAL') elif system.is_in_path('x-terminal-emulator'): # This is a convenience script that launches terminal based on # user preferences. # This is not available on some distros (but most have it) # so try this first terminal_cmd_str = 'x-terminal-emulator' elif system.is_in_path('terminator'): terminal_cmd_str = 'terminator' elif desktop_env in ['gnome', 'unity', 'cinnamon', 'gnome2']: terminal_cmd_str = 'gnome-terminal' elif desktop_env == 'xfce4': terminal_cmd_str = 'xfce4-terminal' elif desktop_env == 'kde' or desktop_env == 'trinity': terminal_cmd_str = 'konsole' elif desktop_env == 'mate': terminal_cmd_str = 'mate-terminal' elif desktop_env == 'i3': terminal_cmd_str = 'i3-sensible-terminal' elif desktop_env == 'pantheon': terminal_cmd_str = 'pantheon-terminal' elif desktop_env == 'enlightenment': terminal_cmd_str = 'terminology' elif desktop_env == 'lxde' or desktop_env == 'lxqt': terminal_cmd_str = 'lxterminal' else: if system.is_in_path('gnome-terminal'): terminal_cmd_str = 'gnome-terminal' elif system.is_in_path('urxvt'): terminal_cmd_str = 'urxvt' elif system.is_in_path('rxvt'): terminal_cmd_str = 'rxvt' elif system.is_in_path('xterm'): terminal_cmd_str = 'xterm' if exec_: if desktop_env == 'windows': if keep_open_after_cmd_exec and not shell_after_cmd_exec: exec_ += '; pause' if os.path.isfile(exec_): terminal_cmd_str += exec_ else: terminal_cmd_str += ' -Command ' + '"' + exec_ + '"' if shell_after_cmd_exec: terminal_cmd_str += ' -NoExit' else: if keep_open_after_cmd_exec and not shell_after_cmd_exec: exec_ += '; read' if shell_after_cmd_exec: exec_ += '; ' + os.getenv('SHELL') if desktop_env == 'mac': terminal_cmd_str += ' sh -c {}'.format(shlex.quote(exec_)) else: terminal_cmd_str += ' -e {}'.format( shlex.quote('sh -c {}'.format(shlex.quote(exec_)))) if return_cmd: return terminal_cmd_str terminal_proc = sp.Popen([terminal_cmd_str], shell=True, stdout=sp.PIPE) if not background: # Wait for process to complete terminal_proc.wait()
0.029053
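The doubled `shlex.quote` in the non-Windows branch above wraps the user command twice: once so the inner `sh -c` receives it as a single argument, and once so the whole `sh -c ...` string survives as one argument to the emulator's `-e` option. A small standalone check of how that composes (illustrative only, not part of the library):

import shlex

cmd = "echo 'hello world'"
inner = 'sh -c {}'.format(shlex.quote(cmd))
print(inner)
# -> sh -c 'echo '"'"'hello world'"'"''
# Quoting it a second time (as the -e branch does) keeps the whole string
# intact as a single argument to the terminal emulator.
print('-e {}'.format(shlex.quote(inner)))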
def exec_notebook_daemon_command(self, name, cmd, port=0):
    """Run a daemon script command."""
    cmd = self.get_notebook_daemon_command(name, cmd, port)

    # Make all arguments explicit strings
    cmd = [str(arg) for arg in cmd]

    logger.info("Running notebook command: %s", " ".join(cmd))
    # print("XXX - DEBUG - Running notebook command:", " ".join(cmd))

    # Enable traceback dumps in case the daemon gets stuck
    env = os.environ.copy()
    env["PYTHONFAULTHANDLER"] = "true"

    p = subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, env=env)

    time.sleep(0.2)
    stdout, stderr = p.communicate()

    if b"already running" in stderr:
        raise RuntimeError("Looks like notebook_daemon is already running. Please kill it manually: pkill -f notebook_daemon. Was: {}".format(stderr.decode("utf-8")))

    if p.returncode != 0:
        logger.error("STDOUT: %s", stdout)
        logger.error("STDERR: %s", stderr)
        raise RuntimeError("Could not execute notebook command. Exit code: {} cmd: {}".format(p.returncode, " ".join(cmd)))

    return stdout
0.004288
def _connect(self, key, spec, via=None): """ Actual connect implementation. Arranges for the Mitogen connection to be created and enqueues an asynchronous call to start the forked task parent in the remote context. :param key: Deduplication key representing the connection configuration. :param spec: Connection specification. :returns: Dict like:: { 'context': mitogen.core.Context or None, 'via': mitogen.core.Context or None, 'init_child_result': { 'fork_context': mitogen.core.Context, 'home_dir': str or None, }, 'msg': str or None } Where `context` is a reference to the newly constructed context, `init_child_result` is the result of executing :func:`ansible_mitogen.target.init_child` in that context, `msg` is an error message and the remaining fields are :data:`None`, or `msg` is :data:`None` and the remaining fields are set. """ try: method = getattr(self.router, spec['method']) except AttributeError: raise Error('unsupported method: %(transport)s' % spec) context = method(via=via, unidirectional=True, **spec['kwargs']) if via and spec.get('enable_lru'): self._update_lru(context, spec, via) # Forget the context when its disconnect event fires. mitogen.core.listen(context, 'disconnect', lambda: self._on_context_disconnect(context)) self._send_module_forwards(context) init_child_result = context.call( ansible_mitogen.target.init_child, log_level=LOG.getEffectiveLevel(), candidate_temp_dirs=self._get_candidate_temp_dirs(), ) if os.environ.get('MITOGEN_DUMP_THREAD_STACKS'): from mitogen import debug context.call(debug.dump_to_logger) self._key_by_context[context] = key self._refs_by_context[context] = 0 return { 'context': context, 'via': via, 'init_child_result': init_child_result, 'msg': None, }
0.001283
def _decrypt_object(obj, **kwargs):
    '''
    Recursively try to decrypt any object. If the object is a six.string_types
    (string or unicode) and it contains a valid NACLENC prefix, decrypt it;
    otherwise keep recursing until a string is found.
    '''
    if salt.utils.stringio.is_readable(obj):
        return _decrypt_object(obj.getvalue(), **kwargs)
    if isinstance(obj, six.string_types):
        match = re.search(NACL_REGEX, obj)
        if match is not None:
            return __salt__['nacl.dec'](match.group(1), **kwargs)
        return obj
    elif isinstance(obj, dict):
        for key, value in six.iteritems(obj):
            obj[key] = _decrypt_object(value, **kwargs)
        return obj
    elif isinstance(obj, list):
        for key, value in enumerate(obj):
            obj[key] = _decrypt_object(value, **kwargs)
        return obj
    else:
        return obj
0.002203
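A self-contained sketch of the same walk-and-decrypt pattern, using a made-up ENC[...] marker and a stand-in "decrypt" callable rather than salt's nacl runner:

import re

CIPHER_RE = re.compile(r'ENC\[(.*?)\]')

def walk_decrypt(obj, decrypt=lambda s: s[::-1]):  # stand-in "decryption"
    if isinstance(obj, str):
        match = CIPHER_RE.search(obj)
        return decrypt(match.group(1)) if match else obj
    if isinstance(obj, dict):
        return {k: walk_decrypt(v, decrypt) for k, v in obj.items()}
    if isinstance(obj, list):
        return [walk_decrypt(v, decrypt) for v in obj]
    return obj

print(walk_decrypt({'token': 'ENC[terces]', 'port': 443}))
# -> {'token': 'secret', 'port': 443}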
def state_probabilities(alpha, beta, T=None, gamma_out=None):
    """ Calculate the (T,N)-probability matrix for being in state i at time t.

    Parameters
    ----------
    alpha : ndarray((T,N), dtype = float), optional, default = None
        alpha[t,i] is the ith forward coefficient of time t.
    beta : ndarray((T,N), dtype = float), optional, default = None
        beta[t,i] is the ith backward coefficient of time t.
    T : int, optional, default = None
        trajectory length. If not given, gamma_out.shape[0] will be used. If
        gamma_out is not given either, T = alpha.shape[0] will be used.
    gamma_out : ndarray((T,N), dtype = float), optional, default = None
        container for the gamma result variables. If None, a new container will be created.

    Returns
    -------
    gamma : ndarray((T,N), dtype = float), optional, default = None
        gamma[t,i] is the probability at time t to be in state i.

    See Also
    --------
    forward : to calculate `alpha`
    backward : to calculate `beta`

    """
    # get summation helper - we use matrix multiplication with 1's because it's faster than the np.sum function (yes!)
    global ones_size
    if ones_size != alpha.shape[1]:
        global ones
        ones = np.ones(alpha.shape[1])[:, None]
        ones_size = alpha.shape[1]
    #
    if alpha.shape[0] != beta.shape[0]:
        raise ValueError('Inconsistent sizes of alpha and beta.')
    # determine T to use
    if T is None:
        if gamma_out is None:
            T = alpha.shape[0]
        else:
            T = gamma_out.shape[0]
    # compute
    if gamma_out is None:
        gamma_out = alpha * beta
        if T < gamma_out.shape[0]:
            gamma_out = gamma_out[:T]
    else:
        if gamma_out.shape[0] < alpha.shape[0]:
            np.multiply(alpha[:T], beta[:T], gamma_out)
        else:
            np.multiply(alpha, beta, gamma_out)
    # normalize
    np.divide(gamma_out, np.dot(gamma_out, ones), out=gamma_out)
    # done
    return gamma_out
0.001485
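The core of the computation above is just elementwise `alpha * beta` followed by row-normalization; with made-up forward/backward coefficients for a 2-state, 3-step chain:

import numpy as np

alpha = np.array([[0.5, 0.1], [0.2, 0.3], [0.1, 0.15]])   # made-up forward coefficients
beta = np.array([[0.4, 0.6], [0.7, 1.0], [1.0, 1.0]])     # made-up backward coefficients

gamma = alpha * beta
gamma /= gamma.sum(axis=1, keepdims=True)   # each row now sums to 1
print(gamma.sum(axis=1))                    # -> [1. 1. 1.]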
def _query(queue_name=None, build_id=None, release_id=None, run_id=None, count=None):
    """Queries for work items based on their criteria.

    Args:
        queue_name: Optional queue name to restrict to.
        build_id: Optional build ID to restrict to.
        release_id: Optional release ID to restrict to.
        run_id: Optional run ID to restrict to.
        count: How many tasks to fetch. Defaults to None, which means all
            tasks that match the query are fetched.

    Returns:
        List of WorkQueue items.
    """
    assert queue_name or build_id or release_id or run_id

    q = WorkQueue.query
    if queue_name:
        q = q.filter_by(queue_name=queue_name)
    if build_id:
        q = q.filter_by(build_id=build_id)
    if release_id:
        q = q.filter_by(release_id=release_id)
    if run_id:
        q = q.filter_by(run_id=run_id)

    q = q.order_by(WorkQueue.created.desc())

    if count is not None:
        q = q.limit(count)

    return q.all()
0.000998
def setdefault(self, key, val):
    """Set a default value for key.

    This is different from dict's setdefault because it will set the default
    either if the key doesn't exist, or if the value at the key evaluates to
    False, so an empty string or a None value will also be updated.

    :param key: string, the attribute to update
    :param val: mixed, the attribute's new value if key has a current value
        that evaluates to False
    """
    if not getattr(self, key, None):
        setattr(self, key, val)
0.007067
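The difference from `dict.setdefault` in practice: a falsy existing value is still overwritten. A minimal illustration:

class Holder:
    def setdefault(self, key, val):
        if not getattr(self, key, None):
            setattr(self, key, val)

h = Holder()
h.name = ""               # a falsy value is already set
h.setdefault("name", "anonymous")
print(h.name)             # -> "anonymous" (dict.setdefault would have kept "")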
def get_weights_fn(modality_type, value=None): """Gets default weights function; if none available, return value.""" if modality_type in (ModalityType.CTC_SYMBOL, ModalityType.IDENTITY_SYMBOL, ModalityType.MULTI_LABEL, ModalityType.SYMBOL, ModalityType.SYMBOL_ONE_HOT): return common_layers.weights_nonzero elif modality_type in ModalityType.get_choices(): return common_layers.weights_all return value
0.009804
def _attach_module_identifier(command_dict, modulefn): """ Attaches a 'module': modulename entry to each node in the dictionary. This is used by the help printer so that the user can tell if a command was included by default or via a module. """ for command in command_dict: command_dict[command]['module'] = modulefn _attach_module_identifier(command_dict[command]['children'], modulefn)
0.00978
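For instance, tagging a small nested command tree (hypothetical module name and commands, using a simplified stand-alone version of the recursion above):

def attach_module(tree, modulefn):
    for command in tree:
        tree[command]['module'] = modulefn
        attach_module(tree[command].get('children', {}), modulefn)

commands = {'deploy': {'children': {'rollback': {'children': {}}}}}
attach_module(commands, 'ops_plugin')
print(commands['deploy']['children']['rollback']['module'])  # -> 'ops_plugin'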
def gramm_to_promille(gramm, age, weight, height, sex):
    """Return the blood alcohol content (per mille) for a person with
    the given body stats and amount of alcohol (in grams) in blood
    """
    bw = calculate_bw(age, weight, height, sex)
    return (gramm * W) / (PB * bw)
0.014545
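The function relies on `calculate_bw` and the module constants `W` and `PB`, which are not shown here. A self-contained Widmark-style sketch with assumed constants (not the library's actual values):

WATER_FRACTION = 0.8      # assumed fraction of blood that is water
BLOOD_DENSITY = 1.055     # assumed blood density in g/ml

def promille(gramm_alcohol, body_water_litres):
    # per-mille BAC ~ alcohol mass * water fraction / (blood density * body water)
    return (gramm_alcohol * WATER_FRACTION) / (BLOOD_DENSITY * body_water_litres)

print(round(promille(20.0, 40.0), 3))   # -> 0.379 per mille for 20 g of alcohol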
def download(self, callback=None): """ Downloads this resource from its URL to a file on the local system. This method should only be invoked on a worker node after the node was setup for accessing resources via prepareSystem(). """ dirPath = self.localDirPath if not os.path.exists(dirPath): tempDirPath = mkdtemp(dir=os.path.dirname(dirPath), prefix=self.contentHash + "-") self._save(tempDirPath) if callback is not None: callback(tempDirPath) try: os.rename(tempDirPath, dirPath) except OSError as e: # If dirPath already exists & is non-empty either ENOTEMPTY or EEXIST will be raised if e.errno == errno.ENOTEMPTY or e.errno == errno.EEXIST: # Another process beat us to it. # TODO: This is correct but inefficient since multiple processes download the resource redundantly pass else: raise
0.00653
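The download-into-a-temp-dir-then-rename idea above, reduced to a standalone sketch (the `populate` callable stands in for the resource's `_save`; names are illustrative):

import errno
import os
from tempfile import mkdtemp

def create_atomically(dir_path, populate):
    tmp = mkdtemp(dir=os.path.dirname(dir_path) or '.', prefix='partial-')
    populate(tmp)                      # fill the temp directory first
    try:
        os.rename(tmp, dir_path)       # publish it in a single step
    except OSError as e:
        if e.errno not in (errno.ENOTEMPTY, errno.EEXIST):
            raise
        # another process already published dir_path; our copy is redundant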
def tag(self, tokens): """Return a list of (token, tag) tuples for a given list of tokens.""" if not self._loaded_model: self.load(self.model) tags = [None] * len(tokens) norm = self._normalize(tokens) length = len(norm) # A set of allowed indexes for matches to start or end at delims = [0] + [i for span in [m.span() for m in self.delimiters.finditer(norm)] for i in span] + [length] # Token indices token_at_index = [] for i, t in enumerate(tokens): token_at_index.extend([i] * (len(self.lexicon[t].normalized) + 1)) start_i = 0 end_i = 1 matches = {} next_start = end_i # TODO: This could be a little more efficient by skipping indexes forward to next delim points. while True: current = norm[start_i:end_i] if self._dawg.has_keys_with_prefix(current): # print('%s:%s:%s' % (start_i, end_i, current)) # If the current span is in the dawg, and isn't followed by an alphanumeric character if current in self._dawg and start_i in delims and end_i in delims: # print(current) # Subsequent longer matches with same start_i will overwrite values in matches dict matches[start_i] = (start_i, end_i, current) # We can skip forward to after this match next time we increment start_i next_start = end_i # Increment end_i provided we aren't already at the end of the input if end_i < length: end_i += 1 continue # Increment start_i provided we aren't already at the end of the input start_i = next_start if start_i >= length - 1: break end_i = start_i + 1 next_start = end_i # Apply matches as tags to the relevant tokens for start_i, end_i, current in matches.values(): start_token = token_at_index[start_i] end_token = token_at_index[end_i] # Possible for match to start in 'I' token from prev match. Merge matches by not overwriting to 'B'. if not tags[start_token] == 'I-%s' % self.entity: tags[start_token] = 'B-%s' % self.entity tags[start_token+1:end_token+1] = ['I-%s' % self.entity] * (end_token - start_token) tokentags = list(zip(tokens, tags)) return tokentags
0.004715
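Stripped of the DAWG and delimiter bookkeeping, the matching strategy above is a greedy longest match against a lexicon; a toy version (hypothetical lexicon, no token alignment):

lexicon = {'sodium', 'sodium chloride', 'chloride'}
text = 'sodium chloride solution'

i, matches = 0, []
while i < len(text):
    best = None
    for j in range(len(text), i, -1):          # try the longest span first
        if text[i:j] in lexicon:
            best = (i, j)
            break
    if best:
        matches.append(text[best[0]:best[1]])
        i = best[1]                            # skip past the match
    else:
        i += 1

print(matches)   # -> ['sodium chloride'] (the longer entry wins over 'chloride')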
def send(self): """Send this message to the controller.""" self._file.write(self.as_bytes()) self._file.write(b'\r\n')
0.014085
def periodic(period, drain_timeout=_DEFAULT_DRAIN, reset_timeout=_DEFAULT_RESET, max_consecutive_attempts=_DEFAULT_ATTEMPTS): """Create a periodic consistent region configuration. The IBM Streams runtime will trigger a drain and checkpoint the region periodically at the time interval specified by `period`. Args: period: The trigger period. This may be either a :py:class:`datetime.timedelta` value or the number of seconds as a `float`. drain_timeout: The drain timeout, as either a :py:class:`datetime.timedelta` value or the number of seconds as a `float`. If not specified, the default value is 180 seconds. reset_timeout: The reset timeout, as either a :py:class:`datetime.timedelta` value or the number of seconds as a `float`. If not specified, the default value is 180 seconds. max_consecutive_attempts(int): The maximum number of consecutive attempts to reset the region. This must be an integer value between 1 and 2147483647, inclusive. If not specified, the default value is 5. Returns: ConsistentRegionConfig: the configuration. """ return ConsistentRegionConfig(trigger=ConsistentRegionConfig.Trigger.PERIODIC, period=period, drain_timeout=drain_timeout, reset_timeout=reset_timeout, max_consecutive_attempts=max_consecutive_attempts)
0.006565
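The period and timeout arguments accept either a `datetime.timedelta` or a plain number of seconds; an illustrative normalization helper (not the library's own code):

from datetime import timedelta

def as_seconds(value):
    return value.total_seconds() if isinstance(value, timedelta) else float(value)

print(as_seconds(timedelta(minutes=3)))   # -> 180.0
print(as_seconds(45))                     # -> 45.0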
def catch_factory(attr): """ Factory returning a catch function """ def _catch(s, *args, **kw): """ This is used to catch and process all calls. """ def process(value): """ return the actual value after processing """ if attr.startswith("__"): # __repr__, __str__ etc return getattr(value, attr)(*args, **kw) else: # upper, lower etc return getattr(u"".__class__, attr)(value, *args, **kw) stack = inspect.stack() mod = inspect.getmodule(stack[1][0]) # We are called from the owning module so allow if mod.__name__.split(".")[-1] == s._module_name: return process(s._value) # very shallow calling no stack if len(stack) < 3: return process(s._private) # Check if this is an internal or external module. We need to allow # calls to modules like requests etc remote = not inspect.getmodule(stack[2][0]).__name__.startswith("py3status") valid = False # go through the stack to see how we came through the code for frame in stack[2:]: mod = inspect.getmodule(frame[0]) if remote and mod.__name__.split(".")[-1] == s._module_name: # the call to an external module started in the correct module # so allow this usage valid = True break if mod.__name__ == "py3status.py3" and frame[3] == "request": # Py3.request has special needs due so it is allowed to access # private variables. valid = True break if mod.__name__.startswith("py3status"): # We were somewhere else in py3status than the module, maybe we # are doing some logging. Prevent usage return process(s._private) if valid: return process(s._value) return process(s._private) return _catch
0.000954
async def _request( self, method: str, url: str, *, headers: dict = None, params: dict = None, json: dict = None) -> dict: """Make a request against AirVisual.""" full_url = '{0}/{1}'.format(url, self.zip_code) pieces = urlparse(url) if not headers: headers = {} headers.update({ 'Content-Type': 'application/json', 'Referer': '{0}://{1}'.format(pieces.scheme, pieces.netloc), 'User-Agent': API_USER_AGENT }) async with self._websession.request(method, full_url, headers=headers, params=params, json=json) as resp: try: resp.raise_for_status() data = await resp.json(content_type=None) return data except client_exceptions.ClientError as err: raise RequestError( 'Error requesting data from {0}: {1}'.format(url, err))
0.001887
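The Referer header above is just the scheme and host of the request URL; for example (hypothetical URL):

from urllib.parse import urlparse

pieces = urlparse('https://api.example.com/v2/nearest_city?lat=1&lon=2')
print('{0}://{1}'.format(pieces.scheme, pieces.netloc))   # -> https://api.example.com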
def unsubscribe(self, callback_id): """Ask the hub to cancel the subscription for callback_id, then delete it from the local database if successful. """ request = self.get_active_subscription(callback_id) request['mode'] = 'unsubscribe' self.subscribe_impl(callback_id, **request)
0.006079
def del_client(self, **kwargs):
    """
    Deletes a registered client from the specified network.

    Usage:

        ======= ===================================
        Method  URI
        ======= ===================================
        DELETE  /vtep/networks/{vni}/clients/{mac}
        ======= ===================================

    Request parameters:

        =========== ===============================================
        Attribute   Description
        =========== ===============================================
        vni         Virtual Network Identifier. (e.g. 10)
        mac         Client MAC address to delete.
        =========== ===============================================

    Example::

        $ curl -X DELETE http://localhost:8080/vtep/networks/10/clients/aa:bb:cc:dd:ee:ff | python -m json.tool

    ::

        {
            "10": {
                "EvpnClient": {
                    "ip": "10.0.0.1",
                    "mac": "aa:bb:cc:dd:ee:ff",
                    "next_hop": "172.17.0.1",
                    "port": 1
                }
            }
        }
    """
    try:
        body = self.vtep_app.del_client(**kwargs)
    except (BGPSpeakerNotFound, DatapathNotFound,
            VniNotFound, ClientNotFound, ClientNotLocal) as e:
        return Response(body=str(e), status=500)

    return Response(content_type='application/json',
                    body=json.dumps(body))
0.001887
def getStormQuery(self, text): ''' Parse storm query text and return a Query object. ''' query = s_syntax.Parser(text).query() query.init(self) return query
0.009804
def get_analyses(self): """Returns a list of analyses from the AR """ analyses = self.context.getAnalyses(full_objects=True) return filter(self.is_analysis_attachment_allowed, analyses)
0.009217
def parser_add(self, parser, prefix=''): """Add config to an :py:class:`argparse.ArgumentParser` object. The parser’s :py:meth:`~argparse.ArgumentParser.add_argument` method is called for each config item. The argument name is constructed from the parent and item names, with a dot separator. :param argparse.ArgumentParser parser: The argument parser object. :keyword str prefix: The parent node name. """ if prefix: prefix += '.' for key, value in self.items(): value.parser_add(parser, prefix + key)
0.003279
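Because child names are joined with dots, the generated argparse options carry the full path; a minimal standalone illustration (hypothetical option names, not this class's API):

import argparse

parser = argparse.ArgumentParser()
for name, default in [('server.host', 'localhost'), ('server.port', '8080')]:
    parser.add_argument('--' + name, default=default)

args = parser.parse_args(['--server.port', '9090'])
print(getattr(args, 'server.port'))   # -> 9090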
def disable_glut(self):
    """Disable event loop integration with glut.

    This sets PyOS_InputHook to NULL, sets the display function to a dummy
    one, and sets the timer to a dummy timer that will be triggered very far
    in the future.
    """
    import OpenGL.GLUT as glut  # @UnresolvedImport
    from glut_support import glutMainLoopEvent  # @UnresolvedImport

    glut.glutHideWindow()  # This is an event to be processed below
    glutMainLoopEvent()
    self.clear_inputhook()
0.003759
def get_table(self, schema_type): """Retrieve a SQLAlchemy table based on the supplied GraphQL schema type name.""" table_name = schema_type.lower() if not self.has_table(table_name): raise exceptions.GraphQLCompilationError( 'No Table found in SQLAlchemy metadata for table name "{}"'.format(table_name) ) return self.table_name_to_table[table_name]
0.009479
def fast_sweep_time_evolution(Ep, epsilonp, gamma, omega_level, rm, xi, theta, semi_analytic=True, file_name=None, return_code=False): r"""Return a spectrum of time evolutions of the density matrix. We test a basic two-level system. >>> import numpy as np >>> from sympy import symbols >>> from scipy.constants import physical_constants >>> e_num = physical_constants["elementary charge"][0] >>> hbar_num = physical_constants["Planck constant over 2 pi"][0] >>> Ne = 2 >>> Nl = 1 >>> Ep = [-1.0] >>> epsilonp = [np.array([0, 0, 1.0])] >>> delta = symbols("delta") >>> detuning_knob = [delta] >>> gamma = np.array([[0.0, -1.0], [1.0, 0.0]]) >>> omega_level = np.array([0.0, 100.0]) >>> rm = [np.array([[0.0, 0.0], [1.0, 0.0]])*hbar_num/e_num ... for p in range(3)] >>> xi = np.array([[[0, 1], [1, 0]]]) >>> theta = phase_transformation(Ne, Nl, rm, xi) >>> sweep_time_evolution = fast_sweep_time_evolution(Ep, epsilonp, gamma, ... omega_level, rm, xi, ... theta) >>> t = np.linspace(0, 1e1, 11) >>> unfolding = Unfolding(Ne, True, True, True) >>> rho0 = np.array([[1, 0], [0, 0]]) >>> rho0 = unfolding(rho0) >>> deltas, rho = sweep_time_evolution(t, rho0, [[-20, 20, 5]]) >>> print(rho.shape) (5, 11, 3) >>> print(rho) [[[ 0.0000e+00 0.0000e+00 0.0000e+00] [ 5.6205e-04 -1.8774e-02 -1.4437e-02] [ 1.0302e-03 -3.1226e-02 -7.3031e-03] [ 9.1218e-04 -3.0149e-02 1.3325e-03] [ 6.3711e-04 -2.5073e-02 2.7437e-03] [ 5.3438e-04 -2.3100e-02 2.2977e-04] [ 5.8098e-04 -2.4044e-02 -1.4626e-03] [ 6.3808e-04 -2.5209e-02 -1.3291e-03] [ 6.4675e-04 -2.5407e-02 -6.4498e-04] [ 6.2948e-04 -2.5071e-02 -3.7457e-04] [ 6.1812e-04 -2.4841e-02 -4.9967e-04]] <BLANKLINE> [[ 0.0000e+00 0.0000e+00 0.0000e+00] [ 5.8142e-03 -7.4650e-02 1.3859e-02] [ 2.2458e-03 -4.3027e-02 -1.9436e-02] [ 2.2788e-03 -4.6867e-02 8.1709e-03] [ 3.0571e-03 -5.4724e-02 -6.7300e-03] [ 2.0980e-03 -4.5626e-02 -2.2121e-03] [ 2.6866e-03 -5.1685e-02 -1.1906e-03] [ 2.4351e-03 -4.9072e-02 -3.8467e-03] [ 2.4572e-03 -4.9419e-02 -1.6141e-03] [ 2.5241e-03 -5.0036e-02 -2.8327e-03] [ 2.4491e-03 -4.9304e-02 -2.4541e-03]] <BLANKLINE> [[ 0.0000e+00 0.0000e+00 0.0000e+00] [ 1.4361e-01 0.0000e+00 -3.4458e-01] [ 3.0613e-01 0.0000e+00 -4.1373e-01] [ 3.6110e-01 0.0000e+00 -3.7387e-01] [ 3.5427e-01 0.0000e+00 -3.3710e-01] [ 3.3835e-01 0.0000e+00 -3.2630e-01] [ 3.3135e-01 0.0000e+00 -3.2873e-01] [ 3.3115e-01 0.0000e+00 -3.3244e-01] [ 3.3261e-01 0.0000e+00 -3.3388e-01] [ 3.3343e-01 0.0000e+00 -3.3383e-01] [ 3.3355e-01 0.0000e+00 -3.3348e-01]] <BLANKLINE> [[ 0.0000e+00 0.0000e+00 0.0000e+00] [ 5.8142e-03 7.4650e-02 1.3859e-02] [ 2.2458e-03 4.3027e-02 -1.9436e-02] [ 2.2788e-03 4.6867e-02 8.1709e-03] [ 3.0571e-03 5.4724e-02 -6.7300e-03] [ 2.0980e-03 4.5626e-02 -2.2121e-03] [ 2.6866e-03 5.1685e-02 -1.1906e-03] [ 2.4351e-03 4.9072e-02 -3.8467e-03] [ 2.4572e-03 4.9419e-02 -1.6141e-03] [ 2.5241e-03 5.0036e-02 -2.8327e-03] [ 2.4491e-03 4.9304e-02 -2.4541e-03]] <BLANKLINE> [[ 0.0000e+00 0.0000e+00 0.0000e+00] [ 5.6205e-04 1.8774e-02 -1.4437e-02] [ 1.0302e-03 3.1226e-02 -7.3031e-03] [ 9.1218e-04 3.0149e-02 1.3325e-03] [ 6.3711e-04 2.5073e-02 2.7437e-03] [ 5.3438e-04 2.3100e-02 2.2977e-04] [ 5.8098e-04 2.4044e-02 -1.4626e-03] [ 6.3808e-04 2.5209e-02 -1.3291e-03] [ 6.4675e-04 2.5407e-02 -6.4498e-04] [ 6.2948e-04 2.5071e-02 -3.7457e-04] [ 6.1812e-04 2.4841e-02 -4.9967e-04]]] >>> deltas, rho = sweep_time_evolution(t, rho0, [[-20, 20, 11]], ... average=True) >>> print(rho) [[ 0.0006 -0.024 -0.0021] [ 0.0011 -0.0308 -0.0007] [ 0.0016 -0.0375 0.0024] [ 0.0041 -0.0604 -0.0061] [ 0.016 -0.1175 -0.0118] [ 0.2999 0. 
-0.3291] [ 0.016 0.1175 -0.0118] [ 0.0041 0.0604 -0.0061] [ 0.0016 0.0375 0.0024] [ 0.0011 0.0308 -0.0007] [ 0.0006 0.024 -0.0021]] """ # We unpack variables. if True: Nl = xi.shape[0] # We determine which arguments are constants. if True: try: Ep = np.array([complex(Ep[l]) for l in range(Nl)]) variable_Ep = False except: variable_Ep = True try: epsilonp = [np.array([complex(epsilonp[l][i]) for i in range(3)]) for l in range(Nl)] variable_epsilonp = False except: variable_epsilonp = True # We obtain code for the time evolution. if True: detuning_knob = symbols("delta1:"+str(Nl)) args = (Ep, epsilonp, detuning_knob, gamma, omega_level, rm, xi, theta, file_name, True) args = (Ep, epsilonp, detuning_knob, gamma, omega_level, rm, xi, theta, True, file_name, True) time_evolution = fast_time_evolution(*args) code = time_evolution+"\n\n" # We establish the arguments of the output function. if True: code += "def sweep_time_evolution(t, rho0, " if variable_Ep: code += "Ep, " if variable_epsilonp: code += "epsilonp, " code += "detuning_knob, average=False, " code += "time_evolution=time_evolution):\n" code += ' r"""A fast frequency sweep of the steady state."""\n' # Code to determine the sweep range. if True: code += """ sweepN = -1\n""" code += """ for i, delta in enumerate(detuning_knob):\n""" code += """ if hasattr(delta, "__getitem__"):\n""" code += """ sweepN = i\n""" code += """ delta0 = delta[0]\n""" code += """ deltaf = delta[1]\n""" code += """ Ndelta = delta[2]\n""" code += """ break\n\n""" code += """ if sweepN == -1:\n""" code += """ s = 'One of the detuning knobs '\n""" code += """ s += 'must be of the form '\n""" code += """ s += '(start, stop, Nsteps)'\n""" code += """ raise ValueError(s)\n\n""" code += """ deltas = np.linspace(delta0, deltaf, Ndelta)\n\n""" # We call time_evolution. if True: code += " args = [[t, rho0, " if variable_Ep: code += "Ep, " if variable_epsilonp: code += "epsilonp, " code += """list(detuning_knob[:sweepN]) +\n""" code += """ [deltas[i]] +\n""" code += """ list(detuning_knob[sweepN+1:]), average]\n""" code += """ for i in range(Ndelta)]\n\n""" code += " rho = np.array([time_evolution(*argsi)\n" code += " for argsi in args])\n\n" # We finish the code. if True: code += " return deltas, rho\n" # We write the code to file if provided, and execute it. if True: if file_name is not None: f = file(file_name+".py", "w") f.write(code) f.close() sweep_time_evolution = code if not return_code: exec sweep_time_evolution return sweep_time_evolution
0.001176