Columns:
  text  — string, lengths 78 to 104k
  score — float64, range 0 to 0.18
def verify_checksum(message, previous_csum=0):
    """Verify checksum for incoming message.

    :param message: incoming message
    :param previous_csum: accumulated checksum value
    :return: True if message checksum type is None or checksum is correct
    """
    if message.message_type in CHECKSUM_MSG_TYPES:
        csum = compute_checksum(
            message.checksum[0],
            message.args,
            previous_csum,
        )
        if csum == message.checksum[1]:
            return True
        else:
            return False
    else:
        return True
0.001698
def apply(self, doc):
    """
    Generate MentionCaptions from a Document by parsing all of its Captions.

    :param doc: The ``Document`` to parse.
    :type doc: ``Document``
    :raises TypeError: If the input doc is not of type ``Document``.
    """
    if not isinstance(doc, Document):
        raise TypeError(
            "Input Contexts to MentionCaptions.apply() must be of type Document"
        )
    for caption in doc.captions:
        yield TemporaryCaptionMention(caption)
0.007435
async def popen_uci(command: Union[str, List[str]], *, setpgrp: bool = False, loop=None, **popen_args: Any) -> Tuple[asyncio.SubprocessTransport, UciProtocol]:
    """
    Spawns and initializes a UCI engine.

    :param command: Path of the engine executable, or a list including the
        path and arguments.
    :param setpgrp: Open the engine process in a new process group. This will
        stop signals (such as keyboard interrupts) from propagating from the
        parent process. Defaults to ``False``.
    :param popen_args: Additional arguments for
        `popen <https://docs.python.org/3/library/subprocess.html#popen-constructor>`_.
        Do not set ``stdin``, ``stdout``, ``bufsize`` or ``universal_newlines``.

    Returns a subprocess transport and engine protocol pair.
    """
    transport, protocol = await UciProtocol.popen(command, setpgrp=setpgrp, loop=loop, **popen_args)
    try:
        await protocol.initialize()
    except:
        transport.close()
        raise
    return transport, protocol
0.004812
def pause(self):
    """Pauses the stream."""
    res = librtmp.RTMP_Pause(self.client.rtmp, 1)
    if res < 1:
        raise RTMPError("Failed to pause")
0.011696
def apply_panes_settings(self):
    """Update dockwidgets features settings"""
    for plugin in (self.widgetlist + self.thirdparty_plugins):
        features = plugin.FEATURES
        if CONF.get('main', 'vertical_dockwidget_titlebars'):
            features = features | QDockWidget.DockWidgetVerticalTitleBar
        plugin.dockwidget.setFeatures(features)
        plugin.update_margins()
0.004695
def get_new_document(self, cursor_pos=None):
    """
    Create a `Document` instance that contains the resulting text.
    """
    lines = []

    # Original text, before cursor.
    if self.original_document.text_before_cursor:
        lines.append(self.original_document.text_before_cursor)

    # Selected entries from the history.
    for line_no in sorted(self.selected_lines):
        lines.append(self.history_lines[line_no])

    # Original text, after cursor.
    if self.original_document.text_after_cursor:
        lines.append(self.original_document.text_after_cursor)

    # Create `Document` with cursor at the right position.
    text = '\n'.join(lines)
    if cursor_pos is not None and cursor_pos > len(text):
        cursor_pos = len(text)
    return Document(text, cursor_pos)
0.002304
def process_response(self, request, response):
    """
    Forces the HTTP ``Vary`` header onto responses to avoid having them
    cached across subdomains.
    """
    if getattr(settings, 'FORCE_VARY_ON_HOST', True):
        patch_vary_headers(response, ('Host',))
    return response
0.00627
def request(self, method, path, contents, headers, decode_json=False,
            stream=False, query=None, cdn=False):
    """
    See :py:func:`swiftly.client.client.Client.request`
    """
    if query:
        path += '?' + '&'.join(
            ('%s=%s' % (quote(k), quote(v)) if v else quote(k))
            for k, v in sorted(six.iteritems(query)))
    reset_func = self._default_reset_func
    if isinstance(contents, six.string_types):
        contents = StringIO(contents)
    tell = getattr(contents, 'tell', None)
    seek = getattr(contents, 'seek', None)
    if tell and seek:
        try:
            orig_pos = tell()
            reset_func = lambda: seek(orig_pos)
        except Exception:
            tell = seek = None
    elif not contents:
        reset_func = lambda: None
    status = 0
    reason = 'Unknown'
    attempt = 0
    while attempt < self.attempts:
        attempt += 1
        if cdn:
            conn_path = self.cdn_path
        else:
            conn_path = self.storage_path
        titled_headers = dict((k.title(), v) for k, v in six.iteritems({
            'User-Agent': self.user_agent}))
        if headers:
            titled_headers.update(
                (k.title(), v) for k, v in six.iteritems(headers))
        resp = None
        if not hasattr(contents, 'read'):
            if method not in self.no_content_methods and contents and \
                    'Content-Length' not in titled_headers and \
                    'Transfer-Encoding' not in titled_headers:
                titled_headers['Content-Length'] = str(
                    len(contents or ''))
            req = self.Request.blank(
                conn_path + path,
                environ={'REQUEST_METHOD': method, 'swift_owner': True},
                headers=titled_headers, body=contents)
            verbose_headers = ' '.join(
                '%s: %s' % (k, v) for k, v in six.iteritems(titled_headers))
            self.verbose(
                '> %s %s %s', method, conn_path + path, verbose_headers)
            resp = req.get_response(self.swift_proxy)
        else:
            req = self.Request.blank(
                conn_path + path,
                environ={'REQUEST_METHOD': method, 'swift_owner': True},
                headers=titled_headers)
            content_length = None
            for h, v in six.iteritems(titled_headers):
                if h.lower() == 'content-length':
                    content_length = int(v)
                req.headers[h] = v
            if method not in self.no_content_methods and \
                    content_length is None:
                titled_headers['Transfer-Encoding'] = 'chunked'
                req.headers['Transfer-Encoding'] = 'chunked'
            else:
                req.content_length = content_length
            req.body_file = contents
            verbose_headers = ' '.join(
                '%s: %s' % (k, v) for k, v in six.iteritems(titled_headers))
            self.verbose(
                '> %s %s %s', method, conn_path + path, verbose_headers)
            resp = req.get_response(self.swift_proxy)
        status = resp.status_int
        reason = resp.status.split(' ', 1)[1]
        hdrs = headers_to_dict(resp.headers.items())
        if stream:
            def iter_reader(size=-1):
                if size == -1:
                    return ''.join(resp.app_iter)
                else:
                    try:
                        return next(resp.app_iter)
                    except StopIteration:
                        return ''
            iter_reader.read = iter_reader
            value = iter_reader
        else:
            value = resp.body
        self.verbose('< %s %s', status, reason)
        if status and status // 100 != 5:
            if not stream and decode_json and status // 100 == 2:
                if value:
                    value = json.loads(value)
                else:
                    value = None
            return (status, reason, hdrs, value)
        if reset_func:
            reset_func()
        self.sleep(2 ** attempt)
    raise Exception('%s %s failed: %s %s' % (method, path, status, reason))
0.001534
def resample(self, sampling_rate, inplace=False, kind='linear'):
    '''Resample the Variable to the specified sampling rate.

    Parameters
    ----------
    sampling_rate : :obj:`int`, :obj:`float`
        Target sampling rate (in Hz).
    inplace : :obj:`bool`, optional
        If True, performs resampling in-place. If False, returns a resampled
        copy of the current Variable. Default is False.
    kind : {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}
        Argument to pass to :obj:`scipy.interpolate.interp1d`; indicates
        the kind of interpolation approach to use. See interp1d docs for
        valid values. Default is 'linear'.
    '''
    if not inplace:
        var = self.clone()
        var.resample(sampling_rate, True, kind)
        return var

    if sampling_rate == self.sampling_rate:
        return

    old_sr = self.sampling_rate
    n = len(self.index)

    self.index = self._build_entity_index(self.run_info, sampling_rate)

    x = np.arange(n)
    num = len(self.index)

    from scipy.interpolate import interp1d
    f = interp1d(x, self.values.values.ravel(), kind=kind)
    x_new = np.linspace(0, n - 1, num=num)
    self.values = pd.DataFrame(f(x_new))
    assert len(self.values) == len(self.index)

    self.sampling_rate = sampling_rate
0.002105
def uniqueTags(tagList):
    '''
    uniqueTags - Returns the unique tags in tagList.

    @param tagList list<AdvancedTag> : A list of tag objects.
    '''
    ret = []
    alreadyAdded = set()

    for tag in tagList:
        myUid = tag.getUid()
        if myUid in alreadyAdded:
            continue
        # record the uid so later duplicates are actually skipped
        alreadyAdded.add(myUid)
        ret.append(tag)

    return TagCollection(ret)
0.005319
def run_nose(self, params):
    """
    :type params: Params
    """
    thread.set_index(params.thread_index)
    log.debug("[%s] Starting nose iterations: %s", params.worker_index, params)
    assert isinstance(params.tests, list)
    # argv.extend(['--with-apiritif', '--nocapture', '--exe', '--nologcapture'])

    end_time = self.params.ramp_up + self.params.hold_for
    end_time += time.time() if end_time else 0
    time.sleep(params.delay)

    plugin = ApiritifPlugin(self._writer)
    self._writer.concurrency += 1

    config = Config(env=os.environ, files=all_config_files(), plugins=DefaultPluginManager())
    config.plugins.addPlugins(extraplugins=[plugin])
    config.testNames = params.tests
    config.verbosity = 3 if params.verbose else 0
    if params.verbose:
        config.stream = open(os.devnull, "w")  # FIXME: use "with", allow writing to file/log

    iteration = 0
    try:
        while True:
            log.debug("Starting iteration:: index=%d,start_time=%.3f", iteration, time.time())
            thread.set_iteration(iteration)
            ApiritifTestProgram(config=config)
            log.debug("Finishing iteration:: index=%d,end_time=%.3f", iteration, time.time())

            iteration += 1

            # reasons to stop
            if plugin.stop_reason:
                log.debug("[%s] finished prematurely: %s", params.worker_index, plugin.stop_reason)
            elif iteration >= params.iterations:
                log.debug("[%s] iteration limit reached: %s", params.worker_index, params.iterations)
            elif 0 < end_time <= time.time():
                log.debug("[%s] duration limit reached: %s", params.worker_index, params.hold_for)
            else:
                continue  # continue if no one is faced
            break
    finally:
        self._writer.concurrency -= 1

        if params.verbose:
            config.stream.close()
0.00534
def set_config(config, overwrite=False):
    '''
    Updates the global configuration.

    :param config: Can be a dictionary containing configuration, or a string
                   which represents a (relative) configuration filename.
    '''
    if config is None:
        config = get_conf_path_from_env()

    # must be after the fallback, otherwise a bad fallback will incorrectly clear
    if overwrite is True:
        _runtime_conf.empty()

    if isinstance(config, six.string_types):
        config = conf_from_file(config)
        _runtime_conf.update(config)
        if config.__file__:
            _runtime_conf.__file__ = config.__file__
    elif isinstance(config, dict):
        _runtime_conf.update(conf_from_dict(config))
    else:
        raise TypeError('%s is neither a dictionary nor a string.' % config)
0.001215
def index(in_cram, config):
    """Ensure CRAM file has a .crai index file.
    """
    out_file = in_cram + ".crai"
    if not utils.file_uptodate(out_file, in_cram):
        with file_transaction(config, in_cram + ".crai") as tx_out_file:
            tx_in_file = os.path.splitext(tx_out_file)[0]
            utils.symlink_plus(in_cram, tx_in_file)
            cmd = "samtools index {tx_in_file}"
            do.run(cmd.format(**locals()), "Index CRAM file")
    return out_file
0.002083
def away(dev, away_end, temperature):
    """ Enables or disables the away mode. """
    if away_end:
        click.echo("Setting away until %s, temperature: %s" % (away_end, temperature))
    else:
        click.echo("Disabling away mode")
    dev.set_away(away_end, temperature)
0.007143
def calculate_trans(thickness_cm: np.float, miu_per_cm: np.array):
    """calculate the transmission signal using the formula

    transmission = exp( - thickness_cm * miu_per_cm )

    where miu_per_cm = atoms_per_cm3 * 1e-24 * sigma_b

    Parameters:
    ===========
    thickness_cm: float (sample thickness in cm)
    miu_per_cm: np.array (attenuation coefficient per cm, computed from the
        number of atoms per cm3 of the element/isotope and the sigma values
        retrieved from the database, in barns)

    Returns:
    ========
    transmission array
    """
    transmission = np.exp(-thickness_cm * miu_per_cm)
    return np.array(transmission)
0.001876
def _iterable_to_varargs_method(func):
    """decorator to convert a method taking an iterable to a *args one"""
    def wrapped(self, *args, **kwargs):
        return func(self, args, **kwargs)
    return wrapped
0.004717
def config_dict_to_string(dictionary):
    """
    Convert a given config dictionary ::

        dictionary[key_1] = value_1
        dictionary[key_2] = value_2
        ...
        dictionary[key_n] = value_n

    into the corresponding string ::

        key_1=value_1|key_2=value_2|...|key_n=value_n

    :param dict dictionary: the config dictionary
    :rtype: string
    """
    parameters = []
    for key in dictionary:
        parameters.append(u"%s%s%s" % (
            key,
            gc.CONFIG_STRING_ASSIGNMENT_SYMBOL,
            dictionary[key]
        ))
    return gc.CONFIG_STRING_SEPARATOR_SYMBOL.join(parameters)
0.001582
def set_seh_chain_pointer(self, value):
    """
    Change the pointer to the first structured exception handler block.

    @type  value: int
    @param value: Value of the remote pointer to the first block of the
        structured exception handlers linked list. To disable SEH set the
        value C{0xFFFFFFFF}.

    @raise NotImplementedError:
        This method is only supported in 32 bits versions of Windows.
    """
    if win32.arch != win32.ARCH_I386:
        raise NotImplementedError(
            "SEH chain parsing is only supported in 32-bit Windows.")

    process = self.get_process()
    address = self.get_linear_address('SegFs', 0)
    process.write_pointer(address, value)
0.007884
def plugin_for(cls, model):
    '''
    Find and return a plugin for this model. Uses inheritance to find a
    model where the plugin is registered.
    '''
    logger.debug("Getting a plugin for: %s", model)
    if not issubclass(model, Model):
        return
    if model in cls.plugins:
        return cls.plugins[model]
    for b in model.__bases__:
        p = cls.plugin_for(b)
        if p:
            return p
0.006479
def account_unblock(self, id):
    """
    Unblock a user.

    Returns a `relationship dict`_ containing the updated relationship to the user.
    """
    id = self.__unpack_id(id)
    url = '/api/v1/accounts/{0}/unblock'.format(str(id))
    return self.__api_request('POST', url)
0.009709
def wordnet_annotations(self):
    """The list of wordnet annotations of ``words`` layer."""
    if not self.is_tagged(WORDNET):
        self.tag_wordnet()
    return [[a[WORDNET] for a in analysis] for analysis in self.analysis]
0.008163
def _move_node_file(path, old_id, new_id):
    """
    Move the files from a node when changing its id

    :param path: Path of the project
    :param old_id: ID before change
    :param new_id: New node UUID
    """
    root = os.path.join(path, "project-files")
    if os.path.exists(root):
        for dirname in os.listdir(root):
            module_dir = os.path.join(root, dirname)
            if os.path.isdir(module_dir):
                node_dir = os.path.join(module_dir, old_id)
                if os.path.exists(node_dir):
                    shutil.move(node_dir, os.path.join(module_dir, new_id))
0.001639
def ssh_key_info_from_key_data(key_id, priv_key=None):
    """Get/load SSH key info necessary for signing.

    @param key_id {str} Either a private ssh key fingerprint, e.g.
        'b3:f0:a1:6c:18:3b:42:63:fd:6e:57:42:74:17:d4:bc', or the path to
        an ssh private key file (like ssh's IdentityFile config option).
    @param priv_key {str} Optional. SSH private key file data (PEM format).
    @return {dict} with these keys:
        - type: "agent"
        - signer: Crypto signer class (a PKCS#1 v1.5 signer for RSA keys)
        - fingerprint: key md5 fingerprint
        - algorithm: See ALGO_FROM_SSH_KEY_TYPE for supported list.
        - ... some others added by `load_ssh_key()`
    """
    if FINGERPRINT_RE.match(key_id) and priv_key:
        key_info = {"fingerprint": key_id, "priv_key": priv_key}
    else:
        # Otherwise, we attempt to load necessary details from ~/.ssh.
        key_info = load_ssh_key(key_id)

    # Load a key signer.
    key = None
    try:
        key = serialization.load_pem_private_key(
            key_info["priv_key"],
            password=None,
            backend=default_backend())
    except TypeError as ex:
        log.debug("could not import key without passphrase (will "
                  "try with passphrase): %s", ex)
        if "priv_key_path" in key_info:
            prompt = "Passphrase [%s]: " % key_info["priv_key_path"]
        else:
            prompt = "Passphrase: "
        for i in range(3):
            passphrase = getpass(prompt)
            if not passphrase:
                break
            try:
                key = serialization.load_pem_private_key(
                    key_info["priv_key"],
                    password=passphrase,
                    backend=default_backend())
            except ValueError:
                continue
            else:
                break
        if not key:
            details = ""
            if "priv_key_path" in key_info:
                details = " (%s)" % key_info["priv_key_path"]
            raise MantaError("could not import key" + details)

    # If load_ssh_key() wasn't run, set the algorithm here.
    if 'algorithm' not in key_info:
        if isinstance(key, ec.EllipticCurvePrivateKey):
            key_info['algorithm'] = ECDSA_ALGO_FROM_KEY_SIZE[str(key.key_size)]
        elif isinstance(key, rsa.RSAPrivateKey):
            key_info['algorithm'] = RSA_STR
        else:
            raise MantaError("Unsupported key type for: {}".format(key_id))

    key_info["signer"] = key
    key_info["type"] = "ssh_key"
    return key_info
0.000778
def genweights(p, q, dt, error=None, n=False):
    """
    ;+
    ; GENWEIGHTS : return optimal weighting coefficients from Powell and Leben 2004<br />
    ;   translated from Matlab genweights.m program to IDL<br /><br />
    ;
    ; Reference : Powell, B. S., et R. R. Leben (2004), An Optimal Filter for <br />
    ;   Geostrophic Mesoscale Currents from Along-Track Satellite Altimetry, <br />
    ;   Journal of Atmospheric and Oceanic Technology, 21(10), 1633-1642.
    ;
    ; @author Renaud DUSSURGET, LEGOS/CTOH
    ; @history Created Sep. 2009 from genweights.m (Brian Powell (c) 2004, <br />
    ;   University of Colorado, Boulder)<br />
    ;-
    """
    p = np.abs(p)
    q = np.abs(q)

    # check inputs
    if (-p > q):
        raise ValueError("genweights: p must be less than q")

    # Build matrices
    N = p + q
    T = N + 1
    A = np.matrix(np.zeros((T, T)))
    A[T - 1, :] = np.append(np.repeat(1.0, N), 0)
    sn = np.arange(T) - p
    sn = sn.compress(sn != 0)
    for i in np.arange(len(sn)):
        A[i, :] = np.append(((1. / sn) * (-sn[i] / 2.)), sn[i] ** 2. * dt ** 2. / 4.)  # Eq.11 (PL)
        A[i, i] = -1.

    B = np.zeros(T)
    B[N] = 1.0

    # Compute the coefficients
    cn = np.dot(A.I, B)  ##B
    # cn=cn.transpose()
    cn = np.array([i for i in cn.flat])
    cn = cn[0:N]  # Check the indices

    # Compute the error
    error = np.sqrt(np.sum(cn.transpose() / (sn * dt)) ** 2. +
                    np.sum((cn.transpose() / (sn * dt)) ** 2.))

    return (cn, sn) if n else cn
0.031517
def slot_enable_nio(self, slot_number, port_number):
    """
    Enables a slot NIO binding.

    :param slot_number: slot number
    :param port_number: port number
    """
    is_running = yield from self.is_running()
    if is_running:  # running router
        yield from self._hypervisor.send('vm slot_enable_nio "{name}" {slot_number} {port_number}'.format(name=self._name,
                                                                                                          slot_number=slot_number,
                                                                                                          port_number=port_number))

    log.info('Router "{name}" [{id}]: NIO enabled on port {slot_number}/{port_number}'.format(name=self._name,
                                                                                              id=self._id,
                                                                                              slot_number=slot_number,
                                                                                              port_number=port_number))
0.007673
def visit_FunctionDef(self, node, **kwargs):
    """
    Handles function definitions within code.

    Process a function's docstring, keeping well aware of the function's
    context and whether or not it's part of an interface definition.
    """
    if self.options.debug:
        stderr.write("# Function {0.name}{1}".format(node, linesep))
    # Push either 'interface' or 'class' onto our containing nodes
    # hierarchy so we can keep track of context.  This will let us tell
    # if a function is nested within another function or even if a class
    # is nested within a function.
    containingNodes = kwargs.get('containingNodes') or []
    containingNodes.append((node.name, 'function'))
    if self.options.topLevelNamespace:
        fullPathNamespace = self._getFullPathName(containingNodes)
        contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
        modifiedContextTag = self._processMembers(node, contextTag)
        tail = '@namespace {0}'.format(modifiedContextTag)
    else:
        tail = self._processMembers(node, '')
    if get_docstring(node):
        self._processDocstring(node, tail,
                               containingNodes=containingNodes)
    # Visit any contained nodes.
    self.generic_visit(node, containingNodes=containingNodes)
    # Remove the item we pushed onto the containing nodes hierarchy.
    containingNodes.pop()
0.001996
def tag_name(cls, tag):
    """return the name of the tag, with the namespace removed"""
    while isinstance(tag, etree._Element):
        tag = tag.tag
    return tag.split('}')[-1]
0.009852
def can_create_book_with_record_types(self, book_record_types):
    """Tests if this user can create a single ``Book`` using the desired record types.

    While ``CommentingManager.getBookRecordTypes()`` can be used to examine
    which records are supported, this method tests which record(s) are
    required for creating a specific ``Book``. Providing an empty array
    tests if a ``Book`` can be created with no records.

    arg:    book_record_types (osid.type.Type[]): array of book record types
    return: (boolean) - ``true`` if ``Book`` creation using the
            specified record ``Types`` is supported, ``false`` otherwise
    raise:  NullArgument - ``book_record_types`` is ``null``
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.BinAdminSession.can_create_bin_with_record_types
    # NOTE: It is expected that real authentication hints will be
    # handled in a service adapter above the pay grade of this impl.
    if self._catalog_session is not None:
        return self._catalog_session.can_create_catalog_with_record_types(catalog_record_types=book_record_types)
    return True
0.003079
def get_sec_overview(self):
    """
    Generate the "overview" section of the report.
    """
    logger.debug("Calculating Overview metrics.")

    data_path = os.path.join(self.data_dir, "overview")
    if not os.path.exists(data_path):
        os.makedirs(data_path)

    overview_config = {
        "activity_metrics": [],
        "author_metrics": [],
        "bmi_metrics": [],
        "time_to_close_metrics": [],
        "projects_metrics": []
    }

    for ds in self.data_sources:
        metric_file = self.ds2class[ds]
        metric_index = self.get_metric_index(ds)
        overview = metric_file.overview(metric_index, self.start_date, self.end_date)
        for section in overview_config:
            overview_config[section] += overview[section]

    overview_config['activity_file_csv'] = "data_source_evolution.csv"
    overview_config['efficiency_file_csv'] = "efficiency.csv"

    # ACTIVITY METRICS
    metrics = overview_config['activity_metrics']
    file_name = overview_config['activity_file_csv']
    file_name = os.path.join(data_path, file_name)

    csv = "metricsnames, netvalues, relativevalues, datasource\n"

    for metric in metrics:
        (last, percentage) = get_trend(metric.timeseries())
        csv += "{}, {}, {}, {}\n".format(metric.name, last,
                                         percentage, metric.DS_NAME)
    csv = csv.replace("_", "\_")
    create_csv(file_name, csv)

    # AUTHOR METRICS
    """
    Git Authors:
    -----------
    Description: average number of developers per month by quarters
    (so we have the average number of developers per month during
    those three months). If the approach is to work at the level of month,
    then just the number of developers per month.
    """
    author = overview_config['author_metrics']
    if author:
        authors_by_period = author[0]
        title_label = file_label = authors_by_period.name + ' per ' + self.interval
        file_path = os.path.join(data_path, file_label)
        csv_data = authors_by_period.timeseries(dataframe=True)
        # generate the CSV and the image file displaying the data
        self.create_csv_fig_from_df([csv_data], file_path, [authors_by_period.name],
                                    fig_type="bar", title=title_label,
                                    xlabel="time_period",
                                    ylabel=authors_by_period.id)

    # BMI METRICS
    bmi = []
    bmi_metrics = overview_config['bmi_metrics']
    csv = ""
    for metric in bmi_metrics:
        bmi.append(metric.aggregations())
        csv += metric.id + ", "

    # Time to close METRICS
    ttc = []
    ttc_metrics = overview_config['time_to_close_metrics']
    for metric in ttc_metrics:
        ttc.append(metric.aggregations())
        csv += metric.id + ", "

    # generate efficiency file
    csv = csv[:-2] + "\n"
    csv = csv.replace("_", "")
    bmi.extend(ttc)
    for val in bmi:
        csv += "%s, " % str_val(val)
    if csv[-2:] == ", ":
        csv = csv[:-2]

    file_name = os.path.join(data_path, 'efficiency.csv')
    create_csv(file_name, csv)
    logger.debug("Overview metrics generation complete!")
0.002035
def load(cls, sc, path):
    """
    Load a model from the given path.
    """
    model = cls._load_java(sc, path)
    wrapper = \
        sc._jvm.org.apache.spark.mllib.api.python.PowerIterationClusteringModelWrapper(model)
    return PowerIterationClusteringModel(wrapper)
0.009934
def query_by_string(self, query, fields=None, limit=10, sampling=None):
    """
    Returns raw rows that match the given query string

    :arg query: query string to be run against Kibana log messages
        (ex. @message:"^PHP Fatal").
    :type fields: list[str] or None
    :arg limit: the number of results (defaults to 10)
    :type sampling: int or None
    :arg sampling: Percentage of results to be returned (0,100)
    """
    query = {
        "query_string": {
            "query": query,
        }
    }

    return self._search(query, fields, limit, sampling)
0.004815
def add_preset(self, name, desc="", note=""):
    """Add a new command line preset for the current options with the
       specified name.

    :param name: the name of the new preset
    :returns: True on success or False otherwise
    """
    policy = self.policy
    if policy.find_preset(name):
        self.ui_log.error("A preset named '%s' already exists" % name)
        return False

    desc = desc or self.opts.desc
    note = note or self.opts.note

    try:
        policy.add_preset(name=name, desc=desc, note=note, opts=self.opts)
    except Exception as e:
        self.ui_log.error("Could not add preset: %s" % e)
        return False

    # Filter --add-preset <name> from arguments list
    arg_index = self._args.index("--add-preset")
    args = self._args[0:arg_index] + self._args[arg_index + 2:]

    self.ui_log.info("Added preset '%s' with options %s\n" %
                     (name, " ".join(args)))
    return True
0.001929
def _generate(self, item, *args):
    """ wraps execution of specific methods."""
    if item in self.done:
        return
    # verbose output with location.
    if self.generate_locations and item.location:
        print("# %s:%d" % item.location, file=self.stream)
    if self.generate_comments:
        self.print_comment(item)
    log.debug("generate %s, %s", item.__class__.__name__, item.name)
    # #log.debug('generate: %s( %s )', type(item).__name__, name)
    # if name in self.known_symbols:
    #    log.debug('item is in known_symbols %s'% name )
    #    mod = self.known_symbols[name]
    #    print >> self.imports, "from %s import %s" % (mod, name)
    #    self.done.add(item)
    #    if isinstance(item, typedesc.Structure):
    #        self.done.add(item.get_head())
    #        self.done.add(item.get_body())
    #    return
    #
    # to avoid infinite recursion, we have to mark it as done
    # before actually generating the code.
    self.done.add(item)
    # go to specific treatment
    mth = getattr(self, type(item).__name__)
    mth(item, *args)
    return
0.003314
def _set_scores(self):
    """
    Compute anomaly scores for the time series by sliding both lagging
    window and future window.
    """
    anom_scores = {}
    self._generate_SAX()
    self._construct_all_SAX_chunk_dict()
    length = self.time_series_length
    lws = self.lag_window_size
    fws = self.future_window_size

    for i, timestamp in enumerate(self.time_series.timestamps):
        if i < lws or i > length - fws:
            anom_scores[timestamp] = 0
        else:
            anom_scores[timestamp] = self._compute_anom_score_between_two_windows(i)
    self.anom_scores = TimeSeries(self._denoise_scores(anom_scores))
0.005747
def get_firewalls(self):
    """Returns a list of all firewalls on the account.

    :returns: A list of firewalls on the current account.
    """
    mask = ('firewallNetworkComponents,'
            'networkVlanFirewall,'
            'dedicatedFirewallFlag,'
            'firewallGuestNetworkComponents,'
            'firewallInterfaces,'
            'firewallRules,'
            'highAvailabilityFirewallFlag')

    return [firewall
            for firewall in self.account.getNetworkVlans(mask=mask)
            if has_firewall(firewall)]
0.003367
def GetChunk(time, breakpoints, b, mask=[]):
    '''
    Returns the indices corresponding to a given light curve chunk.

    :param int b: The index of the chunk to return
    '''
    M = np.delete(np.arange(len(time)), mask, axis=0)
    if b > 0:
        res = M[(M > breakpoints[b - 1]) & (M <= breakpoints[b])]
    else:
        res = M[M <= breakpoints[b]]
    return res
0.002646
def is_bound(method):
    """
    Decorator that asserts the model instance is bound.

    Requires:

    1. an ``id`` attribute
    2. a ``url`` attribute
    3. a manager set
    """
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        if not self._is_bound:
            raise ValueError("%r must be bound to call %s()" % (self, method.__name__))
        return method(self, *args, **kwargs)
    return wrapper
0.004598
def select(self, Class, set=None, recursive=True, ignore=True):
    """See :meth:`AbstractElement.select`"""
    if self.mode == Mode.MEMORY:
        for t in self.data:
            if Class.__name__ == 'Text':
                yield t
            else:
                for e in t.select(Class, set, recursive, ignore):
                    yield e
0.013298
def tags(self):
    """The tags property.

    Returns:
        (hash). the property value. (defaults to: {})
    """
    if 'tags' in self._values:
        return self._values['tags']
    self._values['tags'] = copy.deepcopy(self._defaults['tags'])
    return self._values['tags']
0.009346
def setModified(self, isModified: bool):
    """
    Set the modified state to ``isModified``.

    From a programmer's perspective this method does the same as the
    native ``QsciScintilla`` method but also ensures that the undo
    framework knows when the document state was changed.

    |Args|

    * ``isModified`` (**bool**): whether or not the document is
      considered unmodified.

    |Returns|

    **None**

    |Raises|

    * **QtmacsArgumentError** if at least one argument has an invalid type.
    """
    if not isModified:
        self.qteUndoStack.saveState()
    super().setModified(isModified)
0.002928
def SCAS(cpu, dest, src):
    """
    Scans String.

    Compares the byte, word, or double word specified with the memory operand
    with the value in the AL, AX, EAX, or RAX register, and sets the status flags
    according to the results. The memory operand address is read from either
    the ES:RDI, ES:EDI or the ES:DI registers (depending on the address-size
    attribute of the instruction, 32 or 16, respectively)::

        IF (byte comparison)
        THEN
            temp = AL - SRC;
            SetStatusFlags(temp);
            THEN IF DF = 0
                THEN (E)DI = (E)DI + 1;
                ELSE (E)DI = (E)DI - 1;
            FI;
        ELSE IF (word comparison)
            THEN
                temp = AX - SRC;
                SetStatusFlags(temp)
                THEN IF DF = 0
                    THEN (E)DI = (E)DI + 2;
                    ELSE (E)DI = (E)DI - 2;
                FI;
            ELSE (* doubleword comparison *)
                temp = EAX - SRC;
                SetStatusFlags(temp)
                THEN IF DF = 0
                    THEN (E)DI = (E)DI + 4;
                    ELSE (E)DI = (E)DI - 4;
                FI;
            FI;
        FI;

    :param cpu: current CPU.
    :param dest: destination operand.
    :param src: source operand.
    """
    dest_reg = dest.reg
    mem_reg = src.mem.base  # , src.type, src.read()
    size = dest.size
    arg0 = dest.read()
    arg1 = src.read()
    res = arg0 - arg1
    cpu._calculate_CMP_flags(size, res, arg0, arg1)

    increment = Operators.ITEBV(cpu.address_bit_size, cpu.DF, -size // 8, size // 8)
    cpu.write_register(mem_reg, cpu.read_register(mem_reg) + increment)
0.003279
def allconcat(self, x, mesh_axis, concat_axis, stack=False):
    """Grouped allconcat (like MPI allgather followed by concat).

    TODO(noam): inefficient - replace with a XLA allconcat when available

    Args:
      x: a LaidOutTensor
      mesh_axis: an integer - the mesh axis along which to group
      concat_axis: an integer (the Tensor axis along which to concatenate)
      stack: a boolean - whether to stack instead of concat
    Returns:
      a LaidOutTensor
    """
    x = x.to_laid_out_tensor()
    coord = self.laid_out_pcoord(mesh_axis)
    t = x.one_slice
    old_shape = t.shape.as_list()
    num_parts = self.shape[mesh_axis].size
    t = tf.expand_dims(t, concat_axis)
    t *= tf.reshape(
        tf.one_hot(coord.one_slice, num_parts, dtype=t.dtype),
        [num_parts if i == concat_axis else 1
         for i in xrange(len(old_shape) + 1)])
    if not stack:
        new_shape = old_shape[:]
        new_shape[concat_axis] *= num_parts
        t = tf.reshape(t, new_shape)
    return self.allreduce(self.LaidOutTensor([t]), [mesh_axis], "SUM")
0.003756
def schedule(self, callback, *args, **kwargs):
    """Schedule the callback to be called asynchronously in a thread pool.

    Args:
        callback (Callable): The function to call.
        args: Positional arguments passed to the function.
        kwargs: Key-word arguments passed to the function.

    Returns:
        None
    """
    self._executor.submit(callback, *args, **kwargs)
0.004706
def load_headers(fsns: List[int]):
    """Load header files
    """
    ip = get_ipython()
    ip.user_ns['_headers'] = {}
    for type_ in ['raw', 'processed']:
        print("Loading %d headers (%s)" % (len(fsns), type_), flush=True)
        processed = type_ == 'processed'
        headers = []
        for f in fsns:
            for l in [l_ for l_ in ip.user_ns['_loaders'] if l_.processed == processed]:
                try:
                    headers.append(l.loadheader(f))
                    break
                except FileNotFoundError:
                    continue
        allsamplenames = {h.title for h in headers}
        if not headers:
            print('NO HEADERS READ FOR TYPE "%s"' % type_)
        else:
            print("%d headers (%s) out of %d have been loaded successfully." % (len(headers), type_, len(fsns)))
            print('Read FSN range:', min([h.fsn for h in headers]), 'to', max([h.fsn for h in headers]))
            print("Samples covered by these headers:")
            print("    " + "\n    ".join(sorted(allsamplenames)), flush=True)
        if processed:
            ip.user_ns['allsamplenames'] = allsamplenames
        ip.user_ns['_headers'][type_] = headers
0.004975
def param_particle_pos(self, ind):
    """ Get position of one or more particles """
    ind = self._vps(listify(ind))
    return [self._i2p(i, j) for i in ind for j in ['z', 'y', 'x']]
0.010152
def get_smartplug_by_name(self, name):
    """Retrieves a smartplug object by its name

    :param name: The name of the smartplug to return
    :return: A smartplug object
    """
    return next((plug for plug in self.smartplugs
                 if plug.name.lower() == name.lower()), None)
0.006329
def trilaterate_v(self, P1, P2, P3, r1, r2, r3):
    r'''
    Find whether 3 spheres intersect
    '''
    temp1 = P2 - P1
    e_x = temp1 / np.linalg.norm(temp1, axis=1)[:, np.newaxis]
    temp2 = P3 - P1
    i = self._my_dot(e_x, temp2)[:, np.newaxis]
    temp3 = temp2 - i * e_x
    e_y = temp3 / np.linalg.norm(temp3, axis=1)[:, np.newaxis]
    d = np.linalg.norm(P2 - P1, axis=1)[:, np.newaxis]
    j = self._my_dot(e_y, temp2)[:, np.newaxis]
    x = (r1 * r1 - r2 * r2 + d * d) / (2 * d)
    y = (r1 * r1 - r3 * r3 - 2 * i * x + (i * i) + (j * j)) / (2 * j)
    temp4 = r1 * r1 - x * x - y * y
    return temp4 >= 0
0.00312
def stream_file(self, project, path):
    """
    Read file of a project and stream it

    :param project: A project object
    :param path: The path of the file in the project
    :returns: A file stream
    """
    # Due to Python 3.4 limitation we can't use with and asyncio
    # https://www.python.org/dev/peps/pep-0492/
    # that's why we wrap the answer
    class StreamResponse:
        def __init__(self, response):
            self._response = response

        def __enter__(self):
            return self._response.content

        # __exit__ must accept the exception triple or the context manager fails
        def __exit__(self, exc_type, exc_val, exc_tb):
            self._response.close()

    url = self._getUrl("/projects/{}/stream/{}".format(project.id, path))
    response = yield from self._session().request("GET", url, auth=self._auth, timeout=None)
    if response.status == 404:
        raise aiohttp.web.HTTPNotFound(text="{} not found on compute".format(path))
    elif response.status == 403:
        raise aiohttp.web.HTTPForbidden(text="forbidden to open {} on compute".format(path))
    elif response.status != 200:
        raise aiohttp.web.HTTPInternalServerError(text="Unexpected error {}: {}: while opening {} on compute".format(response.status, response.reason, path))
    return StreamResponse(response)
0.00507
def perform_command(self):
    """
    Perform command and return the appropriate exit code.

    :rtype: int
    """
    if len(self.actual_arguments) < 2:
        return self.print_help()
    text_format = gf.safe_unicode(self.actual_arguments[0])
    if text_format == u"list":
        text = gf.safe_unicode(self.actual_arguments[1])
    elif text_format in TextFileFormat.ALLOWED_VALUES:
        text = self.actual_arguments[1]
        if not self.check_input_file(text):
            return self.ERROR_EXIT_CODE
    else:
        return self.print_help()

    l1_id_regex = self.has_option_with_value(u"--l1-id-regex")
    l2_id_regex = self.has_option_with_value(u"--l2-id-regex")
    l3_id_regex = self.has_option_with_value(u"--l3-id-regex")
    id_regex = self.has_option_with_value(u"--id-regex")
    id_format = self.has_option_with_value(u"--id-format")
    class_regex = self.has_option_with_value(u"--class-regex")
    sort = self.has_option_with_value(u"--sort")
    parameters = {
        gc.PPN_TASK_IS_TEXT_MUNPARSED_L1_ID_REGEX: l1_id_regex,
        gc.PPN_TASK_IS_TEXT_MUNPARSED_L2_ID_REGEX: l2_id_regex,
        gc.PPN_TASK_IS_TEXT_MUNPARSED_L3_ID_REGEX: l3_id_regex,
        gc.PPN_TASK_IS_TEXT_UNPARSED_ID_REGEX: id_regex,
        gc.PPN_TASK_IS_TEXT_UNPARSED_CLASS_REGEX: class_regex,
        gc.PPN_TASK_IS_TEXT_UNPARSED_ID_SORT: sort,
        gc.PPN_TASK_OS_FILE_ID_REGEX: id_format
    }
    if (text_format == TextFileFormat.MUNPARSED) and ((l1_id_regex is None) or (l2_id_regex is None) or (l3_id_regex is None)):
        self.print_error(u"You must specify --l1-id-regex and --l2-id-regex and --l3-id-regex for munparsed format")
        return self.ERROR_EXIT_CODE
    if (text_format == TextFileFormat.UNPARSED) and (id_regex is None) and (class_regex is None):
        self.print_error(u"You must specify --id-regex and/or --class-regex for unparsed format")
        return self.ERROR_EXIT_CODE
    if (text_format in [TextFileFormat.PLAIN, TextFileFormat.SUBTITLES]) and (id_format is not None):
        try:
            identifier = id_format % 1
        except (TypeError, ValueError):
            self.print_error(u"The given string '%s' is not a valid id format" % id_format)
            return self.ERROR_EXIT_CODE

    text_file = self.get_text_file(text_format, text, parameters)
    if text_file is None:
        self.print_error(u"Unable to build a TextFile from the given parameters")
    elif len(text_file) == 0:
        self.print_error(u"No text fragments found")
    else:
        self.print_generic(text_file.__unicode__())
        return self.NO_ERROR_EXIT_CODE
    return self.ERROR_EXIT_CODE
0.003156
def remove_comments(code):
    """Remove C-style comment from GLSL code string."""
    pattern = r"(\".*?\"|\'.*?\')|(/\*.*?\*/|//[^\r\n]*\n)"
    # first group captures quoted strings (double or single)
    # second group captures comments (//single-line or /* multi-line */)
    regex = re.compile(pattern, re.MULTILINE | re.DOTALL)

    def do_replace(match):
        # if the 2nd group (capturing comments) is not None,
        # it means we have captured a non-quoted (real) comment string.
        if match.group(2) is not None:
            return ""  # so we will return empty to remove the comment
        else:  # otherwise, we will return the 1st group
            return match.group(1)  # captured quoted-string

    return regex.sub(do_replace, code)
0.001311
def connect_command(self):
    '''
    Generates a JSON string with the params to be used
    when sending CONNECT to the server.

      ->> CONNECT {"verbose": false, "pedantic": false, "lang": "python2" }

    '''
    options = {
        "verbose": self.options["verbose"],
        "pedantic": self.options["pedantic"],
        "lang": __lang__,
        "version": __version__,
        "protocol": PROTOCOL
    }
    if "auth_required" in self._server_info:
        if self._server_info["auth_required"] == True:
            # In case there is no password, then consider handle
            # sending a token instead.
            if self.options["user"] is not None and self.options["password"] is not None:
                options["user"] = self.options["user"]
                options["pass"] = self.options["password"]
            elif self.options["token"] is not None:
                options["auth_token"] = self.options["token"]
            elif self._current_server.uri.password is None:
                options["auth_token"] = self._current_server.uri.username
            else:
                options["user"] = self._current_server.uri.username
                options["pass"] = self._current_server.uri.password
    if self.options["name"] is not None:
        options["name"] = self.options["name"]
    if self.options["no_echo"] is not None:
        options["echo"] = not self.options["no_echo"]

    args = json.dumps(options, sort_keys=True)
    return CONNECT_PROTO.format(CONNECT_OP, args, _CRLF_)
0.002433
def convert_bbox_from_albumentations(bbox, target_format, rows, cols, check_validity=False):
    """Convert a bounding box from the format used by albumentations to a format, specified
    in `target_format`.

    Args:
        bbox (list): bounding box with coordinates in the format used by albumentations
        target_format (str): required format of the output bounding box. Should be 'coco' or 'pascal_voc'.
        rows (int): image height
        cols (int): image width
        check_validity (bool): check if all boxes are valid boxes

    Note:
        The `coco` format of a bounding box looks like `[x_min, y_min, width, height]`, e.g. [97, 12, 150, 200].
        The `pascal_voc` format of a bounding box looks like `[x_min, y_min, x_max, y_max]`, e.g. [97, 12, 247, 212].

    Raises:
        ValueError: if `target_format` is not equal to `coco` or `pascal_voc`.

    """
    if target_format not in {'coco', 'pascal_voc'}:
        raise ValueError(
            "Unknown target_format {}. Supported formats are: 'coco' and 'pascal_voc'".format(target_format)
        )
    if check_validity:
        check_bbox(bbox)
    bbox = denormalize_bbox(bbox, rows, cols)
    if target_format == 'coco':
        x_min, y_min, x_max, y_max = bbox[:4]
        width = x_max - x_min
        height = y_max - y_min
        bbox = [x_min, y_min, width, height] + list(bbox[4:])
    return bbox
0.005747
def max_diff(dset1, dset2):
    '''calculates maximal voxel-wise difference in datasets (in %)

    Useful for checking if datasets have the same data. For example,
    if the maximum difference is < 1.0%, they're probably the same dataset'''
    for dset in [dset1, dset2]:
        if not os.path.exists(dset):
            nl.notify('Error: Could not find file: %s' % dset, level=nl.level.error)
            return float('inf')
    try:
        dset1_d = nib.load(dset1)
        dset2_d = nib.load(dset2)
        dset1_data = dset1_d.get_data()
        dset2_data = dset2_d.get_data()
    except IOError:
        nl.notify('Error: Could not read files %s and %s' % (dset1, dset2), level=nl.level.error)
        return float('inf')
    try:
        old_err = np.seterr(divide='ignore', invalid='ignore')
        max_val = 100 * np.max(np.ma.masked_invalid(np.double(dset1_data - dset2_data) / ((dset1_data + dset2_data) / 2)))
        np.seterr(**old_err)
        return max_val
    except ValueError:
        return float('inf')
0.010816
def tk_to_dagcircuit(circ: Circuit, _qreg_name: str = "q") -> DAGCircuit:
    """
    Convert a :math:`\\mathrm{t|ket}\\rangle` :py:class:`Circuit` to a
    :py:class:`qiskit.DAGCircuit`. Requires that the circuit only contains
    :py:class:`OpType` s from the qelib set.

    :param circ: A circuit to be converted

    :return: The converted circuit
    """
    dc = DAGCircuit()
    qreg = QuantumRegister(circ.n_qubits(), name=_qreg_name)
    dc.add_qreg(qreg)
    grid = circ._int_routing_grid()
    slices = _grid_to_slices(grid)
    qubits = _grid_to_qubits(grid, qreg)
    in_boundary = circ._get_boundary()[0]
    out_boundary = circ._get_boundary()[1]
    for s in slices:
        for v in s:
            o = circ._unsigned_to_op(v)
            qargs = [qubits[(v, i)] for i in range(o.get_n_inputs())]
            name, cargs, params = _translate_ops(circ, v)
            if cargs:
                _extend_cregs(dc, cargs)
            if name:
                dc.add_basis_element(name, o.get_n_inputs(),
                                     number_classical=len(cargs),
                                     number_parameters=len(params))
                ins = Instruction(name, list(map(_normalise_param_out, params)), qargs, cargs)
                dc.apply_operation_back(ins, qargs=qargs, cargs=cargs)
    tk2dg_outs = {}
    for v in out_boundary:
        tk2dg_outs[v] = dc.output_map[qubits[(v, 0)]]
    for i, v in enumerate(out_boundary):
        dc.multi_graph.node[tk2dg_outs[v]]["wire"] = [qubits[(in_boundary[i], 0)]]
        dc.output_map[qubits[(in_boundary[i], 0)]] = tk2dg_outs[v]
    return dc
0.017643
def ensureModelData(self, obj):
    """
    Ensures that the given ``obj`` has been initialized to be used with this model.

    If the object is found to not be initialized, it will be initialized.
    """
    if not hasattr(obj, "_modeldata"):
        self.create(obj, cache=True)
    if "_modelcache" not in obj._modeldata:
        # Assume all initialization is missing, simply reinitialize
        self.create(obj, cache=True)
0.016985
def add(self, album, objects, object_type=None, **kwds):
    """
    Endpoint: /album/<id>/<type>/add.json

    Add objects (eg. Photos) to an album.
    The objects are a list of either IDs or Trovebox objects.
    If Trovebox objects are used, the object type is inferred
    automatically.
    Returns the updated album object.
    """
    return self._add_remove("add", album, objects, object_type, **kwds)
0.004184
def run(path, timer=False, repeat=3, number=10000, precision=2):
    """
    Extracts and runs the '@cyther' code from the given file 'path' name
    """
    code = extractAtCyther(path)
    if not code:
        output = "There was no '@cyther' code collected from the " \
                 "file '{}'\n".format(path)
        # TODO This should use a result, right?
        return {'returncode': 0, 'output': output}

    module_directory = os.path.dirname(path)
    module_name = os.path.splitext(os.path.basename(path))[0]
    setup_string = SETUP_TEMPLATE.format(module_directory, module_name, '{}')

    if timer:
        string = TIMER_TEMPLATE.format(setup_string, code, repeat,
                                       number, precision, '{}')
    else:
        string = setup_string + code

    script = os.path.join(os.path.dirname(__file__), 'script.py')
    with open(script, 'w+') as file:
        file.write(string)

    response = call(['python', script])
    return response
0.001014
def _apply_missings(self, res, include_missing=False):
    """Return ndarray with missing and insertions as specified.

    The return value is the result of the following operations on *res*,
    which is a raw cube value array (raw meaning it has shape of original
    cube response).

    * Remove vectors (rows/cols) for missing elements if *include_missing*
      is False.

    Note that it does *not* include pruning.
    """
    # --element idxs that satisfy `include_missing` arg. Note this
    # --includes MR_CAT elements so is essentially all-or-valid-elements
    element_idxs = tuple(
        (
            d.all_elements.element_idxs
            if include_missing
            else d.valid_elements.element_idxs
        )
        for d in self._all_dimensions
    )
    return res[np.ix_(*element_idxs)] if element_idxs else res
0.002176
def convert_dict(d, cls=AttrDict):  # not used
    '''
    recursively convert a normal Mapping `d` and its values to a specified
    type (defaults to AttrDict)
    '''
    for k, v in d.items():
        if isinstance(v, Mapping):
            d[k] = convert_dict(v)
        elif isinstance(v, list):
            for i, e in enumerate(v):
                if isinstance(e, Mapping):
                    v[i] = convert_dict(e)
    return cls(d)
0.006787
def string(s, salt=None):
    """
    Compute the MD5 hash of a string.

    :param:
        * s: (string) the string to hash
        * salt: (string) a random salt string, defaults to None

    :return:
        * result: (string) the 32-character lowercase MD5 value
    """
    m = hashlib.md5()
    s = s.encode('utf-8') + salt.encode('utf-8') if salt is not None else s.encode('utf-8')
    m.update(s)
    result = m.hexdigest()
    return result
0.006977
def DropPrivileges():
    """Attempt to drop privileges if required."""
    if config.CONFIG["Server.username"]:
        try:
            os.setuid(pwd.getpwnam(config.CONFIG["Server.username"]).pw_uid)
        except (KeyError, OSError):
            logging.exception("Unable to switch to user %s",
                              config.CONFIG["Server.username"])
            raise
0.017391
def from_yaml(data):
    """
    Interpolate the provided data and return a dict.

    Currently, this is used to reinterpolate the `molecule.yml` inside an
    Ansible playbook.  If there were any interpolation errors, they would
    have been found and raised earlier.

    :return: dict
    """
    molecule_env_file = os.environ['MOLECULE_ENV_FILE']

    env = os.environ.copy()
    env = config.set_env_from_file(env, molecule_env_file)

    i = interpolation.Interpolator(interpolation.TemplateWithDefaults, env)
    interpolated_data = i.interpolate(data)

    return util.safe_load(interpolated_data)
0.001645
def normalize_build_spec(self, build_spec):
    """
    Convert a build spec into a list of Command tuples.

    After running this command, self.build_cmds should hold all the
    commands that should be run on the disk in self.disk_path.

    Args:
        build_spec (dict): The buildspec part from the init file
    """
    for cmd in build_spec:
        if not cmd:
            continue
        cmd_name = cmd.keys()[0]
        cmd_options = cmd.values()[0]
        cmd_handler = self.get_cmd_handler(cmd_name)
        self.build_cmds.append(cmd_handler(cmd_options))
0.00319
def acquire(self, resources, prop_name):
    """ Starting with self, walk until you find prop or None """

    # Instance
    custom_prop = getattr(self.props, prop_name, None)
    if custom_prop:
        return custom_prop

    # Parents...can't use acquire as have to keep going on acquireds
    for parent in self.parents(resources):
        acquireds = parent.props.acquireds
        if acquireds:
            # First try in the per-type acquireds
            rtype_acquireds = acquireds.get(self.rtype)
            if rtype_acquireds:
                prop_acquired = rtype_acquireds.get(prop_name)
                if prop_acquired:
                    return prop_acquired

            # Next in the "all" section of acquireds
            all_acquireds = acquireds.get('all')
            if all_acquireds:
                prop_acquired = all_acquireds.get(prop_name)
                if prop_acquired:
                    return prop_acquired

    return
0.001912
def wake(self, channel):
    """Causes the bot to resume operation in the channel.

    Usage: !wake [channel name] - unignore the specified channel (or current if none specified)
    """
    self.log.info('Waking up in %s', channel)
    self._bot.dispatcher.unignore(channel)
    self.send_message(channel, 'Hello, how may I be of service?')
0.008021
def normalize(array, min_value=0., max_value=1.):
    """Normalizes the numpy array to (min_value, max_value)

    Args:
        array: The numpy array
        min_value: The min value in normalized array (Default value = 0)
        max_value: The max value in normalized array (Default value = 1)

    Returns:
        The array normalized to range between (min_value, max_value)
    """
    arr_min = np.min(array)
    arr_max = np.max(array)
    normalized = (array - arr_min) / (arr_max - arr_min + K.epsilon())
    return (max_value - min_value) * normalized + min_value
0.001739
def filter_arrange_nodes(nodes: List[ast.stmt], max_line_number: int) -> List[ast.stmt]:
    """
    Finds all nodes that are before the ``max_line_number`` and are not
    docstrings or ``pass``.
    """
    return [
        node
        for node in nodes
        if node.lineno < max_line_number
        and not isinstance(node, ast.Pass)
        and not (isinstance(node, ast.Expr) and isinstance(node.value, ast.Str))
    ]
0.009926
def get_geometry(self):
    """A convenience function to get the geometry variables.

    Returns:
        A tuple containing (thet0, thet, phi0, phi, alpha, beta).
        See the Scatterer class documentation for a description of these
        angles.
    """
    return (self.thet0, self.thet, self.phi0, self.phi, self.alpha, self.beta)
0.010499
def disease_term(self, disease_identifier):
    """Return a disease term

    Checks if the identifier is a disease number or an id

    Args:
        disease_identifier(str)

    Returns:
        disease_obj(dict)
    """
    query = {}
    try:
        disease_identifier = int(disease_identifier)
        query['disease_nr'] = disease_identifier
    except ValueError:
        query['_id'] = disease_identifier

    return self.disease_term_collection.find_one(query)
0.00381
def add_entry(self, **kw):
    """
    Add an entry to an AccessList. Use the supported arguments
    for the inheriting class for keyword arguments.

    :raises UpdateElementFailed: failure to modify with reason
    :return: None
    """
    self.data.setdefault('entries', []).append(
        {'{}_entry'.format(self.typeof): kw})
0.005479
def get_default_wrapper(cls):
    """Returns the default (first) driver wrapper

    :returns: default driver wrapper
    :rtype: toolium.driver_wrapper.DriverWrapper
    """
    if cls.is_empty():
        # Create a new driver wrapper if the pool is empty
        from toolium.driver_wrapper import DriverWrapper
        DriverWrapper()
    return cls.driver_wrappers[0]
0.004914
def task_failure_handler(task_id=None, exception=None, traceback=None, args=None, **kwargs):
    """Task failure handler"""
    # TODO: find a better way to access workdir/archive/image
    task_report = {'task_id': task_id,
                   'exception': exception,
                   'traceback': traceback,
                   'archive': args[1]['archive_path'],
                   'image': args[1]['image']}
    notifier.send_task_failure_report(task_report)
    workdir = args[1]['workdir']
    remove_file(workdir)
0.001838
def read_table(self, table_name, index_col=None, coerce_float=True,
               parse_dates=None, columns=None, schema=None,
               chunksize=None):
    """Read SQL database table into a DataFrame.

    Parameters
    ----------
    table_name : string
        Name of SQL table in database.
    index_col : string, optional, default: None
        Column to set as index.
    coerce_float : boolean, default True
        Attempts to convert values of non-string, non-numeric objects
        (like decimal.Decimal) to floating point. This can result in
        loss of precision.
    parse_dates : list or dict, default: None
        - List of column names to parse as dates.
        - Dict of ``{column_name: format string}`` where format string is
          strftime compatible in case of parsing string times, or is one of
          (D, s, ns, ms, us) in case of parsing integer timestamps.
        - Dict of ``{column_name: arg}``, where the arg corresponds
          to the keyword arguments of :func:`pandas.to_datetime`.
          Especially useful with databases without native Datetime support,
          such as SQLite.
    columns : list, default: None
        List of column names to select from SQL table.
    schema : string, default None
        Name of SQL schema in database to query (if database flavor
        supports this). If specified, this overwrites the default
        schema of the SQL database object.
    chunksize : int, default None
        If specified, return an iterator where `chunksize` is the number
        of rows to include in each chunk.

    Returns
    -------
    DataFrame

    See Also
    --------
    pandas.read_sql_table
    SQLDatabase.read_query

    """
    table = SQLTable(table_name, self, index=index_col, schema=schema)
    return table.read(coerce_float=coerce_float,
                      parse_dates=parse_dates, columns=columns,
                      chunksize=chunksize)
0.00189
def doc(self):
    """Docstring of property on target or override specified on PV-object."""
    return self._pv.doc or inspect.getdoc(
        getattr(type(self._target), self._pv.property, None)) or ''
0.013953
def validate_params(valid_options, params):
    """
    Helps us validate the parameters for the request

    :param valid_options: a list of strings of valid options for the
                          api request
    :param params: a dict, the key-value store which we really only care
                   about the key which tells us what the user is using
                   for the API request

    :returns: None or throws an exception if the validation fails
    """
    # crazy little if statement hanging by himself :(
    if not params:
        return

    # We only allow one version of the data parameter to be passed
    data_filter = ['data', 'source', 'external_url', 'embed']
    multiple_data = [key for key in params.keys() if key in data_filter]
    if len(multiple_data) > 1:
        raise Exception("You can't mix and match data parameters")

    # No bad fields which are not in valid options can pass
    disallowed_fields = [key for key in params.keys() if key not in valid_options]
    if disallowed_fields:
        field_strings = ",".join(disallowed_fields)
        raise Exception("{} are not allowed fields".format(field_strings))
0.004322
def workers(profile='default'):
    '''
    Return a list of member workers and their status

    CLI Examples:

    .. code-block:: bash

        salt '*' modjk.workers
        salt '*' modjk.workers other-profile
    '''
    config = get_running(profile)
    lbn = config['worker.list'].split(',')
    worker_list = []
    ret = {}

    for lb in lbn:
        try:
            worker_list.extend(
                config['worker.{0}.balance_workers'.format(lb)].split(',')
            )
        except KeyError:
            pass

    worker_list = list(set(worker_list))

    for worker in worker_list:
        ret[worker] = {
            'activation': config['worker.{0}.activation'.format(worker)],
            'state': config['worker.{0}.state'.format(worker)],
        }

    return ret
0.001263
def _entries_sorted(self):
    """:return: list of entries, in a sorted fashion, first by path, then by stage"""
    return sorted(self.entries.values(), key=lambda e: (e.path, e.stage))
0.015464
def _parser_exit(parser: argparse.ArgumentParser, proc: "DirectoryListProcessor", _=0,
                 message: Optional[str] = None) -> None:
    """
    Override the default exit in the parser.

    :param parser: the argument parser
    :param proc: the processor whose ``successful_parse`` flag is cleared
    :param _: exit code. Unused because we don't exit
    :param message: Optional message
    """
    if message:
        parser._print_message(message, sys.stderr)
    proc.successful_parse = False
0.009662
def get_trunk_interfaces(auth, url, devid=None, devip=None):
    """Function takes devId as input to RESTFUL call to HP IMC platform

    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :param devid: str requires devid of the target device
    :param devip: str of ipv4 address of the target device

    :return: list of dictionaries where each element of the list represents an interface
             which has been configured as a VLAN trunk port

    :rtype: list

    >>> from pyhpeimc.auth import *
    >>> from pyhpeimc.plat.vlanm import *
    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
    >>> trunk_interfaces = get_trunk_interfaces('10', auth.creds, auth.url)
    >>> assert type(trunk_interfaces) is list
    >>> assert len(trunk_interfaces[0]) == 3
    >>> assert 'allowedVlans' in trunk_interfaces[0]
    >>> assert 'ifIndex' in trunk_interfaces[0]
    >>> assert 'pvid' in trunk_interfaces[0]
    >>> get_trunk_interfaces('350', auth.creds, auth.url)
    ['No trunk interface']
    """
    if devip is not None:
        devid = get_dev_details(devip, auth, url)['id']
    get_trunk_interfaces_url = "/imcrs/vlan/trunk?devId=" + str(devid) + \
                               "&start=1&size=5000&total=false"
    f_url = url + get_trunk_interfaces_url
    response = requests.get(f_url, auth=auth, headers=HEADERS)
    try:
        if response.status_code == 200:
            dev_trunk_interfaces = (json.loads(response.text))
            if len(dev_trunk_interfaces) == 2:
                if isinstance(dev_trunk_interfaces['trunkIf'], list):
                    return dev_trunk_interfaces['trunkIf']
                elif isinstance(dev_trunk_interfaces['trunkIf'], dict):
                    return [dev_trunk_interfaces['trunkIf']]
            else:
                dev_trunk_interfaces['trunkIf'] = ["No trunk interface"]
                return dev_trunk_interfaces['trunkIf']
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + ' get_trunk_interfaces: An Error has occurred'
0.002269
def token_required(view_func):
    """
    Decorator which ensures that one of the WATCHMAN_TOKENS is provided if set.

    WATCHMAN_TOKEN_NAME can also be set if the token GET parameter must be
    customized.
    """
    def _parse_auth_header(auth_header):
        """
        Parse the `Authorization` header

        Expected format: `WATCHMAN-TOKEN Token="ABC123"`
        """
        # TODO: Figure out full set of allowed characters
        # http://stackoverflow.com/questions/19028068/illegal-characters-in-http-headers
        # https://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
        # https://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2
        reg = re.compile('(\w+)[=] ?"?([\w-]+)"?')
        header_dict = dict(reg.findall(auth_header))
        return header_dict['Token']

    def _get_passed_token(request):
        """
        Try to get the passed token, starting with the header and fall back to `GET` param
        """
        try:
            auth_header = request.META['HTTP_AUTHORIZATION']
            token = _parse_auth_header(auth_header)
        except KeyError:
            token = request.GET.get(settings.WATCHMAN_TOKEN_NAME)
        return token

    def _validate_token(request):
        if settings.WATCHMAN_TOKENS:
            watchman_tokens = settings.WATCHMAN_TOKENS.split(',')
        elif settings.WATCHMAN_TOKEN:
            watchman_tokens = [settings.WATCHMAN_TOKEN, ]
        else:
            return True

        return _get_passed_token(request) in watchman_tokens

    @csrf_exempt
    @wraps(view_func)
    def _wrapped_view(request, *args, **kwargs):
        if _validate_token(request):
            return view_func(request, *args, **kwargs)
        return HttpResponseForbidden()

    return _wrapped_view
0.002237
def _html_link_cells(self):
    """
    This will return a new table with cells linked with their columns
    that have <Link> in the name
    :return:
    """
    new_table = self.copy()
    for row in new_table:
        for c in new_table.columns:
            link = '%s <Link>' % c
            if row.get(link, None):
                row[c] = '<a href="%s">%s</a>' % (row[link], row[c])
    new_table.columns = [c for c in self.columns if '<Link>' not in c]
    return new_table
0.003752
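A hedged sketch of the column convention the helper above expects; the row data is illustrative and the table class itself is assumed from the surrounding module.

# Illustrative row for a table processed by _html_link_cells():
row = {"name": "docs", "name <Link>": "https://example.com/docs"}
# After the helper runs, the "name" cell becomes:
#   <a href="https://example.com/docs">docs</a>
# and every "... <Link>" column is removed from the visible columns.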
def requests_view(request, requestType):
    '''
    Generic request view.

    Parameters:
        request is the HTTP request
        requestType is the URL name of a RequestType,
        e.g. "food", "maintenance", "network", "site"
    '''
    userProfile = UserProfile.objects.get(user=request.user)
    request_type = get_object_or_404(RequestType, url_name=requestType)
    page_name = "{0} Requests".format(request_type.name.title())
    if not request_type.enabled:
        message = "{0} requests have been disabled.".format(request_type.name.title())
        return red_home(request, message)
    relevant_managers = request_type.managers.filter(active=True)
    manager = any(i.incumbent == userProfile for i in relevant_managers)
    request_form = RequestForm(
        request.POST if "submit_request" in request.POST else None,
        profile=userProfile,
        request_type=request_type,
    )
    if request_form.is_valid():
        request_form.save()
        return HttpResponseRedirect(reverse('managers:requests',
                                            kwargs={'requestType': requestType}))
    # Number of requests loaded so far
    x = 0
    # A pseudo-dictionary, actually a list with items of form (request,
    # [request_responses_list], response_form, upvote, vote_form)
    requests_dict = list()
    requests = Request.objects.filter(request_type=request_type)
    if not request_type.managers.filter(incumbent__user=request.user):
        requests = requests.exclude(
            ~Q(owner__user=request.user), private=True,
        )
    for req in requests:
        request_responses = Response.objects.filter(request=req)
        if manager:
            response_form = ManagerResponseForm(
                request.POST if "add_response-{0}".format(req.pk) in request.POST else None,
                initial={'action': Response.NONE},
                prefix="{0}".format(req.pk),
                profile=userProfile,
                request=req,
            )
        else:
            response_form = ResponseForm(
                request.POST if "add_response-{0}".format(req.pk) in request.POST else None,
                prefix="{0}".format(req.pk),
                profile=userProfile,
                request=req,
            )
        upvote = userProfile in req.upvotes.all()
        vote_form = VoteForm(
            request.POST if "vote-{0}".format(req.pk) in request.POST else None,
            profile=userProfile,
            request=req,
        )
        if response_form.is_valid():
            response_form.save()
            return HttpResponseRedirect(reverse('managers:requests',
                                                kwargs={'requestType': requestType}))
        if vote_form.is_valid():
            vote_form.save()
            return HttpResponseRedirect(reverse('managers:requests',
                                                kwargs={'requestType': requestType}))
        requests_dict.append((req, request_responses, response_form, upvote, vote_form))
        x += 1
        if x >= settings.MAX_REQUESTS:
            break
    return render_to_response('requests.html', {
        'manager': manager,
        'request_type': request_type,
        'page_name': page_name,
        'request_form': request_form,
        'requests_dict': requests_dict,
        'relevant_managers': relevant_managers,
    }, context_instance=RequestContext(request))
0.002649
def build_gemini_query(self, query, extra_info):
    """Append a condition to a gemini SQL query

    Args:
        query(str): The gemini query
        extra_info(str): The condition text that should be added

    Return:
        extended_query(str)
    """
    if 'WHERE' in query:
        return "{0} AND {1}".format(query, extra_info)
    else:
        return "{0} WHERE {1}".format(query, extra_info)
0.004684
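A quick sketch of how repeated calls compose a filter chain; the instance name and gemini column names below are illustrative assumptions.

query = "SELECT chrom, start, end FROM variants"
query = plugin.build_gemini_query(query, "max_aaf_all < 0.01")
# -> SELECT chrom, start, end FROM variants WHERE max_aaf_all < 0.01
query = plugin.build_gemini_query(query, "impact_severity = 'HIGH'")
# -> ... WHERE max_aaf_all < 0.01 AND impact_severity = 'HIGH'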
def texture_cube(self, size, components, data=None, *, alignment=1, dtype='f1') -> 'TextureCube': ''' Create a :py:class:`TextureCube` object. Args: size (tuple): The width, height of the texture. Each side of the cube will have this size. components (int): The number of components 1, 2, 3 or 4. data (bytes): Content of the texture. Keyword Args: alignment (int): The byte alignment 1, 2, 4 or 8. dtype (str): Data type. Returns: :py:class:`TextureCube` object ''' res = TextureCube.__new__(TextureCube) res.mglo, res._glo = self.mglo.texture_cube(size, components, data, alignment, dtype) res._size = size res._components = components res._dtype = dtype res.ctx = self res.extra = None return res
0.005394
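A hedged usage sketch for the method above with ModernGL; the standalone context and zero-filled face data are illustrative placeholders.

import moderngl

ctx = moderngl.create_standalone_context()
size = (256, 256)
face = bytes(size[0] * size[1] * 4)         # one RGBA face, 1 byte/component
cube = ctx.texture_cube(size, 4, face * 6)  # data holds all six faces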
def add_sample(self, metricId, timestamp, value, metricType=None, host=None, sparseDataStrategy='None', unit='', tags=None, min=None, max=None, avg=None, sum=None, cnt=None, ts_is_ms=False): """ :param metricId: Metric FQN :type metricId: string :param timestamp: Timestamp for the sample :type timestamp: int :param value: Value of the sample :type value: float :param metricType: Metric Type :type metricType: string :param host: Element FQN :type host: string :param sparseDataStrategy: Sparse data strategy :type sparseDataStrategy: string :param unit: Metric Unit type :type unit: string :param tags: List of dicts :type tags: list :param min: Minimum of the sample :type min: float :param max: Maximum of the sample :type max: float :param avg: Average of the sample :type avg: float :param sum: Sum of the sample :type sum: float :param cnt: Count of the sample :type cnt: float :param ts_is_ms: Is the timestamp in milliseconds :type ts_is_ms: bool """ if self.id is None and host is not None: self.id = host if self.name is None and host is not None: self.name = host if tags is not None: Tags = [] for i in tags: for k in i: Tags.append(Tag(k, i[k])) else: Tags = None metricIdSan = self._sanitize(metricId) if not hasattr(self, "_metrics"): setattr(self, "_metrics", {}) if self._metrics.get(metricIdSan) is None: self._metrics[metricIdSan] = Metric(metricIdSan, metricType, sparseDataStrategy, unit, Tags) if timestamp is None: ts = to_ms_timestamp_int(datetime.datetime.utcnow()) else: if ts_is_ms: ts = int(timestamp) else: ts = int(timestamp * 1000) self.samples.append(Sample(metricIdSan, ts, value, min, max, avg, sum, cnt))
0.005375
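A hedged sketch of recording one gauge sample; the element instance, metric id, and tag values are illustrative assumptions about the surrounding client library.

import time

element.add_sample(
    metricId="cpu.idle",
    timestamp=int(time.time()),  # seconds; converted to ms internally
    value=97.2,
    metricType="GAUGE",
    host="web-01",
    tags=[{"env": "prod"}],
)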
def write(self, auth, resource, value, options=None, defer=False):
    """ Writes a single value to the resource specified.

        Args:
            auth: cik for authentication.
            resource: resource to write to.
            value: value to write.
            options: additional write options (dict).
            defer: queue the call instead of sending it immediately.
    """
    return self._call('write', auth, [resource, value, options or {}], defer)
0.005291
def _get_and_write_fp(self, iso_path, outfp, blocksize): # type: (bytes, BinaryIO, int) -> None ''' An internal method to fetch a single file from the ISO and write it out to the file object. Parameters: iso_path - The absolute path to the file to get data from. outfp - The file object to write data to. blocksize - The blocksize to use when copying data. Returns: Nothing. ''' try: return self._get_file_from_iso_fp(outfp, blocksize, None, None, iso_path) except pycdlibexception.PyCdlibException: pass try: return self._get_file_from_iso_fp(outfp, blocksize, iso_path, None, None) except pycdlibexception.PyCdlibException: pass self._get_file_from_iso_fp(outfp, blocksize, None, iso_path, None)
0.005682
def _extract_packages(self):
    """
    Extract the retrieved packages into a new temporary directory.
    """
    self.path_unpacked = mkdtemp(prefix="scoap3_package_",
                                 dir=CFG_TMPSHAREDDIR)
    for path in self.retrieved_packages_unpacked:
        scoap3utils_extract_package(path, self.path_unpacked, self.logger)

    return self.path_unpacked
0.005025
def click_link_text(self, link_text, timeout=settings.SMALL_TIMEOUT): """ This method clicks link text on a page """ # If using phantomjs, might need to extract and open the link directly if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT: timeout = self.__get_new_timeout(timeout) if self.browser == 'phantomjs': if self.is_link_text_visible(link_text): element = self.wait_for_link_text_visible( link_text, timeout=timeout) element.click() return self.open(self.__get_href_from_link_text(link_text)) return if not self.is_link_text_present(link_text): self.wait_for_link_text_present(link_text, timeout=timeout) pre_action_url = self.get_current_url() try: element = self.wait_for_link_text_visible( link_text, timeout=0.2) self.__demo_mode_highlight_if_active(link_text, by=By.LINK_TEXT) try: element.click() except (StaleElementReferenceException, ENI_Exception): self.wait_for_ready_state_complete() time.sleep(0.05) element = self.wait_for_link_text_visible( link_text, timeout=timeout) element.click() except Exception: found_css = False text_id = self.get_link_attribute(link_text, "id", False) if text_id: link_css = '[id="%s"]' % link_text found_css = True if not found_css: href = self.__get_href_from_link_text(link_text, False) if href: if href.startswith('/') or page_utils.is_valid_url(href): link_css = '[href="%s"]' % href found_css = True if not found_css: ngclick = self.get_link_attribute(link_text, "ng-click", False) if ngclick: link_css = '[ng-click="%s"]' % ngclick found_css = True if not found_css: onclick = self.get_link_attribute(link_text, "onclick", False) if onclick: link_css = '[onclick="%s"]' % onclick found_css = True success = False if found_css: if self.is_element_visible(link_css): self.click(link_css) success = True else: # The link text might be hidden under a dropdown menu success = self.__click_dropdown_link_text( link_text, link_css) if not success: element = self.wait_for_link_text_visible( link_text, timeout=settings.MINI_TIMEOUT) element.click() if settings.WAIT_FOR_RSC_ON_CLICKS: self.wait_for_ready_state_complete() if self.demo_mode: if self.driver.current_url != pre_action_url: self.__demo_mode_pause_if_active() else: self.__demo_mode_pause_if_active(tiny=True)
0.000615
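A hedged sketch of calling the method above from a SeleniumBase-style test class; the class name, URL, and link text are illustrative.

class MyTest(BaseCase):
    def test_follow_link(self):
        self.open("https://example.com")
        # Falls back to href/ng-click/onclick lookups if the text is hidden.
        self.click_link_text("More information...")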
def linehexdump(x, onlyasc=0, onlyhex=0, dump=False):
    """Build an equivalent view of hexdump() on a single line

    Note that setting both onlyasc and onlyhex to 1 results in an empty output

    :param x: a Packet
    :param onlyasc: 1 to display only the ascii view
    :param onlyhex: 1 to display only the hexadecimal view
    :param dump: return the view as a string instead of printing it
    :returns: a String only when dump=True
    """
    s = hexstr(x, onlyasc=onlyasc, onlyhex=onlyhex, color=not dump)
    if dump:
        return s
    else:
        print(s)
0.001792
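A hedged sketch using Scapy, where a helper of this name lives; the layer choice is illustrative.

from scapy.all import Ether, linehexdump

pkt = Ether()
linehexdump(pkt)                  # prints hex + ascii on one line
s = linehexdump(pkt, dump=True)   # returns the string instead of printing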
def pushtx(tx_hex, coin_symbol='btc', api_key=None): ''' Takes a signed transaction hex binary (and coin_symbol) and broadcasts it to the bitcoin network. ''' assert is_valid_coin_symbol(coin_symbol) assert api_key, 'api_key required' url = _get_pushtx_url(coin_symbol=coin_symbol) logger.info(url) data = {'tx': tx_hex} params = {'token': api_key} r = requests.post(url, json=data, params=params, verify=True, timeout=TIMEOUT_IN_SECONDS) return get_valid_json(r)
0.005848
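A hedged sketch of broadcasting a signed transaction with the function above; the hex payload and token are placeholders.

raw_tx = "0100000001..."  # placeholder for a fully signed transaction hex
result = pushtx(tx_hex=raw_tx, coin_symbol="btc", api_key="YOUR_TOKEN")
print(result)  # parsed JSON response from the push endpoint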
def _st_decode(self, msg): """ST: Temperature update.""" group = int(msg[4:5]) temperature = int(msg[7:10]) if group == 0: temperature -= 60 elif group == 1: temperature -= 40 return {'group': group, 'device': int(msg[5:7])-1, 'temperature': temperature}
0.005848
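A worked example of the slicing and offsets above; the leading characters stand in for protocol framing, and only the slice positions matter.

msg = "16ST003135"
# msg[4:5] -> group "0", msg[5:7] -> device "03", msg[7:10] -> "135"
# group 0 applies a -60 offset, so the decode yields:
#   {'group': 0, 'device': 2, 'temperature': 75}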
def _has_streamhandler(logger, level=None, fmt=LOG_FORMAT,
                       stream=DEFAULT_STREAM):
    """Check the named logger for an appropriate existing StreamHandler.

    This only returns True if a StreamHandler that exactly matches our
    specification is found. If other StreamHandlers are seen,
    we assume they were added for a different purpose.
    """
    # Ensure we are talking the same type of logging levels:
    # if they passed in a string we need to convert it to a number
    if isinstance(level, basestring):
        level = logging.getLevelName(level)

    for handler in logger.handlers:
        if not isinstance(handler, logging.StreamHandler):
            continue
        if handler.stream is not stream:
            continue
        if handler.level != level:
            continue
        if not handler.formatter or handler.formatter._fmt != fmt:
            continue
        return True
    return False
0.001059
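A hedged sketch of the duplicate-handler check this helper enables; the logger name and stream are illustrative.

import logging
import sys

log = logging.getLogger("demo")
handler = logging.StreamHandler(sys.stderr)
handler.setLevel(logging.INFO)
handler.setFormatter(logging.Formatter(LOG_FORMAT))  # module-level format
log.addHandler(handler)

# A guarded second setup can now skip adding an identical handler:
# if not _has_streamhandler(log, level=logging.INFO, stream=sys.stderr): ...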
def emit_metadata_for_region_py(self, region, region_filename, module_prefix): """Emit Python code generating the metadata for the given region""" terrobj = self.territory[region] with open(region_filename, "w") as outfile: prnt(_REGION_METADATA_PROLOG % {'region': terrobj.identifier(), 'module': module_prefix}, file=outfile) prnt("PHONE_METADATA_%s = %s" % (terrobj.identifier(), terrobj), file=outfile)
0.008811
def revoke(self, auth, codetype, code, defer=False): """ Given an activation code, the associated entity is revoked after which the activation code can no longer be used. Args: auth: Takes the owner's cik codetype: The type of code to revoke (client | share) code: Code specified by <codetype> (cik | share-activation-code) """ return self._call('revoke', auth, [codetype, code], defer)
0.006479
async def refresh_token(loader, client_configuration=None, interval=60):
    """Refresh the token if necessary and update it in the client configuration

    :param loader: KubeConfigLoader returned by load_kube_config
    :param client_configuration: The kubernetes.client.Configuration to set configs to.
    :param interval: how often to check whether the token is up to date

    """
    if loader.provider != 'gcp':
        return

    if client_configuration is None:
        client_configuration = Configuration()

    while 1:
        await asyncio.sleep(interval)
        await loader.load_gcp_token()
        client_configuration.api_key['authorization'] = loader.token
0.001479
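A hedged sketch of running the refresher in the background; how the loader is obtained and the placeholder work coroutine are assumptions.

import asyncio

async def main(loader):
    config = Configuration()
    # Re-reads the GCP token every `interval` seconds while the app runs.
    refresher = asyncio.ensure_future(refresh_token(loader, config))
    try:
        await do_api_calls(config)  # placeholder for application work
    finally:
        refresher.cancel()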
def backpropagate_2d(uSin, angles, res, nm, lD=0, coords=None, weight_angles=True, onlyreal=False, padding=True, padval=0, count=None, max_count=None, verbose=0): r"""2D backpropagation with the Fourier diffraction theorem Two-dimensional diffraction tomography reconstruction algorithm for scattering of a plane wave :math:`u_0(\mathbf{r}) = u_0(x,z)` by a dielectric object with refractive index :math:`n(x,z)`. This method implements the 2D backpropagation algorithm :cite:`Mueller2015arxiv`. .. math:: f(\mathbf{r}) = -\frac{i k_\mathrm{m}}{2\pi} \sum_{j=1}^{N} \! \Delta \phi_0 D_{-\phi_j} \!\! \left \{ \text{FFT}^{-1}_{\mathrm{1D}} \left \{ \left| k_\mathrm{Dx} \right| \frac{\text{FFT}_{\mathrm{1D}} \left \{ u_{\mathrm{B},\phi_j}(x_\mathrm{D}) \right \} }{u_0(l_\mathrm{D})} \exp \! \left[i k_\mathrm{m}(M - 1) \cdot (z_{\phi_j}-l_\mathrm{D}) \right] \right \} \right \} with the forward :math:`\text{FFT}_{\mathrm{1D}}` and inverse :math:`\text{FFT}^{-1}_{\mathrm{1D}}` 1D fast Fourier transform, the rotational operator :math:`D_{-\phi_j}`, the angular distance between the projections :math:`\Delta \phi_0`, the ramp filter in Fourier space :math:`|k_\mathrm{Dx}|`, and the propagation distance :math:`(z_{\phi_j}-l_\mathrm{D})`. Parameters ---------- uSin: (A,N) ndarray Two-dimensional sinogram of line recordings :math:`u_{\mathrm{B}, \phi_j}(x_\mathrm{D})` divided by the incident plane wave :math:`u_0(l_\mathrm{D})` measured at the detector. angles: (A,) ndarray Angular positions :math:`\phi_j` of `uSin` in radians. res: float Vacuum wavelength of the light :math:`\lambda` in pixels. nm: float Refractive index of the surrounding medium :math:`n_\mathrm{m}`. lD: float Distance from center of rotation to detector plane :math:`l_\mathrm{D}` in pixels. coords: None [(2,M) ndarray] Computes only the output image at these coordinates. This keyword is reserved for future versions and is not implemented yet. weight_angles: bool If `True`, weights each backpropagated projection with a factor proportional to the angular distance between the neighboring projections. .. math:: \Delta \phi_0 \longmapsto \Delta \phi_j = \frac{\phi_{j+1} - \phi_{j-1}}{2} .. versionadded:: 0.1.1 onlyreal: bool If `True`, only the real part of the reconstructed image will be returned. This saves computation time. padding: bool Pad the input data to the second next power of 2 before Fourier transforming. This reduces artifacts and speeds up the process for input image sizes that are not powers of 2. padval: float The value used for padding. This is important for the Rytov approximation, where an approximate zero in the phase might translate to 2πi due to the unwrapping algorithm. In that case, this value should be a multiple of 2πi. If `padval` is `None`, then the edge values are used for padding (see documentation of :func:`numpy.pad`). count, max_count: multiprocessing.Value or `None` Can be used to monitor the progress of the algorithm. Initially, the value of `max_count.value` is incremented by the total number of steps. At each step, the value of `count.value` is incremented. verbose: int Increment to increase verbosity. Returns ------- f: ndarray of shape (N,N), complex if `onlyreal` is `False` Reconstructed object function :math:`f(\mathbf{r})` as defined by the Helmholtz equation. 
    :math:`f(x,z) = k_m^2 \left(\left(\frac{n(x,z)}{n_m}\right)^2 -1\right)`

    See Also
    --------
    odt_to_ri: conversion of the object function :math:`f(\mathbf{r})`
        to refractive index :math:`n(\mathbf{r})`
    radontea.backproject: backprojection based on the Fourier slice
        theorem

    Notes
    -----
    Do not use the parameter `lD` in combination with the Rytov
    approximation - the propagation is not correctly described.
    Instead, numerically refocus the sinogram prior to converting
    it to Rytov data (using e.g. :func:`odtbrain.sinogram_as_rytov`)
    with a numerical focusing algorithm (available in the Python
    package :py:mod:`nrefocus`).
    """
    ##
    ##
    # TODO:
    # - combine the 2nd filter and the rotation in the for loop
    #   to save memory. However, memory is not a big issue in 2D.
    ##
    ##
    A = angles.shape[0]
    if max_count is not None:
        max_count.value += A + 2
    # Check input data
    assert len(uSin.shape) == 2, "Input data `uB` must have shape (A,N)!"
    assert len(uSin) == A, "`len(angles)` must be equal to `len(uSin)`!"

    if coords is not None:
        raise NotImplementedError("Output coordinates cannot yet be set "
                                  + "for the 2D backpropagation algorithm.")

    # Cut-Off frequency
    # km [1/px]
    km = (2 * np.pi * nm) / res
    # Here, the notation defines
    # a wave propagating to the right as:
    #
    #    u0(x) = exp(ikx)
    #
    # However, in physics usually we use the other sign convention:
    #
    #    u0(x) = exp(-ikx)
    #
    # In order to be consistent with programs like Meep or our
    # scattering script for a dielectric cylinder, we want to use the
    # latter sign convention.
    # This is not a big problem. We only need to multiply the imaginary
    # part of the scattered wave by -1.

    # Perform weighting
    if weight_angles:
        weights = util.compute_angle_weights_1d(angles).reshape(-1, 1)
        sinogram = uSin * weights
    else:
        sinogram = uSin

    # Size of the input data
    ln = sinogram.shape[1]

    # We perform padding before performing the Fourier transform.
    # This gets rid of artifacts due to false periodicity and also
    # speeds up Fourier transforms if the input image size is not
    # a power of 2.
    order = max(64., 2**np.ceil(np.log(ln * 2.1) / np.log(2)))

    if padding:
        pad = order - ln
    else:
        pad = 0

    padl = int(np.ceil(pad / 2))
    padr = int(pad - padl)

    if padval is None:
        sino = np.pad(sinogram, ((0, 0), (padl, padr)),
                      mode="edge")
        if verbose > 0:
            print("......Padding with edge values.")
    else:
        sino = np.pad(sinogram, ((0, 0), (padl, padr)),
                      mode="linear_ramp",
                      end_values=(padval,))
        if verbose > 0:
            print("......Verifying padding value: {}".format(padval))

    # zero-padded length of sinogram.
    lN = sino.shape[1]

    # Ask for the filter. Do not include zero (first element).
    #
    # Integrals over ϕ₀ [0,2π]; kx [-kₘ,kₘ]
    #   - double coverage factor 1/2 already included
    #   - unitary angular frequency to unitary ordinary frequency
    #     conversion performed in calculation of UB=FT(uB).
    #
    # f(r) = -i kₘ / ((2π)^(3/2) a₀)            (prefactor)
    #      * iint dϕ₀ dkx                       (prefactor)
    #      * |kx|                               (prefactor)
    #      * exp(-i kₘ M lD )                   (prefactor)
    #      * UBϕ₀(kx)                           (dependent on ϕ₀)
    #      * exp( i (kx t⊥ + kₘ (M - 1) s₀) r ) (dependent on ϕ₀ and r)
    #
    # (r and s₀ are vectors. In the last term we perform the dot-product)
    #
    # kₘM = sqrt( kₘ² - kx² )
    # t⊥  = (  cos(ϕ₀), sin(ϕ₀) )
    # s₀  = ( -sin(ϕ₀), cos(ϕ₀) )
    #
    # The filter can be split into two parts
    #
    # 1) part without dependence on the z-coordinate
    #
    #        -i kₘ / ((2π)^(3/2) a₀)
    #      * iint dϕ₀ dkx
    #      * |kx|
    #      * exp(-i kₘ M lD )
    #
    # 2) part with dependence of the z-coordinate
    #
    #        exp( i (kx t⊥ + kₘ (M - 1) s₀) r )
    #
    # The filter (1) can be performed using the classical filter process
    # as in the backprojection algorithm.
    #
    #
    if count is not None:
        count.value += 1

    # Corresponding sample frequencies
    fx = np.fft.fftfreq(lN)  # 1D array
    # kx is a 1D array.
    kx = 2 * np.pi * fx
    # Differentials for integral
    dphi0 = 2 * np.pi / A
    # We will later multiply with phi0.
    #               a, x
    kx = kx.reshape(1, -1)
    # Low-pass filter:
    # less-than-or-equal would give us zero division error.
    filter_klp = (kx**2 < km**2)

    # Filter M so there are no nans from the root
    M = 1. / km * np.sqrt((km**2 - kx**2) * filter_klp)

    prefactor = -1j * km / (2 * np.pi)
    prefactor *= dphi0
    prefactor *= np.abs(kx) * filter_klp
    # new in version 0.1.4:
    # We multiply by the factor (M-1) instead of just (M)
    # to take into account that we have a scattered
    # wave that is normalized by u0.
    prefactor *= np.exp(-1j * km * (M-1) * lD)
    # Perform filtering of the sinogram
    projection = np.fft.fft(sino, axis=-1) * prefactor

    #
    # filter (2) must be applied before rotation as well
    # exp( i (kx t⊥ + kₘ (M - 1) s₀) r )
    #
    # t⊥  = (  cos(ϕ₀), sin(ϕ₀) )
    # s₀  = ( -sin(ϕ₀), cos(ϕ₀) )
    #
    # This filter is effectively an inverse Fourier transform
    #
    # exp(i kx xD) exp(i kₘ (M - 1) yD )
    #
    # xD =   x cos(ϕ₀) + y sin(ϕ₀)
    # yD = - x sin(ϕ₀) + y cos(ϕ₀)

    # Everything is in pixels
    center = ln / 2.0
    x = np.arange(lN) - center + .5
    # Meshgrid for output array
    yv = x.reshape(-1, 1)

    Mp = M.reshape(1, -1)
    filter2 = np.exp(1j * yv * km * (Mp - 1))  # .reshape(1,lN,lN)

    projection = projection.reshape(A, 1, lN)  # * filter2

    # Prepare complex output image
    if onlyreal:
        outarr = np.zeros((ln, ln))
    else:
        outarr = np.zeros((ln, ln), dtype=np.dtype(complex))

    if count is not None:
        count.value += 1

    # Calculate backpropagations
    for i in np.arange(A):
        # Create an interpolation object of the projection.
        # interpolation of the rotated fourier transformed projection
        # this is already tiled onto the entire image.
        sino_filtered = np.fft.ifft(projection[i] * filter2, axis=-1)
        # Resize filtered sinogram back to original size
        sino = sino_filtered[:ln, padl:padl + ln]
        rotated_projr = scipy.ndimage.rotate(
            sino.real, -angles[i] * 180 / np.pi,
            reshape=False, mode="constant", cval=0)
        # Append results
        outarr += rotated_projr
        if not onlyreal:
            outarr += 1j * scipy.ndimage.rotate(
                sino.imag, -angles[i] * 180 / np.pi,
                reshape=False, mode="constant", cval=0)
        if count is not None:
            count.value += 1

    return outarr
0.00009
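A hedged smoke-test sketch for the reconstruction above; the random sinogram is a placeholder, not physically meaningful data.

import numpy as np

A, N = 180, 64
angles = np.linspace(0, 2 * np.pi, A, endpoint=False)
u_sin = np.random.rand(A, N) + 1j * np.random.rand(A, N)
f = backpropagate_2d(u_sin, angles, res=2.0, nm=1.333, onlyreal=True)
assert f.shape == (N, N)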
def value_to_single_key_strokes(value): """Convert value to a list of key strokes >>> value_to_single_key_strokes(123) ['1', '2', '3'] >>> value_to_single_key_strokes('123') ['1', '2', '3'] >>> value_to_single_key_strokes([1, 2, 3]) ['1', '2', '3'] >>> value_to_single_key_strokes(['1', '2', '3']) ['1', '2', '3'] Args: value(int|str|list) Returns: A list of string. """ result = [] if isinstance(value, Integral): value = str(value) for v in value: if isinstance(v, Keys): result.append(v.value) elif isinstance(v, Integral): result.append(str(v)) else: result.append(v) return result
0.001362
def join(self, experiment, genometric_predicate, output="LEFT", joinBy=None,
         refName="REF", expName="EXP", left_on=None, right_on=None):
    """ *Wrapper of* ``JOIN``

    The JOIN operator takes in input two datasets, respectively known as anchor
    (the first/left one) and experiment (the second/right one) and returns a
    dataset of samples consisting of regions extracted from the operands
    according to the specified condition (known as genometric predicate).
    The number of generated output samples is the Cartesian product of the number
    of samples in the anchor and in the experiment dataset (if no joinby clause
    is specified). The attributes (and their values) of the regions in the output
    dataset are the union of the region attributes (with their values) in the
    input datasets; homonymous attributes are disambiguated by prefixing their
    name with their dataset name. The output metadata are the union of the input
    metadata, with their attribute names prefixed with their input dataset name.

    :param experiment: another GMQLDataset
    :param genometric_predicate: a list of Genometric atomic conditions. For an
           explanation of each of them go to the respective page.
    :param output: one of four different values that declare which region is
           given in output for each input pair of anchor and experiment regions
           satisfying the genometric predicate:

           * 'LEFT': outputs the anchor regions from the anchor dataset that satisfy the genometric predicate
           * 'RIGHT': outputs the anchor regions from the experiment dataset that satisfy the genometric predicate
           * 'INT': outputs the overlapping part (intersection) of the anchor and experiment regions that satisfy
             the genometric predicate; if the intersection is empty, no output is produced
           * 'CONTIG': outputs the concatenation between the anchor and experiment regions that satisfy the
             genometric predicate, i.e. the output region is defined as having left (right) coordinates equal
             to the minimum (maximum) of the corresponding coordinate values in the anchor and experiment
             regions satisfying the genometric predicate

    :param joinBy: list of metadata attributes
    :param refName: name that you want to assign to the reference dataset
    :param expName: name that you want to assign to the experiment dataset
    :param left_on: list of region fields of the reference on which the join must be performed
    :param right_on: list of region fields of the experiment on which the join must be performed
    :return: a new GMQLDataset

    An example of usage, in which we perform the join operation between
    Example_Dataset_1 and Example_Dataset_2, specifying that we want to join the
    regions of the former with the first regions at a minimum distance of 120Kb
    of the latter, and finally we want to output the regions of Example_Dataset_2
    matching the criteria::

        import gmql as gl

        d1 = gl.get_example_dataset("Example_Dataset_1")
        d2 = gl.get_example_dataset("Example_Dataset_2")

        result_dataset = d1.join(experiment=d2,
                                 genometric_predicate=[gl.MD(1), gl.DGE(120000)],
                                 output="right")
    """
    if isinstance(experiment, GMQLDataset):
        other_idx = experiment.__index
    else:
        raise TypeError("experiment must be a GMQLDataset. "
                        "{} was provided".format(type(experiment)))

    if isinstance(genometric_predicate, list) and \
            all([isinstance(x, GenometricCondition) for x in genometric_predicate]):
        regionJoinCondition = self.opmng.getRegionJoinCondition(
            list(map(lambda x: x.get_gen_condition(), genometric_predicate)))
    else:
        raise TypeError("genometric_predicate must be a list of GenometricCondition. "
                        "{} was found".format(type(genometric_predicate)))

    if isinstance(output, str):
        regionBuilder = self.opmng.getRegionBuilderJoin(output)
    else:
        raise TypeError("output must be a string. "
                        "{} was provided".format(type(output)))

    if not isinstance(expName, str):
        raise TypeError("expName must be a string. {} was provided".format(type(expName)))
    if not isinstance(refName, str):
        raise TypeError("refName must be a string. {} was provided".format(type(refName)))

    if isinstance(joinBy, list) and \
            all([isinstance(x, str) for x in joinBy]):
        metaJoinCondition = Some(self.opmng.getMetaJoinCondition(joinBy))
    elif joinBy is None:
        metaJoinCondition = none()
    else:
        raise TypeError("joinBy must be a list of strings. "
                        "{} was found".format(type(joinBy)))

    left_on_exists = False
    left_on_len = 0
    if isinstance(left_on, list) and \
            all([isinstance(x, str) for x in left_on]):
        left_on_len = len(left_on)
        left_on = Some(left_on)
        left_on_exists = True
    elif left_on is None:
        left_on = none()
    else:
        raise TypeError("left_on must be a list of strings. "
                        "{} was provided".format(type(left_on)))

    if isinstance(right_on, list) and \
            all([isinstance(x, str) for x in right_on]) and \
            left_on_exists and len(right_on) == left_on_len:
        right_on = Some(right_on)
    elif right_on is None and not left_on_exists:
        right_on = none()
    else:
        raise TypeError("right_on must be a list of strings. "
                        "{} was provided".format(type(right_on)))

    new_index = self.opmng.join(self.__index, other_idx,
                                metaJoinCondition, regionJoinCondition, regionBuilder,
                                refName, expName, left_on, right_on)

    new_local_sources, new_remote_sources = self.__combine_sources(self, experiment)
    new_location = self.__combine_locations(self, experiment)

    return GMQLDataset(index=new_index, location=new_location,
                       local_sources=new_local_sources,
                       remote_sources=new_remote_sources,
                       meta_profile=self.meta_profile)
0.008168
def handle(self, pkt, raddress, rport): """Handle the packet in response to an RRQ to the server.""" if not self.context.tidport: self.context.tidport = rport log.info("Set remote port for session to %s" % rport) # Now check the packet type and dispatch it properly. if isinstance(pkt, TftpPacketOACK): log.info("Received OACK from server") try: self.handleOACK(pkt) except TftpException as err: log.error("Failed to negotiate options: %s" % str(err)) self.sendError(TftpErrors.FailedNegotiation) raise else: log.debug("Sending ACK to OACK") self.sendACK(blocknumber=0) log.debug("Changing state to TftpStateExpectDAT") return TftpStateExpectDAT(self.context) elif isinstance(pkt, TftpPacketDAT): # If there are any options set, then the server didn't honour any # of them. log.info("Received DAT from server") if self.context.options: log.info("Server ignored options, falling back to defaults") self.context.options = { 'blksize': DEF_BLKSIZE } return self.handleDat(pkt) # Every other packet type is a problem. elif isinstance(pkt, TftpPacketACK): # Umm, we ACK, the server doesn't. self.sendError(TftpErrors.IllegalTftpOp) raise TftpException("Received ACK from server while in download") elif isinstance(pkt, TftpPacketWRQ): self.sendError(TftpErrors.IllegalTftpOp) raise TftpException("Received WRQ from server while in download") elif isinstance(pkt, TftpPacketERR): self.sendError(TftpErrors.IllegalTftpOp) log.debug("Received ERR packet: %s", pkt) if pkt.errorcode == TftpErrors.FileNotFound: raise TftpFileNotFoundError("File not found") else: raise TftpException("Received ERR from server: {}".format(pkt)) else: self.sendError(TftpErrors.IllegalTftpOp) raise TftpException("Received unknown packet type from server: %s" % pkt) # By default, no state change. return self
0.002135