text: string (lengths 78 to 104k characters)
score: float64 (range 0 to 0.18)
async def result_continuation(task):
    """A preliminary result processor we'll chain on to the original task.

    This will get executed wherever the source task was executed, in this
    case one of the threads in the ThreadPoolExecutor."""
    await asyncio.sleep(0.1)
    num, res = task.result()
    return num, res * 2
0.00304
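A minimal way to exercise result_continuation above. This is a sketch: the original chaining mechanism is not shown in the snippet, so here the coroutine is simply awaited on the event loop once the pool task has finished, and source_task is a hypothetical stand-in for the original task.

import asyncio
from concurrent.futures import ThreadPoolExecutor

def source_task(num):
    # hypothetical original task submitted to the thread pool
    return num, num * 10

async def main():
    loop = asyncio.get_running_loop()
    with ThreadPoolExecutor() as pool:
        fut = loop.run_in_executor(pool, source_task, 2)
        await fut                              # wait until the source task has a result
        print(await result_continuation(fut))  # -> (2, 40)

asyncio.run(main())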
def get_nts_sorted(self, hdrgo_prt, hdrgos, hdrgo_sort):
    """Return a flat list of grouped and sorted GO terms."""
    nts_flat = []
    self.get_sorted_hdrgo2usrgos(hdrgos, nts_flat, hdrgo_prt, hdrgo_sort)
    return nts_flat
0.008163
def drain_transport(self):
    '''
    "Drain" the transport connection.

    This command simply returns all waiting messages sent from the remote
    chrome instance. This can be useful when waiting for a specific
    asynchronous message from chrome, but higher level calls are better
    suited for managing wait-for-message type needs.
    '''
    self.transport.check_process_ded()
    ret = self.transport.drain(tab_key=self.tab_id)
    self.transport.check_process_ded()
    return ret
0.029661
def update(self, deltat=1.0):
    '''straight lines, with short life'''
    DNFZ.update(self, deltat)
    self.lifetime -= deltat
    if self.lifetime <= 0:
        self.randpos()
        self.lifetime = random.uniform(300, 600)
0.011952
def dispatch(self, tree):
    "Dispatcher function, dispatching tree type T to method _T."
    if isinstance(tree, list):
        for t in tree:
            self.dispatch(t)
        return
    meth = getattr(self, "_" + tree.__class__.__name__)
    meth(tree)
0.006969
def error_code(self, code):
    """
    :param code:
    :return:
    """
    context = {
        'error_title': "Damn error page %i" % code,
        'error_header': "What has happened?",
        'error_footer': "WHY!?!?!",
        'error_messages': {
            ('Error %i' % code): self.__message__(code)
        }
    }
    return WWebTemplateResponse(WTemplateText(self.__error_template__()), context=context)
0.043478
def extract_HBS_learning_curves(runs):
    """
    function to get the hyperband learning curves

    This is an example function showing the interface to use the
    HB_result.get_learning_curves method.

    Parameters
    ----------
    runs: list of HB_result.run objects
        the performed runs for an unspecified config

    Returns
    -------
    list of learning curves: list of lists of tuples
        An individual learning curve is a list of (t, x_t) tuples.
        This function must return a list of these. One could think of
        cases where one could extract multiple learning curves from these
        runs, e.g. if each run is an independent training run of a neural
        network on the data.
    """
    sr = sorted(runs, key=lambda r: r.budget)
    lc = list(filter(lambda t: not t[1] is None, [(r.budget, r.loss) for r in sr]))
    return([lc,])
0.031133
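A usage sketch for extract_HBS_learning_curves above; the namedtuple is a hypothetical stand-in for HB_result.run objects, of which only the .budget and .loss attributes are used.

from collections import namedtuple

Run = namedtuple("Run", ["budget", "loss"])  # stand-in for HB_result.run

runs = [Run(budget=9, loss=0.30), Run(budget=1, loss=None), Run(budget=3, loss=0.55)]
print(extract_HBS_learning_curves(runs))
# -> [[(3, 0.55), (9, 0.30)]]  (sorted by budget; runs whose loss is None are dropped)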
def registerMUX(self, stm: Union[HdlStatement, Operator], sig: RtlSignal, inputs_cnt: int):
    """
    mux record is in format (self.MUX, n, m)
    where n is number of bits of this mux
    and m is number of possible inputs
    """
    assert inputs_cnt > 1
    res = self.resources
    w = sig._dtype.bit_length()
    k = (ResourceMUX, w, inputs_cnt)
    res[k] = res.get(k, 0) + 1
    self.resource_for_object[(stm, sig)] = k
0.006073
def find_tor_binary(globs=('/usr/sbin/', '/usr/bin/',
                           '/Applications/TorBrowser_*.app/Contents/MacOS/'),
                    system_tor=True):
    """
    Tries to find the tor executable using the shell first, or in the paths
    whose glob-patterns are in the given 'globs'-tuple.

    :param globs:
        A tuple of shell-style globs of directories to use to find tor
        (TODO consider making that globs to actual tor binary?)

    :param system_tor:
        This controls whether bash is used to search for 'tor' or not. If
        False, we skip that check and use only the 'globs' tuple.
    """
    # Try to find the tor executable using the shell
    if system_tor:
        try:
            proc = subprocess.Popen(
                ('which tor'),
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                shell=True
            )
        except OSError:
            pass
        else:
            stdout, _ = proc.communicate()
            if proc.poll() == 0 and stdout != '':
                return stdout.strip()

    # the shell may not provide type and tor is usually not on PATH when using
    # the browser-bundle. Look in specific places
    for pattern in globs:
        for path in glob.glob(pattern):
            torbin = os.path.join(path, 'tor')
            if is_executable(torbin):
                return torbin
    return None
0.000707
def to_epoch(t):
    """Take a datetime, either as a string or a datetime.datetime object,
    and return the corresponding epoch"""
    if isinstance(t, str):
        if '+' not in t:
            t = t + '+00:00'
        t = parser.parse(t)
    elif t.tzinfo is None or t.tzinfo.utcoffset(t) is None:
        t = t.replace(tzinfo=pytz.timezone('utc'))
    t0 = datetime.datetime(1970, 1, 1, 0, 0, 0, 0, pytz.timezone('utc'))
    delta = t - t0
    return int(delta.total_seconds())
0.00207
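A brief sketch of calling to_epoch above (assumes the snippet's imports: dateutil's parser, pytz, and datetime).

print(to_epoch("1970-01-02T00:00:00"))        # naive string is treated as UTC -> 86400
print(to_epoch("1970-01-02T00:00:00+01:00"))  # explicit offset is honoured    -> 82800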
def from_Cary(filepath, name=None, parent=None, verbose=True): """Create a collection object from a Cary UV VIS absorbance file. We hope to support as many Cary instruments and datasets as possible. This function has been tested with data collected on a Cary50 UV/VIS spectrometer. If any alternate instruments are found not to work as expected, please submit a bug report on our `issue tracker`__. __ github.com/wright-group/WrightTools/issues .. plot:: >>> import WrightTools as wt >>> from WrightTools import datasets >>> p = datasets.Cary.CuPCtS_H2O_vis >>> data = wt.collection.from_Cary(p)[0] >>> wt.artists.quick1D(data) Parameters ---------- filepath : path-like Path to Cary output file (.csv). parent : WrightTools.Collection A collection object in which to place a collection of Data objects. verbose : boolean (optional) Toggle talkback. Default is True. Returns ------- data New data object. """ # check filepath filestr = os.fspath(filepath) filepath = pathlib.Path(filepath) if ".csv" not in filepath.suffixes: wt_exceptions.WrongFileTypeWarning.warn(filepath, "csv") if name is None: name = "cary" # import array lines = [] ds = np.DataSource(None) with ds.open(filestr, "rt", encoding="iso-8859-1") as f: header = f.readline() columns = f.readline() while True: line = f.readline() if line == "\n" or line == "" or line == "\r\n": break else: # Note, it is necessary to call this twice, as a single call will # result in something like ',,,,' > ',nan,,nan,'. line = line.replace(",,", ",nan,") line = line.replace(",,", ",nan,") # Ensure that the first column has nan, if necessary if line[0] == ",": line = "nan" + line clean = line[:-2] # lines end with ',/n' lines.append(np.fromstring(clean, sep=",")) lines = [line for line in lines if len(line) > 0] header = header.split(",") columns = columns.split(",") arr = np.array(lines).T duplicate = len(header) // 2 == len(set(header) - {""}) # chew through all scans datas = Collection(name=name, parent=parent, edit_local=parent is not None) units_dict = {"°c": "deg_C", "°f": "deg_F"} for i in range(0, len(header) - 1, 2): r = re.compile(r"[ \t\(\)]+") spl = r.split(columns[i]) ax = spl[0].lower() if len(spl) > 0 else None units = spl[1].lower() if len(spl) > 1 else None units = units_dict.get(units, units) if duplicate: name = "{}_{:03d}".format(header[i], i // 2) else: name = header[i] dat = datas.create_data(name, kind="Cary", source=filestr) dat.create_variable(ax, arr[i][~np.isnan(arr[i])], units=units) dat.create_channel( columns[i + 1].lower(), arr[i + 1][~np.isnan(arr[i + 1])], label=columns[i + 1].lower() ) dat.transform(ax) # finish if verbose: print("{0} data objects successfully created from Cary file:".format(len(datas))) for i, data in enumerate(datas): print(" {0}: {1}".format(i, data)) return datas
0.001459
def get_partitions(self, persistence=None): """ Returns the partitioned data based on a specified persistence level. @ In, persistence, a floating point value specifying the size of the smallest feature we want to track. Default = None means consider all features. @ Out, a dictionary lists where each key is a min-max tuple specifying the index of the minimum and maximum, respectively. Each entry will hold a list of indices specifying points that are associated to this min-max pair. """ if persistence is None: persistence = self.persistence partitions = {} # TODO: Possibly cache at the critical persistence values, # previously caching was done at every query level, but that # does not make sense as the partitions will only change once # the next value in self.persistences is attained. Honestly, # this is probably not a necessary optimization that needs to # be made. Consider instead, Yarden's way of storing the points # such that merged arrays will be adjacent. for key, items in self.base_partitions.items(): min_index = key[0] max_index = key[1] while ( self.merge_sequence[min_index][0] < persistence and self.merge_sequence[min_index][1] != min_index ): min_index = self.merge_sequence[min_index][1] while ( self.merge_sequence[max_index][0] < persistence and self.merge_sequence[max_index][1] != max_index ): max_index = self.merge_sequence[max_index][1] new_key = (min_index, max_index) if new_key not in partitions: partitions[new_key] = [] partitions[new_key].extend(items.tolist()) for key in partitions: partitions[key] = sorted(list(set(partitions[key]))) return partitions
0.000976
def __create_orget_address(conn, name, region):
    '''
    Reuse or create a static IP address.
    Returns a native GCEAddress construct to use with libcloud.
    '''
    try:
        addy = conn.ex_get_address(name, region)
    except ResourceNotFoundError:  # pylint: disable=W0703
        addr_kwargs = {
            'name': name,
            'region': region
        }
        new_addy = create_address(addr_kwargs, "function")
        addy = conn.ex_get_address(new_addy['name'], new_addy['region'])
    return addy
0.001912
def get_version(self): """Fetches the current version number of the Graph API being used.""" args = {"access_token": self.access_token} try: response = self.session.request( "GET", FACEBOOK_GRAPH_URL + self.version + "/me", params=args, timeout=self.timeout, proxies=self.proxies, ) except requests.HTTPError as e: response = json.loads(e.read()) raise GraphAPIError(response) try: headers = response.headers version = headers["facebook-api-version"].replace("v", "") return str(version) except Exception: raise GraphAPIError("API version number not available")
0.002545
def put(self, file): """ Create a new file on github :param file: File to create :return: File or self.ProxyError """ input_ = { "message": file.logs, "author": file.author.dict(), "content": file.base64, "branch": file.branch } uri = "{api}/repos/{origin}/contents/{path}".format( api=self.github_api_url, origin=self.origin, path=file.path ) data = self.request("PUT", uri, data=input_) if data.status_code == 201: file.pushed = True return file else: decoded_data = json.loads(data.content.decode("utf-8")) return self.ProxyError( data.status_code, (decoded_data, "message"), step="put", context={ "uri": uri, "params": input_ } )
0.002094
def segment(self, text, lower=True, use_stop_words=True, use_speech_tags_filter=False):
    """Segment a piece of text and return the result as a list of words.

    Keyword arguments:
    lower                  -- whether to lowercase the words (applies to English)
    use_stop_words         -- if True, filter the result against the stop-word set
                              (i.e. remove stop words)
    use_speech_tags_filter -- whether to filter by part-of-speech tags. If True,
                              filter with self.default_speech_tag_filter; otherwise
                              do not filter.
    """
    jieba_result = cut_for_search(text)
    jieba_result = [w for w in jieba_result]

    # strip whitespace and drop empty tokens / special symbols
    word_list = [w.strip() for w in jieba_result]
    word_list = [word for word in word_list if len(word) > 0]

    if use_speech_tags_filter == False:
        jieba_result = [w for w in jieba_result]

    if lower:
        word_list = [word.lower() for word in word_list]

    if use_stop_words:
        word_list = [word.strip() for word in word_list
                     if word.strip() not in self.stop_words]

    return word_list
0.006459
def _item_to_document_ref(iterator, item):
    """Convert Document resource to document ref.

    Args:
        iterator (google.api_core.page_iterator.GRPCIterator): iterator response
        item (dict): document resource
    """
    document_id = item.name.split(_helpers.DOCUMENT_PATH_DELIMITER)[-1]
    return iterator.collection.document(document_id)
0.002717
def _compress(self, value, module_name):
    """Compress the value passed in using the named compression module.

    :param bytes value: The uncompressed value
    :rtype: bytes
    """
    self.logger.debug('Compressing with %s', module_name)
    if not isinstance(value, bytes):
        value = value.encode('utf-8')
    return self._maybe_import(module_name).compress(value)
0.004843
def get_conf(conf, sect, opt):
    """Gets a config 'opt' from 'conf' file, under section 'sect'.

    If no 'opt' exists under 'sect', it looks for the option in the
    default_configs dictionary.

    If there exists an environmental variable named MAMBUPY_{upper_case_opt},
    it overrides whatever the conf files or the default_configs dict say.

    But if you send a command line argument named mambupy_{lower_case_opt},
    it overrides anything else.

    Args:
        conf (ConfigParser): ConfigParser that reads from certain config file (INI format)
        sect (string): section under the config file
        opt (string): option to read

    Returns:
        string: configuration option. If not found on conf, returns a value
            from the default_configs dict. If an environmental variable exists
            with name MAMBUPY_{upper_case_opt}, it overrides anything else.
    """
    argu = getattr(args, "mambupy_" + opt.lower())
    if not argu:
        envir = os.environ.get("MAMBUPY_" + opt.upper())
        if not envir:
            try:
                return conf.get(sect, opt)
            except NoSectionError:
                return default_configs[opt]
        return envir
    return argu
0.001634
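The precedence implemented by get_conf above, illustrated with a hypothetical option and section name (a command-line argument beats the environment variable, which beats the INI file, which beats default_configs):

import os

# With no --mambupy_apiurl command-line argument present on `args`, the
# environment variable wins over both the INI file and default_configs:
os.environ["MAMBUPY_APIURL"] = "https://example.mambu.com"  # hypothetical value
# get_conf(conf, "API", "apiurl") -> "https://example.mambu.com"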
def init_log_rate(output_f, N=None, message='', print_rate=None):
    """Initialize the log_rate function. Returns a partial function to call
    for each event.

    If N is not specified but print_rate is specified, the initial N is set
    to 100, and after the first message, the N value is adjusted to emit
    print_rate messages per second.
    """
    if print_rate and not N:
        N = 100

    if not N:
        N = 5000

    d = [0,  # number of items processed
         time(),  # start time. This one gets replaced after first message
         N,  # ticker to next message
         N,  # frequency to log a message
         message,
         print_rate,
         deque([], maxlen=4)  # Deque for averaging last N rates
         ]

    assert isinstance(output_f, Callable)

    f = partial(_log_rate, output_f, d)
    f.always = output_f
    f.count = lambda: d[0]

    return f
0.001119
def file_pour(filepath, block_size=10240, *args, **kwargs):
    """Write physical files from entries."""
    def opener(archive_res):
        _LOGGER.debug("Opening from file (file_pour): %s", filepath)
        _archive_read_open_filename(archive_res, filepath, block_size)

    return _pour(opener, *args, flags=0, **kwargs)
0.003067
def generate(env):
    """Add Builders and construction variables for yacc to an Environment."""
    c_file, cxx_file = SCons.Tool.createCFileBuilders(env)

    # C
    c_file.add_action('.y', YaccAction)
    c_file.add_emitter('.y', yEmitter)

    c_file.add_action('.yacc', YaccAction)
    c_file.add_emitter('.yacc', yEmitter)

    # Objective-C
    c_file.add_action('.ym', YaccAction)
    c_file.add_emitter('.ym', ymEmitter)

    # C++
    cxx_file.add_action('.yy', YaccAction)
    cxx_file.add_emitter('.yy', yyEmitter)

    env['YACC'] = env.Detect('bison') or 'yacc'
    env['YACCFLAGS'] = SCons.Util.CLVar('')
    env['YACCCOM'] = '$YACC $YACCFLAGS -o $TARGET $SOURCES'
    env['YACCHFILESUFFIX'] = '.h'
    env['YACCHXXFILESUFFIX'] = '.hpp'
    env['YACCVCGFILESUFFIX'] = '.vcg'
0.003755
def do_dd(self, arg):
    """
    [~thread] dd <register>           - show memory contents as dwords
    [~thread] dd <register-register>  - show memory contents as dwords
    [~thread] dd <register> <size>    - show memory contents as dwords
    [~process] dd <address>           - show memory contents as dwords
    [~process] dd <address-address>   - show memory contents as dwords
    [~process] dd <address> <size>    - show memory contents as dwords
    """
    self.print_memory_display(arg, HexDump.hexblock_dword)
    self.last_display_command = self.do_dd
0.003472
def _query(_node_id, value=None, **kw): "Look up value by using Query table" query_result = [] try: query_result = db.execute(text(fetch_query_string('select_query_from_node.sql')), **kw).fetchall() except DatabaseError as err: current_app.logger.error("DatabaseError: %s, %s", err, kw) return value #current_app.logger.debug("queries kw: %s", kw) #current_app.logger.debug("queries value: %s", value) current_app.logger.debug("queries: %s", query_result) if query_result: values = [] for query_name in [x['name'] for x in query_result]: if query_name: result = [] try: current_app.logger.debug("query_name: %s", query_name) #current_app.logger.debug("kw: %s", kw) # Query string can be insert or select here #statement = text(fetch_query_string(query_name)) #params = [x.key for x in statement.params().get_children()] #skw = {key: kw[key] for key in params} #result = db.execute(statement, **skw) result = db.execute(text(fetch_query_string(query_name)), **kw) current_app.logger.debug("result query: %s", result.keys()) except (DatabaseError, StatementError) as err: current_app.logger.error("DatabaseError (%s) %s: %s", query_name, kw, err) if result and result.returns_rows: result = result.fetchall() #values.append(([[dict(zip(result.keys(), x)) for x in result]], result.keys())) #values.append((result.fetchall(), result.keys())) #current_app.logger.debug("fetchall: %s", values) if len(result) == 0: values.append(([], [])) else: current_app.logger.debug("result: %s", result) # There may be more results, but only interested in the # first one. Use the older rowify method for now. # TODO: use case for rowify? values.append(rowify(result, [(x, None) for x in result[0].keys()])) #current_app.logger.debug("fetchone: %s", values) value = values #current_app.logger.debug("value: %s", value) return value
0.007705
def hincrby(self, key, field, increment=1):
    """Increment the integer value of a hash field by the given number."""
    return self.execute(b'HINCRBY', key, field, increment)
0.010811
def _put_pages(self):
    """
    First, the Document object does the heavy-lifting for the
    individual page objects and content.

    Then, the overall "Pages" object is generated.
    """
    self.document._get_orientation_changes()
    self.document._output_pages()

    # Pages Object, provides reference to page objects (Kids list).
    self.session._add_object(1)
    self.session._out('<</Type /Pages')
    kids = '/Kids ['
    for i in xrange(0, len(self.document.pages)):
        kids += str(3 + 2 * i) + ' 0 R '
    self.session._out(kids + ']')
    self.session._out('/Count %s' % len(self.document.pages))

    # Overall size of the default PDF page
    self.session._out('/MediaBox [0 0 %.2f %.2f]' %
                      (self.document.page.width, self.document.page.height))
    self.session._out('>>')
    self.session._out('endobj')
0.00203
def describe_cluster_snapshots(self, cluster_identifier):
    """
    Gets a list of snapshots for a cluster

    :param cluster_identifier: unique identifier of a cluster
    :type cluster_identifier: str
    """
    response = self.get_conn().describe_cluster_snapshots(
        ClusterIdentifier=cluster_identifier
    )
    if 'Snapshots' not in response:
        return None
    snapshots = response['Snapshots']
    # materialize the filter result so it can be sorted in place on Python 3
    snapshots = list(filter(lambda x: x['Status'], snapshots))
    snapshots.sort(key=lambda x: x['SnapshotCreateTime'], reverse=True)
    return snapshots
0.00321
def suggest_filename(file_path, exists=None):
    """
    Check whether the path exists and, if so, append a number to its end.
    For debugging you can pass in whether the file exists or not.
    """
    import os.path
    import re
    if not isinstance(exists, bool):
        exists = os.path.exists(file_path)

    if exists:
        file_path, file_extension = os.path.splitext(file_path)
        # print(file_path)
        m = re.search(r"_\d+$", file_path)
        if m is None:
            # cislo = 2
            new_cislo_str = "_2"
        else:
            cislostr = (m.group())
            cislo = int(cislostr[1:]) + 1
            # it is a normal number
            file_path = file_path[:-len(cislostr)]
            new_cislo_str = "_" + str(cislo)
        file_path = file_path + new_cislo_str + file_extension  # .zfill(2)
        # a bit of recursion
        file_path = suggest_filename(file_path)

    return file_path
0.001103
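How suggest_filename above behaves when exists=True is forced for the first step (the final name still depends on an os.path.exists check against the real filesystem):

print(suggest_filename("report.txt", exists=True))
# -> "report_2.txt"   (assuming report_2.txt does not exist on disk)

print(suggest_filename("report_2.txt", exists=True))
# -> "report_3.txt"   (the trailing number is incremented, again checked on disk)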
def links(self): """Yields all links in the page""" for anchor in self.parsed.findall(".//a"): if anchor.get("href"): href = anchor.get("href") url = self.clean_link( urllib_parse.urljoin(self.base_url, href) ) # Determine if this link is internal. If that distinction # doesn't make sense in this context, then we don't make # any distinction. internal = None if self.api_version and self.api_version >= 2: # Only api_versions >= 2 have a distinction between # external and internal links internal = bool( anchor.get("rel") and "internal" in anchor.get("rel").split() ) yield Link(url, self, internal=internal)
0.00213
def _fast_memory_load_pointer(self, addr, size=None):
    """
    Perform a fast memory loading of a pointer.

    :param int addr: Address to read from.
    :param int size: Size of the pointer. Default to machine-word size.
    :return:         A pointer or None if the address does not exist.
    :rtype:          int
    """
    try:
        return self.project.loader.memory.unpack_word(addr, size=size)
    except KeyError:
        return None
0.004049
def render_full(self, request, lodgeit_url=None): """Render the Full HTML page with the traceback info.""" app = request.app root_path = request.app.ps.debugtoolbar.cfg.prefix exc = escape(self.exception) summary = self.render_summary(include_title=False, request=request) token = request.app['debugtoolbar']['pdbt_token'] vars = { 'evalex': app.ps.debugtoolbar.cfg.intercept_exc == 'debug' and 'true' or 'false', 'console': 'console', 'lodgeit_url': lodgeit_url and escape(lodgeit_url) or '', 'title': exc, 'exception': exc, 'exception_type': escape(self.exception_type), 'summary': summary, 'plaintext': self.plaintext, 'plaintext_cs': re.sub('-{2,}', '-', self.plaintext), 'traceback_id': self.id, 'static_path': root_path + 'static/', 'token': token, 'root_path': root_path, 'url': root_path + 'exception?token=%s&tb=%s' % (token, self.id), } template = app.ps.jinja2.env.get_template('debugtoolbar/exception.html') return template.render(app=app, request=request, **vars)
0.003265
def install(self, package, shutit_pexpect_child=None, options=None, timeout=shutit_global.shutit_global_object.default_timeout, force=False, check_exit=True, echo=None, reinstall=False, background=False, wait=False, block_other_commands=True, note=None, loglevel=logging.INFO): """Distro-independent install function. Takes a package name and runs the relevant install function. @param package: Package to install, which is run through package_map @param shutit_pexpect_child: See send() @param timeout: Timeout (s) to wait for finish of install. Defaults to 3600. @param options: Dictionary for specific options per install tool. Overrides any arguments passed into this function. @param force: Force if necessary. Defaults to False @param check_exit: If False, failure to install is ok (default True) @param reinstall: Advise a reinstall where possible (default False) @param note: See send() @type package: string @type timeout: integer @type options: dict @type force: boolean @type check_exit: boolean @type reinstall: boolean @return: True if all ok (ie it's installed), else False. @rtype: boolean """ shutit_global.shutit_global_object.yield_to_draw() # If separated by spaces, install separately shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child) ignore_background = not wait return shutit_pexpect_session.install(package, options=options, timeout=timeout, force=force, check_exit=check_exit, reinstall=reinstall, echo=echo, note=note, run_in_background=background, ignore_background=ignore_background, block_other_commands=block_other_commands, loglevel=loglevel)
0.038446
def dump_requestdriver_cookies_into_webdriver(requestdriver, webdriverwrapper, handle_sub_domain=True): """Adds all cookies in the RequestDriver session to Webdriver @type requestdriver: RequestDriver @param requestdriver: RequestDriver with cookies @type webdriverwrapper: WebDriverWrapper @param webdriverwrapper: WebDriverWrapper to receive cookies @param handle_sub_domain: If True, will check driver url and change cookies with subdomains of that domain to match the current driver domain in order to avoid cross-domain cookie errors @rtype: None @return: None """ driver_hostname = urlparse(webdriverwrapper.current_url()).netloc for cookie in requestdriver.session.cookies: # Check if there will be a cross-domain violation and handle if necessary cookiedomain = cookie.domain if handle_sub_domain: if is_subdomain(cookiedomain, driver_hostname): # Cookies of requestdriver are subdomain cookies of webdriver; make them the base domain cookiedomain = driver_hostname try: webdriverwrapper.add_cookie({ 'name': cookie.name, 'value': cookie.value, 'domain': cookiedomain, 'path': cookie.path }) except WebDriverException, e: raise WebDriverException( msg='Cannot set cookie "{name}" with domain "{domain}" on url "{url}" {override}: {message}'.format( name=cookie.name, domain=cookiedomain, url=webdriverwrapper.current_url(), override='(Note that subdomain override is set!)' if handle_sub_domain else '', message=e.message), screen=e.screen, stacktrace=e.stacktrace )
0.003743
def printContentsOfJobStore(jobStorePath, nameOfJob=None): """ Fetch a list of all files contained in the jobStore directory input if nameOfJob is not declared, otherwise it only prints out the names of files for that specific job for which it can find a match. Also creates a logFile containing this same record of job files in the working directory. :param jobStorePath: Directory path to recursively look for files. :param nameOfJob: Default is None, which prints out all files in the jobStore. If specified, it will print all jobStore files that have been written to the jobStore by that job. """ if nameOfJob: glob = "*" + nameOfJob + "*" logFile = nameOfJob + "_fileset.txt" else: glob = "*" logFile = "jobstore_files.txt" nameOfJob = "" list_of_files = recursiveGlob(directoryname=jobStorePath, glob_pattern=glob) if os.path.exists(logFile): os.remove(logFile) for gfile in sorted(list_of_files): if not gfile.endswith('.new'): logger.debug(nameOfJob + "File: %s", os.path.basename(gfile)) with open(logFile, "a+") as f: f.write(os.path.basename(gfile)) f.write("\n")
0.004769
def language_create(name, maintenance_db, user=None, host=None, port=None, password=None, runas=None): ''' .. versionadded:: 2016.3.0 Installs a language into a database CLI Example: .. code-block:: bash salt '*' postgres.language_create plpgsql dbname name Language to install maintenance_db The database to install the language in user database username if different from config or default password user password if any password for a specified user host Database host if different from config or default port Database port if different from config or default runas System user all operations should be performed on behalf of ''' if language_exists(name, maintenance_db): log.info('Language %s already exists in %s', name, maintenance_db) return False query = 'CREATE LANGUAGE {0}'.format(name) ret = _psql_prepare_and_run(['-c', query], user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas) return ret['retcode'] == 0
0.000696
def tiles_from_bounds(self, bounds, zoom): """ Return all tiles intersecting with bounds. Bounds values will be cleaned if they cross the antimeridian or are outside of the Northern or Southern tile pyramid bounds. Parameters ---------- bounds : tuple (left, bottom, right, top) bounding values in tile pyramid CRS zoom : integer zoom level Yields ------ intersecting tiles : generator generates ``BufferedTiles`` """ for tile in self.tiles_from_bbox(box(*bounds), zoom): yield self.tile(*tile.id)
0.003067
def top_sort_recursive(graph):
    """ Time complexity is the same as DFS, which is O(V + E)
        Space complexity: O(V)
    """
    order, enter, state = [], set(graph), {}

    def dfs(node):
        state[node] = GRAY
        # print(node)
        for k in graph.get(node, ()):
            sk = state.get(k, None)
            if sk == GRAY:
                raise ValueError("cycle")
            if sk == BLACK:
                continue
            enter.discard(k)
            dfs(k)
        order.append(node)
        state[node] = BLACK

    while enter:
        dfs(enter.pop())
    return order
0.008237
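A quick sketch of calling top_sort_recursive above; GRAY and BLACK are module-level marker constants assumed by the function (any two distinct values work).

GRAY, BLACK = 0, 1  # node states: being visited / finished

graph = {"a": ["b", "c"], "b": ["d"], "c": ["d"], "d": []}
print(top_sort_recursive(graph))
# one possible output: ['d', 'b', 'c', 'a'] -- each node appears after
# everything reachable from it (set iteration order makes ties arbitrary)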
def is_installed(name):
    """
    Checks whether a package with the name is already installed.

    :param name: the name of the package
    :type name: str
    :return: whether the package is installed
    :rtype: bool
    """
    pkgs = installed_packages()
    for pkge in pkgs:
        if pkge.name == name:
            return True
    return False
0.002825
def value(self):
    """The current value of the mean."""
    return self._sum / tf.cast(self._count, self._dtype)
0.008772
def remove_replica(self, partition_name, osr_broker_ids, count=1): """Removing a replica is done by trying to remove a replica from every broker and choosing the resulting state with the highest fitness score. Out-of-sync replicas will always be removed before in-sync replicas. :param partition_name: (topic_id, partition_id) of the partition to remove replicas of. :param osr_broker_ids: A list of the partition's out-of-sync broker ids. :param count: The number of replicas to remove. """ try: partition = self.cluster_topology.partitions[partition_name] except KeyError: raise InvalidPartitionError( "Partition name {name} not found.".format(name=partition_name), ) if partition.replication_factor - count < 1: raise InvalidReplicationFactorError( "Cannot decrease replication factor from {rf} to {new_rf}." "Replication factor must be at least 1." .format( rf=partition.replication_factor, new_rf=partition.replication_factor - count, ) ) osr = { broker for broker in partition.replicas if broker.id in osr_broker_ids } # Create state from current cluster topology. state = _State(self.cluster_topology) partition_index = state.partitions.index(partition) for _ in range(count): # Find eligible replication groups. non_empty_rgs = [ rg for rg in six.itervalues(self.cluster_topology.rgs) if rg.count_replica(partition) > 0 ] rgs_with_osr = [ rg for rg in non_empty_rgs if any(b in osr for b in rg.brokers) ] candidate_rgs = rgs_with_osr or non_empty_rgs # Since replicas will only be removed from the candidate rgs, only # count replicas on those rgs when determining which rgs are # over-replicated. replica_count = sum( rg.count_replica(partition) for rg in candidate_rgs ) opt_replicas, _ = compute_optimum( len(candidate_rgs), replica_count, ) over_replicated_rgs = [ rg for rg in candidate_rgs if rg.count_replica(partition) > opt_replicas ] or candidate_rgs candidate_rgs = over_replicated_rgs or candidate_rgs # Remove the replica from every eligible broker. new_states = [] for rg in candidate_rgs: osr_brokers = { broker for broker in rg.brokers if broker in osr } candidate_brokers = osr_brokers or rg.brokers for broker in candidate_brokers: if broker in partition.replicas: broker_index = state.brokers.index(broker) new_states.append( state.remove_replica(partition_index, broker_index) ) # Update cluster topology with highest scoring state. state = sorted(new_states, key=self._score, reverse=True)[0] self.cluster_topology.update_cluster_topology(state.assignment) osr = {b for b in osr if b in partition.replicas}
0.001132
def send(self, content):
    """
    Write the content received from the SSH client to the standard
    input of the forked command.

    :param str content: string to be sent to the forked command
    """
    try:
        self.process.stdin.write(content)
    except IOError as e:
        # There was a problem with the child process. It probably
        # died and we can't proceed. The best option here is to
        # raise an exception informing the user that the informed
        # ProxyCommand is not working.
        raise ProxyCommandFailure(" ".join(self.cmd), e.strerror)
    return len(content)
0.003021
def close(self):
    """shut down the pool's workers

    this method sets the :attr:`closing` attribute, and once all queued
    work has been completed it will set the :attr:`closed` attribute
    """
    self._closing = True
    for i in xrange(self.size):
        self.inq.put(_STOP)
0.006349
def stateenabled(self, window_name, object_name):
    """
    Check whether an object state is enabled or not

    @param window_name: Window name to look for, either full name,
        LDTP's name convention, or a Unix glob.
    @type window_name: string
    @param object_name: Object name to look for, either full name,
        LDTP's name convention, or a Unix glob.
    @type object_name: string

    @return: 1 on success 0 on failure.
    @rtype: integer
    """
    try:
        object_handle = self._get_object_handle(window_name, object_name)
        if object_handle.AXEnabled:
            return 1
    except LdtpServerException:
        pass
    return 0
0.002736
def find_files(self, word):
    """Yield matching directory or file names.

    :param word:
    :return: iterable
    """
    base_path, last_path, position = parse_path(word)
    paths = suggest_path(word)
    for name in sorted(paths):
        suggestion = complete_path(name, last_path)
        if suggestion:
            yield Completion(suggestion, position)
0.004963
def std(self, ddof=1, *args, **kwargs):
    """
    Compute standard deviation of groups, excluding missing values.

    Parameters
    ----------
    ddof : integer, default 1
        Degrees of freedom.
    """
    nv.validate_resampler_func('std', args, kwargs)
    return self._downsample('std', ddof=ddof)
0.00578
def _parse_time_string(time_str):
    """
    Get a datetime.time object from a string time representation.

    The start and end of acquisition are stored in the optional keyword
    parameters $BTIM and $ETIM. The following formats are used according
    to the FCS standard:
        - FCS 2.0: 'hh:mm:ss'
        - FCS 3.0: 'hh:mm:ss[:tt]', where 'tt' is optional, and represents
          fractional seconds in 1/60ths.
        - FCS 3.1: 'hh:mm:ss[.cc]', where 'cc' is optional, and represents
          fractional seconds in 1/100ths.
    This function attempts to transform these formats to 'hh:mm:ss:ffffff',
    where 'ffffff' is in microseconds, and then parse it using the datetime
    module.

    Parameters:
    -----------
    time_str : str, or None
        String representation of time, or None.

    Returns:
    --------
    t : datetime.time, or None
        Time parsed from `time_str`. If parsing was not possible, return
        None. If `time_str` is None, return None.
    """
    # If input is None, return None
    if time_str is None:
        return None

    time_l = time_str.split(':')
    if len(time_l) == 3:
        # Either 'hh:mm:ss' or 'hh:mm:ss.cc'
        if '.' in time_l[2]:
            # 'hh:mm:ss.cc' format
            time_str = time_str.replace('.', ':')
        else:
            # 'hh:mm:ss' format
            time_str = time_str + ':0'
        # Attempt to parse string, return None if not possible
        try:
            t = datetime.datetime.strptime(time_str, '%H:%M:%S:%f').time()
        except:
            t = None
    elif len(time_l) == 4:
        # 'hh:mm:ss:tt' format
        time_l[3] = '{:06d}'.format(int(float(time_l[3]) * 1e6 / 60))
        time_str = ':'.join(time_l)
        # Attempt to parse string, return None if not possible
        try:
            t = datetime.datetime.strptime(time_str, '%H:%M:%S:%f').time()
        except:
            t = None
    else:
        # Unknown format
        t = None

    return t
0.001817
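The three FCS formats handled by _parse_time_string above, as a short sketch:

print(_parse_time_string("14:22:05"))       # FCS 2.0              -> 14:22:05
print(_parse_time_string("14:22:05:30"))    # FCS 3.0, 30/60 s     -> 14:22:05.500000
print(_parse_time_string("14:22:05.25"))    # FCS 3.1, 25/100 s    -> 14:22:05.250000
print(_parse_time_string("garbled"))        # unknown format       -> None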
def add_scan_alarm(self, scan_id, host='', name='', value='', port='',
                   test_id='', severity='', qod=''):
    """ Adds an alarm result to scan_id scan. """
    self.scan_collection.add_result(scan_id, ResultType.ALARM, host, name,
                                    value, port, test_id, severity, qod)
0.008902
def _base_placeholder(self):
    """
    Return the notes master placeholder this notes slide placeholder
    inherits from, or |None| if no placeholder of the matching type is
    present.
    """
    notes_master = self.part.notes_master
    ph_type = self.element.ph_type
    return notes_master.placeholders.get(ph_type=ph_type)
0.005495
def speed_average(Temperature, element, isotope):
    r"""This function calculates the average speed (in meters per second)
    of an atom in a vapour assuming a Maxwell-Boltzmann velocity
    distribution. This is simply sqrt(8*k_B*T/m/pi) where k_B is
    Boltzmann's constant, T is the temperature (in Kelvins) and m is
    the mass of the atom (in kilograms).

    >>> print speed_average(25+273.15,"Rb",85)
    272.65940782

    >>> print speed_average(25+273.15,"Cs",133)
    217.938062809
    """
    atom = Atom(element, isotope)
    return sqrt(8 * k_B * Temperature / atom.mass / pi)
0.005085
def session_demo_danger_callback(da_children, session_state=None, **kwargs):
    'Update output based just on state'
    if not session_state:
        return "Session state not yet available"
    return ("Session state contains: "
            + str(session_state.get('bootstrap_demo_state', "NOTHING"))
            + " and the page render count is "
            + str(session_state.get("ind_use", "NOT SET")))
0.005376
def parse_cctop_full(infile): """Parse a CCTOP XML results file and return a list of the consensus TM domains in the format:: [(1, inside_outside_or_tm), (2, inside_outside_or_tm), ...] Where the first value of a tuple is the sequence residue number, and the second is the predicted location with the values 'I' (inside), 'O' (outside), or 'M' (membrane). Args: infile (str): Path to CCTOP XML file Returns: list: List of tuples in the format described above """ parser = etree.XMLParser(ns_clean=True) with open(infile, 'r') as f: tree = etree.fromstring(f.read(), parser) all_info = [] if tree.find('Topology') is not None: for r in tree.find('Topology').findall('Region'): region_start = int(r.attrib['from']) region_end = int(r.attrib['to']) region = r.attrib['loc'] for i in range(region_start, region_end + 1): all_info.append((i, region)) return all_info
0.002865
def batchnorm_to_fp32(module):
    '''
    BatchNorm layers to have parameters in single precision.
    Find all layers and convert them back to float. This can't
    be done with built in .apply as that function will apply fn
    to all modules, parameters, and buffers. Thus we wouldn't
    be able to guard the float conversion based on the module type.
    '''
    if isinstance(module, nn.modules.batchnorm._BatchNorm):
        module.float()
    for child in module.children():
        batchnorm_to_fp32(child)
    return module
0.001873
def _init_code(self, code: int) -> None:
    """ Initialize from an int terminal code. """
    if -1 < code < 256:
        self.code = '{:02}'.format(code)
        self.hexval = term2hex(code)
        self.rgb = hex2rgb(self.hexval)
    else:
        raise ValueError(' '.join((
            'Code must be in the range 0-255, inclusive.',
            'Got: {} ({})'
        )).format(code, getattr(code, '__name__', type(code).__name__)))
0.004193
def tagged_projects( self, tag): """*return a list of projects contained within this taskpaper object filtered by a given tag* **Key Arguments:** - ``tag`` -- the tag to filter the projects by. **Return:** - ``projectList`` -- the list of filtered projects **Usage:** To filter the projects recursively found with a taskpaper document object and return only those projects tagged with ``flag``, using the following: .. code-block:: python filteredProjects = doc.tagged_projects("flag") for p in filteredProjects: print p.title Note you can give the tag with or without the *@*, and you can also give a tag attribute, e.g. ``@due(today)`` """ self.refresh projectList = [] tag = tag.replace("@", "").lower() for p in self.projects: for aTag in p.tags: if "(" not in tag: aTag = aTag.split("(")[0] if aTag.lower() == tag: projectList.append(p) break subProjects = p.tagged_projects(tag) projectList += subProjects return projectList
0.003897
def updateWCS(self, pixel_scale=None, orient=None,refpos=None,refval=None,size=None): """ Create a new CD Matrix from the absolute pixel scale and reference image orientation. """ # Set up parameters necessary for updating WCS # Check to see if new value is provided, # If not, fall back on old value as the default _updateCD = no if orient is not None and orient != self.orient: pa = DEGTORAD(orient) self.orient = orient self._orient_lin = orient _updateCD = yes else: # In case only pixel_scale was specified pa = DEGTORAD(self.orient) if pixel_scale is not None and pixel_scale != self.pscale: _ratio = pixel_scale / self.pscale self.pscale = pixel_scale _updateCD = yes else: # In case, only orient was specified pixel_scale = self.pscale _ratio = None # If a new plate scale was given, # the default size should be revised accordingly # along with the default reference pixel position. # Added 31 Mar 03, WJH. if _ratio is not None: self.naxis1 /= _ratio self.naxis2 /= _ratio self.crpix1 = self.naxis1/2. self.crpix2 = self.naxis2/2. # However, if the user provides a given size, # set it to use that no matter what. if size is not None: self.naxis1 = size[0] self.naxis2 = size[1] # Insure that naxis1,2 always return as integer values. self.naxis1 = int(self.naxis1) self.naxis2 = int(self.naxis2) if refpos is not None: self.crpix1 = refpos[0] self.crpix2 = refpos[1] if self.crpix1 is None: self.crpix1 = self.naxis1/2. self.crpix2 = self.naxis2/2. if refval is not None: self.crval1 = refval[0] self.crval2 = refval[1] # Reset WCS info now... if _updateCD: # Only update this should the pscale or orientation change... pscale = pixel_scale / 3600. self.cd11 = -pscale * N.cos(pa) self.cd12 = pscale * N.sin(pa) self.cd21 = self.cd12 self.cd22 = -self.cd11 # Now make sure that all derived values are really up-to-date based # on these changes self.update()
0.002414
def now_micros(absolute=False) -> int:
    """Return current micros since epoch as integer."""
    micros = int(time.time() * 1e6)
    if absolute:
        return micros
    return micros - EPOCH_MICROS
0.026316
def getResourceFileList(self, pid): """ Get a listing of files within a resource. :param pid: The HydroShare ID of the resource whose resource files are to be listed. :raises: HydroShareArgumentException if any parameters are invalid. :raises: HydroShareNotAuthorized if user is not authorized to perform action. :raises: HydroShareNotFound if the resource was not found. :raises: HydroShareHTTPException if an unexpected HTTP response code is encountered. :return: A generator that can be used to fetch dict objects, each dict representing the JSON object representation of the resource returned by the REST end point. For example: { "count": 95, "next": "https://www.hydroshare.org/hsapi/resource/32a08bc23a86e471282a832143491b49/file_list/?page=2", "previous": null, "results": [ { "url": "http://www.hydroshare.org/django_irods/download/32a08bc23a86e471282a832143491b49/data/contents/foo/bar.txt", "size": 23550, "content_type": "text/plain" }, { "url": "http://www.hydroshare.org/django_irods/download/32a08bc23a86e471282a832143491b49/data/contents/dem.tif", "size": 107545, "content_type": "image/tiff" }, { "url": "http://www.hydroshare.org/django_irods/download/32a08bc23a86e471282a832143491b49/data/contents/data.csv", "size": 148, "content_type": "text/csv" }, { "url": "http://www.hydroshare.org/django_irods/download/32a08bc23a86e471282a832143491b49/data/contents/data.sqlite", "size": 267118, "content_type": "application/x-sqlite3" }, { "url": "http://www.hydroshare.org/django_irods/download/32a08bc23a86e471282a832143491b49/data/contents/viz.png", "size": 128, "content_type": "image/png" } ] } """ url = "{url_base}/resource/{pid}/files/".format(url_base=self.url_base, pid=pid) return resultsListGenerator(self, url)
0.005768
def which(program, paths=None):
    """ takes a program name or full path, plus an optional collection of search
    paths, and returns the full path of the requested executable.  if paths is
    specified, it is the entire list of search paths, and the PATH env is not
    used at all.  otherwise, PATH env is used to look for the program """

    def is_exe(fpath):
        return (os.path.exists(fpath) and
                os.access(fpath, os.X_OK) and
                os.path.isfile(os.path.realpath(fpath)))

    found_path = None
    fpath, fname = os.path.split(program)

    # if there's a path component, then we've specified a path to the program,
    # and we should just test if that program is executable.  if it is, return
    if fpath:
        program = os.path.abspath(os.path.expanduser(program))
        if is_exe(program):
            found_path = program

    # otherwise, we've just passed in the program name, and we need to search
    # the paths to find where it actually lives
    else:
        paths_to_search = []

        if isinstance(paths, (tuple, list)):
            paths_to_search.extend(paths)
        else:
            env_paths = os.environ.get("PATH", "").split(os.pathsep)
            paths_to_search.extend(env_paths)

        for path in paths_to_search:
            exe_file = os.path.join(path, program)
            if is_exe(exe_file):
                found_path = exe_file
                break

    return found_path
0.001368
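Typical calls to which above (output paths are illustrative and depend on the machine):

print(which("python3"))
# e.g. '/usr/bin/python3' -- searched via the PATH environment variable

print(which("mytool", paths=["/opt/custom/bin", "/usr/local/bin"]))
# searches only the two given directories; returns None if not found there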
def create_and_write_saml_metadata(proxy_conf, key, cert, dir, valid, split_frontend_metadata=False, split_backend_metadata=False): """ Generates SAML metadata for the given PROXY_CONF, signed with the given KEY and associated CERT. """ satosa_config = SATOSAConfig(proxy_conf) secc = _get_security_context(key, cert) frontend_entities, backend_entities = create_entity_descriptors(satosa_config) output = [] if frontend_entities: if split_frontend_metadata: output.extend(_create_split_entity_descriptors(frontend_entities, secc, valid)) else: output.extend(_create_merged_entities_descriptors(frontend_entities, secc, valid, "frontend.xml")) if backend_entities: if split_backend_metadata: output.extend(_create_split_entity_descriptors(backend_entities, secc, valid)) else: output.extend(_create_merged_entities_descriptors(backend_entities, secc, valid, "backend.xml")) for metadata, filename in output: path = os.path.join(dir, filename) print("Writing metadata to '{}'".format(path)) with open(path, "w") as f: f.write(metadata)
0.00652
def _already_resized_on_fb(self,fn,pid,_megapixels): """Checks if image file (fn) with photo_id (pid) has already been resized on fb. If so, returns True""" logger.debug("%s - resize requested"%(fn)) # Get width/height from fb width_fb,height_fb=self._getphoto_originalsize(pid) # Now compute what image will be if we resize it new_width,new_height=pusher_utils.resize_compute_width_height(\ fn,_megapixels) logger.debug("%s - fb %d/%d, current %d/%d"\ %(fn,width_fb,height_fb,new_width,new_height)) # Check both cases since FB sometimes rotates photos if width_fb==new_width and height_fb==new_height: return True elif width_fb==new_height and height_fb==new_width: return True return False
0.027251
def minimum(attrs, inputs, proto_obj):
    """Elementwise minimum of arrays."""
    # MXNet minimum compares only two symbols at a time.
    # ONNX can send more than two to compare.
    # Breaking into multiple mxnet ops to compare two symbols at a time
    if len(inputs) > 1:
        mxnet_op = symbol.minimum(inputs[0], inputs[1])
        for op_input in inputs[2:]:
            mxnet_op = symbol.minimum(mxnet_op, op_input)
    else:
        mxnet_op = symbol.minimum(inputs[0], inputs[0])
    return mxnet_op, attrs, inputs
0.00189
def linsolve(self, A, b):
    """
    Solve linear equation set ``Ax = b`` and store the solutions in ``b``.

    Parameters
    ----------
    A
        Sparse matrix
    b
        RHS of the equation

    Returns
    -------
    None
    """
    if self.sparselib == 'umfpack':
        return umfpack.linsolve(A, b)
    elif self.sparselib == 'klu':
        return klu.linsolve(A, b)
0.004435
def add_file(dk_api, kitchen, recipe_name, message, api_file_key): """ returns a string. :param dk_api: -- api object :param kitchen: string :param recipe_name: string :param message: string -- commit message, string :param api_file_key: string -- directory where the recipe file lives :rtype: DKReturnCode """ rc = DKReturnCode() if kitchen is None or recipe_name is None or message is None or api_file_key is None: s = 'ERROR: DKCloudCommandRunner bad input parameters' rc.set(rc.DK_FAIL, s) return rc ig = DKIgnore() if ig.ignore(api_file_key): rs = 'DKCloudCommand.add_file ignoring %s' % api_file_key rc.set_message(rs) return rc if not os.path.exists(api_file_key): s = "'%s' does not exist" % api_file_key rc.set(rc.DK_FAIL, s) return rc try: with open(api_file_key, 'r') as f: file_contents = f.read() except ValueError as e: s = 'ERROR: %s' % e.message rc.set(rc.DK_FAIL, s) return rc rc = dk_api.add_file(kitchen, recipe_name, message, api_file_key, file_contents) if rc.ok(): rs = 'DKCloudCommand.add_file for %s succeed' % api_file_key else: rs = 'DKCloudCommand.add_file for %s failed\nmessage: %s' % (api_file_key, rc.get_message()) rc.set_message(rs) return rc
0.003238
def get_tokens_unprocessed(self, text, stack=('root',)):
    """Reset the content-type state."""
    self.content_type = None
    return RegexLexer.get_tokens_unprocessed(self, text, stack)
0.00995
def check_weight_method(weight_method_spec, use_orig_distr=False, allow_non_symmetric=False): "Check if weight_method is recognized and implemented, or ensure it is callable." if not isinstance(use_orig_distr, bool): raise TypeError('use_original_distribution flag must be boolean!') if not isinstance(allow_non_symmetric, bool): raise TypeError('allow_non_symmetric flag must be boolean') if isinstance(weight_method_spec, str): weight_method_spec = weight_method_spec.lower() if weight_method_spec in list_medpy_histogram_metrics: from medpy.metric import histogram as medpy_hist_metrics weight_func = getattr(medpy_hist_metrics, weight_method_spec) if use_orig_distr: warnings.warn('use_original_distribution must be False when using builtin histogram metrics, ' 'which expect histograms as input - setting it to False.', HiwenetWarning) use_orig_distr = False elif weight_method_spec in metrics_on_original_features: weight_func = getattr(more_metrics, weight_method_spec) if not use_orig_distr: warnings.warn('use_original_distribution must be True when using builtin non-histogram metrics, ' 'which expect original feature values in ROI/node as input ' '- setting it to True.', HiwenetWarning) use_orig_distr = True if weight_method_spec in symmetric_metrics_on_original_features: print('Chosen metric is symmetric. Ignoring asymmetric=False flag.') allow_non_symmetric=False else: raise NotImplementedError('Chosen histogram distance/metric not implemented or invalid.') elif callable(weight_method_spec): # ensure 1) takes two ndarrays try: dummy_weight = weight_method_spec(make_random_histogram(), make_random_histogram()) except: raise TypeError('Error applying given callable on two input arrays.\n' '{} must accept two arrays and return a single scalar value!') else: # and 2) returns only one number if not np.isscalar(dummy_weight): raise TypeError('Given callable does not return a single scalar as output.') weight_func = weight_method_spec else: raise ValueError('Supplied method to compute edge weight is not recognized:\n' 'must be a string identifying one of the implemented methods\n{}' '\n or a valid callable that accepts that two arrays ' 'and returns 1 scalar.'.format(list_medpy_histogram_metrics)) return weight_func, use_orig_distr, allow_non_symmetric
0.0062
def accept_moderator_invite(self, subreddit):
    """Accept a moderator invite to the given subreddit.

    Callable upon an instance of Subreddit with no arguments.

    :returns: The json response from the server.
    """
    data = {'r': six.text_type(subreddit)}
    # Clear moderated subreddits and cache
    self.user._mod_subs = None  # pylint: disable=W0212
    self.evict(self.config['my_mod_subreddits'])
    return self.request_json(self.config['accept_mod_invite'], data=data)
0.00381
def execute(self, *args, **kwargs):
    """
    Initializes and runs the tool. This is shorthand for parsing the
    command line arguments and then calling:

        self.process(parsed_arguments)
    """
    args = self.parser.parse_args(*args, **kwargs)
    self.process(args)
0.006231
def _make_skel_func(code, cell_count, base_globals=None): """ Creates a skeleton function object that contains just the provided code and the correct number of cells in func_closure. All other func attributes (e.g. func_globals) are empty. """ if base_globals is None: base_globals = {} elif isinstance(base_globals, string_types): base_globals_name = base_globals try: # First try to reuse the globals from the module containing the # function. If it is not possible to retrieve it, fallback to an # empty dictionary. if importlib is not None: base_globals = vars(importlib.import_module(base_globals)) elif sys.modules.get(base_globals, None) is not None: base_globals = vars(sys.modules[base_globals]) else: raise ImportError except ImportError: base_globals = _dynamic_modules_globals.get( base_globals_name, None) if base_globals is None: base_globals = _DynamicModuleFuncGlobals() _dynamic_modules_globals[base_globals_name] = base_globals base_globals['__builtins__'] = __builtins__ closure = ( tuple(_make_empty_cell() for _ in range(cell_count)) if cell_count >= 0 else None ) return types.FunctionType(code, base_globals, None, None, closure)
0.000689
def getDXGIOutputInfo(self):
    """
    [D3D10/11 Only]
    Returns the adapter index and output index that the user should pass into
    EnumAdapters and EnumOutputs to create the device and swap chain in DX10
    and DX11. If an error occurs both indices will be set to -1.
    """
    fn = self.function_table.getDXGIOutputInfo
    pnAdapterIndex = c_int32()
    fn(byref(pnAdapterIndex))
    return pnAdapterIndex.value
0.008753
def get_event_public_discounts(self, id, **data):
    """
    GET /events/:id/public_discounts/
    Returns a :ref:`paginated <pagination>` response with a key of
    ``discounts``, containing a list of public :format:`discounts <discount>`
    available on this event.

    Note that public discounts and discounts have exactly the same form and
    structure; they're just namespaced separately, and public ones (and the
    public GET endpoints) are visible to anyone who can see the event.
    """
    return self.get("/events/{0}/public_discounts/".format(id), data=data)
0.008446
def make_zone_file(json_zone_file_input, origin=None, ttl=None, template=None): """ Generate the DNS zonefile, given a json-encoded description of the zone file (@json_zone_file) and the template to fill in (@template) json_zone_file = { "$origin": origin server, "$ttl": default time-to-live, "soa": [ soa records ], "ns": [ ns records ], "a": [ a records ], "aaaa": [ aaaa records ] "cname": [ cname records ] "alias": [ alias records ] "mx": [ mx records ] "ptr": [ ptr records ] "txt": [ txt records ] "srv": [ srv records ] "spf": [ spf records ] "uri": [ uri records ] } """ if template is None: template = DEFAULT_TEMPLATE[:] # careful... json_zone_file = copy.deepcopy(json_zone_file_input) if origin is not None: json_zone_file['$origin'] = origin if ttl is not None: json_zone_file['$ttl'] = ttl soa_records = [json_zone_file.get('soa')] if json_zone_file.get('soa') else None zone_file = template zone_file = process_origin(json_zone_file.get('$origin', None), zone_file) zone_file = process_ttl(json_zone_file.get('$ttl', None), zone_file) zone_file = process_soa(soa_records, zone_file) zone_file = process_ns(json_zone_file.get('ns', None), zone_file) zone_file = process_a(json_zone_file.get('a', None), zone_file) zone_file = process_aaaa(json_zone_file.get('aaaa', None), zone_file) zone_file = process_cname(json_zone_file.get('cname', None), zone_file) zone_file = process_alias(json_zone_file.get('alias', None), zone_file) zone_file = process_mx(json_zone_file.get('mx', None), zone_file) zone_file = process_ptr(json_zone_file.get('ptr', None), zone_file) zone_file = process_txt(json_zone_file.get('txt', None), zone_file) zone_file = process_srv(json_zone_file.get('srv', None), zone_file) zone_file = process_spf(json_zone_file.get('spf', None), zone_file) zone_file = process_uri(json_zone_file.get('uri', None), zone_file) # remove newlines, but terminate with one zone_file = "\n".join( filter( lambda l: len(l.strip()) > 0, [tl.strip() for tl in zone_file.split("\n")] ) ) + "\n" return zone_file
0.001686
def ReliefF_compute_scores(inst, attr, nan_entries, num_attributes, mcmap,
                           NN, headers, class_type, X, y, labels_std, data_type):
    """ Unique scoring procedure for ReliefF algorithm. Scoring based on k nearest
    hits and misses of current target instance. """
    scores = np.zeros(num_attributes)
    for feature_num in range(num_attributes):
        scores[feature_num] += compute_score(attr, mcmap, NN, feature_num, inst,
                                             nan_entries, headers, class_type,
                                             X, y, labels_std, data_type)
    return scores
0.008977
def get(self, request, *args, **kwargs): """ Retrieve list of service requests """ if 'service_code' not in request.GET.keys(): return Response({ 'detail': _('A service code must be inserted') }, status=404) service_code = request.GET['service_code'] if service_code not in SERVICES.keys(): return Response({ 'detail': _('Service not found') }, status=404) start_date = None end_date = None status = None layer = None STATUSES = {} for status_type in ('open', 'closed'): STATUSES[status_type] = [k for k, v in STATUS.items() if v == status_type] if 'start_date' in request.GET.keys(): start_date = request.GET['start_date'] if iso8601_REGEXP.match(start_date) is None: return Response({ 'detail': _('Invalid date inserted') }, status=404) if 'end_date' in request.GET.keys(): end_date = request.GET['end_date'] if iso8601_REGEXP.match(end_date) is None: return Response({ 'detail': _('Invalid date inserted') }, status=404) if 'status' in request.GET.keys(): if request.GET['status'] not in ('open','closed'): return Response({ 'detail': _('Invalid status inserted') }, status=404) status = request.GET['status'] if 'layer' in request.GET.keys(): layer = request.GET['layer'] node_layer = get_object_or_404(Layer, slug=layer) service_model = MODELS[service_code] if service_code in ('vote', 'comment', 'rate'): self.queryset = service_model.objects.none() else: self.queryset = service_model.objects.all() # Filter by layer if layer is not None: self.queryset = self.queryset.filter(layer = node_layer) # Check of date parameters if start_date is not None and end_date is not None: self.queryset = self.queryset.filter(added__gte = start_date).filter(added__lte = end_date) if start_date is not None and end_date is None: self.queryset = self.queryset.filter(added__gte = start_date) if start_date is None and end_date is not None: self.queryset = self.queryset.filter(added__lte = end_date) # Check of status parameter if status is not None: q_list = [Q(status__slug__exact = s) for s in STATUSES[status]] self.queryset = self.queryset.filter(reduce(operator.or_, q_list)) return self.list(request, *args, **kwargs)
0.01168
def columns(x, rho, proxop): """Applies a proximal operator to the columns of a matrix""" xnext = np.zeros_like(x) for ix in range(x.shape[1]): xnext[:, ix] = proxop(x[:, ix], rho) return xnext
0.004545
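A small usage sketch for the column-wise proximal helper above; soft-thresholding is used as a stand-in prox operator and is not part of the source module (the 1/rho threshold is just one common parameterization):

import numpy as np

def soft_threshold(v, rho):
    # prox of the l1 norm with threshold 1/rho (illustrative choice)
    return np.sign(v) * np.maximum(np.abs(v) - 1.0 / rho, 0.0)

A = np.random.randn(5, 3)
A_shrunk = columns(A, rho=2.0, proxop=soft_threshold)
print(A_shrunk.shape)  # (5, 3); each column shrunk independently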
def graph_type(self, graph): """What type of graph is this?""" graph = self.pack(graph) return self.sql('graph_type', graph).fetchone()[0]
0.012346
def handle_cursor(cls, cursor, query, session): """Updates progress information""" from pyhive import hive # pylint: disable=no-name-in-module unfinished_states = ( hive.ttypes.TOperationState.INITIALIZED_STATE, hive.ttypes.TOperationState.RUNNING_STATE, ) polled = cursor.poll() last_log_line = 0 tracking_url = None job_id = None while polled.operationState in unfinished_states: query = session.query(type(query)).filter_by(id=query.id).one() if query.status == QueryStatus.STOPPED: cursor.cancel() break log = cursor.fetch_logs() or '' if log: log_lines = log.splitlines() progress = cls.progress(log_lines) logging.info('Progress total: {}'.format(progress)) needs_commit = False if progress > query.progress: query.progress = progress needs_commit = True if not tracking_url: tracking_url = cls.get_tracking_url(log_lines) if tracking_url: job_id = tracking_url.split('/')[-2] logging.info( 'Found the tracking url: {}'.format(tracking_url)) tracking_url = tracking_url_trans(tracking_url) logging.info( 'Transformation applied: {}'.format(tracking_url)) query.tracking_url = tracking_url logging.info('Job id: {}'.format(job_id)) needs_commit = True if job_id and len(log_lines) > last_log_line: # Wait for job id before logging things out # this allows for prefixing all log lines and becoming # searchable in something like Kibana for l in log_lines[last_log_line:]: logging.info('[{}] {}'.format(job_id, l)) last_log_line = len(log_lines) if needs_commit: session.commit() time.sleep(hive_poll_interval) polled = cursor.poll()
0.001296
def get_interface_detail_output_interface_ifHCOutBroadcastPkts(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_interface_detail = ET.Element("get_interface_detail") config = get_interface_detail output = ET.SubElement(get_interface_detail, "output") interface = ET.SubElement(output, "interface") interface_type_key = ET.SubElement(interface, "interface-type") interface_type_key.text = kwargs.pop('interface_type') interface_name_key = ET.SubElement(interface, "interface-name") interface_name_key.text = kwargs.pop('interface_name') ifHCOutBroadcastPkts = ET.SubElement(interface, "ifHCOutBroadcastPkts") ifHCOutBroadcastPkts.text = kwargs.pop('ifHCOutBroadcastPkts') callback = kwargs.pop('callback', self._callback) return callback(config)
0.002237
def utf8(s): """ Coerce an object to bytes if it is Unicode. """ if isinstance(s, mitogen.core.UnicodeType): s = s.encode('utf-8') return s
0.005988
def LogAccessWrapper(func): """Decorator that ensures that HTTP access is logged.""" def Wrapper(request, *args, **kwargs): """Wrapping function.""" try: response = func(request, *args, **kwargs) server_logging.LOGGER.LogHttpAdminUIAccess(request, response) except Exception: # pylint: disable=g-broad-except # This should never happen: wrapped function is supposed to handle # all possible exceptions and generate a proper Response object. # Still, handling exceptions here to guarantee that the access is logged # no matter what. response = werkzeug_wrappers.Response("", status=500) server_logging.LOGGER.LogHttpAdminUIAccess(request, response) raise return response return Wrapper
0.017016
def calcFontScaling(self): '''Calculates the current font size and left position for the current window.''' self.ypx = self.figure.get_size_inches()[1]*self.figure.dpi self.xpx = self.figure.get_size_inches()[0]*self.figure.dpi self.fontSize = self.vertSize*(self.ypx/2.0) self.leftPos = self.axes.get_xlim()[0] self.rightPos = self.axes.get_xlim()[1]
0.007519
def gen_anytext(*args): """ Convenience function to create bag of words for anytext property """ bag = [] for term in args: if term is not None: if isinstance(term, list): for term2 in term: if term2 is not None: bag.append(term2) else: bag.append(term) return ' '.join(bag)
0.002451
def update_remote_ids(self, remote_folder): """ Set remote id based on remote_folder and check children against this folder's children. :param remote_folder: RemoteFolder to compare against """ self.remote_id = remote_folder.id _update_remote_children(remote_folder, self.children)
0.009119
def assignrepr(self, prefix, style=None, utcoffset=None):
        """Return a |repr| string with a prefixed assignment.

        Without optional arguments given, printing the returned string
        looks like:

        >>> from hydpy import Timegrid
        >>> timegrid = Timegrid('1996-11-01 00:00:00',
        ...                     '1997-11-01 00:00:00',
        ...                     '1d')
        >>> print(timegrid.assignrepr(prefix='timegrid = '))
        timegrid = Timegrid('1996-11-01 00:00:00',
                            '1997-11-01 00:00:00',
                            '1d')

        The optional arguments are passed to method |Date.to_repr|
        without any modifications:

        >>> print(timegrid.assignrepr(
        ...     prefix='', style='iso1', utcoffset=120))
        Timegrid('1996-11-01T01:00:00+02:00',
                 '1997-11-01T01:00:00+02:00',
                 '1d')
        """
        skip = len(prefix) + 9
        blanks = ' ' * skip
        return (f"{prefix}Timegrid('"
                f"{self.firstdate.to_string(style, utcoffset)}',\n"
                f"{blanks}'{self.lastdate.to_string(style, utcoffset)}',\n"
                f"{blanks}'{str(self.stepsize)}')")
0.001645
async def forget(request, response): """Forget previously remembered identity. Usually it clears cookie or server-side storage to forget user session. """ identity_policy = request.config_dict.get(IDENTITY_KEY) if identity_policy is None: text = ("Security subsystem is not initialized, " "call aiohttp_security.setup(...) first") # in order to see meaningful exception message both: on console # output and rendered page we add same message to *reason* and # *text* arguments. raise web.HTTPInternalServerError(reason=text, text=text) await identity_policy.forget(request, response)
0.001497
def __send_command(self, command, command_string=b'', response_size=8): """ send command to the terminal """ if command not in [const.CMD_CONNECT, const.CMD_AUTH] and not self.is_connect: raise ZKErrorConnection("instance are not connected.") buf = self.__create_header(command, command_string, self.__session_id, self.__reply_id) try: if self.tcp: top = self.__create_tcp_top(buf) self.__sock.send(top) self.__tcp_data_recv = self.__sock.recv(response_size + 8) self.__tcp_length = self.__test_tcp_top(self.__tcp_data_recv) if self.__tcp_length == 0: raise ZKNetworkError("TCP packet invalid") self.__header = unpack('<4H', self.__tcp_data_recv[8:16]) self.__data_recv = self.__tcp_data_recv[8:] else: self.__sock.sendto(buf, self.__address) self.__data_recv = self.__sock.recv(response_size) self.__header = unpack('<4H', self.__data_recv[:8]) except Exception as e: raise ZKNetworkError(str(e)) self.__response = self.__header[0] self.__reply_id = self.__header[3] self.__data = self.__data_recv[8:] if self.__response in [const.CMD_ACK_OK, const.CMD_PREPARE_DATA, const.CMD_DATA]: return { 'status': True, 'code': self.__response } return { 'status': False, 'code': self.__response }
0.003123
def load_bulk(cls, bulk_data, parent=None, keep_ids=False):
        """Loads a list/dictionary structure into the tree."""
        cls = get_result_class(cls)

        # tree, iterative preorder
        added = []
        if parent:
            parent_id = parent.pk
        else:
            parent_id = None
        # stack of nodes to analyze
        stack = [(parent_id, node) for node in bulk_data[::-1]]
        foreign_keys = cls.get_foreign_keys()

        while stack:
            parent_id, node_struct = stack.pop()
            # shallow copy of the data structure so it doesn't persist...
            node_data = node_struct['data'].copy()
            cls._process_foreign_keys(foreign_keys, node_data)
            if keep_ids:
                node_data['id'] = node_struct['id']
            if parent_id:
                parent = cls.objects.get(pk=parent_id)
                node_obj = parent.add_child(**node_data)
            else:
                node_obj = cls.add_root(**node_data)
            added.append(node_obj.pk)
            if 'children' in node_struct:
                # extending the stack with the current node as the parent of
                # the new nodes
                stack.extend([
                    (node_obj.pk, node)
                    for node in node_struct['children'][::-1]
                ])
        return added
0.001472
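A hedged sketch of the bulk_data shape that load_bulk above walks: a list of dicts with a 'data' payload and an optional 'children' list (plus 'id' when keep_ids is used). The 'name' field and the MyNode class are hypothetical:

# Hypothetical node model with a single 'name' field.
bulk_data = [
    {'data': {'name': 'root A'},
     'children': [
         {'data': {'name': 'child A1'}},
         {'data': {'name': 'child A2'},
          'children': [{'data': {'name': 'grandchild A2a'}}]},
     ]},
    {'data': {'name': 'root B'}},
]

# added = MyNode.load_bulk(bulk_data)  # would return the pks of the created nodes in preorder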
def run_analysis(self, argv): """Run this analysis""" args = self._parser.parse_args(argv) obs = BinnedAnalysis.BinnedObs(irfs=args.irfs, expCube=args.expcube, srcMaps=args.cmap, binnedExpMap=args.bexpmap) if args.no_psf: performConvolution = False else: performConvolution = True config = BinnedAnalysis.BinnedConfig(performConvolution=performConvolution) like = BinnedAnalysis.BinnedAnalysis(obs, optimizer='MINUIT', srcModel=GtSrcmapsDiffuse.NULL_MODEL, wmap=None, config=config) source_factory = pyLike.SourceFactory(obs.observation) source_factory.readXml(args.srcmdl, BinnedAnalysis._funcFactory, False, True, True) source = source_factory.releaseSource(args.source) try: diffuse_source = pyLike.DiffuseSource.cast(source) except TypeError: diffuse_source = None if diffuse_source is not None: try: diffuse_source.mapBaseObject().projmap().setExtrapolation(False) except RuntimeError: pass like.logLike.saveSourceMap_partial(args.outfile, source, args.kmin, args.kmax) if args.gzip: os.system("gzip -9 %s" % args.outfile)
0.003752
def list_active_courses_in_account(self, account_id, by_subaccounts=None, by_teachers=None, completed=None, enrollment_term_id=None, enrollment_type=None, hide_enrollmentless_courses=None, include=None, published=None, search_term=None, state=None, with_enrollments=None):
        """
        List active courses in an account.

        Retrieve the list of courses in this account.
        """
        path = {}
        data = {}
        params = {}

        # REQUIRED - PATH - account_id
        """ID"""
        path["account_id"] = account_id

        # OPTIONAL - with_enrollments
        """If true, include only courses with at least one enrollment. If false, include only courses with no enrollments. If not present, do not filter on course enrollment status."""
        if with_enrollments is not None:
            params["with_enrollments"] = with_enrollments

        # OPTIONAL - enrollment_type
        """If set, only return courses that have at least one user enrolled in in the course with one of the specified enrollment types."""
        if enrollment_type is not None:
            self._validate_enum(enrollment_type, ["teacher", "student", "ta", "observer", "designer"])
            params["enrollment_type"] = enrollment_type

        # OPTIONAL - published
        """If true, include only published courses. If false, exclude published courses. If not present, do not filter on published status."""
        if published is not None:
            params["published"] = published

        # OPTIONAL - completed
        """If true, include only completed courses (these may be in state 'completed', or their enrollment term may have ended). If false, exclude completed courses. If not present, do not filter on completed status."""
        if completed is not None:
            params["completed"] = completed

        # OPTIONAL - by_teachers
        """List of User IDs of teachers; if supplied, include only courses taught by one of the referenced users."""
        if by_teachers is not None:
            params["by_teachers"] = by_teachers

        # OPTIONAL - by_subaccounts
        """List of Account IDs; if supplied, include only courses associated with one of the referenced subaccounts."""
        if by_subaccounts is not None:
            params["by_subaccounts"] = by_subaccounts

        # OPTIONAL - hide_enrollmentless_courses
        """If present, only return courses that have at least one enrollment. Equivalent to 'with_enrollments=true'; retained for compatibility."""
        if hide_enrollmentless_courses is not None:
            params["hide_enrollmentless_courses"] = hide_enrollmentless_courses

        # OPTIONAL - state
        """If set, only return courses that are in the given state(s). By default, all states but "deleted" are returned."""
        if state is not None:
            self._validate_enum(state, ["created", "claimed", "available", "completed", "deleted", "all"])
            params["state"] = state

        # OPTIONAL - enrollment_term_id
        """If set, only includes courses from the specified term."""
        if enrollment_term_id is not None:
            params["enrollment_term_id"] = enrollment_term_id

        # OPTIONAL - search_term
        """The partial course name, code, or full ID to match and return in the results list.
Must be at least 3 characters.""" if search_term is not None: params["search_term"] = search_term # OPTIONAL - include """- All explanations can be seen in the {api:CoursesController#index Course API index documentation} - "sections", "needs_grading_count" and "total_scores" are not valid options at the account level""" if include is not None: self._validate_enum(include, ["syllabus_body", "term", "course_progress", "storage_quota_used_mb", "total_students", "teachers"]) params["include"] = include self.logger.debug("GET /api/v1/accounts/{account_id}/courses with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/accounts/{account_id}/courses".format(**path), data=data, params=params, all_pages=True)
0.004349
def unregister_widget(self, widget_cls):
        """Unregisters the given widget."""
        # look up and delete under the same key the widget is registered by
        name = widget_cls().get_name()
        if name in self.widgets:
            del self.widgets[name]
0.010753
def load(source, triples=False, cls=PENMANCodec, **kwargs): """ Deserialize a list of PENMAN-encoded graphs from *source*. Args: source: a filename or file-like object to read from triples: if True, read graphs as triples instead of as PENMAN cls: serialization codec class kwargs: keyword arguments passed to the constructor of *cls* Returns: a list of Graph objects """ decode = cls(**kwargs).iterdecode if hasattr(source, 'read'): return list(decode(source.read())) else: with open(source) as fh: return list(decode(fh.read()))
0.001585
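A short usage sketch for the load helper above; load accepts either a filename or a file-like object, so an in-memory StringIO works for a quick check (the PENMAN string is just an example graph):

import io

penman_text = '(w / want-01 :ARG0 (b / boy) :ARG1 (g / go-01 :ARG0 b))'
graphs = load(io.StringIO(penman_text))
print(len(graphs))  # 1 decoded Graph object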
def get_notification(self, id): """ :calls: `GET /notifications/threads/:id <http://developer.github.com/v3/activity/notifications>`_ :rtype: :class:`github.Notification.Notification` """ assert isinstance(id, (str, unicode)), id headers, data = self._requester.requestJsonAndCheck( "GET", "/notifications/threads/" + id ) return github.Notification.Notification(self._requester, headers, data, completed=True)
0.008016
def main(argv=None):
    """
    :param argv: Argument list to parse or None (sys.argv will be used).
    """
    args = _parse_args((argv if argv else sys.argv)[1:])
    cnf = os.environ.copy() if args.env else {}
    extra_opts = dict()
    if args.extra_opts:
        extra_opts = anyconfig.parser.parse(args.extra_opts)

    diff = _load_diff(args, extra_opts)

    if cnf:
        API.merge(cnf, diff)
    else:
        cnf = diff

    if args.args:
        diff = anyconfig.parser.parse(args.args)
        API.merge(cnf, diff)

    if args.validate:
        _exit_with_output("Validation succeeds")

    cnf = API.gen_schema(cnf) if args.gen_schema else _do_filter(cnf, args)
    _output_result(cnf, args.output, args.otype, args.inputs, args.itype,
                   extra_opts=extra_opts)
0.001256
async def blob(self, elem=None, elem_type=None, params=None): """ Loads/dumps blob :return: """ elem_type = elem_type if elem_type else elem.__class__ if hasattr(elem_type, 'blob_serialize'): elem = elem_type() if elem is None else elem return await elem.blob_serialize(self, elem=elem, elem_type=elem_type, params=params) if self.writing: return await x.dump_blob(self.iobj, elem=elem, elem_type=elem_type, params=params) else: return await x.load_blob(self.iobj, elem_type=elem_type, params=params, elem=elem)
0.008013
def who_has(self, subid): """Return a list of names who own subid in their id range set.""" answer = [] for name in self.__map: if subid in self.__map[name] and not name in answer: answer.append(name) return answer
0.01087
def deform2curve(script, curve=mp_func.torus_knot('t'), step=0.001): """ Deform a mesh along a parametric curve function Provide a parametric curve function with z as the parameter. This will deform the xy cross section of the mesh along the curve as z increases. Source: http://blackpawn.com/texts/pqtorus/ Methodology: T = P' - P N1 = P' + P B = T x N1 N = B x T newPoint = point.x*N + point.y*B """ curve_step = [] for idx, val in enumerate(curve): curve[idx] = val.replace('t', 'z') curve_step.append(val.replace('t', 'z+{}'.format(step))) tangent = mp_func.v_subtract(curve_step, curve) normal1 = mp_func.v_add(curve_step, curve) bee = mp_func.v_cross(tangent, normal1) normal = mp_func.v_cross(bee, tangent) bee = mp_func.v_normalize(bee) normal = mp_func.v_normalize(normal) new_point = mp_func.v_add(mp_func.v_multiply('x', normal), mp_func.v_multiply('y', bee)) function = mp_func.v_add(curve, new_point) vert_function(script, x_func=function[0], y_func=function[1], z_func=function[2]) return function
0.002655
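A numeric restatement of the frame construction described in the docstring above (T = P' - P, N1 = P' + P, B = T x N1, N = B x T), using numpy on a plain helix instead of the string-based mp_func expressions; purely illustrative:

import numpy as np

def frame_at(curve, z, step=0.001):
    """Unit binormal B and normal N of a parametric curve at parameter z."""
    p = np.asarray(curve(z), dtype=float)
    p_next = np.asarray(curve(z + step), dtype=float)
    tangent = p_next - p          # T = P' - P
    normal1 = p_next + p          # N1 = P' + P
    bee = np.cross(tangent, normal1)   # B = T x N1
    normal = np.cross(bee, tangent)    # N = B x T
    return bee / np.linalg.norm(bee), normal / np.linalg.norm(normal)

helix = lambda t: (np.cos(t), np.sin(t), t)
B, N = frame_at(helix, 0.5)
print(B, N)  # two orthogonal unit vectors spanning the cross-section plane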
def auth_required_same_user(*args, **kwargs):
    """
    Decorator for requiring an authenticated user to be the same as the
    user in the URL parameters. By default the URL parameter name to look
    up is ``id``, but this can be customized by passing an argument::

        @auth_required_same_user('user_id')
        @bp.route('/users/<int:user_id>/foo/<int:id>')
        def get(user_id, id):
            # do stuff

    Any keyword arguments are passed along to the @auth_required decorator,
    so roles can also be specified in the same way, e.g.::

        @auth_required_same_user('user_id', role='ROLE_ADMIN')

    Aborts with ``HTTP 403: Forbidden`` if the user-check fails.
    """
    auth_kwargs = {}
    user_id_parameter_name = 'id'
    if not (args and callable(args[0])):
        auth_kwargs = kwargs
        if args and isinstance(args[0], str):
            user_id_parameter_name = args[0]

    def wrapper(fn):
        @wraps(fn)
        @auth_required(**auth_kwargs)
        def decorated(*args, **kwargs):
            try:
                user_id = request.view_args[user_id_parameter_name]
            except KeyError:
                raise KeyError('Unable to find the user lookup parameter '
                               f'{user_id_parameter_name} in the url args')
            if not Permission(UserNeed(user_id)).can():
                abort(HTTPStatus.FORBIDDEN)
            return fn(*args, **kwargs)

        return decorated

    if args and callable(args[0]):
        return wrapper(args[0])
    return wrapper
0.000644
def surface_energy(self, ucell_entry, ref_entries=None): """ Calculates the surface energy of this SlabEntry. Args: ucell_entry (entry): An entry object for the bulk ref_entries (list: [entry]): A list of entries for each type of element to be used as a reservoir for nonstoichiometric systems. The length of this list MUST be n-1 where n is the number of different elements in the bulk entry. The chempot of the element ref_entry that is not in the list will be treated as a variable. Returns (Add (Sympy class)): Surface energy """ # Set up ref_entries = [] if not ref_entries else ref_entries # Check if appropriate ref_entries are present if the slab is non-stoichiometric # TODO: There should be a way to identify which specific species are # non-stoichiometric relative to the others in systems with more than 2 species slab_comp = self.composition.as_dict() ucell_entry_comp = ucell_entry.composition.reduced_composition.as_dict() slab_clean_comp = Composition({el: slab_comp[el] for el in ucell_entry_comp.keys()}) if slab_clean_comp.reduced_composition != ucell_entry.composition.reduced_composition: list_els = [list(entry.composition.as_dict().keys())[0] for entry in ref_entries] if not any([el in list_els for el in ucell_entry.composition.as_dict().keys()]): warnings.warn("Elemental references missing for the non-dopant species.") gamma = (Symbol("E_surf") - Symbol("Ebulk")) / (2 * Symbol("A")) ucell_comp = ucell_entry.composition ucell_reduced_comp = ucell_comp.reduced_composition ref_entries_dict = {str(list(ref.composition.as_dict().keys())[0]): \ ref for ref in ref_entries} ref_entries_dict.update(self.ads_entries_dict) # Calculate Gibbs free energy of the bulk per unit formula gbulk = ucell_entry.energy / \ ucell_comp.get_integer_formula_and_factor()[1] # First we get the contribution to the bulk energy # from each element with an existing ref_entry. bulk_energy, gbulk_eqn = 0, 0 for el, ref in ref_entries_dict.items(): N, delu = self.composition.as_dict()[el], Symbol("delu_" + str(el)) if el in ucell_comp.as_dict().keys(): gbulk_eqn += ucell_reduced_comp[el] * (delu + ref.energy_per_atom) bulk_energy += N * (Symbol("delu_" + el) + ref.energy_per_atom) # Next, we add the contribution to the bulk energy from # the variable element (the element without a ref_entry), # as a function of the other elements for ref_el in ucell_comp.as_dict().keys(): if str(ref_el) not in ref_entries_dict.keys(): break refEperA = (gbulk - gbulk_eqn) / ucell_reduced_comp.as_dict()[ref_el] bulk_energy += self.composition.as_dict()[ref_el] * refEperA se = gamma.subs({Symbol("E_surf"): self.energy, Symbol("Ebulk"): bulk_energy, Symbol("A"): self.surface_area}) return float(se) if type(se).__name__ == "Float" else se
0.00484
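In the stoichiometric case the expression assembled above collapses to gamma = (E_surf - Ebulk) / (2*A), with Ebulk the bulk energy scaled to the slab composition. A minimal sympy sketch of that reduced form with made-up numbers (the energies, area, and their units are assumptions for illustration):

from sympy import Symbol

gamma = (Symbol("E_surf") - Symbol("Ebulk")) / (2 * Symbol("A"))
value = gamma.subs({Symbol("E_surf"): -320.5,
                    Symbol("Ebulk"): -321.9,
                    Symbol("A"): 45.0})
print(float(value))  # ~0.0156, i.e. (-320.5 - (-321.9)) / (2 * 45)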
def _process_noop_timeperiod(self, job_record): """ method is valid for processes having time_grouping != 1. should a job record fall in-between grouped time milestones, its state should be set to STATE_NOOP without any processing """ job_record.state = job.STATE_NOOP self.job_dao.update(job_record) time_grouping = context.process_context[job_record.process_name].time_grouping msg = 'Job {0}@{1} with time_grouping {2} was transferred to STATE_NOOP' \ .format(job_record.process_name, job_record.timeperiod, time_grouping) self._log_message(INFO, job_record.process_name, job_record.timeperiod, msg)
0.008708
def check_model(self, max_paths=1, max_path_length=5): """Check all the statements added to the ModelChecker. Parameters ---------- max_paths : Optional[int] The maximum number of specific paths to return for each Statement to be explained. Default: 1 max_path_length : Optional[int] The maximum length of specific paths to return. Default: 5 Returns ------- list of (Statement, PathResult) Each tuple contains the Statement checked against the model and a PathResult object describing the results of model checking. """ results = [] for stmt in self.statements: result = self.check_statement(stmt, max_paths, max_path_length) results.append((stmt, result)) return results
0.002342
def policy_present(name, rules): ''' Ensure a Vault policy with the given name and rules is present. name The name of the policy rules Rules formatted as in-line HCL .. code-block:: yaml demo-policy: vault.policy_present: - name: foo/bar - rules: | path "secret/top-secret/*" { policy = "deny" } path "secret/not-very-secret/*" { policy = "write" } ''' url = "v1/sys/policy/{0}".format(name) response = __utils__['vault.make_request']('GET', url) try: if response.status_code == 200: return _handle_existing_policy(name, rules, response.json()['rules']) elif response.status_code == 404: return _create_new_policy(name, rules) else: response.raise_for_status() except Exception as e: return { 'name': name, 'changes': {}, 'result': False, 'comment': 'Failed to get policy: {0}'.format(e) }
0.001791