Columns: text (string, lengths 78 to 104k) and score (float64, range 0 to 0.18).
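The rows that follow alternate between a text value (a Python source snippet) and its score. As a minimal sketch of how such a two-column table could be loaded and iterated, assuming it is published as a Hugging Face dataset (the dataset path below is a placeholder, not the actual source of these rows):

# Minimal sketch: iterate a (text, score) dataset with the `datasets` library.
# "user/scored-code-snippets" is a placeholder path, assumed for illustration.
from datasets import load_dataset

ds = load_dataset("user/scored-code-snippets", split="train")
for row in ds:
    snippet = row["text"]   # Python source, 78 to ~104k characters long
    score = row["score"]    # float score in the range [0, 0.18]
    print("{:.6f}  {}".format(score, snippet[:60]))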
def process_runner(quantity=1, queue=None, backend=None):
    '''
    Process queued runners

    quantity
        number of runners to process

    queue
        queue to insert the runner reference into

    backend
        backend to use for the queue

    CLI Example:

    .. code-block:: bash

        salt-run queue.process_runner
        salt-run queue.process_runner 5
    '''
    queue_kwargs = __get_queue_opts(queue=queue, backend=backend)
    data = process_queue(quantity=quantity, is_runner=True, **queue_kwargs)
    for job in data['items']:
        __salt__[job['fun']](*job['args'], **job['kwargs'])
0.001605
def is_pid_healthy(pid):
    '''
    This is a health check that will confirm the PID is running
    and executed by salt.

    If psutil is available:
        * all architectures are checked

    if psutil is not available:
        * Linux/Solaris/etc: archs with `/proc/cmdline` available are checked
        * AIX/Windows: assume PID is healthy and return True
    '''
    if HAS_PSUTIL:
        try:
            proc = psutil.Process(pid)
        except psutil.NoSuchProcess:
            log.warning("PID %s is no longer running.", pid)
            return False
        return any(['salt' in cmd for cmd in proc.cmdline()])

    if salt.utils.platform.is_aix() or salt.utils.platform.is_windows():
        return True

    if not salt.utils.process.os_is_running(pid):
        log.warning("PID %s is no longer running.", pid)
        return False

    cmdline_file = os.path.join('/proc', str(pid), 'cmdline')
    try:
        with salt.utils.files.fopen(cmdline_file, 'rb') as fp_:
            return b'salt' in fp_.read()
    except (OSError, IOError) as err:
        log.error("There was a problem reading proc file: %s", err)
        return False
0.000867
def _do_download(version, download_base, to_dir, download_delay):
    """Download Setuptools."""
    py_desig = 'py{sys.version_info[0]}.{sys.version_info[1]}'.format(sys=sys)
    tp = 'setuptools-{version}-{py_desig}.egg'
    egg = os.path.join(to_dir, tp.format(**locals()))
    if not os.path.exists(egg):
        archive = download_setuptools(version, download_base,
                                      to_dir, download_delay)
        _build_egg(egg, archive, to_dir)
    sys.path.insert(0, egg)

    # Remove previously-imported pkg_resources if present (see
    # https://bitbucket.org/pypa/setuptools/pull-request/7/ for details).
    if 'pkg_resources' in sys.modules:
        _unload_pkg_resources()

    import setuptools
    setuptools.bootstrap_install_from = egg
0.002663
def as_point(row):
    '''Create a Point from a data block row'''
    return Point(row[COLS.X], row[COLS.Y], row[COLS.Z],
                 row[COLS.R], int(row[COLS.TYPE]))
0.005814
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types): """ See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values. """ # Check that the requested standard deviation type is available assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES for stddev_type in stddev_types) # # Set parameters magn = rup.mag epi = dists.repi theta = dists.azimuth # # Convert Mw into Ms if magn < 6.58: mag = (magn - 0.59) / 0.86 else: mag = (magn + 2.42) / 1.28 # # Set coefficients coeff = self.COEFFS[imt] a1ca, a1cb, a1cc, a1cd, a1ce, a2ca, a2cb, a2cc, a2cd, a2ce = \ gc(coeff, mag) # # Get correction coefficients. Here for each site we find the # the geometry of the ellipses ras = [] for epi, theta in zip(dists.repi, dists.azimuth): res = get_ras(epi, theta, mag, coeff) ras.append(res) ras = np.array(ras) rbs = rbf(ras, coeff, mag) # # Compute values of ground motion for the two cases. The value of # 225 is hardcoded under the assumption that the hypocentral depth # corresponds to 15 km (i.e. 15**2) mean1 = (a1ca + a1cb * mag + a1cc * np.log((ras**2+225)**0.5 + a1cd * np.exp(a1ce * mag))) mean2 = (a2ca + a2cb * mag + a2cc * np.log((rbs**2+225)**0.5 + a2cd * np.exp(a2ce * mag))) # # Get distances x = (mean1 * np.sin(np.radians(dists.azimuth)))**2 y = (mean2 * np.cos(np.radians(dists.azimuth)))**2 mean = mean1 * mean2 / np.sqrt(x+y) if imt.name == "PGA": mean = np.exp(mean)/g/100 elif imt.name == "PGV": mean = np.exp(mean) else: raise ValueError('Unsupported IMT') # # Get the standard deviation stddevs = self._compute_std(coeff, stddev_types, len(dists.repi)) # # Return results return np.log(mean), stddevs
0.000873
def inc_nbrs(self, node):
    """ List of nodes connected by incoming edges """
    l = map(self.head, self.inc_edges(node))
    #l.sort()
    return l
0.021858
def copy(self, empty=False): """returns an independent copy of the current object.""" # Create an empty object newobject = self.__new__(self.__class__) if empty: return # And fill it ! for prop in ["_properties","_side_properties", "_derived_properties","_build_properties" ]: if prop not in dir(self): continue try: # Try to deep copy because but some time it does not work (e.g. wcs) newobject.__dict__[prop] = copy.deepcopy(self.__dict__[prop]) except: newobject.__dict__[prop] = copy.copy(self.__dict__[prop]) # This be sure things are correct newobject._update_() # and return it return newobject
0.012865
def check_language_data(self, qid, lang_data, lang, lang_data_type): """ Method to check if certain language data exists as a label, description or aliases :param lang_data: list of string values to check :type lang_data: list :param lang: language code :type lang: str :param lang_data_type: What kind of data is it? 'label', 'description' or 'aliases'? :return: """ all_lang_strings = set(x.strip().lower() for x in self.get_language_data(qid, lang, lang_data_type)) for s in lang_data: if s.strip().lower() not in all_lang_strings: print('fastrun failed at label: {}, string: {}'.format(lang_data_type, s)) return True return False
0.007732
def create_braintree_gateway(cls, braintree_gateway, **kwargs): """Create BraintreeGateway Create a new BraintreeGateway This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_braintree_gateway(braintree_gateway, async=True) >>> result = thread.get() :param async bool :param BraintreeGateway braintree_gateway: Attributes of braintreeGateway to create (required) :return: BraintreeGateway If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._create_braintree_gateway_with_http_info(braintree_gateway, **kwargs) else: (data) = cls._create_braintree_gateway_with_http_info(braintree_gateway, **kwargs) return data
0.006186
def DefaultExtension(schema_obj, form_obj, schemata=None):
    """Create a default field"""

    if schemata is None:
        schemata = ['systemconfig', 'profile', 'client']

    DefaultExtends = {
        'schema': {
            "properties/modules": [
                schema_obj
            ]
        },
        'form': {
            'modules': {
                'items/': form_obj
            }
        }
    }

    output = {}

    for schema in schemata:
        output[schema] = DefaultExtends

    return output
0.001927
def p_bexpr_func(p): """ bexpr : ID bexpr """ args = make_arg_list(make_argument(p[2], p.lineno(2))) p[0] = make_call(p[1], p.lineno(1), args) if p[0] is None: return if p[0].token in ('STRSLICE', 'VAR', 'STRING'): entry = SYMBOL_TABLE.access_call(p[1], p.lineno(1)) entry.accessed = True return # TODO: Check that arrays really needs kind=function to be set # Both array accesses and functions are tagged as functions # functions also has the class_ attribute set to 'function' p[0].entry.set_kind(KIND.function, p.lineno(1)) p[0].entry.accessed = True
0.001585
def transform(self, Z): """Perform dimensionality reduction on X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) New data. Returns ------- X_new : array, shape (n_samples, n_components) Reduced version of X. This will always be a dense array. """ X = Z[:, 'X'] if isinstance(Z, DictRDD) else Z check_rdd(X, (sp.spmatrix, np.ndarray)) mapper = self.broadcast( super(SparkTruncatedSVD, self).transform, Z.context) return Z.transform(mapper, column='X', dtype=np.ndarray)
0.00312
def build_network_settings(**settings): ''' Build the global network script. CLI Example: .. code-block:: bash salt '*' ip.build_network_settings <settings> ''' # Read current configuration and store default values current_network_settings = _parse_rh_config(_RH_NETWORK_FILE) # Build settings opts = _parse_network_settings(settings, current_network_settings) try: template = JINJA.get_template('network.jinja') except jinja2.exceptions.TemplateNotFound: log.error('Could not load template network.jinja') return '' network = template.render(opts) if settings['test']: return _read_temp(network) # Write settings _write_file_network(network, _RH_NETWORK_FILE) return _read_file(_RH_NETWORK_FILE)
0.001239
def update_list_positions_obj(client, positions_obj_id, revision, values):
    '''
    Updates the ordering of lists to have the given value.

    The given ID and revision should match the singleton object defining
    how lists are laid out.

    See https://developer.wunderlist.com/documentation/endpoints/positions for more info

    Return:
    The updated ListPositionsObj-mapped object defining the order of list layout
    '''
    return _update_positions_obj(client, client.api.Endpoints.LIST_POSITIONS,
                                 positions_obj_id, revision, values)
0.009259
def visitInlineShapeAnd(self, ctx: ShExDocParser.InlineShapeAndContext): """ inlineShapeAnd: inlineShapeNot (KW_AND inlineShapeNot)* """ if len(ctx.inlineShapeNot()) > 1: self.expr = ShapeAnd(id=self.label, shapeExprs=[]) for sa in ctx.inlineShapeNot(): sep = ShexShapeExpressionParser(self.context) sep.visit(sa) self._and_collapser(self.expr, sep.expr) else: self.visit(ctx.inlineShapeNot(0))
0.003984
def get_leaderboard(self, unit=None, before=None): """ Get user's rank in the week in descending order, stream from ``https://www.duolingo.com/friendships/leaderboard_activity?unit=week&_=time :param before: Datetime in format '2015-07-06 05:42:24' :param unit: maybe week or month :type before: str :type unit: str :rtype: List """ if unit: url = 'https://www.duolingo.com/friendships/leaderboard_activity?unit={}&_={}' else: raise Exception('Needs unit as argument (week or month)') if before: url = url.format(unit, before) else: raise Exception('Needs str in Datetime format "%Y.%m.%d %H:%M:%S"') self.leader_data = self._make_req(url).json() data = [] for result in iter(self.get_friends()): for value in iter(self.leader_data['ranking']): if result['id'] == int(value): temp = {'points': int(self.leader_data['ranking'][value]), 'unit': unit, 'id': result['id'], 'username': result['username']} data.append(temp) return sorted(data, key=lambda user: user['points'], reverse=True)
0.002264
def get_arguments(self):
    """
    Extracts the specific arguments of this CLI
    """
    ApiCli.get_arguments(self)
    if self.args.plugin_name is not None:
        self.plugin_name = self.args.plugin_name

    self.path = "v1/plugins/{0}".format(self.plugin_name)
0.006734
def make_vcard(name, displayname, email=None, phone=None, fax=None, videophone=None, memo=None, nickname=None, birthday=None, url=None, pobox=None, street=None, city=None, region=None, zipcode=None, country=None, org=None, lat=None, lng=None, source=None, rev=None, title=None): """\ Creates a QR Code which encodes a `vCard <https://en.wikipedia.org/wiki/VCard>`_ version 3.0. Only a subset of available vCard properties is supported. :param str name: The name. If it contains a semicolon, , the first part is treated as lastname and the second part is treated as forename. :param str displayname: Common name. :param str|iterable email: E-mail address. Multiple values are allowed. :param str|iterable phone: Phone number. Multiple values are allowed. :param str|iterable fax: Fax number. Multiple values are allowed. :param str|iterable videophone: Phone number for video calls. Multiple values are allowed. :param str memo: A notice for the contact. :param str nickname: Nickname. :param str|date birthday: Birthday. If a string is provided, it should encode the date as YYYY-MM-DD value. :param str|iterable url: Homepage. Multiple values are allowed. :param str|None pobox: P.O. box (address information). :param str|None street: Street address. :param str|None city: City (address information). :param str|None region: Region (address information). :param str|None zipcode: Zip code (address information). :param str|None country: Country (address information). :param str org: Company / organization name. :param float lat: Latitude. :param float lng: Longitude. :param str source: URL where to obtain the vCard. :param str|date rev: Revision of the vCard / last modification date. :param str|iterable|None title: Job Title. Multiple values are allowed. :rtype: segno.QRCode """ return segno.make_qr(make_vcard_data(name, displayname, email=email, phone=phone, fax=fax, videophone=videophone, memo=memo, nickname=nickname, birthday=birthday, url=url, pobox=pobox, street=street, city=city, region=region, zipcode=zipcode, country=country, org=org, lat=lat, lng=lng, source=source, rev=rev, title=title))
0.000754
def audit_1_4(self):
    """1.4 Ensure access keys are rotated every 90 days or less (Scored)"""
    for row in self.credential_report:
        for access_key in "1", "2":
            if json.loads(row["access_key_{}_active".format(access_key)]):
                last_rotated = row["access_key_{}_last_rotated".format(access_key)]
                if self.parse_date(last_rotated) < datetime.now(tzutc()) - timedelta(days=90):
                    msg = "Active access key {} in account {} last rotated over 90 days ago"
                    raise Exception(msg.format(access_key, row["user"]))
0.008026
def getoutputfiles(self, loadmetadata=True, client=None, requiremetadata=False):
    """Iterates over all output files and their output template. Yields
    (CLAMOutputFile, str:outputtemplate_id) tuples. The last three arguments
    are passed to its constructor."""
    for outputfilename, outputtemplate in self.outputpairs():
        yield CLAMOutputFile(self.projectpath, outputfilename, loadmetadata,
                             client, requiremetadata), outputtemplate
0.015625
def app(self_or_cls, plot, show=False, new_window=False, websocket_origin=None, port=0): """ Creates a bokeh app from a HoloViews object or plot. By default simply attaches the plot to bokeh's curdoc and returns the Document, if show option is supplied creates an Application instance and displays it either in a browser window or inline if notebook extension has been loaded. Using the new_window option the app may be displayed in a new browser tab once the notebook extension has been loaded. A websocket origin is required when launching from an existing tornado server (such as the notebook) and it is not on the default port ('localhost:8888'). """ if not isinstance(self_or_cls, BokehRenderer) or self_or_cls.mode != 'server': renderer = self_or_cls.instance(mode='server') else: renderer = self_or_cls def modify_doc(doc): renderer(plot, doc=doc) handler = FunctionHandler(modify_doc) app = Application(handler) if not show: # If not showing and in notebook context return app return app elif self_or_cls.notebook_context and not new_window: # If in notebook, show=True and no new window requested # display app inline if isinstance(websocket_origin, list): if len(websocket_origin) > 1: raise ValueError('In the notebook only a single websocket origin ' 'may be defined, which must match the URL of the ' 'notebook server.') websocket_origin = websocket_origin[0] opts = dict(notebook_url=websocket_origin) if websocket_origin else {} return bkshow(app, **opts) # If app shown outside notebook or new_window requested # start server and open in new browser tab from tornado.ioloop import IOLoop loop = IOLoop.current() if websocket_origin and not isinstance(websocket_origin, list): websocket_origin = [websocket_origin] opts = dict(allow_websocket_origin=websocket_origin) if websocket_origin else {} opts['io_loop'] = loop server = Server({'/': app}, port=port, **opts) def show_callback(): server.show('/') server.io_loop.add_callback(show_callback) server.start() def sig_exit(*args, **kwargs): loop.add_callback_from_signal(do_stop) def do_stop(*args, **kwargs): loop.stop() signal.signal(signal.SIGINT, sig_exit) try: loop.start() except RuntimeError: pass return server
0.003215
def _get_range(book, range_, sheet): """Return a range as nested dict of openpyxl cells.""" filename = None if isinstance(book, str): filename = book book = opxl.load_workbook(book, data_only=True) elif isinstance(book, opxl.Workbook): pass else: raise TypeError if _is_range_address(range_): sheet_names = [name.upper() for name in book.sheetnames] index = sheet_names.index(sheet.upper()) data = book.worksheets[index][range_] else: data = _get_namedrange(book, range_, sheet) if data is None: raise ValueError( "Named range '%s' not found in %s" % (range_, filename or book) ) return data
0.001355
def creator(entry, config): """Creator function for creating an instance of an Ansible script.""" ansible_playbook = "ansible.playbook.dry.run.see.comment" ansible_inventory = "ansible.inventory.dry.run.see.comment" ansible_playbook_content = render(config.script, model=config.model, env=config.env, variables=config.variables, item=config.item) ansible_inventory_content = render(entry['inventory'], model=config.model, env=config.env, variables=config.variables, item=config.item) if not config.dry_run: ansible_playbook = write_temporary_file(ansible_playbook_content, 'ansible-play-', '.yaml') ansible_playbook_content = '' ansible_inventory = write_temporary_file(ansible_inventory_content, prefix='ansible-inventory-') ansible_inventory_content = '' # rendering the Bash script for running the Ansible playbook template_file = os.path.join(os.path.dirname(__file__), 'templates/ansible.sh.j2') with open(template_file) as handle: template = handle.read() config.script = render(template, debug=config.debug, ansible_playbook_content=ansible_playbook_content, ansible_playbook=ansible_playbook, ansible_inventory_content=ansible_inventory_content, ansible_inventory=ansible_inventory, limit=entry['limit']) return Ansible(config)
0.006663
def getheader(self, name, default=None):
    """Get the header value for a name.

    This is the normal interface: it returns a stripped version of the
    header value for a given header name, or None if it doesn't exist.
    This uses the dictionary version which finds the *last* such header.
    """
    return self.dict.get(name.lower(), default)
0.005319
def generate_json_schema(cls, schema, context=DEFAULT_DICT):
    """Generate a JSON Schema from a Marshmallow schema.

    Args:
        schema (marshmallow.Schema|str): The Marshmallow schema, or the
            Python path to one, to create the JSON schema for.

    Keyword Args:
        file_pointer (file, optional): The path or pointer to the file
            to write this schema to. If not provided, the schema will be
            dumped to ``sys.stdout``.

    Returns:
        dict: The JSON schema in dictionary form.
    """
    schema = cls._get_schema(schema)
    # Generate the JSON Schema
    return cls(context=context).dump(schema).data
0.002813
def is_thin_archieve(self):
    """
    Return the is thin archieve attribute of the BFD file being processed.
    """
    if not self._ptr:
        raise BfdException("BFD not initialized")

    return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.IS_THIN_ARCHIEVE)
0.006557
def bind(self, dst: str, src: Node) -> bool:
    """Allow to alias a node to another name.

    Useful to bind a node to _ as return of Rule::

        R = [
            __scope__:L [item:I #add_item(L, I]* #bind('_', L)
        ]

    It's also the default behaviour of ':>'
    """
    for m in self.rule_nodes.maps:
        for k, v in m.items():
            if k == dst:
                m[k] = src
                return True
    raise Exception('%s not found' % dst)
0.002114
def inject_path(path):
    """
    Imports :func: from a python file at :path: and executes it with
    *args, **kwargs arguments.

    Everytime this function is called the module is reloaded so that you
    can alter your debug code while the application is running.

    The result of the function is returned, otherwise the exception is
    returned (if one is raised)
    """
    try:
        dirname = os.path.dirname(path)
        if dirname not in sys.path:
            exists_in_sys = False
            sys.path.append(dirname)
        else:
            exists_in_sys = True

        module_name = os.path.splitext(os.path.split(path)[1])[0]
        if module_name in sys.modules:
            reload(sys.modules[module_name])
        else:
            __import__(module_name)

        if not exists_in_sys:
            sys.path.remove(dirname)
    except Exception as e:
        return e
0.00454
def parseBranches(self, descendants): """ Parse top level of latex :param list elements: list of source objects :return: list of filtered TreeOfContents objects >>> toc = TOC.fromLatex(r'\section{h1}\subsection{subh1}\section{h2}\ ... \subsection{subh2}') >>> toc.parseTopDepth(toc.descendants) 1 >>> toc.parseBranches(toc.descendants) [h1, h2] >>> len(toc.branches) 2 >>> len(toc.section.branches) 1 """ i, branches = self.parseTopDepth(descendants), [] for descendant in descendants: if self.getHeadingLevel(descendant, self.hierarchy) == i: branches.append({'source': descendant}) if self.getHeadingLevel(descendant, self.hierarchy) > i \ and branches: branches[-1].setdefault('descendants', []).append(descendant) return [TOC(str(descendant), depth=i, hierarchy=self.hierarchy, **branch) for branch in branches]
0.007663
def customize_base_cfg(cfgname, cfgopt_strs, base_cfg, cfgtype, alias_keys=None, valid_keys=None, offset=0, strict=True): """ Args: cfgname (str): config name cfgopt_strs (str): mini-language defining key variations base_cfg (dict): specifies the default cfg to customize cfgtype (?): alias_keys (None): (default = None) valid_keys (None): if base_cfg is not specied, this defines the valid keys (default = None) offset (int): (default = 0) strict (bool): (default = True) Returns: list: cfg_combo - list of config dicts defining customized configs based on cfgopt_strs. customized configs always are given an _cfgindex, _cfgstr, and _cfgname key. CommandLine: python -m utool.util_gridsearch --test-customize_base_cfg:0 Ignore: >>> cfgname = 'default' >>> cfgopt_strs = 'dsize=1000,per_name=[1,2]' Example: >>> # ENABLE_DOCTEST >>> from utool.util_gridsearch import * # NOQA >>> import utool as ut >>> cfgname = 'name' >>> cfgopt_strs = 'b=[1,2]' >>> base_cfg = {} >>> alias_keys = None >>> cfgtype = None >>> offset = 0 >>> valid_keys = None >>> strict = False >>> cfg_combo = customize_base_cfg(cfgname, cfgopt_strs, base_cfg, cfgtype, >>> alias_keys, valid_keys, offset, strict) >>> result = ('cfg_combo = %s' % (ut.repr2(cfg_combo, nl=1),)) >>> print(result) cfg_combo = [ {'_cfgindex': 0, '_cfgname': 'name', '_cfgstr': 'name:b=[1,2]', '_cfgtype': None, 'b': 1}, {'_cfgindex': 1, '_cfgname': 'name', '_cfgstr': 'name:b=[1,2]', '_cfgtype': None, 'b': 2}, ] """ import utool as ut cfg = base_cfg.copy() # Parse config options without expansion cfg_options = noexpand_parse_cfgstrs(cfgopt_strs, alias_keys) # Ensure that nothing bad is being updated if strict: parsed_keys = cfg_options.keys() if valid_keys is not None: ut.assert_all_in(parsed_keys, valid_keys, 'keys specified not in valid set') else: ut.assert_all_in(parsed_keys, cfg.keys(), 'keys specified not in default options') # Finalize configuration dict cfg.update(cfg_options) cfg['_cfgtype'] = cfgtype cfg['_cfgname'] = cfgname # Perform expansion cfg_combo = ut.all_dict_combinations(cfg) #if len(cfg_combo) > 1: for combox, cfg_ in enumerate(cfg_combo, start=offset): cfg_['_cfgindex'] = combox for cfg_ in cfg_combo: if len(cfgopt_strs) > 0: cfg_['_cfgstr'] = cfg_['_cfgname'] + NAMEVARSEP + cfgopt_strs else: cfg_['_cfgstr'] = cfg_['_cfgname'] return cfg_combo
0.000677
def is_same_time(self):
    """Return True if local.mtime == remote.mtime."""
    return (
        self.local
        and self.remote
        and FileEntry._eps_compare(self.local.mtime, self.remote.mtime) == 0
    )
0.0125
def smart_scrubb(df,col_name,error_rate = 0): """ Scrubs from the back of an 'object' column in a DataFrame until the scrub would semantically alter the contents of the column. If only a subset of the elements in the column are scrubbed, then a boolean array indicating which elements have been scrubbed is appended to the dataframe. Returns the string that was scrubbed. df - DataFrame DataFrame to scrub col_name - string Name of column to scrub error_rate - number, default 0 The maximum amount of values this function can ignore while scrubbing, expressed as a fraction of the total amount of rows in the dataframe. """ scrubbed = "" while True: valcounts = df[col_name].str[-len(scrubbed)-1:].value_counts() if not len(valcounts): break if not valcounts[0] >= (1-error_rate) * _utils.rows(df): break scrubbed=valcounts.index[0] if scrubbed == '': return None which = df[col_name].str.endswith(scrubbed) _basics.col_scrubb(df,col_name,which,len(scrubbed),True) if not which.all(): new_col_name = _basics.colname_gen(df,"{}_sb-{}".format(col_name,scrubbed)) df[new_col_name] = which return scrubbed
0.014902
def cookies(self):
    """Container of request cookies
    """
    cookies = SimpleCookie()
    cookie = self.environ.get('HTTP_COOKIE')
    if cookie:
        cookies.load(cookie)
    return cookies
0.008811
def WriteClientActionRequests(self, requests, cursor=None): """Writes messages that should go to the client to the db.""" query = ("INSERT IGNORE INTO client_action_requests " "(client_id, flow_id, request_id, timestamp, request) " "VALUES %s ON DUPLICATE KEY UPDATE " "timestamp=VALUES(timestamp), request=VALUES(request)") now = mysql_utils.RDFDatetimeToTimestamp(rdfvalue.RDFDatetime.Now()) value_templates = [] args = [] for r in requests: args.extend([ db_utils.ClientIDToInt(r.client_id), db_utils.FlowIDToInt(r.flow_id), r.request_id, now, r.SerializeToString() ]) value_templates.append("(%s, %s, %s, FROM_UNIXTIME(%s), %s)") query %= ",".join(value_templates) try: cursor.execute(query, args) except MySQLdb.IntegrityError as e: request_keys = [(r.client_id, r.flow_id, r.request_id) for r in requests] raise db.AtLeastOneUnknownRequestError(request_keys=request_keys, cause=e)
0.006796
def calc_hash(self, hashalg=None): """ Calculate the hash of the file. Will be called automatically from the constructor if the file exists and hashalg is given (and supported), but may also be called manually e.g. to update the hash if the file has changed. """ fd = open(self.filename, "rb") buf = fd.read() fd.close() if hashalg: self.hashalg = hashalg.upper() self.hash = getattr(hashlib, self.hashalg.lower())(buf).hexdigest()
0.010889
def _save(self, file_to_write, overwrite): """Save PDS3Image object as PDS3 file. Parameters ---------- filename: Set filename for the pds image to be saved. Overwrite: Use this keyword to save image with same filename. Usage: image.save('temp.IMG', overwrite=True) """ if overwrite: file_to_write = self.filename elif os.path.isfile(file_to_write): msg = 'File ' + file_to_write + ' already exists !\n' + \ 'Call save() with "overwrite = True" to overwrite the file.' raise IOError(msg) encoder = pvl.encoder.PDSLabelEncoder serial_label = pvl.dumps(self.label, cls=encoder) label_sz = len(serial_label) image_pointer = int(label_sz / self.label['RECORD_BYTES']) + 1 self.label['^IMAGE'] = image_pointer + 1 if self._sample_bytes != self.label['IMAGE']['SAMPLE_BITS'] * 8: self.label['IMAGE']['SAMPLE_BITS'] = self.data.itemsize * 8 sample_type_to_save = self.DTYPES[self._sample_type[0] + self.dtype.kind] self.label['IMAGE']['SAMPLE_TYPE'] = sample_type_to_save if len(self.data.shape) == 3: self.label['IMAGE']['BANDS'] = self.data.shape[0] self.label['IMAGE']['LINES'] = self.data.shape[1] self.label['IMAGE']['LINE_SAMPLES'] = self.data.shape[2] else: self.label['IMAGE']['BANDS'] = 1 self.label['IMAGE']['LINES'] = self.data.shape[0] self.label['IMAGE']['LINE_SAMPLES'] = self.data.shape[1] diff = 0 if len(pvl.dumps(self.label, cls=encoder)) != label_sz: diff = abs(label_sz - len(pvl.dumps(self.label, cls=encoder))) pvl.dump(self.label, file_to_write, cls=encoder) offset = image_pointer * self.label['RECORD_BYTES'] - label_sz stream = open(file_to_write, 'a') for i in range(0, offset+diff): stream.write(" ") if (self._bands > 1 and self._format != 'BAND_SEQUENTIAL'): raise NotImplementedError else: self.data.tofile(stream, format='%' + self.dtype.kind) stream.close()
0.001362
def change_column_if_table_exists(self,
                                  tablename: str,
                                  oldfieldname: str,
                                  newfieldname: str,
                                  newdef: str) -> Optional[int]:
    """Renames a column and alters its definition."""
    if not self.table_exists(tablename):
        return None
    if not self.column_exists(tablename, oldfieldname):
        return None
    sql = "ALTER TABLE {t} CHANGE COLUMN {old} {new} {newdef}".format(
        t=tablename,
        old=oldfieldname,
        new=newfieldname,
        newdef=newdef,
    )
    log.info(sql)
    return self.db_exec_literal(sql)
0.008032
def _pad_former_ports(self, port_handler):
    """Create ports with former port index.

    :param port_handler: Port information to be registered.

    Depending on slot type and card type, it is necessary to register
    LAN ports with former index to VIOM table.
    """
    if not port_handler.need_padding():
        return
    for port_idx in range(1, port_handler.port_idx):
        pad_handler = port_handler.__class__(
            port_handler.slot_type, port_handler.card_type,
            port_handler.slot_idx, port_handler.card_idx, port_idx)
        if not self._find_port(pad_handler):
            self._add_port(pad_handler, pad_handler.create_lan_port())
0.002494
def migrate(self, mapping, index, doc_type): """ Migrate a ES mapping :param mapping: new mapping :param index: index of old mapping :param doc_type: type of old mapping :return: The diff mapping """ old_mapping = self.get_doctype(index, doc_type) #case missing if not old_mapping: self.connection.indices.put_mapping(doc_type=doc_type, mapping=mapping, indices=index) return mapping # we need to calculate the diff mapping_diff = old_mapping.get_diff(mapping) if not mapping_diff: return None from pprint import pprint pprint(mapping_diff.as_dict()) mapping_diff.connection = old_mapping.connection mapping_diff.save()
0.005013
def close(self):
    """Closes associated resources of this request object.  This
    closes all file handles explicitly.  You can also use the request
    object in a with statement which will automatically close it.

    .. versionadded:: 0.9
    """
    files = self.__dict__.get('files')
    for key, value in iter_multi_items(files or ()):
        value.close()
0.005038
def eval_objfn(self):
    """Compute components of objective function as well as total
    contribution to objective function.
    """
    fval = self.obfn_f()
    gval = self.obfn_g(self.obfn_gvar())
    obj = fval + gval
    return (obj, fval, gval)
0.007143
def iraf_phot(f,x,y,zmag=26.5,apin=10,skyin=15,skywidth=10): """Compute the magnitude of the star at location x/y""" import pyfits import re infits=pyfits.open(f,'update') f=re.sub(r'.fits$','',f) ### Get my python routines from pyraf import iraf from pyraf.irafpar import IrafParList ### keep all the parameters locally cached. iraf.set(uparm="./") iraf.set(imtype="fits") ### Load the required IRAF packages iraf.digiphot() iraf.apphot() iraf.daophot() ### temp file name hash. tfile={} iraf.datapars.datamax=60000 iraf.datapars.datamin=-1000 iraf.datapars.airmass='AIRMASS' iraf.datapars.filter='FILTER' iraf.datapars.obstime='TIME-OBS' iraf.datapars.exposure='EXPTIME' iraf.datapars.gain='GAIN' iraf.datapars.ccdread='RDNOISE' iraf.datapars.fwhmpsf=5.0 iraf.centerpars.calgorithm='centroid' iraf.photpars.zmag=zmag iraf.photpars.apertures=apin iraf.fitskypars.annulus=skyin iraf.fitskypars.dannulus=skywidth iraf.daophot.verbose=iraf.no iraf.daophot.verify=iraf.no iraf.daophot.update=iraf.no iraf.psf.interactive=iraf.no iraf.pstselect.interactive=iraf.no iraf.datapars.saveParList() iraf.fitskypars.saveParList() iraf.centerpars.saveParList() iraf.findpars.saveParList() iraf.photpars.saveParList() tfiles = ['coo','mag'] for file in tfiles: extname=f tfile[file]=extname+"."+file if ( os.access(tfile[file],os.F_OK) ): os.unlink(tfile[file]) this_image=f fd = open(tfile['coo'],'w') fd.write('%f %f\n' % ( x, y) ) fd.close() print "Measuring photometry psf star in "+tfile['coo'] iraf.daophot.phot(image=this_image, coords=tfile['coo'], output=tfile['mag']) import string a=iraf.txdump(tfile['mag'],"MAG,XCEN,YCEN",iraf.yes,Stdout=1) (mag,x,y)=string.split(a[0]) inhdu=infits[0].header inhdu.update("PSFMAG",float(mag),comment="PSF Magnitude") inhdu.update("PSF_X",float(x),comment="PSF Magnitude") inhdu.update("PSF_Y",float(y),comment="PSF Magnitude") inhdu.update("ZMAG",zmag,comment="ZMAG of PSF ") ## now measure using a smaller aperture to get aper correction iraf.photpars.apertures=apin*3.0 os.unlink(tfile['mag']) iraf.daophot.phot(image=this_image, coords=tfile['coo'], output=tfile['mag']) a=iraf.txdump(tfile['mag'],"MAG,XCEN,YCEN",iraf.yes,Stdout=1) (magout,x,y)=string.split(a[0]) inhdu.update("APCOR",float(magout)-float(mag),comment="AP_OUT - AP_IN") inhdu.update("AP_IN",apin*3.0,comment="Small aperature") inhdu.update("AP_OUT",apin,comment="Large aperture") # ### append this psf to the output images.... infits.close() ### remove the temp file we used for this computation. #for tf in tfile.keys(): # if os.access(tfile[tf],os.F_OK): # os.unlink(tfile[tf]) return 0
0.0314
def transform_symmop(self, symmop): # type: (Union[SymmOp, MagSymmOp]) -> Union[SymmOp, MagSymmOp] """ Takes a symmetry operation and transforms it. :param symmop: SymmOp or MagSymmOp :return: """ W = symmop.rotation_matrix w = symmop.translation_vector Q = np.linalg.inv(self.P) W_ = np.matmul(np.matmul(Q, W), self.P) I = np.identity(3) w_ = np.matmul(Q, (w + np.matmul(W - I, self.p))) if isinstance(symmop, MagSymmOp): return MagSymmOp.from_rotation_and_translation_and_time_reversal( rotation_matrix=W_, translation_vec=w_, time_reversal=symmop.time_reversal, tol=symmop.tol) elif isinstance(symmop, SymmOp): return SymmOp.from_rotation_and_translation( rotation_matrix=W_, translation_vec=w_, tol=symmop.tol)
0.005562
def _results(r):
    r"""Select from a tuple of (root, funccalls, iterations, flag)"""
    x, funcalls, iterations, flag = r
    return results(x, funcalls, iterations, flag == 0)
0.005618
def get(self, block=False, timeout=None): """Return a message from the queue. Example: >>> queue.get() 'my message' >>> queue.get() 'another message' :param block: whether or not to wait until a msg is available in the queue before returning; ``False`` by default :param timeout: when using :attr:`block`, if no msg is available for :attr:`timeout` in seconds, give up and return ``None`` """ if block: if timeout is None: timeout = 0 msg = self.__redis.blpop(self.key, timeout=timeout) if msg is not None: msg = msg[1] else: msg = self.__redis.lpop(self.key) if msg is not None and self.serializer is not None: msg = self.serializer.loads(msg) return msg
0.00454
def proof_req_attr_referents(proof_req: dict) -> dict: """ Given a proof request with all requested attributes having cred def id restrictions, return its attribute referents by cred def id and attribute. The returned structure can be useful in populating the extra WQL query parameter in the credential search API. :param proof_req: proof request with all requested attribute specifications having cred def id restriction; e.g., :: { 'name": 'proof_req', 'version': '0.0', 'requested_attributes': { '18_greenLevel_uuid': { 'restrictions': [ { 'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:18:tag' } ], 'name': 'greenLevel', 'non_revoked': { 'to': 1532367957, 'from': 1532367957 } }, '18_legalName_uuid': { 'restrictions': [ { 'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:18:tag' } ], 'name': 'legalName', 'non_revoked': { 'to': 1532367957, 'from': 1532367957 } }, '15_id_uuid': { # this specification will not show up in response: no cred def id restriction :-( 'name': 'id', 'non_revoked': { 'to': 1532367957, 'from': 1532367957 } } } 'requested_predicates': { } } :return: nested dict mapping cred def id to name to proof request referent; e.g., :: { 'WgWxqztrNooG92RXvxSTWv:3:CL:18:tag': { 'legalName': '18_legalName_uuid' 'greenLevel': '18_greenLevel_uuid' } } """ rv = {} for uuid, spec in proof_req['requested_attributes'].items(): cd_id = None for restriction in spec.get('restrictions', []): cd_id = restriction.get('cred_def_id', None) if cd_id: break if not cd_id: continue if cd_id not in rv: # cd_id of None is not OK rv[cd_id] = {} rv[cd_id][spec['name']] = uuid return rv
0.002341
def compile_mpim_users(self):
    """
    Gets the info for the members within the multiple person instant message

    Returns a list of all dms with the members that have ever existed

    :rtype: [object]
    {
        name: <name>
        users: [<user_id>]
    }
    """
    mpim_data = self._read_from_json("mpims.json")
    mpims = [c for c in mpim_data.values()]
    all_mpim_users = []

    for mpim in mpims:
        mpim_members = {"name": mpim["name"],
                        "users": [self.__USER_DATA[m] for m in mpim["members"]]}
        all_mpim_users.append(mpim_members)

    return all_mpim_users
0.006079
def encrypt_ascii(self, data, key=None, v=None, extra_bytes=0, digest="hex"):
    """
    Encrypt data and return as ascii string. Hexadecimal digest as default.

    Available digests:
        hex: Hexadecimal
        base64: Base 64
        hqx: hexbin4
    """
    digests = {"hex": binascii.b2a_hex,
               "base64": binascii.b2a_base64,
               "hqx": binascii.b2a_hqx}
    digestor = digests.get(digest)
    if not digestor:
        raise TripleSecError(u"Digestor not supported.")

    binary_result = self.encrypt(data, key, v, extra_bytes)
    result = digestor(binary_result)
    return result
0.004304
def count(self):
    """ Total number of array cells """
    return functools.reduce(lambda x, y: x * y, (x.count for x in self.bounds))
0.019737
def attach_attrs_table(key, value, fmt, meta):
    """Extracts attributes and attaches them to element."""

    # We can't use attach_attrs_factory() because Table is a block-level element

    if key in ['Table']:
        assert len(value) == 5
        caption = value[0]  # caption, align, x, head, body

        # Set n to the index where the attributes start
        n = 0
        while n < len(caption) and not \
                (caption[n]['t'] == 'Str' and caption[n]['c'].startswith('{')):
            n += 1

        try:
            attrs = extract_attrs(caption, n)
            value.insert(0, attrs)
        except (ValueError, IndexError):
            pass
0.004532
def _compile_sequence(self, schema, seq_type): """Validate a sequence type. This is a sequence of valid values or validators tried in order. >>> validator = Schema(['one', 'two', int]) >>> validator(['one']) ['one'] >>> with raises(er.MultipleInvalid, 'expected int @ data[0]'): ... validator([3.5]) >>> validator([1]) [1] """ _compiled = [self._compile(s) for s in schema] seq_type_name = seq_type.__name__ def validate_sequence(path, data): if not isinstance(data, seq_type): raise er.SequenceTypeInvalid('expected a %s' % seq_type_name, path) # Empty seq schema, allow any data. if not schema: if data: raise er.MultipleInvalid([ er.ValueInvalid('not a valid value', [value]) for value in data ]) return data out = [] invalid = None errors = [] index_path = UNDEFINED for i, value in enumerate(data): index_path = path + [i] invalid = None for validate in _compiled: try: cval = validate(index_path, value) if cval is not Remove: # do not include Remove values out.append(cval) break except er.Invalid as e: if len(e.path) > len(index_path): raise invalid = e else: errors.append(invalid) if errors: raise er.MultipleInvalid(errors) if _isnamedtuple(data): return type(data)(*out) else: return type(data)(out) return validate_sequence
0.00205
def cosi_pdf(z, k=1):
    """Equation (11) of Morton & Winn (2014)
    """
    return 2*k/(np.pi*np.sinh(k)) * quad(cosi_integrand, z, 1, args=(k, z))[0]
0.040541
def to_dict(self):
    """ Convert the Paginator into a dict """
    return {
        'current': self.current,
        'first': self.first,
        'last': self.last,
        'next': self.more,
        'prev': self.prev,
    }
0.007722
def instance( dt, tz=UTC # type: _datetime.datetime # type: Union[str, _Timezone, None] ): # type: (...) -> DateTime """ Create a DateTime instance from a datetime one. """ if not isinstance(dt, _datetime.datetime): raise ValueError("instance() only accepts datetime objects.") if isinstance(dt, DateTime): return dt tz = dt.tzinfo or tz # Checking for pytz/tzinfo if isinstance(tz, _datetime.tzinfo) and not isinstance(tz, _Timezone): # pytz if hasattr(tz, "localize") and tz.zone: tz = tz.zone else: # We have no sure way to figure out # the timezone name, we fallback # on a fixed offset tz = tz.utcoffset(dt).total_seconds() / 3600 return datetime( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond, tz=tz )
0.00224
def _climlab_to_rrtm(field): '''Prepare field with proper dimension order. RRTM code expects arrays with (ncol, nlay) and with pressure decreasing from surface at element 0 climlab grid dimensions are any of: - (num_lev,) --> (1, num_lev) - (num_lat, num_lev) --> (num_lat, num_lev) - (num_lat, num_lon, num_lev) --> (num_lat*num_lon, num_lev) But lat-lon grids not yet supported here! Case single column ''' # Make this work just with 1D (KM,) arrays # (KM,) --> (1, nlay) try: # Flip along the last axis to reverse the pressure order field = field[..., ::-1] except: if np.isscalar(field): return field else: raise ValueError('field must be array_like or scalar.') shape = field.shape if len(shape)==1: # (num_lev) # Need to append an extra dimension for singleton horizontal ncol return field[np.newaxis, ...] elif len(shape)==2: # (num_lat, num_lev) return field elif len(shape) > 2: raise ValueError('lat-lon grids not yet supported here.')
0.004405
def map_tuples(func, obj): """ Returns the mapped coordinates from a Geometry after applying the provided function to each coordinate. :param func: Function to apply to tuples :type func: function :param obj: A geometry or feature to extract the coordinates from. :type obj: Point, LineString, MultiPoint, MultiLineString, Polygon, MultiPolygon :return: The result of applying the function to each dimension in the array. :rtype: list :raises ValueError: if the provided object is not GeoJSON. """ if obj['type'] == 'Point': coordinates = tuple(func(obj['coordinates'])) elif obj['type'] in ['LineString', 'MultiPoint']: coordinates = [tuple(func(c)) for c in obj['coordinates']] elif obj['type'] in ['MultiLineString', 'Polygon']: coordinates = [[ tuple(func(c)) for c in curve] for curve in obj['coordinates']] elif obj['type'] == 'MultiPolygon': coordinates = [[[ tuple(func(c)) for c in curve] for curve in part] for part in obj['coordinates']] elif obj['type'] in ['Feature', 'FeatureCollection', 'GeometryCollection']: return map_geometries(lambda g: map_tuples(func, g), obj) else: raise ValueError("Invalid geometry object %s" % repr(obj)) return {'type': obj['type'], 'coordinates': coordinates}
0.000717
def convert_tree(self, tree): """Return (tree_id, tree) or None (if the tree has no edges). """ nodeById = {} root_node = None node_list = _index_list_of_values(tree, 'node') for node in node_list: nodeById[node['@id']] = node r = node.get('@root') # _LOG.debug(' node {} @root={}'.format(node['@id'], r)) if r in [True, 'true']: # @TEMP accepting true or "true" assert root_node is None root_node = node assert root_node is not None edgeBySourceId = {} edge_list = _get_index_list_of_values(tree, 'edge') for edge in edge_list: sourceId = edge['@source'] eid = edge['@id'] del edge['@id'] byso = edgeBySourceId.setdefault(sourceId, {}) byso[eid] = edge # If all that succeeds, add the new object to the dict, creating a fat structure tree['nodeById'] = nodeById tree['edgeBySourceId'] = edgeBySourceId tree['^ot:rootNodeId'] = root_node['@id'] # Make the struct leaner tid = tree['@id'] if self.remove_old_structs: del tree['@id'] del tree['node'] try: del tree['edge'] except: # Tree Tr75035 in http://treebase.org/treebase-web/search/study/summary.html?id=14763 # is empty. in NeXML that shows up as a tree with a node but no edges. # See https://github.com/OpenTreeOfLife/opentree/issues/641 # TODO: returning None seems safest, but could cull trees with just metadata. # but creating a fake tree for metadata is ugly. So, I'm fine with not # supporting this. _LOG.warn('Tree with ID "{}" is being dropped because it has no edges'.format(tid)) assert not edge_list return None for node in node_list: if '^ot:isLeaf' in node: del node['^ot:isLeaf'] del node['@id'] return tid, tree
0.004172
def _send_batch(self): """ Send the waiting messages, if there are any, and we can... This is called by our LoopingCall every send_every_t interval, and from send_messages everytime we have enough messages to send. This is also called from py:method:`send_messages` via py:method:`_check_send_batch` if there are enough messages/bytes to require a send. Note, the send will be delayed (triggered by completion or failure of previous) if we are currently trying to complete the last batch send. """ # We can be triggered by the LoopingCall, and have nothing to send... # Or, we've got SendRequest(s) to send, but are still processing the # previous batch... if (not self._batch_reqs) or self._batch_send_d: return # Save a local copy, and clear the global list & metrics requests, self._batch_reqs = self._batch_reqs, [] self._waitingByteCount = 0 self._waitingMsgCount = 0 # Iterate over them, fetching the partition for each message batch d_list = [] for req in requests: # For each request, we get the topic & key and use that to lookup # the next partition on which we should produce d_list.append(self._next_partition(req.topic, req.key)) d = self._batch_send_d = Deferred() # Since DeferredList doesn't propagate cancel() calls to deferreds it # might be waiting on for a result, we need to use this structure, # rather than just using the DeferredList directly d.addCallback(lambda r: DeferredList(d_list, consumeErrors=True)) d.addCallback(self._send_requests, requests) # Once we finish fully processing the current batch, clear the # _batch_send_d and check if any more requests piled up when we # were busy. d.addBoth(self._complete_batch_send) d.addBoth(self._check_send_batch) # Fire off the callback to start processing... d.callback(None)
0.000964
def _candidate_log_files():
    """Return possible locations for the .h2oconfig file, one at a time."""
    # Search for .h2oconfig in the current directory and all parent directories
    relpath = ".h2oconfig"
    prevpath = None
    while True:
        abspath = os.path.abspath(relpath)
        if abspath == prevpath:
            break
        prevpath = abspath
        relpath = "../" + relpath
        yield abspath
    # Also check if .h2oconfig exists in the user's directory
    yield os.path.expanduser("~/.h2oconfig")
0.00708
def write_message(self, status=messages.INFO, message=None):
    """
    Writes a message to django's messaging framework and
    returns the written message.

    :param status: The message status level. Defaults to \
        messages.INFO.
    :param message: The message to write. If not given, \
        defaults to appending 'saved' to the unicode representation \
        of `self.object`.
    """
    if not message:
        message = u"%s saved" % self.object
    messages.add_message(self.request, status, message)
    return message
0.005119
def rdp_rec(M, epsilon, dist=pldist): """ Simplifies a given array of points. Recursive version. :param M: an array :type M: numpy array :param epsilon: epsilon in the rdp algorithm :type epsilon: float :param dist: distance function :type dist: function with signature ``f(point, start, end)`` -- see :func:`rdp.pldist` """ dmax = 0.0 index = -1 for i in xrange(1, M.shape[0]): d = dist(M[i], M[0], M[-1]) if d > dmax: index = i dmax = d if dmax > epsilon: r1 = rdp_rec(M[:index + 1], epsilon, dist) r2 = rdp_rec(M[index:], epsilon, dist) return np.vstack((r1[:-1], r2)) else: return np.vstack((M[0], M[-1]))
0.002677
def contents(self):
    r"""Returns all contents in this expression. Optionally includes
    whitespace if set when node was created.

    >>> expr1 = TexExpr('textbf', ('\n', 'hi'))
    >>> list(expr1.contents)
    ['hi']
    >>> expr2 = TexExpr('textbf', ('\n', 'hi'), preserve_whitespace=True)
    >>> list(expr2.contents)
    ['\n', 'hi']
    """
    for content in self.all:
        is_whitespace = isinstance(content, str) and content.isspace()
        if not is_whitespace or self.preserve_whitespace:
            yield content
0.003407
def _send_command_wrapper(self, cmd):
    """
    Send command to the remote device with a caching feature to avoid sending
    the same command twice based on the SSH_MAPPER_BASE dict cmd key.

    Parameters
    ----------
    cmd : str
        The command to send to the remote device after checking cache.

    Returns
    -------
    response : str
        The response from the remote device.
    """
    cached_results = self._results_cache.get(cmd)
    if not cached_results:
        response = self._send_command(cmd)
        self._results_cache[cmd] = response
        return response
    else:
        return cached_results
0.004225
def register_plugin(self):
    """Register plugin in Spyder's main window"""
    self.focus_changed.connect(self.main.plugin_focus_changed)
    self.main.add_dockwidget(self)
    self.main.console.set_help(self)

    self.internal_shell = self.main.console.shell
    self.console = self.main.console
0.006061
def Write3DData(self, data3d, path, filetype='auto', metadata=None, progress_callback=None, sfin=True): """ :param data3d: input ndarray data :param path: output path, to specify slice number advanced formatting options (like {:06d}) can be used Check function filename_format() for more details. :param metadata: {'voxelsize_mm': [1, 1, 1]} :param filetype: dcm, vtk, rawiv, image_stack :param progress_callback: fuction for progressbar f.e. callback(value, minimum, maximum) :param sfin: Use separate file for segmentation if necessary """ self.orig_path = path path = os.path.expanduser(path) try: d3d = data3d.pop('data3d') metadata = data3d data3d = d3d except: pass if progress_callback is not None: self.progress_callback = progress_callback if filetype == 'auto': startpath, ext = os.path.splitext(path) filetype = ext[1:].lower() segmentation = None if metadata is not None and "segmentation" in metadata.keys(): segmentation_path = self.__get_segmentation_path(path) segmentation = metadata["segmentation"] mtd = {'voxelsize_mm': [1, 1, 1]} if metadata is not None: mtd.update(metadata) metadata=mtd if path.find('{') >= 0: filetype = 'image_stack' # one_file_per_slice = True # if one_file_per_slice: # self._one_file_per_slice(self, data3d, path, filetype, metadata) # else: # self._all_in_one_file(self, data3d, path, filetype, metadata) # # def _all_in_one_file(self, data3d, path, filetype, metadata): if filetype in ['vtk', 'tiff', 'tif', "mhd", "nii", "raw"]: self._write_with_sitk(path, data3d, metadata) if sfin and segmentation is not None: self._write_with_sitk(segmentation_path, segmentation, metadata) elif filetype in ['dcm', 'DCM', 'dicom']: self._write_with_sitk(path, data3d, metadata) self._fix_sitk_bug(path, metadata) if sfin and segmentation is not None: self._write_with_sitk(segmentation_path, segmentation, metadata) self._fix_sitk_bug(segmentation_path, metadata) elif filetype in ['rawiv']: rawN.write(path, data3d, metadata) elif filetype in ['image_stack']: self.save_image_stack(data3d, path, metadata) elif filetype in ['hdf5', 'hdf', 'h5', 'he5']: self.save_hdf5(data3d, path, metadata) elif filetype in ['pkl', 'pklz']: from . import misc metadata['data3d'] = data3d datap = metadata misc.obj_to_file(datap, path) else: logger.error('Unknown filetype: "' + filetype + '"') raise ValueError("Unknown filetype: '" + filetype + "'")
0.003312
def autodoc_process_docstring(app, what, name, obj, options, lines): """Handler for the event emitted when autodoc processes a docstring. See http://sphinx-doc.org/ext/autodoc.html#event-autodoc-process-docstring. The TL;DR is that we can modify ``lines`` in-place to influence the output. """ # check that only symbols that can be directly imported from ``callee`` # package are being documented _, symbol = name.rsplit('.', 1) if symbol not in callee.__all__: raise SphinxError( "autodoc'd '%s' is not a part of the public API!" % name) # for classes exempt from automatic merging of class & __init__ docs, # pretend their __init__ methods have no docstring at all, # so that nothing will be appended to the class's docstring if what == 'class' and name in autoclass_content_exceptions: # amusingly, when autodoc reads the constructor's docstring # for appending it to class docstring, it will report ``what`` # as 'class' (again!); hence we must check what it actually read ctor_docstring_lines = prepare_docstring(obj.__init__.__doc__) if lines == ctor_docstring_lines: lines[:] = []
0.000827
def vectorize(*args, **kwargs):
    """
    Allows using `@vectorize` as well as `@vectorize()`.
    """
    if args and callable(args[0]):
        # Guessing the argument is the method.
        return _vectorize(args[0])
    else:
        # Wait for the second call.
        return lambda m: _vectorize(m, *args, **kwargs)
0.003096
def construct_arguments(self, args, kwargs, options, bound=False): """ Construct args list and kwargs dictionary for this signature. They are created such that: - the original explicit call arguments (args, kwargs) are preserved - missing arguments are filled in by name using options (if possible) - default arguments are overridden by options - TypeError is thrown if: * kwargs contains one or more unexpected keyword arguments * conflicting values for a parameter in both args and kwargs * there is an unfilled parameter at the end of this process """ expected_args = self._get_expected_args(bound) self._assert_no_unexpected_args(expected_args, args) self._assert_no_unexpected_kwargs(expected_args, kwargs) self._assert_no_duplicate_args(expected_args, args, kwargs) args, kwargs = self._fill_in_options(args, kwargs, options, bound) self._assert_no_missing_args(args, kwargs, bound) return args, kwargs
0.00186
def title(value, failure_string='N/A'):
    """
    Converts a string into titlecase.

    Lifted from Django.
    """
    try:
        value = value.lower()
        t = re.sub("([a-z])'([A-Z])", lambda m: m.group(0).lower(), value.title())
        result = re.sub("\d([A-Z])", lambda m: m.group(0).lower(), t)
        if not result:
            return failure_string
        return result
    except:
        return failure_string
0.011494
def extend(self, outpipe):
    """Similar to __pipe__, except that outpipe must be a Stream, in which
    case self.iterator will be modified in-place by calling outpipe.__call__
    on it.
    """
    self.iterator = outpipe.__call__(self.iterator)
    return self
0.031621
def wait_for_ssh(ip):
    """Wait for SSH to be available at given IP address."""
    for _ in range(12):
        with safe_socket() as s:
            try:
                s.connect((ip, 22))
                return True
            except socket.timeout:
                pass
        time.sleep(10)
    return False
0.022642
def match_similar(base, items):
    """Get the most similar matching item from a list of items.

    @param base: base item to locate best match
    @param items: list of items for comparison

    @return: most similar matching item or None
    """
    finds = list(find_similar(base, items))
    if finds:
        return max(finds, key=base.similarity)  # TODO: make O(n)
    return None
0.002571
def get_unique_parameters(self):
    '''
    Returns a list of the unique parameters (no duplicates).
    '''
    # start with parameters in the `_parameters` dictionary
    parameters = self._parameters.values()

    # add parameters defined with the class
    for name in dir(self):
        item = getattr(self, name)
        if isinstance(item, Parameter):
            if item.name not in self._parameters:
                parameters.append(item)

    return parameters
0.003861
def normalizeRotationAngle(value):
    """
    Normalizes an angle.

    * Value must be a :ref:`type-int-float`.
    * Value must be between -360 and 360.
    * If the value is negative, it is normalized by adding it to 360
    * Returned value is a ``float`` between 0 and 360.
    """
    if not isinstance(value, (int, float)):
        raise TypeError("Angle must be instances of "
                        ":ref:`type-int-float`, not %s."
                        % type(value).__name__)
    if abs(value) > 360:
        raise ValueError("Angle must be between -360 and 360.")
    if value < 0:
        value = value + 360
    return float(value)
0.001541
def append_dims_and_file_extension(fname, data_df):
    """Append dimensions and file extension to output filename.

    N.B. Dimensions are cols x rows.

    Args:
        fname (string): output filename
        data_df (pandas df)

    Returns:
        out_fname (string): output filename with matrix dims and .gct appended
    """
    # If there's no .gct at the end of output file name, add the dims and .gct
    if not fname.endswith(".gct"):
        out_fname = '{0}_n{1}x{2}.gct'.format(fname, data_df.shape[1], data_df.shape[0])
        return out_fname

    # Otherwise, only add the dims
    else:
        basename = os.path.splitext(fname)[0]
        out_fname = '{0}_n{1}x{2}.gct'.format(basename, data_df.shape[1], data_df.shape[0])
        return out_fname
0.003906
def time_zone_by_name(self, hostname):
    """
    Returns time zone in tzdata format (e.g. America/New_York or Europe/Paris)

    :arg hostname: Hostname (e.g. example.com)
    """
    addr = self._gethostbyname(hostname)
    return self.time_zone_by_addr(addr)
0.01049
def _construct_target(self, function):
    """Constructs the Target property for the CloudWatch Events Rule.

    :returns: the Target property
    :rtype: dict
    """
    target = {
        'Arn': function.get_runtime_attr("arn"),
        'Id': self.logical_id + 'LambdaTarget'
    }

    if self.Input is not None:
        target['Input'] = self.Input

    if self.InputPath is not None:
        target['InputPath'] = self.InputPath
    return target
0.003906
def generate_apiary_doc(task_router):
    """Generate apiary documentation.

    Create a Apiary generator and add application packages to it.

    :param task_router: task router, injected
    :type task_router: TaskRouter
    :return: apiary generator
    :rtype: ApiaryDoc
    """
    generator = ApiaryDoc()
    for m in task_router.get_task_packages() + get_method_packages():
        m = importlib.import_module(m)
        generator.docmodule(m)
    return generator
0.00211
def get_login(self, use_session=True):
    """
    Get an active login session

    @param use_session: Use a saved session file if available
    @type use_session: bool
    """
    # Should we try and return an existing login session?
    if use_session and self._login.check():
        self.cookiejar = self._login.cookiejar
        return self.cookiejar

    # Prompt the user for their login credentials
    username = click.prompt('IPS Username')
    password = click.prompt('IPS Password', hide_input=True)
    remember = click.confirm('Save login session?', True)

    # Process the login
    cookiejar = self._login.process(username, password, remember)
    if remember:
        self.cookiejar = cookiejar

    return cookiejar
0.002454
def readlines(self, timeout=1):
        """Read all lines that are available. Abort after the timeout when
        no more data arrives.
        """
        lines = []
        while True:
            line = self.readline(timeout=timeout)
            if line:
                lines.append(line)
            if not line or line[-1:] != '\n':
                break
        return lines
0.005208
def main(): """Run main, catching any exceptions and cleaning up the temp directories.""" cleanup_and_exit = sys.exit # Function to do cleanup and exit before the import. exit_code = 0 # Imports are done here inside the try block so some ugly (and useless) # traceback info is avoided on user's ^C (KeyboardInterrupt, EOFError on Windows). try: from . import external_program_calls as ex # Creates tmp dir as side effect. cleanup_and_exit = ex.cleanup_and_exit # Switch to the real one, deletes temp dir. from . import main_pdfCropMargins # Imports external_program_calls, don't do first. main_pdfCropMargins.main_crop() # Run the actual program. except (KeyboardInterrupt, EOFError): # Windows raises EOFError on ^C. print("\nGot a KeyboardInterrupt, cleaning up and exiting...\n", file=sys.stderr) except SystemExit: exit_code = sys.exc_info()[1] print() except: # Echo back the unexpected error so the user can see it. print("\nCaught an unexpected exception in the pdfCropMargins program.", file=sys.stderr) print("Unexpected error: ", sys.exc_info()[0], file=sys.stderr) print("Error message : ", sys.exc_info()[1], file=sys.stderr) print() exit_code = 1 import traceback max_traceback_length = 30 traceback.print_tb(sys.exc_info()[2], limit=max_traceback_length) # raise # Re-raise the error. finally: # Some people like to hit multiple ^C chars, which kills cleanup. # Call cleanup again each time. for i in range(30): # Give up after 30 tries. try: cleanup_and_exit(exit_code) except (KeyboardInterrupt, EOFError): continue
0.009076
def _Tension(T):
    """Equation for the surface tension

    Parameters
    ----------
    T : float
        Temperature, [K]

    Returns
    -------
    σ : float
        Surface tension, [N/m]

    Notes
    -----
    Raises :class:`NotImplementedError` if the input is outside the validity
    range:

        * 248.15 ≤ T ≤ 647
        * Extrapolates down to -25ºC in the supercooled liquid metastable state

    Examples
    --------
    >>> _Tension(300)
    0.0716859625
    >>> _Tension(450)
    0.0428914992

    References
    ----------
    IAPWS, Revised Release on Surface Tension of Ordinary Water Substance
    June 2014, http://www.iapws.org/relguide/Surf-H2O.html
    """
    if 248.15 <= T <= Tc:
        Tr = T/Tc
        return 1e-3*(235.8*(1-Tr)**1.256*(1-0.625*(1-Tr)))
    else:
        raise NotImplementedError("Incoming temperature out of bounds")
0.001206
def load_dict():
    """
    Load the pickle file of Java messages that can be ignored into the dict
    structure g_ok_java_messages.

    :return: none
    """
    global g_load_java_message_filename
    global g_ok_java_messages

    if os.path.isfile(g_load_java_message_filename):
        # only load the dict from file if the file exists
        with open(g_load_java_message_filename, 'rb') as ofile:
            g_ok_java_messages = pickle.load(ofile)
    else:   # no previous java messages to be excluded are found
        g_ok_java_messages["general"] = []
0.007299
def write(models, out=None, base=None, logger=logging):
    '''
    models - one or more input Versa models from which output is generated.
    out - writable output stream (required).
    base - optional base IRI used to identify the document header resource.
    logger - logger instance (defaults to the logging module).
    '''
    assert out is not None #Output stream required
    if not isinstance(models, list): models = [models]
    for m in models:
        for link in m.match():
            s, p, o = link[:3]
            #Skip docheader statements
            if s == (base or '') + '@docheader': continue
            if p in RESOURCE_MAPPING: p = RESOURCE_MAPPING[p]
            if o in RESOURCE_MAPPING: o = RESOURCE_MAPPING[o]
            if p == VERSA_TYPE_REL: p = RDF_TYPE_REL
            print(strconv(s), strconv(p), strconv(o), '.', file=out)
    return
0.014225
def _bnd(self, xloc, left, right, cache): """ Distribution bounds. Example: >>> print(chaospy.Uniform().range([-2, 0, 2, 4])) [[0. 0. 0. 0.] [1. 1. 1. 1.]] >>> print(chaospy.Add(chaospy.Uniform(), 2).range([-2, 0, 2, 4])) [[2. 2. 2. 2.] [3. 3. 3. 3.]] >>> print(chaospy.Add(2, chaospy.Uniform()).range([-2, 0, 2, 4])) [[2. 2. 2. 2.] [3. 3. 3. 3.]] >>> print(chaospy.Add(1, 1).range([-2, 0, 2, 4])) [[2. 2. 2. 2.] [2. 2. 2. 2.]] """ left = evaluation.get_forward_cache(left, cache) right = evaluation.get_forward_cache(right, cache) if isinstance(left, Dist): if isinstance(right, Dist): raise evaluation.DependencyError( "under-defined distribution {} or {}".format(left, right)) elif not isinstance(right, Dist): return left+right, left+right else: left, right = right, left right = numpy.asfarray(right) if len(right.shape) == 3: xloc_ = (xloc.T-right[0].T).T lower, upper = evaluation.evaluate_bound(left, xloc_, cache=cache.copy()) lower0, upper0 = (lower.T+right[0].T).T, (upper.T+right[0].T).T xloc_ = (xloc.T-right[1].T).T lower, upper = evaluation.evaluate_bound(left, xloc_, cache=cache) lower1, upper1 = (lower.T+right[1].T).T, (upper.T+right[1].T).T lower = numpy.min([lower0, lower1], 0) upper = numpy.max([upper0, upper1], 0) else: xloc_ = (xloc.T-right.T).T lower, upper = evaluation.evaluate_bound(left, xloc_, cache=cache.copy()) lower, upper = (lower.T+right.T).T, (upper.T+right.T).T assert lower.shape == xloc.shape assert upper.shape == xloc.shape return lower, upper
0.002035
def isPeBounded(self): """ Determines if the current L{PE} instance is bounded, i.e. has a C{BOUND_IMPORT_DIRECTORY}. @rtype: bool @return: Returns C{True} if the current L{PE} instance is bounded. Otherwise, returns C{False}. """ boundImportsDir = self.ntHeaders.optionalHeader.dataDirectory[consts.BOUND_IMPORT_DIRECTORY] if boundImportsDir.rva.value and boundImportsDir.size.value: return True return False
0.012146
def matchRecordAttrs(mapobj, attrs):
    """attempt to match given attributes against a single map object's attributes"""
    for k,v in iteritems(attrs):
        try:
            val = getattr(mapobj, k)
        except AttributeError:   # k isn't an attr of record
            if bool(v): return False    # k doesn't exist in mapobj but a truthy value was required: no match
            else: continue      # otherwise ignore attributes that aren't defined for the given map record
        if val != v: return False    # any mismatched criterion means no match
    return True    # every requested criterion matched
0.024518
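Worked examples traced from the branches above; Record is a stand-in object, and iteritems is assumed to come from six or a Python 2 dict in the original module.

class Record(object):
    def __init__(self, name, layer):
        self.name = name
        self.layer = layer

rec = Record("roads", 2)
# matchRecordAttrs(rec, {"name": "roads", "layer": 2})  -> True   (all criteria match)
# matchRecordAttrs(rec, {"name": "rivers"})             -> False  (value mismatch)
# matchRecordAttrs(rec, {"missing": None})              -> True   (absent attr, falsy requirement)
# matchRecordAttrs(rec, {"missing": "required"})        -> False  (absent attr, truthy requirement)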
def infer_module_name(filename, fspath): """Convert a python filename to a module relative to pythonpath.""" filename, _ = os.path.splitext(filename) for f in fspath: short_name = f.relative_path(filename) if short_name: # The module name for __init__.py files is the directory. if short_name.endswith(os.path.sep + "__init__"): short_name = short_name[:short_name.rfind(os.path.sep)] return short_name.replace(os.path.sep, '.') # We have not found filename relative to anywhere in pythonpath. return ''
0.001692
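A sketch of the mapping infer_module_name produces; the Root helper below only mimics the relative_path() behaviour the fspath entries are assumed to provide, and the POSIX-style paths are for brevity.

import os

class Root(object):
    def __init__(self, base):
        self.base = base.rstrip(os.path.sep) + os.path.sep
    def relative_path(self, filename):
        # return the path relative to this root, or None if filename is outside it
        return filename[len(self.base):] if filename.startswith(self.base) else None

fspath = [Root("/src")]
# infer_module_name("/src/pkg/mod.py", fspath)       -> "pkg.mod"
# infer_module_name("/src/pkg/__init__.py", fspath)  -> "pkg"
# infer_module_name("/elsewhere/mod.py", fspath)     -> ""   (not under any root)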
def update_constants(nmrstar2cfg="", nmrstar3cfg="", resonance_classes_cfg="", spectrum_descriptions_cfg=""):
    """Update constant variables.

    :param nmrstar2cfg: path to a JSON file overriding NMR-STAR 2 constants.
    :param nmrstar3cfg: path to a JSON file overriding NMR-STAR 3 constants.
    :param resonance_classes_cfg: path to a JSON file overriding resonance classes.
    :param spectrum_descriptions_cfg: path to a JSON file overriding spectrum descriptions.
    :return: None
    :rtype: :py:obj:`None`
    """
    nmrstar_constants = {}
    resonance_classes = {}
    spectrum_descriptions = {}

    this_directory = os.path.dirname(__file__)
    nmrstar2_config_filepath = os.path.join(this_directory, "conf/constants_nmrstar2.json")
    nmrstar3_config_filepath = os.path.join(this_directory, "conf/constants_nmrstar3.json")
    resonance_classes_config_filepath = os.path.join(this_directory, "conf/resonance_classes.json")
    spectrum_descriptions_config_filepath = os.path.join(this_directory, "conf/spectrum_descriptions.json")

    with open(nmrstar2_config_filepath, "r") as nmrstar2config, open(nmrstar3_config_filepath, "r") as nmrstar3config:
        nmrstar_constants["2"] = json.load(nmrstar2config)
        nmrstar_constants["3"] = json.load(nmrstar3config)

    with open(resonance_classes_config_filepath, "r") as config:
        resonance_classes.update(json.load(config))

    with open(spectrum_descriptions_config_filepath, "r") as config:
        spectrum_descriptions.update(json.load(config))

    if nmrstar2cfg:
        with open(nmrstar2cfg, "r") as nmrstar2config:
            nmrstar_constants["2"].update(json.load(nmrstar2config))

    if nmrstar3cfg:
        with open(nmrstar3cfg, "r") as nmrstar3config:
            nmrstar_constants["3"].update(json.load(nmrstar3config))

    if resonance_classes_cfg:
        with open(resonance_classes_cfg, "r") as config:
            resonance_classes.update(json.load(config))

    if spectrum_descriptions_cfg:
        with open(spectrum_descriptions_cfg, "r") as config:
            spectrum_descriptions.update(json.load(config))

    NMRSTAR_CONSTANTS.update(nmrstar_constants)
    RESONANCE_CLASSES.update(resonance_classes)
    SPECTRUM_DESCRIPTIONS.update(spectrum_descriptions)
0.003612
def get_driver(self, namespace, parsed_args, **kwargs):
        """Get the mutually-exclusive plugin for a plugin namespace.
        """
        option, dest = self._namespace_to_option(namespace)
        dest_prefix = '{0}_'.format(dest)
        driver_name = getattr(parsed_args, dest, 'default')
        driver_extension = self.driver_managers[namespace][driver_name]
        return driver_extension.plugin.from_args(
            parsed_args, dest_prefix, **kwargs)
0.004329
def execute(path, argv=None, environ=None, command_class=ExternalSearchCommand):
    """
    :param path:
    :type path: basestring
    :param argv:
    :type argv: list, tuple, or None
    :param environ:
    :type environ: dict
    :param command_class: External search command class to instantiate and execute.
    :type command_class: type
    :return:
    :rtype: None

    """
    assert issubclass(command_class, ExternalSearchCommand)
    command_class(path, argv, environ).execute()
0.006098
def restore_state(self): """Restore the state of the dock to the last known state.""" if self.state is None: return for myCount in range(0, self.exposure_layer_combo.count()): item_text = self.exposure_layer_combo.itemText(myCount) if item_text == self.state['exposure']: self.exposure_layer_combo.setCurrentIndex(myCount) break for myCount in range(0, self.hazard_layer_combo.count()): item_text = self.hazard_layer_combo.itemText(myCount) if item_text == self.state['hazard']: self.hazard_layer_combo.setCurrentIndex(myCount) break for myCount in range(0, self.aggregation_layer_combo.count()): item_text = self.aggregation_layer_combo.itemText(myCount) if item_text == self.state['aggregation']: self.aggregation_layer_combo.setCurrentIndex(myCount) break self.results_webview.setHtml(self.state['report'])
0.001929
def block_worker(self, worker_id, reason): """ Block a worker from working on my tasks. """ params = {'WorkerId': worker_id, 'Reason': reason} return self._process_request('BlockWorker', params)
0.008511
def wait_for_confirmation(provider, transaction_id):
    'Sleep in a loop until we see a confirmation of the transaction.'
    while True:
        transaction = provider.gettransaction(transaction_id)
        if transaction["confirmations"] > 0:
            break
        time.sleep(10)
0.003484
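A polling sketch against a stub provider; only the gettransaction() return shape is taken from the loop above, everything else is illustrative.

class StubProvider(object):
    def __init__(self):
        self.calls = 0
    def gettransaction(self, txid):
        # report zero confirmations for the first two polls, then one
        self.calls += 1
        return {"confirmations": 1 if self.calls > 2 else 0}

# wait_for_confirmation(StubProvider(), "abc123")
# returns after two 10-second sleeps in this toy setup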
def reversed(self): ''' Returns a copy of this arc, with the direction flipped. >>> Arc((0, 0), 1, 0, 360, True).reversed() Arc([0.000, 0.000], 1.000, 360.000, 0.000, False, degrees=360.000) >>> Arc((0, 0), 1, 175, -175, True).reversed() Arc([0.000, 0.000], 1.000, -175.000, 175.000, False, degrees=10.000) >>> Arc((0, 0), 1, 0, 370, True).reversed() Arc([0.000, 0.000], 1.000, 370.000, 0.000, False, degrees=360.000) ''' return Arc(self.center, self.radius, self.to_angle, self.from_angle, not self.direction)
0.011345
def get_info(node_id, info_id): """Get a specific info. Both the node and info id must be specified in the url. """ exp = experiment(session) # check the node exists node = models.Node.query.get(node_id) if node is None: return error_response(error_type="/info, node does not exist") # execute the experiment method: info = models.Info.query.get(info_id) if info is None: return error_response(error_type="/info GET, info does not exist", participant=node.participant) elif (info.origin_id != node.id and info.id not in [t.info_id for t in node.transmissions(direction="incoming", status="received")]): return error_response(error_type="/info GET, forbidden info", status=403, participant=node.participant) try: # ping the experiment exp.info_get_request(node=node, infos=info) session.commit() except: return error_response(error_type="/info GET server error", status=403, participant=node.participant) # return the data return success_response(field="info", data=info.__json__(), request_type="info get")
0.001422
def _piped_bamprep_region(data, region, out_file, tmp_dir): """Do work of preparing BAM input file on the selected region. """ if _need_prep(data): prep_params = _get_prep_params(data) _piped_bamprep_region_gatk(data, region, prep_params, out_file, tmp_dir) else: raise ValueError("No realignment specified")
0.005747
def _set_remote_span(self, v, load=False): """ Setter method for remote_span, mapped from YANG variable /interface_vlan/interface/vlan/remote_span (empty) If this variable is read-only (config: false) in the source YANG file, then _set_remote_span is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_remote_span() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="remote-span", rest_name="rspan-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure the vlan as rspan vlan', u'alt-name': u'rspan-vlan', u'callpoint': u'interface_vlan'}}, namespace='urn:brocade.com:mgmt:brocade-span', defining_module='brocade-span', yang_type='empty', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """remote_span must be of a type compatible with empty""", 'defined-type': "empty", 'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="remote-span", rest_name="rspan-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure the vlan as rspan vlan', u'alt-name': u'rspan-vlan', u'callpoint': u'interface_vlan'}}, namespace='urn:brocade.com:mgmt:brocade-span', defining_module='brocade-span', yang_type='empty', is_config=True)""", }) self.__remote_span = t if hasattr(self, '_set'): self._set()
0.005988
def coordinates(self): """ Get or set the internal coordinate system. Available coordinate systems are: - ``'jacobi'`` (default) - ``'democraticheliocentric'`` - ``'whds'`` """ i = self._coordinates for name, _i in COORDINATES.items(): if i==_i: return name return i
0.007979
def _prepare_fabric_fw_internal(self, tenant_id, fw_dict, is_fw_virt, result): """Internal routine to prepare the fabric. This creates an entry in FW DB and runs the SM. """ if not self.auto_nwk_create: LOG.info("Auto network creation disabled") return False try: tenant_name = fw_dict.get('tenant_name') fw_id = fw_dict.get('fw_id') fw_name = fw_dict.get('fw_name') # TODO(padkrish) More than 1 FW per tenant not supported. if tenant_id in self.service_attr and ( result == fw_const.RESULT_FW_CREATE_DONE): LOG.error("Fabric already prepared for tenant %(tenant)s," " %(name)s", {'tenant': tenant_id, 'name': tenant_name}) return True if tenant_id not in self.service_attr: self.create_serv_obj(tenant_id) self.service_attr[tenant_id].create_fw_db(fw_id, fw_name, tenant_id) ret = self.run_create_sm(tenant_id, fw_dict, is_fw_virt) if ret: LOG.info("SM create returned True for Tenant Name " "%(tenant)s FW %(fw)s", {'tenant': tenant_name, 'fw': fw_name}) self.service_attr[tenant_id].set_fabric_create(True) else: LOG.error("SM create returned False for Tenant Name " "%(tenant)s FW %(fw)s", {'tenant': tenant_name, 'fw': fw_name}) except Exception as exc: LOG.error("Exception raised in create fabric int %s", str(exc)) return False return ret
0.001623