Columns: text (string, lengths 78 to 104k) · score (float64, range 0 to 0.18)
def runEventLoop(argv=None, unexpectedErrorAlert=None, installInterrupt=None, pdb=None, main=NSApplicationMain): """Run the event loop, ask the user if we should continue if an exception is caught. Use this function instead of NSApplicationMain(). """ if argv is None: argv = sys.argv if pdb is None: pdb = 'USE_PDB' in os.environ if pdb: from PyObjCTools import Debugging Debugging.installVerboseExceptionHandler() # bring it to the front, starting from terminal # often won't activator = PyObjCAppHelperApplicationActivator_wrap.alloc().init() NSNotificationCenter.defaultCenter().addObserver_selector_name_object_( activator, 'activateNow:', NSApplicationDidFinishLaunchingNotification, None, ) else: Debugging = None if installInterrupt is None and pdb: installInterrupt = True if unexpectedErrorAlert is None: unexpectedErrorAlert = unexpectedErrorAlertPdb runLoop = NSRunLoop.currentRunLoop() stopper = PyObjCAppHelperRunLoopStopper_wrap.alloc().init() PyObjCAppHelperRunLoopStopper_wrap.addRunLoopStopper_toRunLoop_(stopper, runLoop) firstRun = NSApp() is None try: while stopper.shouldRun(): try: if firstRun: firstRun = False if installInterrupt: installMachInterrupt() main(argv) else: NSApp().run() except RAISETHESE: traceback.print_exc() break except: exctype, e, tb = sys.exc_info() objc_exception = False if isinstance(e, objc.error): NSLog("%@", str(e)) elif not unexpectedErrorAlert(): NSLog("%@", "An exception has occured:") traceback.print_exc() sys.exit(0) else: NSLog("%@", "An exception has occured:") traceback.print_exc() else: break finally: if Debugging is not None: Debugging.removeExceptionHandler() PyObjCAppHelperRunLoopStopper_wrap.removeRunLoopStopperFromRunLoop_(runLoop)
0.002096
def from_config(cls, filename):
    """
    Create a TelegramObserver from a given configuration file.

    The file can be in any format supported by Sacred
    (.json, .pickle, [.yaml]).
    It has to specify a ``token`` and a ``chat_id`` and can optionally set
    ``silent_completion``, ``completed_text``, ``interrupted_text``, and
    ``failed_text``.
    """
    import telegram
    d = load_config_file(filename)
    request = cls.get_proxy_request(d) if 'proxy_url' in d else None

    if 'token' in d and 'chat_id' in d:
        bot = telegram.Bot(d['token'], request=request)
        obs = cls(bot, **d)
    else:
        raise ValueError("Telegram configuration file must contain "
                         "entries for 'token' and 'chat_id'!")

    for k in ['completed_text', 'interrupted_text', 'failed_text']:
        if k in d:
            setattr(obs, k, d[k])
    return obs
0.002051
def session(self, create=True):
    """ Used to create the default session """
    if hasattr(self.local, 'session'):
        return self.local.session
    else:
        if create:
            s = Session(self.name)
            self.local.session = s
            return s
0.006329
def _get_result(self) -> float:
    """Return current measurement result in lx."""
    try:
        data = self._bus.read_word_data(self._i2c_add, self._mode)
        self._ok = True
    except OSError as exc:
        self.log_error("Bad reading in bus: %s", exc)
        self._ok = False
        return -1

    count = data >> 8 | (data & 0xff) << 8
    mode2coeff = 2 if self._high_res else 1
    ratio = 1 / (1.2 * (self._mtreg / 69.0) * mode2coeff)
    return ratio * count
0.00381
def validate_and_get_warnings(self):
    """
    Validates/checks a given GTFS feed with respect to a number of different issues.
    The set of warnings that are checked for can be found in gtfs_validator.ALL_WARNINGS.

    Returns
    -------
    warnings: WarningsContainer
    """
    self.warnings_container.clear()
    self._validate_stops_with_same_stop_time()
    self._validate_speeds_and_trip_times()
    self._validate_stop_spacings()
    self._validate_stop_sequence()
    self._validate_misplaced_stops()
    return self.warnings_container
0.006525
def parse_data(self, logfile):
    """Parse data from data stream and replace object lines.

    :param logfile: [required] Log file data stream.
    :type logfile: str
    """
    for line in logfile:
        stripped_line = line.strip()
        parsed_line = Line(stripped_line)

        if parsed_line.valid:
            self._valid_lines.append(parsed_line)
        else:
            self._invalid_lines.append(stripped_line)

    self.total_lines = len(self._valid_lines) + len(self._invalid_lines)
0.00363
def variables(self):
    '''Generator which returns all of the statements in all of the variables tables'''
    for table in self.tables:
        if isinstance(table, VariableTable):
            # FIXME: settings have statements, variables have rows WTF? :-(
            for statement in table.rows:
                if statement[0] != "":
                    yield statement
0.007463
def check_initializers(initializers, keys):
    """Checks the given initializers.

    This checks that `initializers` is a dictionary that only contains keys in
    `keys`, and furthermore the entries in `initializers` are functions or
    further dictionaries (the latter used, for example, in passing initializers
    to modules inside modules) that must satisfy the same constraints.

    Args:
        initializers: Dictionary of initializers (allowing nested dictionaries)
            or None.
        keys: Iterable of valid keys for `initializers`.

    Returns:
        Copy of checked dictionary of initializers. If `initializers=None`,
        an empty dictionary will be returned.

    Raises:
        KeyError: If an initializer is provided for a key not in `keys`.
        TypeError: If a provided initializer is not a callable function, or
            `initializers` is not a Mapping.
    """
    if initializers is None:
        return {}

    _assert_is_dictlike(initializers, valid_keys=keys)

    keys = set(keys)

    if not set(initializers) <= keys:
        extra_keys = set(initializers) - keys
        raise KeyError(
            "Invalid initializer keys {}, initializers can only "
            "be provided for {}".format(
                ", ".join("'{}'".format(key) for key in extra_keys),
                ", ".join("'{}'".format(key) for key in keys)))

    _check_nested_callables(initializers, "Initializer")

    return dict(initializers)
0.006545
def p_chr(p):
    """ string : CHR arg_list
    """
    if len(p[2]) < 1:
        syntax_error(p.lineno(1), "CHR$ function need at less 1 parameter")
        p[0] = None
        return

    for i in range(len(p[2])):  # Convert every argument to 8bit unsigned
        p[2][i].value = make_typecast(TYPE.ubyte, p[2][i].value, p.lineno(1))

    p[0] = make_builtin(p.lineno(1), 'CHR', p[2], type_=TYPE.string)
0.002457
def application(self, func):
    """Parse the function application subgrammar.

    Function application can, conceptually, be thought of as a mixfix
    operator, similar to the way array subscripting works. However, it is
    not clear at this point whether we want to allow it to work as such,
    because doing so would permit queries to, at runtime, select methods
    out of an arbitrary object and then call them.

    While there is a function whitelist and preventing this sort of thing
    in the syntax isn't a security feature, it still seems like the syntax
    should make it clear what the intended use of application is.

    If we later decide to extend DottySQL to allow function application
    over an arbitrary LHS expression then that syntax would be a strict
    superset of the current syntax and backwards compatible.
    """
    start = self.tokens.matched.start
    if self.tokens.accept(common_grammar.rparen):
        # That was easy.
        return ast.Apply(func, start=start, end=self.tokens.matched.end,
                         source=self.original)

    arguments = [self.expression()]
    while self.tokens.accept(common_grammar.comma):
        arguments.append(self.expression())

    self.tokens.expect(common_grammar.rparen)
    return ast.Apply(func, *arguments, start=start,
                     end=self.tokens.matched.end,
                     source=self.original)
0.001349
def load_config(self, config_path=None):
    """
    Load application configuration from a file and merge it with the default
    configuration.

    If the ``FEDORA_MESSAGING_CONF`` environment variable is set to a
    filesystem path, the configuration will be loaded from that location.
    Otherwise, the path defaults to ``/etc/fedora-messaging/config.toml``.
    """
    self.loaded = True
    config = copy.deepcopy(DEFAULTS)

    if config_path is None:
        if "FEDORA_MESSAGING_CONF" in os.environ:
            config_path = os.environ["FEDORA_MESSAGING_CONF"]
        else:
            config_path = "/etc/fedora-messaging/config.toml"

    if os.path.exists(config_path):
        _log.info("Loading configuration from {}".format(config_path))
        with open(config_path) as fd:
            try:
                file_config = toml.load(fd)
                for key in file_config:
                    config[key.lower()] = file_config[key]
            except toml.TomlDecodeError as e:
                msg = "Failed to parse {}: error at line {}, column {}: {}".format(
                    config_path, e.lineno, e.colno, e.msg
                )
                raise exceptions.ConfigurationException(msg)
    else:
        _log.info("The configuration file, {}, does not exist.".format(config_path))

    self.update(config)
    self._validate()
    return self
0.003327
def edit_config_input_with_inactive(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    edit_config = ET.Element("edit_config")
    config = edit_config
    input = ET.SubElement(edit_config, "input")
    with_inactive = ET.SubElement(input, "with-inactive",
                                  xmlns="http://tail-f.com/ns/netconf/inactive/1.0")

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
0.006438
def delete_activity(self, activity_id=None):
    """Deletes the Activity identified by the given Id.

    arg:    activityId (osid.id.Id): the Id of the Activity to delete
    raise:  NotFound - an Activity was not found identified by the given Id
    raise:  NullArgument - activityId is null
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    compliance: mandatory - This method must be implemented.
    """
    if activity_id is None:
        raise NullArgument()
    if not isinstance(activity_id, Id):
        raise InvalidArgument('argument type is not an osid Id')
    url_path = construct_url('activities',
                             bank_id=self._catalog_idstr,
                             act_id=activity_id)
    result = self._delete_request(url_path)
    return objects.Activity(result)
0.00207
def register_workflow(self, name, workflow):
    """Register a workflow to be shown in the workflows list."""
    assert name not in self.workflows
    self.workflows[name] = workflow
0.010152
def click_window_multiple(self, window, button, repeat=2, delay=100000):
    """
    Send one or more clicks for a specific mouse button at the
    current mouse location.

    :param window:
        The window you want to send the event to or CURRENTWINDOW
    :param button:
        The mouse button. Generally, 1 is left, 2 is middle, 3 is right,
        4 is wheel up, 5 is wheel down.
    :param repeat: number of repetitions (default: 2)
    :param delay: delay between clicks, in microseconds (default: 100k)
    """
    _libxdo.xdo_click_window_multiple(
        self._xdo, window, button, repeat, delay)
0.003003
def child_set(self, child, **kwargs):
    """Set child properties on the given child to key/value pairs."""
    for name, value in kwargs.items():
        name = name.replace('_', '-')
        self.child_set_property(child, name, value)
0.007843
def load_sparql(self, sparql_endpoint, verbose=False, hide_base_schemas=True,
                hide_implicit_types=True, hide_implicit_preds=True, credentials=None):
    """
    Set up a SPARQLStore backend as a virtual ontospy graph

    Note: we're using a 'SPARQLUpdateStore' backend instead of 'SPARQLStore'
    cause otherwise authentication fails
    (https://github.com/RDFLib/rdflib/issues/755)
    @TODO this error seems to be fixed in upcoming rdflib versions
    https://github.com/RDFLib/rdflib/pull/744
    """
    try:
        # graph = rdflib.Graph('SPARQLStore')
        # graph = rdflib.ConjunctiveGraph('SPARQLStore')
        graph = rdflib.ConjunctiveGraph('SPARQLUpdateStore')

        if credentials and type(credentials) == tuple:
            # https://github.com/RDFLib/rdflib/issues/343
            graph.store.setCredentials(credentials[0], credentials[1])
            # graph.store.setHTTPAuth('BASIC')
            # graph.store.setHTTPAuth('DIGEST')

        graph.open(sparql_endpoint)
        self.rdflib_graph = graph
        self.sparql_endpoint = sparql_endpoint
        self.sources = [sparql_endpoint]
        self.sparqlHelper = SparqlHelper(self.rdflib_graph, self.sparql_endpoint)
        self.namespaces = sorted(self.rdflib_graph.namespaces())
    except:
        printDebug("Error trying to connect to Endpoint.")
        raise
0.007218
def deserialize(cls, raw_bytes):
    """
    Deserializes the given raw bytes into an instance.

    Since this is a subclass of ``Part`` but a top-level one (i.e. no
    other subclass of ``Part`` would have a ``Response`` as a part) this
    merely has to parse the raw bytes and discard the resulting offset.
    """
    instance, _ = cls.parse(raw_bytes, offset=0)
    return instance
0.004751
def user_getmedia(userids=None, **kwargs): ''' .. versionadded:: 2016.3.0 Retrieve media according to the given parameters .. note:: This function accepts all standard usermedia.get properties: keyword argument names differ depending on your zabbix version, see here__. .. __: https://www.zabbix.com/documentation/3.2/manual/api/reference/usermedia/get :param userids: return only media that are used by the given users :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring) :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring) :return: List of retrieved media, False on failure. CLI Example: .. code-block:: bash salt '*' zabbix.user_getmedia ''' conn_args = _login(**kwargs) ret = {} try: if conn_args: method = 'usermedia.get' if userids: params = {"userids": userids} else: params = {} params = _params_extend(params, **kwargs) ret = _query(method, params, conn_args['url'], conn_args['auth']) return ret['result'] else: raise KeyError except KeyError: return ret
0.003465
def hurst(X): """ Compute the Hurst exponent of X. If the output H=0.5,the behavior of the time-series is similar to random walk. If H<0.5, the time-series cover less "distance" than a random walk, vice verse. Parameters ---------- X list a time series Returns ------- H float Hurst exponent Notes -------- Author of this function is Xin Liu Examples -------- >>> import pyeeg >>> from numpy.random import randn >>> a = randn(4096) >>> pyeeg.hurst(a) 0.5057444 """ X = numpy.array(X) N = X.size T = numpy.arange(1, N + 1) Y = numpy.cumsum(X) Ave_T = Y / T S_T = numpy.zeros(N) R_T = numpy.zeros(N) for i in range(N): S_T[i] = numpy.std(X[:i + 1]) X_T = Y - T * Ave_T[i] R_T[i] = numpy.ptp(X_T[:i + 1]) R_S = R_T / S_T R_S = numpy.log(R_S)[1:] n = numpy.log(T)[1:] A = numpy.column_stack((n, numpy.ones(n.size))) [m, c] = numpy.linalg.lstsq(A, R_S)[0] H = m return H
0.00093
def auto_unit(self, number, low_precision=False, min_symbol='K' ): """Make a nice human-readable string out of number. Number of decimal places increases as quantity approaches 1. CASE: 613421788 RESULT: 585M low_precision: 585M CASE: 5307033647 RESULT: 4.94G low_precision: 4.9G CASE: 44968414685 RESULT: 41.9G low_precision: 41.9G CASE: 838471403472 RESULT: 781G low_precision: 781G CASE: 9683209690677 RESULT: 8.81T low_precision: 8.8T CASE: 1073741824 RESULT: 1024M low_precision: 1024M CASE: 1181116006 RESULT: 1.10G low_precision: 1.1G :low_precision: returns less decimal places potentially (default is False) sacrificing precision for more readability. :min_symbol: Do not approache if number < min_symbol (default is K) """ symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y') if min_symbol in symbols: symbols = symbols[symbols.index(min_symbol):] prefix = { 'Y': 1208925819614629174706176, 'Z': 1180591620717411303424, 'E': 1152921504606846976, 'P': 1125899906842624, 'T': 1099511627776, 'G': 1073741824, 'M': 1048576, 'K': 1024 } for symbol in reversed(symbols): value = float(number) / prefix[symbol] if value > 1: decimal_precision = 0 if value < 10: decimal_precision = 2 elif value < 100: decimal_precision = 1 if low_precision: if symbol in 'MK': decimal_precision = 0 else: decimal_precision = min(1, decimal_precision) elif symbol in 'K': decimal_precision = 0 return '{:.{decimal}f}{symbol}'.format( value, decimal=decimal_precision, symbol=symbol) return '{!s}'.format(number)
0.002691
def show_context_menu(self, item, mouse_pos=None): "Open a popup menu with options regarding the selected object" if item: d = self.tree.GetItemData(item) if d: obj = d.GetData() if obj: # highligh and store the selected object: self.highlight(obj.wx_obj) self.obj = obj # make the context menu menu = wx.Menu() id_del, id_dup, id_raise, id_lower = [wx.NewId() for i in range(4)] menu.Append(id_del, "Delete") menu.Append(id_dup, "Duplicate") menu.Append(id_raise, "Bring to Front") menu.Append(id_lower, "Send to Back") # make submenu! sm = wx.Menu() for ctrl in sorted(obj._meta.valid_children, key=lambda c: registry.ALL.index(c._meta.name)): new_id = wx.NewId() sm.Append(new_id, ctrl._meta.name) self.Bind(wx.EVT_MENU, lambda evt, ctrl=ctrl: self.add_child(ctrl, mouse_pos), id=new_id) menu.AppendMenu(wx.NewId(), "Add child", sm) self.Bind(wx.EVT_MENU, self.delete, id=id_del) self.Bind(wx.EVT_MENU, self.duplicate, id=id_dup) self.Bind(wx.EVT_MENU, self.bring_to_front, id=id_raise) self.Bind(wx.EVT_MENU, self.send_to_back, id=id_lower) self.PopupMenu(menu) menu.Destroy() self.load_object(self.root_obj)
0.005157
def get_accessibility_type_metadata(self):
    """Gets the metadata for an accessibility type.

    return: (osid.Metadata) - metadata for the accessibility types
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for osid.logging.LogEntryForm.get_priority_metadata
    metadata = dict(self._mdata['accessibility_type'])
    metadata.update({'existing_type_values': self._my_map['accessibilityTypeId']})
    return Metadata(**metadata)
0.007678
def validate_args(func: Method, *args: Any, **kwargs: Any) -> Method:
    """
    Check if the request's arguments match a function's signature.

    Raises TypeError exception if arguments cannot be passed to a function.

    Args:
        func: The function to check.
        args: Positional arguments.
        kwargs: Keyword arguments.

    Raises:
        TypeError: If the arguments cannot be passed to the function.
    """
    signature(func).bind(*args, **kwargs)
    return func
0.002045
def set_property_batch(self, remote_path, option):
    """Sets metadata properties of a remote resource on the WebDAV server in batch.
    More information can be found at http://webdav.org/specs/rfc4918.html#METHOD_PROPPATCH

    :param remote_path: the path to remote resource.
    :param option: the property attributes as a list of dictionaries with the following keys:
                   `namespace`: (optional) the namespace for the XML property which will be set,
                   `name`: the name of the property which will be set,
                   `value`: (optional) the value of the property which will be set. Defaults to empty string.
    """
    urn = Urn(remote_path)
    if not self.check(urn.path()):
        raise RemoteResourceNotFound(urn.path())

    data = WebDavXmlUtils.create_set_property_batch_request_content(option)
    self.execute_request(action='set_property', path=urn.quote(), data=data)
0.008307
def init(i): # pragma: no cover """ Input: {} Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 } """ global cfg, work, initialized, paths_repos, type_long, string_io if initialized: return {'return':0} # Split version cfg['version']=__version__.split('.') # Default URL. FIXME: should be formed from wfe_host and wfe_port when they are known. # cfg['wfe_url_prefix'] = 'http://%s:%s/web?' % (cfg['default_host'], cfg['default_port']) # Check long/int types try: x=long except Exception as e: type_long=int else: type_long=long # Import StringIO if sys.version_info[0]>2: import io string_io=io.StringIO else: from StringIO import StringIO string_io=StringIO # Check where are repos (to keep compatibility with past CK < V1.5) p='' import inspect pxx=os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) px=os.path.dirname(pxx) py=os.path.join(pxx, cfg['subdir_default_repo']) if os.path.isdir(py): p=py if p=='': from distutils.sysconfig import get_python_lib px=get_python_lib() py=os.path.join(px, cfg['kernel_dir'], cfg['subdir_default_repo']) if os.path.isdir(py): p=py if p=='': import site for px in site.getsitepackages(): py=os.path.join(px, cfg['kernel_dir'],cfg['subdir_default_repo']) if os.path.isdir(py): p=py break # Check CK_ROOT environment variable s=os.environ.get(cfg['env_key_root'],'').strip() if s!='': work['env_root']=os.path.realpath(s) for px in cfg['kernel_dirs']: py=os.path.join(work['env_root'], px, cfg['subdir_default_repo']) if os.path.isdir(py): p=py break elif px!='': work['env_root']=px if p=='': return {'return':1, 'error':'Internal CK error (can\'t find default repo) - please report to authors'} # Check default repo work['dir_default_repo']=p work['dir_default_repo_path']=os.path.join(work['dir_default_repo'], cfg['module_repo_name'], cfg['repo_name_default']) work['dir_default_kernel']=os.path.join(work['dir_default_repo'], cfg['subdir_kernel']) work['dir_default_cfg']=os.path.join(work['dir_default_kernel'], cfg['subdir_kernel_default'], cfg['subdir_ck_ext'], cfg['file_meta']) work['dir_work_repo']=work['dir_default_repo'] work['dir_work_repo_path']=work['dir_default_repo_path'] work['dir_work_kernel']=work['dir_default_kernel'] work['dir_work_cfg']=work['dir_default_cfg'] work['repo_name_work']=cfg['repo_name_default'] work['repo_uid_work']=cfg['repo_uid_default'] # Check external repos rps=os.environ.get(cfg['env_key_repos'],'').strip() if rps=='': # Get home user directory from os.path import expanduser home = expanduser("~") # In the original version, if path to repos was not defined, I was using CK path, # however, when installed as root, it will fail # rps=os.path.join(work['env_root'],cfg['subdir_default_repos']) # hence I changed to <user home dir>/CK rps=os.path.join(home, cfg['user_home_dir_ext']) if not os.path.isdir(rps): os.makedirs(rps) work['dir_repos']=rps # Check CK_LOCAL_REPO environment variable - if doesn't exist, create in user space s=os.environ.get(cfg['env_key_local_repo'],'').strip() if s=='': # Set up local default repository s=os.path.join(rps, cfg['repo_name_local']) if not os.path.isdir(s): os.makedirs(s) # Create description rq=save_json_to_file({'json_file':os.path.join(s,cfg['repo_file']), 'dict':{'data_alias':cfg['repo_name_local'], 'data_uoa':cfg['repo_name_local'], 'data_name':cfg['repo_name_local'], 'data_uid':cfg['repo_uid_local']}, 'sort_keys':'yes'}) if rq['return']>0: return rq if s!='': work['local_kernel_uoa']=cfg['subdir_kernel_default'] 
x=os.environ.get(cfg['env_key_local_kernel_uoa'],'').strip() if x!='': work['local_kernel_uoa']=x work['dir_local_repo']=os.path.realpath(s) work['dir_local_repo_path']=os.path.join(work['dir_local_repo'], cfg['module_repo_name'], cfg['repo_name_local']) work['dir_local_kernel']=os.path.join(work['dir_local_repo'], cfg['subdir_kernel']) work['dir_local_cfg']=os.path.join(work['dir_local_kernel'], work['local_kernel_uoa'], cfg['subdir_ck_ext'], cfg['file_meta']) # Update work repo! work['dir_work_repo']=work['dir_local_repo'] work['dir_work_repo_path']=work['dir_local_repo_path'] work['dir_work_kernel']=work['dir_local_kernel'] work['dir_work_cfg']=work['dir_local_cfg'] work['repo_name_work']=cfg['repo_name_local'] work['repo_uid_work']=cfg['repo_uid_local'] paths_repos.append({'path':work['dir_local_repo'], 'repo_uoa':cfg['repo_name_local'], 'repo_uid':cfg['repo_uid_local'], 'repo_alias':cfg['repo_name_local']}) paths_repos.append({'path':work['dir_default_repo'], 'repo_uoa':cfg['repo_name_default'], 'repo_uid':cfg['repo_uid_default'], 'repo_alias':cfg['repo_name_default']}) # Prepare repo cache work['dir_cache_repo_uoa']=os.path.join(work['dir_work_repo'],cfg['file_cache_repo_uoa']) work['dir_cache_repo_info']=os.path.join(work['dir_work_repo'],cfg['file_cache_repo_info']) # Check if first time and then copy local cache files (with remote-ck) if not os.path.isfile(work['dir_cache_repo_uoa']) and not os.path.isfile(work['dir_cache_repo_info']): rx=load_text_file({'text_file':os.path.join(work['dir_default_repo'],cfg['file_cache_repo_uoa'])}) if rx['return']>0: return rx x1=rx['string'] rx=load_text_file({'text_file':os.path.join(work['dir_default_repo'],cfg['file_cache_repo_info'])}) if rx['return']>0: return rx x2=rx['string'] rx=save_text_file({'text_file':work['dir_cache_repo_info'], 'string':x2}) if rx['return']>0: return rx rx=save_text_file({'text_file':work['dir_cache_repo_uoa'], 'string':x1}) if rx['return']>0: return rx # Check if local configuration exists, and if not, create it if not os.path.isfile(work['dir_local_cfg']): # Create empty local configuration rx=add({'repo_uoa':cfg['repo_name_local'], 'module_uoa':cfg['subdir_kernel'], 'data_uoa':work['local_kernel_uoa']}) if rx['return']>0: return {'return':rx['return'], 'error':'can\'t create local configuration entry'} # Read kernel configuration (if exists) if os.path.isfile(work['dir_work_cfg']): r=load_json_file({'json_file':work['dir_work_cfg']}) if r['return']>0: return r cfg1=r['dict'] # Update cfg r=merge_dicts({'dict1':cfg, 'dict2':cfg1}) if r['return']>0: return r initialized=True return {'return':0}
0.030906
def avail_images(call=None): ''' Return a dict of all available images on the provider ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_images function must be called with ' '-f or --function, or with the --list-images option' ) compconn = get_conn(client_type='compute') region = get_location() publishers = [] ret = {} def _get_publisher_images(publisher): ''' Get all images from a specific publisher ''' data = {} try: offers = compconn.virtual_machine_images.list_offers( location=region, publisher_name=publisher, ) for offer_obj in offers: offer = offer_obj.as_dict() skus = compconn.virtual_machine_images.list_skus( location=region, publisher_name=publisher, offer=offer['name'], ) for sku_obj in skus: sku = sku_obj.as_dict() results = compconn.virtual_machine_images.list( location=region, publisher_name=publisher, offer=offer['name'], skus=sku['name'], ) for version_obj in results: version = version_obj.as_dict() name = '|'.join(( publisher, offer['name'], sku['name'], version['name'], )) data[name] = { 'publisher': publisher, 'offer': offer['name'], 'sku': sku['name'], 'version': version['name'], } except CloudError as exc: __utils__['azurearm.log_cloud_error']('compute', exc.message) data = {publisher: exc.message} return data try: publishers_query = compconn.virtual_machine_images.list_publishers( location=region ) for publisher_obj in publishers_query: publisher = publisher_obj.as_dict() publishers.append(publisher['name']) except CloudError as exc: __utils__['azurearm.log_cloud_error']('compute', exc.message) pool = ThreadPool(cpu_count() * 6) results = pool.map_async(_get_publisher_images, publishers) results.wait() ret = {k: v for result in results.get() for k, v in six.iteritems(result)} return ret
0.000368
def get_external_references(self):
    """
    Iterator that returns all the external references of the markable
    @rtype: L{CexternalReference}
    @return: the external references
    """
    for ext_ref_node in self.node.findall('externalReferences'):
        ext_refs_obj = CexternalReferences(ext_ref_node)
        for ref in ext_refs_obj:
            yield ref
0.00495
def apply_transform(self, transform):
    """
    Apply a homogenous transformation to the PointCloud object in-place.

    Parameters
    --------------
    transform : (4, 4) float
        Homogenous transformation to apply to PointCloud
    """
    self.vertices = transformations.transform_points(self.vertices,
                                                     matrix=transform)
0.004662
def oom_components(Ct, C2t, rank_ind=None, lcc=None, tol_one=1e-2): """ Compute OOM components and eigenvalues from count matrices: Parameters ---------- Ct : ndarray(N, N) count matrix from data C2t : sparse csc-matrix (N*N, N) two-step count matrix from data for all states, columns enumerate intermediate steps. rank_ind : ndarray(N, dtype=bool), optional, default=None indicates which singular values are accepted. By default, all non- zero singular values are accepted. lcc : ndarray(N,), optional, default=None largest connected set of the count-matrix. Two step count matrix will be reduced to this set. tol_one : float, optional, default=1e-2 keep eigenvalues of absolute value less or equal 1+tol_one. Returns ------- Xi : ndarray(M, N, M) matrix of set-observable operators omega: ndarray(M,) information state vector of OOM sigma : ndarray(M,) evaluator of OOM l : ndarray(M,) eigenvalues from OOM """ import msmtools.estimation as me # Decompose count matrix by SVD: if lcc is not None: Ct_svd = me.largest_connected_submatrix(Ct, lcc=lcc) N1 = Ct.shape[0] else: Ct_svd = Ct V, s, W = scl.svd(Ct_svd, full_matrices=False) # Make rank decision: if rank_ind is None: ind = (s >= np.finfo(float).eps) V = V[:, rank_ind] s = s[rank_ind] W = W[rank_ind, :].T # Compute transformations: F1 = np.dot(V, np.diag(s**-0.5)) F2 = np.dot(W, np.diag(s**-0.5)) # Apply the transformations to C2t: N = Ct_svd.shape[0] M = F1.shape[1] Xi = np.zeros((M, N, M)) for n in range(N): if lcc is not None: C2t_n = C2t[:, lcc[n]] C2t_n = _reshape_sparse(C2t_n, (N1, N1)) C2t_n = me.largest_connected_submatrix(C2t_n, lcc=lcc) else: C2t_n = C2t[:, n] C2t_n = _reshape_sparse(C2t_n, (N, N)) Xi[:, n, :] = np.dot(F1.T, C2t_n.dot(F2)) # Compute sigma: c = np.sum(Ct_svd, axis=1) sigma = np.dot(F1.T, c) # Compute eigenvalues: Xi_S = np.sum(Xi, axis=1) l, R = scl.eig(Xi_S.T) # Restrict eigenvalues to reasonable range: ind = np.where(np.logical_and(np.abs(l) <= (1+tol_one), np.real(l) >= 0.0))[0] l = l[ind] R = R[:, ind] # Sort and extract omega l, R = _sort_by_norm(l, R) omega = np.real(R[:, 0]) omega = omega / np.dot(omega, sigma) return Xi, omega, sigma, l
0.001171
def truncate(self, size):
    """
    Change the size of this file. This usually extends or shrinks the size
    of the file, just like the ``truncate()`` method on Python file objects.

    :param size: the new size of the file
    """
    self.sftp._log(
        DEBUG, "truncate({}, {!r})".format(hexlify(self.handle), size)
    )
    attr = SFTPAttributes()
    attr.st_size = size
    self.sftp._request(CMD_FSETSTAT, self.handle, attr)
0.004049
def do_youtube_dl(worker, site, page):
    '''
    Runs youtube-dl configured for `worker` and `site` to download videos from
    `page`.

    Args:
        worker (brozzler.BrozzlerWorker): the calling brozzler worker
        site (brozzler.Site): the site we are brozzling
        page (brozzler.Page): the page we are brozzling

    Returns:
        tuple with two entries:
            `list` of `dict`: with info about urls fetched:
                [{
                    'url': ...,
                    'method': ...,
                    'response_code': ...,
                    'response_headers': ...,
                }, ...]
            `list` of `str`: outlink urls
    '''
    with tempfile.TemporaryDirectory(prefix='brzl-ydl-') as tempdir:
        ydl = _build_youtube_dl(worker, tempdir, site)
        ie_result = _try_youtube_dl(worker, ydl, site, page)
        outlinks = set()
        if ie_result and ie_result.get('extractor') == 'youtube:playlist':
            # youtube watch pages as outlinks
            outlinks = {'https://www.youtube.com/watch?v=%s' % e['id']
                        for e in ie_result.get('entries_no_dl', [])}
        # any outlinks for other cases?
        return ydl.fetch_spy.fetches, outlinks
0.000806
def delete_group(self, group_id, keep_non_orphans=False, keep_orphans=False):
    """
    Delete a group trigger

    :param group_id: ID of the group trigger to delete
    :param keep_non_orphans: if True converts the non-orphan member triggers to standard triggers
    :param keep_orphans: if True converts the orphan member triggers to standard triggers
    """
    params = {'keepNonOrphans': str(keep_non_orphans).lower(),
              'keepOrphans': str(keep_orphans).lower()}
    self._delete(self._service_url(['triggers', 'groups', group_id], params=params))
0.010239
def send_contributor_email(self, contributor):
    """Send an EmailMessage object for a given contributor."""
    ContributorReport(
        contributor,
        month=self.month,
        year=self.year,
        deadline=self._deadline,
        start=self._start,
        end=self._end
    ).send()
0.005988
def allow_pgcodes(cr, *codes): """Context manager that will omit specified error codes. E.g., suppose you expect a migration to produce unique constraint violations and you want to ignore them. Then you could just do:: with allow_pgcodes(cr, psycopg2.errorcodes.UNIQUE_VIOLATION): cr.execute("INSERT INTO me (name) SELECT name FROM you") .. warning:: **All** sentences inside this context will be rolled back if **a single error** is raised, so the above example would insert **nothing** if a single row violates a unique constraint. This would ignore duplicate files but insert the others:: cr.execute("SELECT name FROM you") for row in cr.fetchall(): with allow_pgcodes(cr, psycopg2.errorcodes.UNIQUE_VIOLATION): cr.execute("INSERT INTO me (name) VALUES (%s)", row[0]) :param *str codes: Undefined amount of error codes found in :mod:`psycopg2.errorcodes` that are allowed. Codes can have either 2 characters (indicating an error class) or 5 (indicating a concrete error). Any other errors will be raised. """ try: with cr.savepoint(): with core.tools.mute_logger('odoo.sql_db'): yield except (ProgrammingError, IntegrityError) as error: msg = "Code: {code}. Class: {class_}. Error: {error}.".format( code=error.pgcode, class_=errorcodes.lookup(error.pgcode[:2]), error=errorcodes.lookup(error.pgcode)) if error.pgcode in codes or error.pgcode[:2] in codes: logger.info(msg) else: logger.exception(msg) raise
0.00058
def getContinuousSet(self, id_):
    """
    Returns the ContinuousSet with the specified id, or raises a
    ContinuousSetNotFoundException otherwise.
    """
    if id_ not in self._continuousSetIdMap:
        raise exceptions.ContinuousSetNotFoundException(id_)
    return self._continuousSetIdMap[id_]
0.006006
def _reduce_logsumexp(input_tensor, axis=None, keepdims=False, name=None):  # pylint: disable=unused-argument
    """Computes `log(sum(exp(input_tensor)))` along the specified axis."""
    try:
        return scipy_special.logsumexp(
            input_tensor, axis=_astuple(axis), keepdims=keepdims)
    except NotImplementedError:
        # We offer a non SP version just in case SP isn't installed and this
        # because logsumexp is often used.
        m = _max_mask_non_finite(input_tensor, axis=axis, keepdims=True)
        y = input_tensor - m
        y = np.exp(y, out=y)
        return m + np.log(np.sum(y, axis=_astuple(axis), keepdims=keepdims))
0.008065
def predict_is(self, h):
    """
    Outputs predictions for the Aggregate algorithm on the in-sample data

    Parameters
    ----------
    h : int
        How many steps to run the aggregating algorithm on

    Returns
    ----------
    - pd.DataFrame of ensemble predictions
    """
    result = pd.DataFrame([self.run(h=h)[2]]).T
    result.index = self.index[-h:]
    return result
0.011494
async def get_messages(self, name):
    """Get stored messages for a service.

    Args:
        name (string): The name of the service to get messages from.

    Returns:
        list(ServiceMessage): A list of the messages stored for this service
    """
    resp = await self.send_command(OPERATIONS.CMD_QUERY_MESSAGES, {'name': name},
                                   MESSAGES.QueryMessagesResponse, timeout=5.0)
    return [states.ServiceMessage.FromDictionary(x) for x in resp]
0.00956
def parse_markdown_readme():
    """
    Convert README.md to RST via pandoc, and load into memory
    (fallback to LONG_DESCRIPTION on failure)
    """
    # Attempt to run pandoc on markdown file
    import subprocess
    try:
        subprocess.call(
            ['pandoc', '-t', 'rst', '-o', 'README.rst', 'README.md']
        )
    except OSError:
        return LONG_DESCRIPTION

    # Attempt to load output
    try:
        readme = open(os.path.join(
            os.path.dirname(__file__),
            'README.rst'
        ))
    except IOError:
        return LONG_DESCRIPTION
    return readme.read()
0.001637
def get_nfd_quick_check_property(value, is_bytes=False):
    """Get `NFD QUICK CHECK` property."""
    obj = unidata.ascii_nfd_quick_check if is_bytes else unidata.unicode_nfd_quick_check

    if value.startswith('^'):
        negated = value[1:]
        value = '^' + unidata.unicode_alias['nfdquickcheck'].get(negated, negated)
    else:
        value = unidata.unicode_alias['nfdquickcheck'].get(value, value)

    return obj[value]
0.006881
def capture_events(receiver, dest): """ Capture all events sent to `receiver` in the sequence `dest`. This is a generator, and it is best used with ``yield from``. The observable effect of using this generator with ``yield from`` is identical to the effect of using `receiver` with ``yield from`` directly (including the return value), but in addition, the values which are *sent* to the receiver are captured in `dest`. If `receiver` raises an exception or the generator is closed prematurely using its :meth:`close`, `dest` is cleared. This is used to implement :class:`CapturingXSO`. See the documentation there for use cases. .. versionadded:: 0.5 """ # the following code is a copy of the formal definition of `yield from` # in PEP 380, with modifications to capture the value sent during yield _i = iter(receiver) try: _y = next(_i) except StopIteration as _e: return _e.value try: while True: try: _s = yield _y except GeneratorExit as _e: try: _m = _i.close except AttributeError: pass else: _m() raise _e except BaseException as _e: _x = sys.exc_info() try: _m = _i.throw except AttributeError: raise _e else: try: _y = _m(*_x) except StopIteration as _e: _r = _e.value break else: dest.append(_s) try: if _s is None: _y = next(_i) else: _y = _i.send(_s) except StopIteration as _e: _r = _e.value break except: # NOQA dest.clear() raise return _r
0.000484
def best_value(Y, sign=1):
    '''
    Returns a vector whose components i are the minimum (default) or maximum of Y[:i]
    '''
    n = Y.shape[0]
    Y_best = np.ones(n)
    for i in range(n):
        if sign == 1:
            Y_best[i] = Y[:(i + 1)].min()
        else:
            Y_best[i] = Y[:(i + 1)].max()
    return Y_best
0.01548
def inverted(arg):
    """Yield the inverse items of the provided object.

    If *arg* has a :func:`callable` ``__inverted__`` attribute,
    return the result of calling it.

    Otherwise, return an iterator over the items in `arg`,
    inverting each item on the fly.

    *See also* :attr:`bidict.BidirectionalMapping.__inverted__`
    """
    inv = getattr(arg, '__inverted__', None)
    if callable(inv):
        return inv()
    return ((val, key) for (key, val) in _iteritems_mapping_or_iterable(arg))
0.001961
def _parse_subnet(self, subnet_dict):
    """Return the subnet, start, end, gateway of a subnet. """
    if not subnet_dict:
        return
    alloc_pool = subnet_dict.get('allocation_pools')
    cidr = subnet_dict.get('cidr')
    subnet = cidr.split('/')[0]
    start = alloc_pool[0].get('start')
    end = alloc_pool[0].get('end')
    gateway = subnet_dict.get('gateway_ip')
    sec_gateway = subnet_dict.get('secondary_gw')
    return {'subnet': subnet, 'start': start, 'end': end, 'gateway': gateway,
            'sec_gateway': sec_gateway}
0.003373
def is_binary(filename):
    """
    :param filename: File to check.
    :returns: True if it's a binary file, otherwise False.
    """
    logger.debug('is_binary: %(filename)r', locals())

    # Check if the file extension is in a list of known binary types
    binary_extensions = ['pyc', 'iso', 'zip', 'pdf']
    for ext in binary_extensions:
        if filename.endswith(ext):
            return True

    # Check if the starting chunk is a binary string
    chunk = get_starting_chunk(filename)
    return is_binary_string(chunk)
0.001869
def was_suicide(self):
    '''
    Checks if the king of the other side is attacked. Such a position is not
    valid and could only be reached by an illegal move.
    '''
    return self.is_attacked_by(self.turn, self.king_squares[self.turn ^ 1])
0.011236
def _get_database(self, options):
    """Get the database to restore."""
    database_name = options.get('database')
    if not database_name:
        if len(settings.DATABASES) > 1:
            errmsg = "Because this project contains more than one database, you"\
                     " must specify the --database option."
            raise CommandError(errmsg)
        database_name = list(settings.DATABASES.keys())[0]
    if database_name not in settings.DATABASES:
        raise CommandError("Database %s does not exist." % database_name)
    return database_name, settings.DATABASES[database_name]
0.004666
async def fetch_data(self):
    """Get the latest data from EBox."""
    # Get http session
    await self._get_httpsession()
    # Get login page
    token = await self._get_login_page()
    # Post login page
    await self._post_login_page(token)
    # Get home data
    home_data = await self._get_home_data()
    # Get usage data
    usage_data = await self._get_usage_data()
    # Merge data
    self._data.update(home_data)
    self._data.update(usage_data)
0.003854
def register_resources(klass, registry, resource_class):
    """ meta model subscriber on resource registration.

    We watch for PHD event that provides affected entities and register
    the health-event filter to the resources.
    """
    services = {'acm-certificate', 'directconnect', 'dms-instance', 'directory',
                'ec2', 'dynamodb-table', 'cache-cluster', 'efs', 'app-elb',
                'elb', 'emr', 'rds', 'storage-gateway'}
    if resource_class.type in services:
        resource_class.filter_registry.register('health-event', klass)
0.006678
def get_illegal_targets(part, include):
    """
    Retrieve the illegal parent parts where `Part` can be moved/copied.

    :param part: `Part` to be moved/copied.
    :type part: :class:`Part`
    :param include: `Set` object with id's to be avoided as target parent `Part`
    :type include: set
    :return: `List` object of illegal id's
    :rtype: list
    """
    list_of_illegal_targets = include or set()
    for descendant in part.children(descendants='children'):
        list_of_illegal_targets.add(descendant.id)
    return list_of_illegal_targets
0.003571
def from_string(cls, key, key_id=None):
    """Construct a Signer instance from a private key in PEM format.

    Args:
        key (str): Private key in PEM format.
        key_id (str): An optional key id used to identify the private key.

    Returns:
        google.auth.crypt.Signer: The constructed signer.

    Raises:
        ValueError: If the key cannot be parsed as PKCS#1 or PKCS#8 in
            PEM format.
    """
    key = _helpers.from_bytes(key)  # PEM expects str in Python 3
    marker_id, key_bytes = pem.readPemBlocksFromFile(
        six.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER)

    # Key is in pkcs1 format.
    if marker_id == 0:
        private_key = rsa.key.PrivateKey.load_pkcs1(
            key_bytes, format='DER')
    # Key is in pkcs8.
    elif marker_id == 1:
        key_info, remaining = decoder.decode(
            key_bytes, asn1Spec=_PKCS8_SPEC)
        if remaining != b'':
            raise ValueError('Unused bytes', remaining)
        private_key_info = key_info.getComponentByName('privateKey')
        private_key = rsa.key.PrivateKey.load_pkcs1(
            private_key_info.asOctets(), format='DER')
    else:
        raise ValueError('No key could be detected.')

    return cls(private_key, key_id=key_id)
0.001455
def root_rhx_gis(self) -> Optional[str]:
    """rhx_gis string returned in the / query."""
    if self.is_logged_in:
        # At the moment, rhx_gis seems to be required for anonymous requests only. By returning
        # None when logged in, we can save the root_rhx_gis lookup query.
        return None
    if not self._root_rhx_gis:
        self._root_rhx_gis = self.get_json('', {})['rhx_gis']
    return self._root_rhx_gis
0.006508
def indexes(self, collection=None):
    """Return a list with the current indexes

    Skip the mandatory _id_ indexes

    Args:
        collection(str)

    Returns:
        indexes(list)
    """
    indexes = []

    for collection_name in self.collections():
        if collection and collection != collection_name:
            continue
        for index_name in self.db[collection_name].index_information():
            if index_name != '_id_':
                indexes.append(index_name)
    return indexes
0.008389
def get_bounding_box(font):
    """ Returns max and min bbox of given truetype font """
    ymin = 0
    ymax = 0
    if font.sfntVersion == 'OTTO':
        ymin = font['head'].yMin
        ymax = font['head'].yMax
    else:
        for g in font['glyf'].glyphs:
            char = font['glyf'][g]
            if hasattr(char, 'yMin') and ymin > char.yMin:
                ymin = char.yMin
            if hasattr(char, 'yMax') and ymax < char.yMax:
                ymax = char.yMax
    return ymin, ymax
0.001988
def addOutParameter(self, name, type, namespace=None, element_type=0):
    """Add an output parameter description to the call info."""
    parameter = ParameterInfo(name, type, namespace, element_type)
    self.outparams.append(parameter)
    return parameter
0.007273
def tokenize_sents(string):
    """
    Tokenize input text to sentences.

    :param string: Text to tokenize
    :type string: str or unicode
    :return: sentences
    :rtype: list of strings
    """
    string = six.text_type(string)

    spans = []
    for match in re.finditer('[^\s]+', string):
        spans.append(match)
    spans_count = len(spans)

    rez = []
    off = 0

    for i in range(spans_count):
        tok = string[spans[i].start():spans[i].end()]
        if i == spans_count - 1:
            rez.append(string[off:spans[i].end()])
        elif tok[-1] in ['.', '!', '?', '…', '»']:
            tok1 = tok[re.search('[.!?…»]', tok).start()-1]
            next_tok = string[spans[i + 1].start():spans[i + 1].end()]
            if (next_tok[0].isupper() and not tok1.isupper()
                    and not (tok[-1] != '.' or tok1[0] == '(' or tok in ABBRS)):
                rez.append(string[off:spans[i].end()])
                off = spans[i + 1].start()

    return rez
0.001894
def _extract_fund(l, _root):
    """
    Creates flat funding dictionary.

    :param list l: Funding entries
    """
    logger_ts.info("enter _extract_funding")
    for idx, i in enumerate(l):
        for k, v in i.items():
            _root['funding' + str(idx + 1) + '_' + k] = v
    return _root
0.006689
async def _cancel_payloads(self):
    """Cancel all remaining payloads"""
    for task in self._tasks:
        task.cancel()
        await asyncio.sleep(0)
    for task in self._tasks:
        while not task.done():
            await asyncio.sleep(0.1)
            task.cancel()
0.006452
def query(cls, automation=None, package=None, status=None, name=None, created_by=None, created_from=None, created_to=None, order_by=None, order=None, offset=None, limit=None, api=None): """ Query (List) automation runs. :param name: Automation run name :param automation: Automation template :param package: Package :param status: Run status :param created_by: Username of user that created the run :param order_by: Property by which to order results :param order: Ascending or descending ("asc" or "desc") :param created_from: Date the run is created after :param created_to: Date the run is created before :param offset: Pagination offset. :param limit: Pagination limit. :param api: Api instance. :return: collection object """ if automation: automation = Transform.to_automation(automation) if package: package = Transform.to_automation_package(package) api = api or cls._API return super(AutomationRun, cls)._query( url=cls._URL['query'], name=name, automation=automation, package=package, status=status, created_by=created_by, created_from=created_from, created_to=created_to, order_by=order_by, order=order, offset=offset, limit=limit, api=api, )
0.002613
def render(self):
    """Runs the render until thread flag is set.

    Returns
    -------
    self
    """
    while not self._stop_spinner.is_set():
        self._render_frame()
        time.sleep(0.001 * self._interval)

    return self
0.007273
def iteritems(self, **options):
    '''Return a query iterator with (id, object) pairs.'''
    iter = self.query(**options)
    while True:
        obj = iter.next()
        yield (obj.id, obj)
0.009346
def weapon_cooldown(self) -> Union[int, float]:
    """ Returns some time (more than game loops) until the unit can fire again,
    returns -1 for units that can't attack.

    Usage:
    if unit.weapon_cooldown == 0:
        await self.do(unit.attack(target))
    elif unit.weapon_cooldown < 0:
        await self.do(unit.move(closest_allied_unit_because_cant_attack))
    else:
        await self.do(unit.move(retreatPosition))
    """
    if self.can_attack:
        return self._proto.weapon_cooldown
    return -1
0.005272
def display_eventtype(self):
    """Read the list of event types in the annotations and update widgets."""
    if self.annot is not None:
        event_types = sorted(self.annot.event_types, key=str.lower)
    else:
        event_types = []

    self.idx_eventtype.clear()

    evttype_group = QGroupBox('Event Types')
    layout = QVBoxLayout()
    evttype_group.setLayout(layout)

    self.check_all_eventtype = check_all = QCheckBox('All event types')
    check_all.setCheckState(Qt.Checked)
    check_all.clicked.connect(self.toggle_eventtype)
    layout.addWidget(check_all)

    self.idx_eventtype_list = []
    for one_eventtype in event_types:
        self.idx_eventtype.addItem(one_eventtype)
        item = QCheckBox(one_eventtype)
        layout.addWidget(item)
        item.setCheckState(Qt.Checked)
        item.stateChanged.connect(self.update_annotations)
        item.stateChanged.connect(self.toggle_check_all_eventtype)
        self.idx_eventtype_list.append(item)

    self.idx_eventtype_scroll.setWidget(evttype_group)
0.002616
def remove_all_gap_columns(self):
    """
    Remove any columns containing only gaps from alignment components,
    text of components is modified IN PLACE.
    """
    seqs = []
    for c in self.components:
        try:
            seqs.append(list(c.text))
        except TypeError:
            seqs.append(None)

    i = 0
    text_size = self.text_size
    while i < text_size:
        all_gap = True
        for seq in seqs:
            if seq is None:
                continue
            if seq[i] != '-':
                all_gap = False
        if all_gap:
            for seq in seqs:
                if seq is None:
                    continue
                del seq[i]
            text_size -= 1
        else:
            i += 1

    for i in range(len(self.components)):
        if seqs[i] is None:
            continue
        self.components[i].text = ''.join(seqs[i])

    self.text_size = text_size
0.020471
def _make_gelf_dict(self, record):
    """Create a dictionary representing a Graylog GELF log from a
    python :class:`logging.LogRecord`

    :param record: :class:`logging.LogRecord` to create a Graylog GELF log from.
    :type record: logging.LogRecord

    :return: dictionary representing a Graylog GELF log.
    :rtype: dict
    """
    # construct the base GELF format
    gelf_dict = {
        'version': "1.0",
        'host': BaseGELFHandler._resolve_host(self.fqdn, self.localname),
        'short_message': self.formatter.format(record) if self.formatter else record.getMessage(),
        'timestamp': record.created,
        'level': SYSLOG_LEVELS.get(record.levelno, record.levelno),
        'facility': self.facility or record.name,
    }

    # add in specified optional extras
    self._add_full_message(gelf_dict, record)

    if self.level_names:
        self._add_level_names(gelf_dict, record)

    if self.facility is not None:
        self._set_custom_facility(gelf_dict, self.facility, record)

    if self.debugging_fields:
        self._add_debugging_fields(gelf_dict, record)

    if self.extra_fields:
        self._add_extra_fields(gelf_dict, record)
    return gelf_dict
0.002278
def iter_teams(self, number=-1, etag=None):
    """Iterate over teams that are part of this organization.

    :param int number: (optional), number of teams to return. Default: -1
        returns all available teams.
    :param str etag: (optional), ETag from a previous request to the same
        endpoint
    :returns: generator of :class:`Team <Team>`\ s
    """
    url = self._build_url('teams', base_url=self._api)
    return self._iter(int(number), url, Team, etag=etag)
0.005825
def select(self):
    """
    generate the selection
    """
    if self.condition is not None:
        return self.table.table.read_where(self.condition.format(),
                                           start=self.start,
                                           stop=self.stop)
    elif self.coordinates is not None:
        return self.table.table.read_coordinates(self.coordinates)
    return self.table.table.read(start=self.start, stop=self.stop)
0.004032
def storeToXML(self, out, comment=None, encoding='UTF-8'):
    """
    Write the `Properties` object's entries (in unspecified order) in XML
    properties format to ``out``.

    :param out: a file-like object to write the properties to
    :type out: binary file-like object
    :param comment: if non-`None`, ``comment`` will be output as a
        ``<comment>`` element before the ``<entry>`` elements
    :type comment: text string or `None`
    :param string encoding: the name of the encoding to use for the XML
        document (also included in the XML declaration)
    :return: `None`
    """
    dump_xml(self.data, out, comment=comment, encoding=encoding)
0.002782
def rule_command_cmdlist_interface_h_interface_ge_leaf_interface_gigabitethernet_leaf(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
    index_key = ET.SubElement(rule, "index")
    index_key.text = kwargs.pop('index')
    command = ET.SubElement(rule, "command")
    cmdlist = ET.SubElement(command, "cmdlist")
    interface_h = ET.SubElement(cmdlist, "interface-h")
    interface_ge_leaf = ET.SubElement(interface_h, "interface-ge-leaf")
    interface = ET.SubElement(interface_ge_leaf, "interface")
    gigabitethernet_leaf = ET.SubElement(interface, "gigabitethernet-leaf")
    gigabitethernet_leaf.text = kwargs.pop('gigabitethernet_leaf')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
0.0044
def makedoedict(str1):
    """makedoedict"""
    blocklist = str1.split('..')
    blocklist = blocklist[:-1]  # remove empty item after last '..'
    blockdict = {}
    belongsdict = {}
    for num in range(0, len(blocklist)):
        blocklist[num] = blocklist[num].strip()
        linelist = blocklist[num].split(os.linesep)
        aline = linelist[0]
        alinelist = aline.split('=')
        name = alinelist[0].strip()
        aline = linelist[1]
        alinelist = aline.split('=')
        belongs = alinelist[-1].strip()
        # put the '..' back in the block
        theblock = blocklist[num] + os.linesep + '..' + os.linesep + os.linesep
        blockdict[name] = theblock
        belongsdict[name] = belongs
    return [blockdict, belongsdict]
0.006579
def make_value_from_env(self, param, value_type, function):
    """ get environment variable """
    value = os.getenv(param)
    if value is None:
        self.notify_user("Environment variable `%s` undefined" % param)
    return self.value_convert(value, value_type)
0.006579
def register_deregister(notifier, event_type, callback=None,
                        args=None, kwargs=None, details_filter=None,
                        weak=False):
    """Context manager that registers a callback, then deregisters on exit.

    NOTE(harlowja): if the callback is none, then this registers nothing, which
    is different from the behavior of the ``register`` method which will *not*
    accept none as it is not callable...
    """
    if callback is None:
        yield
    else:
        notifier.register(event_type, callback,
                          args=args, kwargs=kwargs,
                          details_filter=details_filter,
                          weak=weak)
        try:
            yield
        finally:
            notifier.deregister(event_type, callback,
                                details_filter=details_filter)
0.001122
def run(self):
    """ Run the server. Returns with system error code. """
    normalized = os.path.normpath(self.path) + ("/" if self.path.endswith("/") else "")
    if self.path != normalized:
        sys.stderr.write("Please use full path '%s'" % (normalized,))
        return -1

    self.butterStore = ButterStore.ButterStore(None, self.path, self.mode, dryrun=False)
    # self.butterStore.ignoreExtraVolumes = True

    self.toObj = _Arg2Obj(self.butterStore)
    self.toDict = _Obj2Dict()

    self.running = True

    with self.butterStore:
        with self:
            while self.running:
                self._processCommand()

    return 0
0.005602
def list_handler(args):
    """usage: {program} list

    List the anchors for a file.
    """
    repo = open_repository(None)
    for anchor_id, anchor in repo.items():
        print("{} {}:{} => {}".format(
            anchor_id,
            anchor.file_path.relative_to(repo.root),
            anchor.context.offset,
            anchor.metadata))

    return ExitCode.OK
0.002494
def complete_block(self):
    """Return code lines **with** bootstrap"""
    return "".join(line[SOURCE] for line in self._boot_lines + self._source_lines)
0.018405
def set_frequencies(self, freq, rg=None, setMaxfeq=True, setMinfreq=True, setSpeed=True):
    '''
    Set cores frequencies

    freq: int frequency in KHz
    rg: list of range of cores
    setMaxfeq: set the maximum frequency, default to true
    setMinfreq: set the minimum frequency, default to true
    setSpeed: only set the frequency, default to true
    '''
    to_change = self.__get_ranges("online")
    if type(rg) == int:
        rg = [rg]
    if rg:
        to_change = set(rg) & set(self.__get_ranges("online"))
    for cpu in to_change:
        if setSpeed:
            fpath = path.join("cpu%i" % cpu, "cpufreq", "scaling_setspeed")
            self.__write_cpu_file(fpath, str(freq).encode())
        if setMinfreq:
            fpath = path.join("cpu%i" % cpu, "cpufreq", "scaling_min_freq")
            self.__write_cpu_file(fpath, str(freq).encode())
        if setMaxfeq:
            fpath = path.join("cpu%i" % cpu, "cpufreq", "scaling_max_freq")
            self.__write_cpu_file(fpath, str(freq).encode())
0.013711
def create_class_instance(element, element_id, doc_id):
    """
    given a Salt XML element, returns a corresponding `SaltElement` class
    instance, i.e. a SaltXML `SToken` node will be converted into a
    `TokenNode`.

    Parameters
    ----------
    element : lxml.etree._Element
        an `etree._Element` is the XML representation of a Salt element,
        e.g. a single 'nodes' or 'edges' element
    element_id : int
        the index of element (used to connect edges to nodes)
    doc_id : str
        the ID of the SaltXML document

    Returns
    -------
    salt_element : SaltElement
        an instance of a `SaltElement` subclass instance, e.g. a `TokenNode`,
        `TextualRelation` or `SaltLayer`
    """
    xsi_type = get_xsi_type(element)
    element_class = XSI_TYPE_CLASSES[xsi_type]
    return element_class.from_etree(element)
0.001161
def fasta_file_to_dict(fasta_file, id=True, header=False, seq=False): """Returns a dict from a fasta file and the number of sequences as the second return value. fasta_file can be a string path or a file object. The key of fasta_dict can be set using the keyword arguments and results in a combination of id, header, sequence, in that order. joined with '||'. (default: id) Duplicate keys are checked and a warning is logged if found. The value of fasta_dict is a python dict with 3 keys: header, id and seq Changelog: 2014/11/17: * Added support for url escaped id """ fasta_file_f = fasta_file if isinstance(fasta_file, str): fasta_file_f = open(fasta_file, 'rb') fasta_dict = OrderedDict() keys = ['id', 'header', 'seq'] flags = dict([('id', id), ('header', header), ('seq', seq)]) entry = dict([('id', ''), ('header', ''), ('seq', '')]) count = 0 line_num = 0 for line in fasta_file_f: line = line.strip() if line and line[0] == '>': count += 1 key = '||'.join([entry[i] for i in keys if flags[i]]) if key: # key != '' if key in fasta_dict: # check for duplicate key logger.warning('%s : Line %d : Duplicate %s [%s] : ID = [%s].', fasta_file_f.name, line_num, '||'.join([i for i in keys if flags[i]]), key[:25] + (key[25:] and '..'), entry['id']) entry['seq'] = ''.join(entry['seq']) fasta_dict[key] = entry # check for url escaped id if id: unescaped_id = unquote(entry['id']) if id != unescaped_id: key = '||'.join([unescaped_id] + [entry[i] for i in keys if i != 'id' and flags[i]]) entry['unescaped_id'] = unescaped_id fasta_dict[key] = entry entry = dict() entry['header'] = line entry['id'] = line.split()[0][1:] entry['seq'] = [] else: entry['seq'].append(line.upper()) line_num += 1 if isinstance(fasta_file, str): fasta_file_f.close() key = '||'.join([entry[i] for i in keys if flags[i]]) if key: # key != '' if key in fasta_dict: logger.warning('%s : Line %d : Duplicate %s [%s] : ID = [%s].', fasta_file_f.name, line_num, '||'.join([i for i in keys if flags[i]]), key[:25] + (key[25:] and '..'), entry['id']) entry['seq'] = ''.join(entry['seq']) fasta_dict[key] = entry # check for url escaped id if id: unescaped_id = unquote(entry['id']) if id != unescaped_id: key = '||'.join([unescaped_id] + [entry[i] for i in keys if i != 'id' and flags[i]]) entry['unescaped_id'] = unescaped_id fasta_dict[key] = entry return fasta_dict, count
0.003413
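A usage sketch derived from the docstring above; the file name and contents are hypothetical. Passing an already-open text-mode handle avoids any binary/text ambiguity.

# Hypothetical FASTA contents of example.fasta:
#   >seq1 first record
#   ACGT
#   >seq2 second record
#   GGCC
with open('example.fasta') as handle:               # assumed input file
    fasta_dict, count = fasta_file_to_dict(handle)  # defaults: keys are the record IDs
# count == 2
# fasta_dict['seq1'] == {'id': 'seq1', 'header': '>seq1 first record', 'seq': 'ACGT'}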
def _get_files_recursively(rootdir): """Sometimes, we want to use this tool with non-git repositories. This function allows us to do so. """ output = [] for root, dirs, files in os.walk(rootdir): for filename in files: output.append(os.path.join(root, filename)) return output
0.003115
def get_line_indent(token): """Finds the indent of the line the token starts in.""" start = token.start_mark.buffer.rfind('\n', 0, token.start_mark.pointer) + 1 content = start while token.start_mark.buffer[content] == ' ': content += 1 return content - start
0.00304
def pre_serialize(self, raw, pkt, i):
    '''
    Set the header length based on the length of the raw payload
    plus the fixed minimum OpenFlow header length.
    '''
    self.length = len(raw) + OpenflowHeader._MINLEN
0.012579
def name(self): """Returns the real name of the franchise given the team ID. Examples: 'nwe' -> 'New England Patriots' 'sea' -> 'Seattle Seahawks' :returns: A string corresponding to the team's full name. """ doc = self.get_main_doc() headerwords = doc('div#meta h1')[0].text_content().split() lastIdx = headerwords.index('Franchise') teamwords = headerwords[:lastIdx] return ' '.join(teamwords)
0.004124
def lookups(self): "Yields (lookup, display, no_argument) pairs" for filter in self._filters: yield filter.key, filter.display, filter.no_argument
0.011494
def run_per_switch_cmds(self, switch_cmds): """Applies cmds to appropriate switches This takes in a switch->cmds mapping and runs only the set of cmds specified for a switch on that switch. This helper is used for applying/removing ACLs to/from interfaces as this config will vary from switch to switch. """ for switch_ip, cmds in switch_cmds.items(): switch = self._switches.get(switch_ip) self.run_openstack_sg_cmds(cmds, switch)
0.003906
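A sketch of the mapping shape this helper expects; the switch IPs, interface names, and ACL commands below are hypothetical.

# Hypothetical per-switch command sets for applying an ACL on different interfaces.
switch_cmds = {
    '192.168.10.1': ['interface Ethernet1', 'ip access-group SG-WEB in'],
    '192.168.10.2': ['interface Ethernet7', 'ip access-group SG-WEB in'],
}
driver.run_per_switch_cmds(switch_cmds)  # 'driver' is an assumed instance of the owning class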
def convert_cidr(cidr): ''' returns the network address, subnet mask and broadcast address of a cidr address .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' network.convert_cidr 172.31.0.0/16 ''' ret = {'network': None, 'netmask': None, 'broadcast': None} cidr = calc_net(cidr) network_info = ipaddress.ip_network(cidr) ret['network'] = six.text_type(network_info.network_address) ret['netmask'] = six.text_type(network_info.netmask) ret['broadcast'] = six.text_type(network_info.broadcast_address) return ret
0.003241
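For reference, the same conversion called directly from Python; the returned values follow from standard IPv4 arithmetic for a /16 network, assuming calc_net normalizes the input as its name suggests.

ret = convert_cidr('172.31.0.0/16')
# {'network': '172.31.0.0',
#  'netmask': '255.255.0.0',
#  'broadcast': '172.31.255.255'}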
def safe_call(request: Request, methods: Methods, *, debug: bool) -> Response: """ Call a Request, catching exceptions to ensure we always return a Response. Args: request: The Request object. methods: The list of methods that can be called. debug: Include more information in error responses. Returns: A Response object. """ with handle_exceptions(request, debug) as handler: result = call(methods.items[request.method], *request.args, **request.kwargs) handler.response = SuccessResponse(result=result, id=request.id) return handler.response
0.003221
def conf_budget(self, budget): """ Set limit on the number of conflicts. """ if self.maplesat: pysolvers.maplecm_cbudget(self.maplesat, budget)
0.010417
def import_data(self, data):
    """Import additional data for tuning

    Parameters
    ----------
    data: a list of dictionaries, each of which has at least two keys, 'parameter' and 'value'
    """
    _completed_num = 0
    for trial_info in data:
        logger.info("Importing data, current processing progress %s / %s" % (_completed_num, len(data)))
        _completed_num += 1
        assert "parameter" in trial_info
        _params = trial_info["parameter"]
        assert "value" in trial_info
        _value = trial_info['value']
        if not _value:
            logger.info("Useless trial data, value is %s, skipping this trial data." % _value)
            continue
        self.supplement_data_num += 1
        _parameter_id = '_'.join(["ImportData", str(self.supplement_data_num)])
        self.total_data.append(_params)
        self.receive_trial_result(parameter_id=_parameter_id, parameters=_params, value=_value)
    logger.info("Successfully imported data to the Metis tuner.")
0.008295
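A sketch of the payload shape import_data expects; the hyperparameter names and metric values are hypothetical.

# Each record needs a 'parameter' dict and a non-empty 'value' metric.
previous_trials = [
    {'parameter': {'learning_rate': 0.01, 'batch_size': 32}, 'value': 0.92},
    {'parameter': {'learning_rate': 0.001, 'batch_size': 64}, 'value': 0.95},
]
tuner.import_data(previous_trials)  # 'tuner' is an assumed instance of the Metis tuner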
def process_text(text, pmid=None, python2_path=None):
    """Processes the specified plain text with TEES and converts output to
    supported INDRA statements.

    Checks for the TEES installation in the TEES_PATH environment variable
    and in the configuration file; if not found, checks candidate paths in
    tees_candidate_paths. Raises an exception if TEES cannot be found in any
    of these places.

    Parameters
    ----------
    text : str
        Plain text to process with TEES
    pmid : str
        The PMID the paper comes from, to be stored in the Evidence object
        of statements. Set to None if this is unspecified.
    python2_path : str
        TEES is only compatible with python 2. This processor invokes an
        external python 2 interpreter so that the processor can be run in
        either python 2 or python 3. If None, searches for an executable
        named python2 in the PATH environment variable.

    Returns
    -------
    tp : TEESProcessor
        A TEESProcessor object which contains a list of INDRA statements
        extracted from TEES extractions
    """
    # Try to locate python2 in one of the directories of the PATH environment
    # variable if it is not provided
    if python2_path is None:
        for path in os.environ["PATH"].split(os.pathsep):
            proposed_python2_path = os.path.join(path, 'python2.7')
            if os.path.isfile(proposed_python2_path):
                python2_path = proposed_python2_path
                print('Found python 2 interpreter at', python2_path)
                break
    if python2_path is None:
        raise Exception('Could not find python2 in the directories ' +
                        'listed in the PATH environment variable. ' +
                        'Need python2 to run TEES.')

    # Run TEES
    a1_text, a2_text, sentence_segmentations = run_on_text(text, python2_path)

    # Run the TEES processor
    tp = TEESProcessor(a1_text, a2_text, sentence_segmentations, pmid)
    return tp
0.001442
def save_ndarray_to_fits(array=None, file_name=None, main_header=None,
                         cast_to_float=True,
                         crpix1=None, crval1=None, cdelt1=None,
                         overwrite=True):
    """Save numpy array(s) into a FITS file with the provided filename.

    Parameters
    ----------
    array : numpy array or list of numpy arrays
        Array(s) to be exported as the FITS file. If the input is a list,
        a multi-extension FITS file is generated assuming that the list
        contains a list of arrays.
    file_name : string
        File name for the output FITS file.
    main_header : astropy FITS header
        Header to be introduced in the primary HDU.
    cast_to_float : bool or list of booleans
        If True, the array(s) data are saved as float. If a list of arrays
        has been provided, this parameter must be either a list (with the
        same length) of booleans or None.
    crpix1 : float, list of floats or None
        If not None, this value is used for the keyword CRPIX1. If a list
        of arrays has been provided, this parameter must be either a list
        (with the same length) of floats or None.
    crval1 : float, list of floats or None
        If not None, this value is used for the keyword CRVAL1. If a list
        of arrays has been provided, this parameter must be either a list
        (with the same length) of floats or None.
    cdelt1 : float, list of floats or None
        If not None, this value is used for the keyword CDELT1. If a list
        of arrays has been provided, this parameter must be either a list
        (with the same length) of floats or None.
    overwrite : bool
        If True, the file is overwritten (in the case it already exists).

    """

    # protections
    if file_name is None:
        raise ValueError("File_name is not defined in save_ndarray_to_fits")

    if type(array) is list:
        list_of_arrays = array
        narrays = len(list_of_arrays)
        # cast_to_float must be a list of bools
        if type(cast_to_float) is not list:
            raise ValueError("Expected list of cast_to_float not found!")
        else:
            if len(cast_to_float) != narrays:
                raise ValueError("Unexpected length of cast_to_float")
        list_cast_to_float = cast_to_float
        # check that the additional associated lists have been provided
        # and that they have the expected length (or they are None)
        for ldum, cdum in zip([crpix1, crval1, cdelt1],
                              ['crpix1', 'crval1', 'cdelt1']):
            if ldum is not None:
                if type(ldum) is not list:
                    raise ValueError("Expected list of " + cdum +
                                     " not found!")
                else:
                    if len(ldum) != narrays:
                        raise ValueError("Unexpected length of " + cdum)
        if crpix1 is None:
            list_crpix1 = [None] * narrays
        else:
            list_crpix1 = crpix1
        if crval1 is None:
            list_crval1 = [None] * narrays
        else:
            list_crval1 = crval1
        if cdelt1 is None:
            list_cdelt1 = [None] * narrays
        else:
            list_cdelt1 = cdelt1
    else:
        list_of_arrays = [array]
        list_cast_to_float = [cast_to_float]
        list_crpix1 = [crpix1]
        list_crval1 = [crval1]
        list_cdelt1 = [cdelt1]

    hdulist = fits.HDUList()

    for ihdu, tmp_array in enumerate(list_of_arrays):
        if type(tmp_array) is not np.ndarray:
            raise ValueError("Array#" + str(ihdu) + "=" + str(tmp_array) +
                             " must be a numpy.ndarray")
        if ihdu == 0:
            if list_cast_to_float[ihdu]:
                hdu = fits.PrimaryHDU(data=tmp_array.astype(np.float),
                                      header=main_header)
            else:
                hdu = fits.PrimaryHDU(data=tmp_array, header=main_header)
        else:
            if list_cast_to_float[ihdu]:
                hdu = fits.ImageHDU(data=tmp_array.astype(np.float))
            else:
                hdu = fits.ImageHDU(data=tmp_array)

        # set additional FITS keywords if requested
        tmp_crpix1 = list_crpix1[ihdu]
        if tmp_crpix1 is not None:
            hdu.header.set('CRPIX1', tmp_crpix1, 'Reference pixel')
        tmp_crval1 = list_crval1[ihdu]
        if tmp_crval1 is not None:
            hdu.header.set('CRVAL1', tmp_crval1,
                           'Reference wavelength corresponding to CRPIX1')
        tmp_cdelt1 = list_cdelt1[ihdu]
        if tmp_cdelt1 is not None:
            hdu.header.set('CDELT1', tmp_cdelt1,
                           'Linear dispersion (angstrom/pixel)')

        # add HDU to HDUList
        hdulist.append(hdu)

    # write output file
    hdulist.writeto(file_name, overwrite=overwrite)
0.000202
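A usage sketch based on the docstring above: writing two arrays to a multi-extension FITS file with a shared linear wavelength calibration. The file name and calibration numbers are assumptions.

import numpy as np

spectrum = np.linspace(0.0, 1.0, 100)
variance = np.full(100, 0.05)
save_ndarray_to_fits(array=[spectrum, variance],
                     file_name='spectrum.fits',   # hypothetical output path
                     cast_to_float=[True, True],  # one flag per array when a list is given
                     crpix1=[1.0, 1.0],
                     crval1=[4000.0, 4000.0],     # assumed starting wavelength (angstrom)
                     cdelt1=[2.0, 2.0],           # assumed dispersion (angstrom/pixel)
                     overwrite=True)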
def _Rzderiv(self,R,z,phi=0.,t=0.): #pragma: no cover
    """
    NAME:
       _Rzderiv
    PURPOSE:
       evaluate the mixed R,z derivative for this potential
    INPUT:
       R - Galactocentric cylindrical radius
       z - vertical height
       phi - azimuth
       t - time
    OUTPUT:
       d2phi/dR/dz
    HISTORY:
       2016-12-26 - Written - Bovy (UofT/CCA)
    """
    raise AttributeError # Implementation below does not work because SCF.Rzderiv is not implemented
    r= numpy.sqrt(R**2.+z**2.)
    out= self._scf.Rzderiv(R,z,phi=phi,use_physical=False)
    for a,ds,d2s,H,dH in zip(self._Sigma_amp,self._dsigmadR,
                             self._d2SigmadR2,self._Hz,self._dHzdz):
        out+= 4.*numpy.pi*a*(H(z)*R*z/r**2.*(d2s(r)-ds(r)/r)
                             +ds(r)*dH(z)*R/r)
    return out
0.023991
def save_inst(self, obj): """Inner logic to save instance. Based off pickle.save_inst""" cls = obj.__class__ # Try the dispatch table (pickle module doesn't do it) f = self.dispatch.get(cls) if f: f(self, obj) # Call unbound method with explicit self return memo = self.memo write = self.write save = self.save if hasattr(obj, '__getinitargs__'): args = obj.__getinitargs__() len(args) # XXX Assert it's a sequence pickle._keep_alive(args, memo) else: args = () write(pickle.MARK) if self.bin: save(cls) for arg in args: save(arg) write(pickle.OBJ) else: for arg in args: save(arg) write(pickle.INST + cls.__module__ + '\n' + cls.__name__ + '\n') self.memoize(obj) try: getstate = obj.__getstate__ except AttributeError: stuff = obj.__dict__ else: stuff = getstate() pickle._keep_alive(stuff, memo) save(stuff) write(pickle.BUILD)
0.001663
def decode_id_token(token, client):
    """
    Represent the ID Token as a JSON Web Token (JWT).
    Return the verified claims as a dict.
    """
    keys = get_client_alg_keys(client)
    return JWS().verify_compact(token, keys=keys)
0.004695
def __configure_canvas(self, *args):
    """
    Private function to configure the internal Canvas.

    Changes the width of the canvas to fit the interior Frame.

    :param args: Tkinter event
    """
    if self.interior.winfo_reqwidth() != self._canvas.winfo_width():
        self._canvas.configure(width=self.interior.winfo_reqwidth())
0.010309
def get_short_uid_dict(self, query=None):
    """Create a dictionary of shortened UIDs for all contacts.

    All arguments are only used if the address book is not yet initialized
    and will just be handed to self.load().

    :param query: see self.load()
    :type query: str
    :returns: the contacts mapped by the shortest unique prefix of their UID
    :rtype: dict(str: CarddavObject)

    """
    if self._short_uids is None:
        if not self._loaded:
            self.load(query)
        if not self.contacts:
            self._short_uids = {}
        elif len(self.contacts) == 1:
            self._short_uids = {uid[0:1]: contact
                                for uid, contact in self.contacts.items()}
        else:
            self._short_uids = {}
            sorted_uids = sorted(self.contacts)
            # Prepare for the loop; the first and last items are handled
            # separately.
            item0, item1 = sorted_uids[:2]
            same1 = self._compare_uids(item0, item1)
            self._short_uids[item0[:same1 + 1]] = self.contacts[item0]
            for item_new in sorted_uids[2:]:
                # shift the items and the common prefix length one further
                item0, item1 = item1, item_new
                same0, same1 = same1, self._compare_uids(item0, item1)
                # compute the final prefix length for item1
                same = max(same0, same1)
                self._short_uids[item0[:same + 1]] = self.contacts[item0]
            # Save the last item.
            self._short_uids[item1[:same1 + 1]] = self.contacts[item1]
    return self._short_uids
0.001136
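A standalone illustration of the shortest-unique-prefix idea implemented above (not the method itself); the UIDs are hypothetical.

def shortest_unique_prefixes(uids):
    # For each UID keep the shortest prefix that no other UID starts with.
    prefixes = {}
    for uid in uids:
        others = [u for u in uids if u != uid]
        n = 1
        while any(u.startswith(uid[:n]) for u in others) and n < len(uid):
            n += 1
        prefixes[uid[:n]] = uid
    return prefixes

print(shortest_unique_prefixes(['abc123', 'abd456', 'xyz789']))
# {'abc': 'abc123', 'abd': 'abd456', 'x': 'xyz789'}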
def _read_dict(self, f): """ Converts h5 groups to dictionaries """ d = {} for k, item in f.items(): if type(item) == h5py._hl.dataset.Dataset: v = item.value if type(v) == np.string_: v = str(v) if type(v) == str and v == "NONE": d[k] = None elif type(v) == str and v == "EMPTYARR": d[k] = np.array([]) elif isinstance(v, bytes): d[k] = v.decode('utf-8') else: d[k] = v elif k[:5] == "DICT_": d[k[5:]] = self._read_dict(item) elif k[:5] == "LIST_": tmpD = self._read_dict(item) d[k[5:]] = [tmpD[str(i)] for i in range(len(tmpD))] return d
0.002326
def restrict_joins(self, q, bindings):
    """ Restrict the joins across all tables referenced in the database
        query to those specified in the model for the relevant dimensions.
        If a single table is used for the query, no unnecessary joins are
        performed. If more than one table is referenced, this ensures
        their returned rows are connected via the fact table. """
    if len(q.froms) == 1:
        return q
    else:
        for binding in bindings:
            if binding.table == self.fact_table:
                continue
            concept = self.model[binding.ref]
            if isinstance(concept, Dimension):
                dimension = concept
            else:
                dimension = concept.dimension
            dimension_table, key_col = dimension.key_attribute.bind(self)
            if binding.table != dimension_table:
                raise BindingException('Attributes must be of same table '
                                       'as their dimension key')

            join_column_name = dimension.join_column_name
            if isinstance(join_column_name, string_types):
                try:
                    join_column = self.fact_table.columns[join_column_name]
                except KeyError:
                    raise BindingException("Join column '%s' for %r not in fact table."
                                           % (dimension.join_column_name, dimension))
            else:
                if not isinstance(join_column_name, list) or len(join_column_name) != 2:
                    raise BindingException("Join column '%s' for %r should be either a string or a 2-tuple."
                                           % (join_column_name, dimension))
                try:
                    join_column = self.fact_table.columns[join_column_name[0]]
                except KeyError:
                    raise BindingException("Join column '%s' for %r not in fact table."
                                           % (dimension.join_column_name[0], dimension))
                try:
                    key_col = dimension_table.columns[join_column_name[1]]
                except KeyError:
                    raise BindingException("Join column '%s' for %r not in dimension table."
                                           % (dimension.join_column_name[1], dimension))

            q = q.where(join_column == key_col)

        return q
0.004644
def read_file(self, file_name, section=None): """Read settings from specified ``section`` of config file.""" file_name, section = self.parse_file_name_and_section(file_name, section) if not os.path.isfile(file_name): raise SettingsFileNotFoundError(file_name) parser = self.make_parser() with open(file_name) as fp: parser.read_file(fp) settings = OrderedDict() if parser.has_section(section): section_dict = parser[section] self.section_found_while_reading = True else: section_dict = parser.defaults().copy() extends = section_dict.get('extends') if extends: extends = self.decode_value(extends) extends, extends_section = self.parse_file_name_and_section( extends, extender=file_name, extender_section=section) settings.update(self.read_file(extends, extends_section)) settings.update(section_dict) if not self.section_found_while_reading: raise SettingsFileSectionNotFoundError(section) return settings
0.002629
def max_end(self): """ Retrieves the maximum index. :return: """ return max(len(self.input_string), self._max_end) if self.input_string else self._max_end
0.015464