Columns: text (string, lengths 78 to 104k), score (float64, range 0 to 0.18)
def bandpass_filter(data, low, high, fs, order=5):
    """
    Applies a bandpass filter to the given data.

    :param data: The data (numpy array) to be filtered.
    :param low: The low cutoff in Hz.
    :param high: The high cutoff in Hz.
    :param fs: The sample rate (in Hz) of the data.
    :param order: The order of the filter. The higher the order, the tighter the roll-off.
    :returns: Filtered data (numpy array).
    """
    nyq = 0.5 * fs
    low = low / nyq
    high = high / nyq
    b, a = signal.butter(order, [low, high], btype='band')
    y = signal.lfilter(b, a, data)
    return y
0.003317
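A minimal usage sketch for the bandpass_filter sample above; it assumes numpy and scipy are installed and that the function is in scope. The sample rate and cutoff frequencies are illustrative.

import numpy as np
from scipy import signal  # bandpass_filter relies on signal.butter / signal.lfilter

fs = 250.0                                       # sample rate in Hz (illustrative)
t = np.arange(0, 10, 1 / fs)
data = np.sin(2 * np.pi * 1.0 * t) + np.sin(2 * np.pi * 50.0 * t)
filtered = bandpass_filter(data, low=8.0, high=12.0, fs=fs, order=5)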
def insertGlyph(self, glyph, name=None):
    """
    Insert **glyph** into the layer. ::

        >>> glyph = layer.insertGlyph(otherGlyph, name="A")

    This method is deprecated. Use :meth:`BaseFont.__setitem__` instead.
    """
    if name is None:
        name = glyph.name
    self[name] = glyph
0.006079
def writeByte(self, byte):
    """
    Writes a byte into the L{WriteData} stream object.

    @type byte: int
    @param byte: Byte value to write into the stream.
    """
    self.data.write(pack("B" if not self.signed else "b", byte))
0.011152
def bounding_polygon(self):
    """
    Returns the bounding box polygon for this tile

    :return: `pywom.utils.geo.Polygon` instance
    """
    lon_left, lat_bottom, lon_right, lat_top = Tile.tile_coords_to_bbox(self.x, self.y, self.zoom)
    print(lon_left, lat_bottom, lon_right, lat_top)
    return Polygon([[[lon_left, lat_top],
                     [lon_right, lat_top],
                     [lon_right, lat_bottom],
                     [lon_left, lat_bottom],
                     [lon_left, lat_top]]])
0.005455
def param_sweep(model, sequences, param_grid, n_jobs=1, verbose=0):
    """Fit a series of models over a range of parameters.

    Parameters
    ----------
    model : msmbuilder.BaseEstimator
        An *instance* of an estimator to be used to fit data.
    sequences : list of array-like
        List of sequences, or a single sequence. Each sequence should be a
        1D iterable of state labels. Labels can be integers, strings, or
        other orderable objects.
    param_grid : dict or sklearn.grid_search.ParameterGrid
        Parameter grid to specify models to fit. See
        sklearn.grid_search.ParameterGrid for an explanation
    n_jobs : int, optional
        Number of jobs to run in parallel using joblib.Parallel

    Returns
    -------
    models : list
        List of models fit to the data according to param_grid
    """
    if isinstance(param_grid, dict):
        param_grid = ParameterGrid(param_grid)
    elif not isinstance(param_grid, ParameterGrid):
        raise ValueError("param_grid must be a dict or ParameterGrid instance")

    # iterable with (model, sequence) as items
    iter_args = ((clone(model).set_params(**params), sequences)
                 for params in param_grid)

    models = Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(_param_sweep_helper)(args) for args in iter_args)

    return models
0.000722
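A hypothetical sweep using param_sweep above. It assumes msmbuilder's MarkovStateModel estimator is available alongside sklearn and joblib; the lag times and toy state-label trajectories are illustrative, not from the source.

import numpy as np
from msmbuilder.msm import MarkovStateModel  # any msmbuilder estimator instance should work here

sequences = [np.random.randint(0, 4, size=1000) for _ in range(5)]  # toy 1D state-label sequences
param_grid = {'lag_time': [1, 2, 5]}
models = param_sweep(MarkovStateModel(), sequences, param_grid, n_jobs=2)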
def on_select_task(self, task):
    '''Called when a task is selected to fetch & process'''
    # inject information about the project
    logger.info('select %(project)s:%(taskid)s %(url)s', task)

    project_info = self.projects.get(task['project'])
    assert project_info, 'no such project'

    task['type'] = self.TASK_PACK
    task['group'] = project_info.group
    task['project_md5sum'] = project_info.md5sum
    task['project_updatetime'] = project_info.updatetime

    # lazy join project.crawl_config
    if getattr(project_info, 'crawl_config', None):
        task = BaseHandler.task_join_crawl_config(task, project_info.crawl_config)

    project_info.active_tasks.appendleft((time.time(), task))
    self.send_task(task)
    return task
0.003713
def which(filename, interactive=False, verbose=False): """Yield all executable files on path that matches `filename`. """ exe = [e.lower() for e in os.environ.get('PATHEXT', '').split(';')] if sys.platform != 'win32': # pragma: nocover exe.append('') name, ext = os.path.splitext(filename) has_extension = bool(ext) if has_extension and ext.lower() not in exe: raise ValueError("which can only search for executable files") def match(filenames): """Returns the sorted subset of ``filenames`` that matches ``filename``. """ res = set() for fname in filenames: if fname == filename: # pragma: nocover res.add(fname) # exact match continue fname_name, fname_ext = os.path.splitext(fname) if fname_name == name and fname_ext.lower() in exe: # pragma: nocover res.add(fname) return sorted(res) returnset = set() found = False for pth in get_path_directories(): if verbose: # pragma: nocover print('checking pth..') fnames = _listdir(pth, exe) if not fnames: continue for m in match(fnames): found_file = _normalize(os.path.join(pth, m)) if found_file not in returnset: # pragma: nocover if is_executable(found_file): yield found_file returnset.add(found_file) found = True if not found and interactive: # pragma: nocover print("Couldn't find %r anywhere on the path.." % filename) sys.exit(1)
0.002435
def add_show_function(self, show_name, show_func):
    """
    Appends a show function to the locally cached DesignDocument shows
    dictionary.

    :param show_name: Name used to identify the show function.
    :param show_func: Javascript show function.
    """
    if self.get_show_function(show_name) is not None:
        raise CloudantArgumentError(110, show_name)
    self.shows.__setitem__(show_name, show_func)
0.004376
def note_adapter(obj, request):
    '''
    Adapter for rendering a :class:`skosprovider.skos.Note` to json.

    :param skosprovider.skos.Note obj: The note to be rendered.
    :rtype: :class:`dict`
    '''
    return {
        'note': obj.note,
        'type': obj.type,
        'language': obj.language,
        'markup': obj.markup
    }
0.002933
def get(self, attach, *args, **kwargs): """ :param attach: if True, return file as an attachment. """ response = self.make_response(*args, **kwargs) # type: Response response.content_type = self.get_content_type(*args, **kwargs) if attach: filename = self.get_filename(*args, **kwargs) if not filename: filename = "file.bin" headers = response.headers headers.add("Content-Disposition", "attachment", filename=filename) self.set_cache_headers(response) return response
0.003339
def _theorem6p2():
    """See Theorem 6.2 in paper.

    Prunes (x,...,a) when (x,a) is explored and a has the same neighbour set
    in both graphs.
    """
    pruning_set2 = set()

    def _prune2(x, a, nbrs_a):
        frozen_nbrs_a = frozenset(nbrs_a)
        for i in range(len(x)):
            key = (tuple(x[0:i]), a, frozen_nbrs_a)
            if key in pruning_set2:
                return True
        return False

    def _explored2(x, a, nbrs_a):
        prunable = (tuple(x), a, frozenset(nbrs_a))  # (s,a,N(a))
        pruning_set2.add(prunable)
        return prunable

    def _finished2(prunable):
        pruning_set2.remove(prunable)

    return _prune2, _explored2, _finished2
0.002886
def render(self, display):
    """Render the text, re-rendering the surface only when the text has changed."""
    # to handle changing objects / callable
    if self.text != self._last_text:
        self._render()

    display.blit(self._surface, (self.topleft, self.size))
0.008163
def main(): parser = argparse.ArgumentParser(description="An interface to CarbonBlack environments") #profiles = auth.CredentialStore("response").get_profiles() parser.add_argument('-e', '--environment', choices=auth.CredentialStore("response").get_profiles(), help='specify a specific instance you want to work with. If not defined \'-t production\' will be used implicitly.') parser.add_argument('-t', '--envtypes', type=str, help='specify any combination of envtypes. Default=All \'production\' envtypes. Ignored if -e is set.', default='production') #parser.add_argument('--debug', action='store_true', help='print debugging info') #parser.add_argument('--warnings', action='store_true', # help="Warn before printing large executions") subparsers = parser.add_subparsers(dest='command') #title='subcommands', help='additional help') cbinterface_commands = [ 'query', 'proc', 'collect', 'remediate', 'enumerate_usb', 'vxdetect'] parser_vx = subparsers.add_parser('vxdetect', help="search cbsandbox for processes in vxstream report, show detections") parser_vx.add_argument('vxstream_report', help='path to vxstream report') parser_vx.add_argument('-p', '--print-process-tree', action='store_true', help='print the process tree') parser_usb = subparsers.add_parser('enumerate_usb', help="Show recent removable drive activity on the sensor") parser_usb.add_argument('sensor', help='hostname of the sensor') parser_usb.add_argument('-s', '--start-time', action='store', help='how far back to query (default:ALL time)') parser_proc = subparsers.add_parser('proc', help="analyze a process GUID. 'proc -h' for more") parser_proc.add_argument('process', help="the process GUID to analyze") parser_proc.add_argument('--warnings', action='store_true', help="Warn before printing large executions") parser_proc.add_argument('-w', '--walk-tree', action='store_true', help="walk and analyze the process tree") parser_proc.add_argument('-wp', '--walk-parents', action='store_true', help="print details on the process ancestry") #parser_proc.add_argument('-d', '--detection', action='store_true', # help="show detections that would result in ACE alerts") parser_proc.add_argument('-i', '--proc-info', action='store_true', help="show binary and process information") parser_proc.add_argument('-c','--show-children', action='store_true', help="only print process children event details") parser_proc.add_argument('-nc', '--netconns', action='store_true', help="print network connections") parser_proc.add_argument('-fm', '--filemods', action='store_true', help="print file modifications") parser_proc.add_argument('-rm', '--regmods', action='store_true', help="print registry modifications") parser_proc.add_argument('-um', '--unsigned-modloads', action='store_true', help="print unsigned modloads") parser_proc.add_argument('-ml', '--modloads', action='store_true', help="print modloads") parser_proc.add_argument('-cp', '--crossprocs', action='store_true', help="print crossprocs") #parser_proc.add_argument('-intel', '--intel-hits', action='store_true', # help="show intel (feed/WL) hits that do not result in ACE alerts") parser_proc.add_argument('--no-analysis', action='store_true', help="Don't fetch and print process activity") parser_proc.add_argument('--json', action='store_true', help='output process summary in json') parser_proc.add_argument('--segment-limit', action='store', type=int, default=None, help='stop processing events into json after this many process segments') facet_args = [ 'process_name', 'childproc_name', 'username', 'parent_name', 
'path', 'hostname', 'parent_pid', 'comms_ip', 'process_md5', 'start', 'group', 'interface_ip', 'modload_count', 'childproc_count', 'cmdline', 'regmod_count', 'process_pid', 'parent_id', 'os_type', 'rocessblock_count', 'crossproc_count', 'netconn_count', 'parent_md5', 'host_type', 'last_update', 'filemod_count' ] parser_query = subparsers.add_parser('query', help="execute a process search query. 'query -h' for more") parser_query.add_argument('query', help="the process search query you'd like to execute") parser_query.add_argument('-s', '--start-time', action='store', help="Only return processes with events after given date/time stamp\ (server’s clock). Format:'Y-m-d H:M:S' eastern time") parser_query.add_argument('-e', '--end-time', action='store', help="Set the maximum last update time. Format:'Y-m-d H:M:S' eastern time") parser_query.add_argument('--facet', action='store', choices=facet_args, help='stats info on single field accross query results (ex. process_name)') parser_query.add_argument('--no-warnings', action='store_true', help="Don't warn before printing large query results") parser_query.add_argument('-lh', '--logon-history', action='store_true', help="Display available logon history for given username or hostname") parser_collect = subparsers.add_parser('collect', help='perform LR collection tasks on a host') parser_collect.add_argument('sensor', help="the hostname/sensor to collect from") parser_collect.add_argument('-f', '--filepath', action='store', help='collect file') parser_collect.add_argument('-c', '--command-exec', action='store', help='command to execute') parser_collect.add_argument('-p', '--process-list', action='store_true', help='show processes running on sensor') parser_collect.add_argument('-m', '--memdump', action='store', const='ALLMEM', nargs='?', help='dump memory on a specific process-id') parser_collect.add_argument('-lr', '--regkeypath', action='store', help='List all registry values from the specified registry key.') parser_collect.add_argument('-r', '--regkeyvalue', action='store', help='Returns the associated value of the specified registry key.') parser_collect.add_argument('-i', '--info', action='store_true', help='print sensor information') parser_collect.add_argument('-gst', '--get-task', action='store_true', help='get scheduled tasks or specifc task') parser_collect.add_argument('-mc', '--multi-collect', action='store', help='path to ini file listing files and regs to collect') remediate_file_example = """Example remediate ini file: [files] file1=C:\\Users\\user\\Desktop\\testfile.txt [process_names] proc1=cmd.exe proc2=notepad++.exe [directories] directory1=C:\\Users\\user\\Desktop\\nanocore [scheduled_tasks] task1=\\monkey_task task1=\\Microsoft\\windows\\some\\flake\\task [pids] pid1=10856 [registry_paths] reg1=HKLM\\Software\\Microsoft\\Windows\\CurrentVersion\\Run\\calc reg2=HKLM\\Software\\Microsoft\\Windows\\CurrentVersion\\Run\\hippo""" parser_remediate = subparsers.add_parser('remediate', help='remediate a host') parser_remediate.add_argument('sensor', help="the hostname/sensor needing remediation") parser_remediate.add_argument('-i', '--isolate', help='toggle host isolation', default=False, action='store_true') parser_remediate.add_argument('-f', '--remediation-filepath', help="path to the remediation ini file; 'help' as the filepath for example") parser_remediate.add_argument('-dst', '--delete-scheduled-task', help="path of scheduled task to delete") parser_remediate.add_argument('-kpname', '--kill-process-name', help="kill all 
processes with this name") parser_remediate.add_argument('-kpid', '--kill-pid', help="a process id to kill") parser_remediate.add_argument('-df', '--delete-file', help="path to file needing deletion") parser_remediate.add_argument('-dr', '--delete-regkey', help="path to regkey value needing deletion") parser_remediate.add_argument('-dd', '--delete-directory', help="path to directory needing deletion") args = parser.parse_args() if args.command == 'remediate' and args.remediation_filepath == 'help': print(remediate_file_example) parser.parse_args(['remediate', '-h']) if args.command is None: print("\n\n*****") print("You must specify one of the following commands:\n") print(cbinterface_commands) print("\n*****\n\n") parser.parse_args(['-h']) #args.debug = True #if args.debug: # configure some more logging root = logging.getLogger() root.addHandler(logging.StreamHandler()) logging.getLogger("cbapi").setLevel(logging.ERROR) logging.getLogger("lerc_api").setLevel(logging.WARNING) ''' All VxStream related stuff may be removed in a future version ''' if args.command == 'vxdetect': cb = CbResponseAPI(profile='vxstream') process_list = parse_vxstream_report(cb, args.vxstream_report) if args.print_process_tree: print() print(process_list) print() return 0 # Set up environment profiles profile = None profiles = [] if args.environment: print("Using {} environment ..".format(args.environment)) profiles.append(args.environment) else: # a little hack for getting our environment type variable defined default_profile = auth.default_profile default_profile['envtype'] = 'production' query_envtype = set(args.envtypes.lower().split(',')) for profile in auth.CredentialStore("response").get_profiles(): credentials = auth.CredentialStore("response").get_credentials(profile=profile) profile_envtype = set(credentials['envtype'].lower().split(',')) if(query_envtype.issubset(profile_envtype)): profiles.append(profile) # Process Quering # if args.command == 'query': for profile in profiles: handle_proxy(profile) print("\nSearching {} environment..".format(profile)) q = CBquery(profile=profile) q.process_query(args) return 0 # Select correct environment by sensor hostname and get the sensor object sensor = None if args.command == 'collect' or args.command == 'remediate' or args.command == 'enumerate_usb': cb_results = sensor_search(profiles, args.sensor) if not isinstance(cb_results, list): # an error occured return cb_results else: if not cb_results: LOGGER.info("A sensor with hostname {} wasn't found in any environments".format(args.sensor)) return 0 elif len(cb_results) > 1: LOGGER.error("A sensor by hostname {} was found in multiple environments".format(args.sensor)) for r in cb_results: print("Results:") print("Profile {}: {} (SID:{})".format(r[1],r[0].hostname,r[0].id)) return 1 results = cb_results[0] profile = results[1] sensor = results[0] # Show USB Regmod activity if args.command == 'enumerate_usb': enumerate_usb(sensor, args.start_time) # lerc install arguments can differ by company/environment # same lazy hack to define in cb config config = {} try: default_profile = auth.default_profile default_profile['lerc_install_cmd'] = None config = auth.CredentialStore("response").get_credentials(profile=profile) except: pass # Collection # if args.command == 'collect': hyper_lr = hyperLiveResponse(sensor) if args.info: print(hyper_lr) return True # start a cb lr session lr_session = hyper_lr.go_live() if args.multi_collect: filepaths = regpaths = full_collect = None config = ConfigParser() 
config.read(args.multi_collect) try: filepaths = config.items("files") except: filepaths = [] try: regpaths = config.items("registry_paths") except: regpaths = [] try: full_collect = config.get('full_collect', 'action') except: pass if regpaths is not None: for regpath in regpaths: if isinstance(regpath, tuple): regpath = regpath[1] print("~ Trying to get {}".format(regpath)) try: result = lr_session.get_registry_value(regpath) if result: localfname = args.sensor + '_regkey_' + result['value_name'] + ".txt" with open(localfname,'wb') as f: f.write(bytes(result['value_data'], 'UTF-8')) print("\t+ Data written to: {}".format(localfname)) except Exception as e: print("[!] Error: {}".format(str(e))) if filepaths is not None: for filepath in filepaths: try: hyper_lr.getFile_with_timeout(filepath[1]) except Exception as e: print("[!] Error: {}".format(str(e))) if full_collect == 'True': return False #LR_collection(hyper_lr, args) return True elif args.filepath: hyper_lr.getFile_with_timeout(args.filepath) elif args.process_list: hyper_lr.print_processes() elif args.memdump: # get config items config = ConfigParser() config.read(CONFIG_PATH) #if config.has_section('memory'): # if cb_compress = config['memory'].getboolean('cb_default_compress') custom_compress = config['memory'].getboolean('custom_compress') custom_compress_file = config['memory']['custom_compress_file'] auto_collect_mem = config['memory'].getboolean('auto_collect_mem_file') lerc_collect_mem = config['memory'].getboolean('lerc_collect_mem') path_to_procdump = config['memory']['path_to_procdump'] if args.memdump == "ALLMEM": return hyper_lr.dump_sensor_memory(cb_compress=cb_compress, custom_compress=custom_compress, custom_compress_file=custom_compress_file, auto_collect_result=auto_collect_mem) else: return hyper_lr.dump_process_memory(args.memdump, path_to_procdump=path_to_procdump) elif args.command_exec: print("executing '{}' on {}".format(args.command_exec, args.sensor)) result = lr_session.create_process(args.command_exec, wait_timeout=60, wait_for_output=True) print("\n-------------------------") result = result.decode('utf-8') print(result + "\n-------------------------") print() elif args.regkeypath: print("\n\t{}".format(args.regkeypath)) results = lr_session.list_registry_keys(args.regkeypath) for result in results: print("\t-------------------------") print("\tName: {}".format(result['value_name'])) print("\tType: {}".format(result['value_type'])) print("\tData: {}".format(result['value_data'])) print() elif args.regkeyvalue: print("\n\t{}".format(args.regkeyvalue)) result = lr_session.get_registry_value(args.regkeyvalue) print("\t-------------------------") print("\tName: {}".format(result['value_name'])) print("\tType: {}".format(result['value_type'])) print("\tData: {}".format(result['value_data'])) print() elif args.get_task: return hyper_lr.get_scheduled_tasks() else: # perform full live response collection if config['lerc_install_cmd']: result = hyper_lr.get_lerc_status() if not result or result == 'UNINSTALLED' or result == 'UNKNOWN': if not hyper_lr.deploy_lerc(config['lerc_install_cmd']): LOGGER.warn("LERC deployment failed") else: LOGGER.info("{} environment is not configrued for LERC deployment".format(profile)) return LR_collection(hyper_lr, args) # Remediation # if args.command == 'remediate': return Remediation(sensor, args) # Process Investigation # process_tree = None if args.command == 'proc': proc = proc_search_environments(profiles, args.process) if not proc: return 1 sp = SuperProcess(proc) if 
args.proc_info: print(sp) elif args.walk_tree: sp.walk_process_tree() print() print(sp.process_tree) for process in sp.process_tree: if process.is_suppressed: print("+ [DATA SUPPRESSED] {} (PID:{}) - {}".format(process.name, process.pid, process.id)) continue print("+ {} (PID:{}) - {}".format(process.name, process.pid, process.id)) if args.filemods: process.print_filemods() args.no_analysis = True if args.netconns: process.print_netconns() args.no_analysis = True if args.regmods: process.print_regmods() args.no_analysis = True if args.unsigned_modloads: process.print_unsigned_modloads() args.no_analysis = True if args.modloads: process.print_modloads() args.no_analysis = True if args.crossprocs: process.print_crossprocs() args.no_analysis = True if args.walk_parents: sp.show_ancestry() args.no_analysis = True if args.no_analysis != True: if args.json: if args.segment_limit: print(process.events_to_json(segment_limit=args.segment_limit)) else: print(process.events_to_json()) else: process.default_print() else: print() print(sp.process_tree) if args.walk_parents: sp.show_ancestry() args.no_analysis = True if args.filemods: sp.print_filemods() args.no_analysis = True if args.netconns: sp.print_netconns() args.no_analysis = True if args.regmods: sp.print_regmods() args.no_analysis = True if args.unsigned_modloads: sp.print_unsigned_modloads() args.no_analysis = True if args.modloads: sp.print_modloads() args.no_analysis = True if args.crossprocs: sp.print_crossprocs() args.no_analysis = True if args.show_children: sp.print_child_events() args.no_analysis = True if args.no_analysis != True: if args.json: if args.segment_limit: print(sp.events_to_json(segment_limit=args.segment_limit)) else: print(sp.events_to_json()) else: sp.default_print() print() return True
0.005528
def select(self, query, model_class=None, settings=None): ''' Performs a query and returns a generator of model instances. - `query`: the SQL query to execute. - `model_class`: the model class matching the query's table, or `None` for getting back instances of an ad-hoc model. - `settings`: query settings to send as HTTP GET parameters ''' query += ' FORMAT TabSeparatedWithNamesAndTypes' query = self._substitute(query, model_class) r = self._send(query, settings, True) lines = r.iter_lines() field_names = parse_tsv(next(lines)) field_types = parse_tsv(next(lines)) model_class = model_class or ModelBase.create_ad_hoc_model(zip(field_names, field_types)) for line in lines: # skip blank line left by WITH TOTALS modifier if line: yield model_class.from_tsv(line, field_names, self.server_timezone, self)
0.004115
def _rpc_action_stmt(self, stmt: Statement, sctx: SchemaContext) -> None:
    """Handle rpc or action statement."""
    self._handle_child(RpcActionNode(), stmt, sctx)
0.011429
def add_block_widget(self, top=False):
    """ Return a select widget for blocks which can be added to this column. """
    widget = AddBlockSelect(attrs={
        'class': 'glitter-add-block-select',
    }, choices=self.add_block_options(top=top))

    return widget.render(name='', value=None)
0.006042
def collect_args(n):
    '''Returns a function that can be called `n` times with a single argument
    before returning all the args that have been passed to it in a tuple.
    Useful as a substitute for functions that can't easily be curried.

    >>> collect_args(3)(1)(2)(3)
    (1, 2, 3)
    '''
    args = []

    def arg_collector(arg):
        args.append(arg)
        if len(args) == n:
            return tuple(args)
        else:
            return arg_collector

    return arg_collector
0.001961
def drawCheck( self, painter, option, rect, state ): """ Renders a check indicator within the rectangle based on the inputed \ check state. :param painter | <QtGui.QPainter> option | <QtGui.QStyleOptionViewItem> rect | <QtGui.QRect> state | <QtCore.Qt.CheckState> """ if not self.useCheckMaps(): return super(XTreeWidgetDelegate, self).drawCheck(painter, option, rect, state) pixmap = None if state == QtCore.Qt.Checked: pixmap = self.checkOnMap() elif state == QtCore.Qt.PartiallyChecked: pixmap = self.checkPartialMap() elif state == QtCore.Qt.Unchecked: pixmap = self.checkOffMap() if type(pixmap) in (str, unicode): pixmap = QtGui.QPixmap(pixmap) if not pixmap: return x = rect.x() + (rect.width() - 16) / 2.0 y = rect.y() + (rect.height() - 16) / 2.0 painter.drawPixmap(int(x), int(y), pixmap)
0.008246
def loadSettings(self, groupName=None): """ Reads the registry items from the persistent settings store. """ groupName = groupName if groupName else self.settingsGroupName settings = QtCore.QSettings() logger.info("Reading {!r} from: {}".format(groupName, settings.fileName())) settings.beginGroup(groupName) self.clear() try: for key in settings.childKeys(): if key.startswith('item'): dct = ast.literal_eval(settings.value(key)) regItem = self._itemClass.createFromDict(dct) self.registerItem(regItem) finally: settings.endGroup()
0.004255
def str_is_well_formed(xml_str):
    """
    Args:
        xml_str : str
            DataONE API XML doc.

    Returns:
        bool: **True** if XML doc is well formed.
    """
    try:
        str_to_etree(xml_str)
    except xml.etree.ElementTree.ParseError:
        return False
    else:
        return True
0.003425
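An illustrative check with str_is_well_formed above. The XML strings are made up, and the helper str_to_etree is assumed to come from the same module as the sample.

str_is_well_formed('<root><child/></root>')    # True: tags are balanced
str_is_well_formed('<root><child></root>')     # False: the ParseError is caught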
def clean_proc(proc, wait_for_kill=10):
    '''
    Generic method for cleaning up multiprocessing procs
    '''
    # NoneType and other fun stuff need not apply
    if not proc:
        return
    try:
        waited = 0
        while proc.is_alive():
            proc.terminate()
            waited += 1
            time.sleep(0.1)
            if proc.is_alive() and (waited >= wait_for_kill):
                log.error('Process did not die with terminate(): %s', proc.pid)
                os.kill(proc.pid, signal.SIGKILL)
    except (AssertionError, AttributeError):
        # Catch AssertionError when the proc is evaluated inside the child
        # Catch AttributeError when the process dies between proc.is_alive()
        # and proc.terminate() and turns into a NoneType
        pass
0.001261
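A hypothetical cleanup call for clean_proc above, using the standard multiprocessing module; the SIGKILL escalation only applies on POSIX systems, and the logger/os/signal/time globals are assumed to be in the sample's module.

import multiprocessing
import time

p = multiprocessing.Process(target=time.sleep, args=(60,))
p.start()
clean_proc(p, wait_for_kill=5)  # keeps calling terminate(); escalates to SIGKILL if still alive after 5 attempts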
def connect_to_database(host=None, port=None, connect=False, **kwargs):
    """
    Explicitly begins a database connection for the application (if this
    function is not called, a connection is created when it is first needed).
    Takes arguments identical to pymongo.MongoClient.__init__

    @param host: the hostname to connect to
    @param port: the port to connect to
    @param connect: if True, immediately begin connecting to MongoDB in the
        background; otherwise connect on the first operation
    """
    return CONNECTION.connect(host=host, port=port, connect=connect, **kwargs)
0.003273
def get_interpolated_value(self, energy, integrated=False):
    """
    Returns the COHP for a particular energy.

    Args:
        energy: Energy to return the COHP value for.
        integrated: If True, return the integrated COHP (ICOHP) instead.
    """
    inter = {}
    for spin in self.cohp:
        if not integrated:
            inter[spin] = get_linear_interpolated_value(self.energies,
                                                        self.cohp[spin],
                                                        energy)
        elif self.icohp is not None:
            inter[spin] = get_linear_interpolated_value(self.energies,
                                                        self.icohp[spin],
                                                        energy)
        else:
            raise ValueError("ICOHP is empty.")
    return inter
0.002328
def root_hash(self):
    """Returns the root hash of this tree. (Only re-computed on change.)"""
    if self.__root_hash is None:
        self.__root_hash = (
            self.__hasher._hash_fold(self.__hashes)
            if self.__hashes else self.__hasher.hash_empty())
    return self.__root_hash
0.006173
def print_new_versions(strict=False):
    """Prints new requirement versions."""
    new_updates = []
    same_updates = []
    for req in everything_in(all_reqs):
        new_versions = []
        same_versions = []
        for ver_str in all_versions(req):
            if newer(ver_str_to_tuple(ver_str), min_versions[req], strict=True):
                new_versions.append(ver_str)
            elif not strict and newer(ver_str_to_tuple(ver_str), min_versions[req]):
                same_versions.append(ver_str)
        update_str = req + ": " + ver_tuple_to_str(min_versions[req]) + " -> " + ", ".join(
            new_versions + ["(" + v + ")" for v in same_versions],
        )
        if new_versions:
            new_updates.append(update_str)
        elif same_versions:
            same_updates.append(update_str)
    print("\n".join(new_updates + same_updates))
0.004582
def _GetBytes(partition_key):
    """Gets the bytes representing the value of the partition key.
    """
    if isinstance(partition_key, six.string_types):
        return bytearray(partition_key, encoding='utf-8')
    else:
        raise ValueError("Unsupported " + str(type(partition_key)) + " for partitionKey.")
0.00885
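Illustrative calls to _GetBytes above; the key values are made up.

_GetBytes(u'tenant-42')   # -> bytearray(b'tenant-42')
_GetBytes(42)             # raises ValueError: unsupported type for partitionKey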
def init(cls):
    """
    Bind elements to callbacks.
    """
    for el in cls.switcher_els:
        el.checked = False

    cls.bind_switcher()
    cls._draw_conspects()
    cls._create_searchable_typeahead()
0.008197
def _unique_rows_numpy(a):
    """return unique rows"""
    a = np.ascontiguousarray(a)
    unique_a = np.unique(a.view([('', a.dtype)] * a.shape[1]))
    return unique_a.view(a.dtype).reshape((unique_a.shape[0], a.shape[1]))
0.004444
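A small worked example for _unique_rows_numpy above (assumes numpy; the structured-view trick requires a uniform dtype across columns).

import numpy as np

a = np.array([[1, 2], [1, 2], [3, 4]])
_unique_rows_numpy(a)   # -> array([[1, 2], [3, 4]]); duplicate rows removed, rows sorted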
def explain_instance(self, image, classifier_fn, labels=(1,), hide_color=None, top_labels=5, num_features=100000, num_samples=1000, batch_size=10, segmentation_fn=None, distance_metric='cosine', model_regressor=None, random_seed=None): """Generates explanations for a prediction. First, we generate neighborhood data by randomly perturbing features from the instance (see __data_inverse). We then learn locally weighted linear models on this neighborhood data to explain each of the classes in an interpretable way (see lime_base.py). Args: image: 3 dimension RGB image. If this is only two dimensional, we will assume it's a grayscale image and call gray2rgb. classifier_fn: classifier prediction probability function, which takes a numpy array and outputs prediction probabilities. For ScikitClassifiers , this is classifier.predict_proba. labels: iterable with labels to be explained. hide_color: TODO top_labels: if not None, ignore labels and produce explanations for the K labels with highest prediction probabilities, where K is this parameter. num_features: maximum number of features present in explanation num_samples: size of the neighborhood to learn the linear model batch_size: TODO distance_metric: the distance metric to use for weights. model_regressor: sklearn regressor to use in explanation. Defaults to Ridge regression in LimeBase. Must have model_regressor.coef_ and 'sample_weight' as a parameter to model_regressor.fit() segmentation_fn: SegmentationAlgorithm, wrapped skimage segmentation function random_seed: integer used as random seed for the segmentation algorithm. If None, a random integer, between 0 and 1000, will be generated using the internal random number generator. Returns: An Explanation object (see explanation.py) with the corresponding explanations. """ if len(image.shape) == 2: image = gray2rgb(image) if random_seed is None: random_seed = self.random_state.randint(0, high=1000) if segmentation_fn is None: segmentation_fn = SegmentationAlgorithm('quickshift', kernel_size=4, max_dist=200, ratio=0.2, random_seed=random_seed) try: segments = segmentation_fn(image) except ValueError as e: raise e fudged_image = image.copy() if hide_color is None: for x in np.unique(segments): fudged_image[segments == x] = ( np.mean(image[segments == x][:, 0]), np.mean(image[segments == x][:, 1]), np.mean(image[segments == x][:, 2])) else: fudged_image[:] = hide_color top = labels data, labels = self.data_labels(image, fudged_image, segments, classifier_fn, num_samples, batch_size=batch_size) distances = sklearn.metrics.pairwise_distances( data, data[0].reshape(1, -1), metric=distance_metric ).ravel() ret_exp = ImageExplanation(image, segments) if top_labels: top = np.argsort(labels[0])[-top_labels:] ret_exp.top_labels = list(top) ret_exp.top_labels.reverse() for label in top: (ret_exp.intercept[label], ret_exp.local_exp[label], ret_exp.score, ret_exp.local_pred) = self.base.explain_instance_with_data( data, labels, distances, label, num_features, model_regressor=model_regressor, feature_selection=self.feature_selection) return ret_exp
0.002571
def fetch_all(self, sql, *args, **kwargs):
    """Executes an SQL SELECT query and returns all selected rows.

    :param sql: statement to execute
    :param args: parameters iterable
    :param kwargs: parameters iterable
    :return: all selected rows
    :rtype: list
    """
    with self.locked() as conn:
        return conn.query(sql, *args, **kwargs).fetch_all()
0.004914
def open_channel(self):
    """
    Open a new channel with RabbitMQ by issuing the Channel.Open RPC command.
    When RabbitMQ responds that the channel is open, the on_channel_open
    callback will be invoked by pika.
    """
    self._logger.debug('Creating a new channel')
    self._connection.channel(on_open_callback=self.on_channel_open)
0.005362
def combinetargets(targets, targetpath, mol_type='nt'): """ Creates a set of all unique sequences in a list of supplied FASTA files. Properly formats headers and sequences to be compatible with local pipelines. Splits hybrid entries. Removes illegal characters. :param targets: fasta gene targets to combine :param targetpath: folder containing the targets """ make_path(targetpath) with open(os.path.join(targetpath, 'combinedtargets.fasta'), 'w') as combined: idset = set() for target in targets: # Remove non-unicode characters present in the FASTA files cleanedstring = str() # Read in the file as binary with open(target, 'rb') as fasta: # Import all the text text = fasta.read() # Convert the binary variable to a string, ignoring non-UTF-8 characters cleanedstring += text.decode('utf-8', 'ignore') # Overwrite the file with the clean string with open(target, 'w') as fasta: fasta.write(cleanedstring) # Clean up each record for record in SeqIO.parse(target, 'fasta'): # In case FASTA records have been spliced together, allow for the splitting of # these records if '>' in record.seq: # Split the two records apart on '>' symbols record.seq, hybrid = record.seq.split('>') # Split the header from the sequence e.g. sspC:6:CP003808.1ATGGAAAGTACATTAGA... # will be split into sspC:6:CP003808.1 and ATGGAAAGTACATTAGA hybridid, seq = re.findall('(.+\\d+\\.\\d)(.+)', str(hybrid))[0] # Replace and dashes in the record.id with underscores hybridid = hybridid.replace('-', '_') # Convert the string to a seq object if mol_type == 'nt': hybridseq = Seq(seq, generic_dna) else: hybridseq = Seq(seq, generic_protein) # Create a SeqRecord of the sequence - use the sequence object and id hybridrecord = SeqRecord(hybridseq, description='', id=hybridid) # Remove and dashes or 'N's from the sequence data - makeblastdb can't handle sequences # with gaps # noinspection PyProtectedMember hybridrecord.seq._data = hybridrecord.seq._data.replace('-', '').replace('N', '') # Write the original record to the file # Extract the sequence record from each entry in the multifasta # Replace and dashes in the record.id with underscores record.id = record.id.replace('-', '_') # Remove and dashes or 'N's from the sequence data - makeblastdb can't handle sequences # with gaps # noinspection PyProtectedMember record.seq._data = record.seq._data.replace('-', '').replace('N', '') # Clear the name and description attributes of the record record.name = '' record.description = '' if record.id not in idset: SeqIO.write(record, combined, 'fasta') if hybridrecord.id not in idset: # Write the second record to file SeqIO.write(hybridrecord, combined, 'fasta') idset.add(hybridrecord.id) else: # Extract the sequence record from each entry in the multifasta # Replace and dashes in the record.id with underscores record.id = record.id.replace('-', '_') # Remove and dashes or 'N's from the sequence data - makeblastdb can't handle sequences # with gaps # noinspection PyProtectedMember record.seq._data = record.seq._data.replace('-', '').replace('N', '') # Clear the name and description attributes of the record record.name = '' record.description = '' if record.id not in idset: SeqIO.write(record, combined, 'fasta') idset.add(record.id)
0.003916
def evaluate_world_model( real_env, hparams, world_model_dir, debug_video_path, split=tf.estimator.ModeKeys.EVAL, ): """Evaluate the world model (reward accuracy).""" frame_stack_size = hparams.frame_stack_size rollout_subsequences = [] def initial_frame_chooser(batch_size): assert batch_size == len(rollout_subsequences) return np.stack([ [frame.observation.decode() for frame in subsequence[:frame_stack_size]] # pylint: disable=g-complex-comprehension for subsequence in rollout_subsequences ]) env_fn = rl.make_simulated_env_fn_from_hparams( real_env, hparams, batch_size=hparams.wm_eval_batch_size, initial_frame_chooser=initial_frame_chooser, model_dir=world_model_dir ) sim_env = env_fn(in_graph=False) subsequence_length = int( max(hparams.wm_eval_rollout_ratios) * hparams.simulated_rollout_length ) rollouts = real_env.current_epoch_rollouts( split=split, minimal_rollout_frames=(subsequence_length + frame_stack_size) ) video_writer = common_video.WholeVideoWriter( fps=10, output_path=debug_video_path, file_format="avi" ) reward_accuracies_by_length = { int(ratio * hparams.simulated_rollout_length): [] for ratio in hparams.wm_eval_rollout_ratios } for _ in range(hparams.wm_eval_num_batches): rollout_subsequences[:] = random_rollout_subsequences( rollouts, hparams.wm_eval_batch_size, subsequence_length + frame_stack_size ) eval_subsequences = [ subsequence[(frame_stack_size - 1):] for subsequence in rollout_subsequences ] # Check that the initial observation is the same in the real and simulated # rollout. sim_init_obs = sim_env.reset() def decode_real_obs(index): return np.stack([ subsequence[index].observation.decode() for subsequence in eval_subsequences # pylint: disable=cell-var-from-loop ]) real_init_obs = decode_real_obs(0) assert np.all(sim_init_obs == real_init_obs) debug_frame_batches = [] def append_debug_frame_batch(sim_obs, real_obs, sim_cum_rews, real_cum_rews, sim_rews, real_rews): """Add a debug frame.""" rews = [[sim_cum_rews, sim_rews], [real_cum_rews, real_rews]] headers = [] for j in range(len(sim_obs)): local_nps = [] for i in range(2): img = PIL_Image().new("RGB", (sim_obs.shape[-2], 11),) draw = PIL_ImageDraw().Draw(img) draw.text((0, 0), "c:{:3}, r:{:3}".format(int(rews[i][0][j]), int(rews[i][1][j])), fill=(255, 0, 0)) local_nps.append(np.asarray(img)) local_nps.append(np.zeros_like(local_nps[0])) headers.append(np.concatenate(local_nps, axis=1)) errs = absolute_hinge_difference(sim_obs, real_obs) headers = np.stack(headers) debug_frame_batches.append( # pylint: disable=cell-var-from-loop np.concatenate([headers, np.concatenate([sim_obs, real_obs, errs], axis=2)], axis=1) ) append_debug_frame_batch(sim_init_obs, real_init_obs, np.zeros(hparams.wm_eval_batch_size), np.zeros(hparams.wm_eval_batch_size), np.zeros(hparams.wm_eval_batch_size), np.zeros(hparams.wm_eval_batch_size)) (sim_cum_rewards, real_cum_rewards) = ( np.zeros(hparams.wm_eval_batch_size) for _ in range(2) ) for i in range(subsequence_length): actions = [subsequence[i].action for subsequence in eval_subsequences] (sim_obs, sim_rewards, _) = sim_env.step(actions) sim_cum_rewards += sim_rewards real_rewards = np.array([ subsequence[i + 1].reward for subsequence in eval_subsequences ]) real_cum_rewards += real_rewards for (length, reward_accuracies) in six.iteritems( reward_accuracies_by_length ): if i + 1 == length: reward_accuracies.append( np.sum(sim_cum_rewards == real_cum_rewards) / len(real_cum_rewards) ) real_obs = decode_real_obs(i + 1) append_debug_frame_batch(sim_obs, real_obs, sim_cum_rewards, 
real_cum_rewards, sim_rewards, real_rewards) for debug_frames in np.stack(debug_frame_batches, axis=1): debug_frame = None for debug_frame in debug_frames: video_writer.write(debug_frame) if debug_frame is not None: # Append two black frames for aesthetics. for _ in range(2): video_writer.write(np.zeros_like(debug_frame)) video_writer.finish_to_disk() return { "reward_accuracy/at_{}".format(length): np.mean(reward_accuracies) for (length, reward_accuracies) in six.iteritems( reward_accuracies_by_length ) }
0.008818
def _set_child(self, name, child):
    """
    Set child.

    :param name: Child name.
    :param child: Parentable object.
    """
    if not isinstance(child, Parentable):
        raise ValueError('Parentable child object expected, not {child}'.format(child=child))
    child._set_parent(self)
    self._store_child(name, child)
0.008174
def runSearchVariantSets(self, request):
    """
    Runs the specified SearchVariantSetsRequest.
    """
    return self.runSearchRequest(
        request, protocol.SearchVariantSetsRequest,
        protocol.SearchVariantSetsResponse,
        self.variantSetsGenerator)
0.006711
def get_identities(self, item):
    """Return the identities from an item"""
    item = item['data']
    if 'owner' in item:
        owner = self.get_sh_identity(item['owner'])
        yield owner
    if 'user' in item:
        user = self.get_sh_identity(item['user'])
        yield user
    if 'mentor' in item:
        mentor = self.get_sh_identity(item['mentor'])
        yield mentor
0.004619
def _update_zipimporter_cache(normalized_path, cache, updater=None): """ Update zipimporter cache data for a given normalized path. Any sub-path entries are processed as well, i.e. those corresponding to zip archives embedded in other zip archives. Given updater is a callable taking a cache entry key and the original entry (after already removing the entry from the cache), and expected to update the entry and possibly return a new one to be inserted in its place. Returning None indicates that the entry should not be replaced with a new one. If no updater is given, the cache entries are simply removed without any additional processing, the same as if the updater simply returned None. """ for p in _collect_zipimporter_cache_entries(normalized_path, cache): # N.B. pypy's custom zipimport._zip_directory_cache implementation does # not support the complete dict interface: # * Does not support item assignment, thus not allowing this function # to be used only for removing existing cache entries. # * Does not support the dict.pop() method, forcing us to use the # get/del patterns instead. For more detailed information see the # following links: # https://bitbucket.org/pypa/setuptools/issue/202/more-robust-zipimporter-cache-invalidation#comment-10495960 # https://bitbucket.org/pypy/pypy/src/dd07756a34a41f674c0cacfbc8ae1d4cc9ea2ae4/pypy/module/zipimport/interp_zipimport.py#cl-99 old_entry = cache[p] del cache[p] new_entry = updater and updater(p, old_entry) if new_entry is not None: cache[p] = new_entry
0.000586
def build_input_partitions(cls, name='inputTablePartitions', input_name='input'):
    """
    Build an input table partition parameter

    :param name: parameter name
    :type name: str
    :param input_name: bind input port name
    :type input_name: str
    :return: input description
    :rtype: ParamDef
    """
    obj = cls(name)
    obj.exporter = 'get_input_partitions'
    obj.input_name = input_name
    return obj
0.006263
def convert(self, vroot, entry_variables): """ All functions are replaced with the same `new` function. Args: vroot (:obj:`Variable`): NNabla Variable entry_variables (:obj:`Variable`): Entry variable from which the conversion starts. """ self.graph_info = GraphInfo(vroot) self.entry_variables = entry_variables with nn.parameter_scope(self.name): # Function loop in the forward order for t, func in enumerate(self.graph_info.funcs): # TODO: error check # Batch normalization check, then skip if func.name == "BatchNormalization": i0 = func.inputs[0] bn_func = func # Test mode check if bn_func.info.args["batch_stat"] == False: # `Target Func -> BN` check from BN if i0.parent.info.type_name in self.inner_prod_functions: nn.logger.info("{} is skipped.".format(func.name)) continue # `Target Func -> BN` conversion if func.name in self.inner_prod_functions: inner_prod_func = func o0 = inner_prod_func.outputs[0] fs = self.graph_info.variable_to_funcs[o0] # No branch check #TODO: branching check (really needed?) if fs is not None and len(fs) == 1: # `Target Func -> BN` check bn_func = fs[0] if bn_func.name == "BatchNormalization": # Test mode check if bn_func.info.args["batch_stat"] == False: # Perform `Target Func -> BN` conversion nn.logger.info("BatchNormalization parameters are folded to " "the preceding convolution.") o = self._inner_prod_bn_conversion( inner_prod_func, bn_func) continue # Identity conversion o = self._identity_conversion(func) self.end_variable = o return self.end_variable
0.002951
def _coords2idx(self, coords):
    """
    Converts from sky coordinates to pixel indices.

    Args:
        coords (:obj:`astropy.coordinates.SkyCoord`): Sky coordinates.

    Returns:
        Pixel indices of the coordinates, with the same shape as the input
        coordinates. Pixels which are outside the map are given an index
        equal to the number of pixels in the map.
    """
    x = self._coords2vec(coords)
    idx = self._kd.query(x, p=self._metric_p,
                         distance_upper_bound=self._max_pix_scale)
    return idx[1]
0.003284
def clean(self):
    '''
    priorDays is required for Generic Repeated Expenses to avoid infinite loops
    '''
    if not self.priorDays and not self.startDate:
        raise ValidationError(_(
            'Either a start date or an "up to __ days in the past" limit is required ' +
            'for repeated expense rules that are not associated with a venue or a staff member.'
        ))
    super(GenericRepeatedExpense, self).clean()
0.013072
def add(self, items, force=True, fprogress=lambda *args: None, path_rewriter=None, write=True, write_extension_data=False): """Add files from the working tree, specific blobs or BaseIndexEntries to the index. :param items: Multiple types of items are supported, types can be mixed within one call. Different types imply a different handling. File paths may generally be relative or absolute. - path string strings denote a relative or absolute path into the repository pointing to an existing file, i.e. CHANGES, lib/myfile.ext, '/home/gitrepo/lib/myfile.ext'. Absolute paths must start with working tree directory of this index's repository to be considered valid. For example, if it was initialized with a non-normalized path, like `/root/repo/../repo`, absolute paths to be added must start with `/root/repo/../repo`. Paths provided like this must exist. When added, they will be written into the object database. PathStrings may contain globs, such as 'lib/__init__*' or can be directories like 'lib', the latter ones will add all the files within the dirctory and subdirectories. This equals a straight git-add. They are added at stage 0 - Blob or Submodule object Blobs are added as they are assuming a valid mode is set. The file they refer to may or may not exist in the file system, but must be a path relative to our repository. If their sha is null ( 40*0 ), their path must exist in the file system relative to the git repository as an object will be created from the data at the path. The handling now very much equals the way string paths are processed, except that the mode you have set will be kept. This allows you to create symlinks by settings the mode respectively and writing the target of the symlink directly into the file. This equals a default Linux-Symlink which is not dereferenced automatically, except that it can be created on filesystems not supporting it as well. Please note that globs or directories are not allowed in Blob objects. They are added at stage 0 - BaseIndexEntry or type Handling equals the one of Blob objects, but the stage may be explicitly set. Please note that Index Entries require binary sha's. :param force: **CURRENTLY INEFFECTIVE** If True, otherwise ignored or excluded files will be added anyway. As opposed to the git-add command, we enable this flag by default as the API user usually wants the item to be added even though they might be excluded. :param fprogress: Function with signature f(path, done=False, item=item) called for each path to be added, one time once it is about to be added where done==False and once after it was added where done=True. item is set to the actual item we handle, either a Path or a BaseIndexEntry Please note that the processed path is not guaranteed to be present in the index already as the index is currently being processed. :param path_rewriter: Function with signature (string) func(BaseIndexEntry) function returning a path for each passed entry which is the path to be actually recorded for the object created from entry.path. This allows you to write an index which is not identical to the layout of the actual files on your hard-disk. If not None and ``items`` contain plain paths, these paths will be converted to Entries beforehand and passed to the path_rewriter. Please note that entry.path is relative to the git repository. :param write: If True, the index will be written once it was altered. Otherwise the changes only exist in memory and are not available to git commands. 
:param write_extension_data: If True, extension data will be written back to the index. This can lead to issues in case it is containing the 'TREE' extension, which will cause the `git commit` command to write an old tree, instead of a new one representing the now changed index. This doesn't matter if you use `IndexFile.commit()`, which ignores the `TREE` extension altogether. You should set it to True if you intend to use `IndexFile.commit()` exclusively while maintaining support for third-party extensions. Besides that, you can usually safely ignore the built-in extensions when using GitPython on repositories that are not handled manually at all. All current built-in extensions are listed here: http://opensource.apple.com/source/Git/Git-26/src/git-htmldocs/technical/index-format.txt :return: List(BaseIndexEntries) representing the entries just actually added. :raise OSError: if a supplied Path did not exist. Please note that BaseIndexEntry Objects that do not have a null sha will be added even if their paths do not exist. """ # sort the entries into strings and Entries, Blobs are converted to entries # automatically # paths can be git-added, for everything else we use git-update-index paths, entries = self._preprocess_add_items(items) entries_added = [] # This code needs a working tree, therefore we try not to run it unless required. # That way, we are OK on a bare repository as well. # If there are no paths, the rewriter has nothing to do either if paths: entries_added.extend(self._entries_for_paths(paths, path_rewriter, fprogress, entries)) # HANDLE ENTRIES if entries: null_mode_entries = [e for e in entries if e.mode == 0] if null_mode_entries: raise ValueError( "At least one Entry has a null-mode - please use index.remove to remove files for clarity") # END null mode should be remove # HANLDE ENTRY OBJECT CREATION # create objects if required, otherwise go with the existing shas null_entries_indices = [i for i, e in enumerate(entries) if e.binsha == Object.NULL_BIN_SHA] if null_entries_indices: @git_working_dir def handle_null_entries(self): for ei in null_entries_indices: null_entry = entries[ei] new_entry = self._store_path(null_entry.path, fprogress) # update null entry entries[ei] = BaseIndexEntry( (null_entry.mode, new_entry.binsha, null_entry.stage, null_entry.path)) # END for each entry index # end closure handle_null_entries(self) # END null_entry handling # REWRITE PATHS # If we have to rewrite the entries, do so now, after we have generated # all object sha's if path_rewriter: for i, e in enumerate(entries): entries[i] = BaseIndexEntry((e.mode, e.binsha, e.stage, path_rewriter(e))) # END for each entry # END handle path rewriting # just go through the remaining entries and provide progress info for i, entry in enumerate(entries): progress_sent = i in null_entries_indices if not progress_sent: fprogress(entry.path, False, entry) fprogress(entry.path, True, entry) # END handle progress # END for each enty entries_added.extend(entries) # END if there are base entries # FINALIZE # add the new entries to this instance for entry in entries_added: self.entries[(entry.path, 0)] = IndexEntry.from_base(entry) if write: self.write(ignore_extension_data=not write_extension_data) # END handle write return entries_added
0.00569
def create_timedelta(timespec):
    """Utility function to translate DD:HH:MM:SS into a timedelta object."""
    duration = timespec.split(':')
    seconds = int(duration[-1])
    minutes = 0
    hours = 0
    days = 0
    if len(duration) > 1:
        minutes = int(duration[-2])
    if len(duration) > 2:
        hours = int(duration[-3])
    if len(duration) > 3:
        days = int(duration[-4])
    return datetime.timedelta(days=days, seconds=seconds, minutes=minutes, hours=hours)
0.00409
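Illustrative calls to create_timedelta above; shorter specs simply omit the leading fields.

create_timedelta('02:30')        # -> timedelta(minutes=2, seconds=30)
create_timedelta('1:02:03:04')   # -> timedelta(days=1, hours=2, minutes=3, seconds=4)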
def post_unpack_merkleblock(d, f): """ A post-processing "post_unpack" to merkleblock messages. It validates the merkle proofs (throwing an exception if there's an error), and returns the list of transaction hashes in "tx_hashes". The transactions are supposed to be sent immediately after the merkleblock message. """ level_widths = [] count = d["total_transactions"] while count > 1: level_widths.append(count) count += 1 count //= 2 level_widths.append(1) level_widths.reverse() tx_acc = [] flags = d["flags"] hashes = list(reversed(d["hashes"])) left_hash, flag_index = _recurse(level_widths, 0, 0, hashes, flags, 0, tx_acc) if len(hashes) > 0: raise ValueError("extra hashes: %s" % hashes) idx, r = divmod(flag_index-1, 8) if idx != len(flags) - 1: raise ValueError("not enough flags consumed") if flags[idx] > (1 << (r+1))-1: raise ValueError("unconsumed 1 flag bits set") if left_hash != d["header"].merkle_root: raise ValueError( "merkle root %s does not match calculated hash %s" % ( b2h_rev(d["header"].merkle_root), b2h_rev(left_hash))) d["tx_hashes"] = tx_acc return d
0.002381
def run(self):
    '''This method runs the plugin in the appropriate mode parsed from the
    command line options.
    '''
    handle = 0
    handlers = {
        Modes.ONCE: once,
        Modes.CRAWL: crawl,
        Modes.INTERACTIVE: interactive,
    }
    handler = handlers[self.mode]
    patch_sysargv(self.url or 'plugin://%s/' % self.plugin.id, handle)
    return handler(self.plugin)
0.004535
def post(self, request, format=None): """ Add a new Channel. """ data = request.data.copy() # Get chat type record try: ct = ChatType.objects.get(pk=data.pop("chat_type")) data["chat_type"] = ct except ChatType.DoesNotExist: return typeNotFound404 if not self.is_path_unique( None, data["publish_path"], ct.publish_path ): return notUnique400 # Get user record try: u = User.objects.get(pk=data.pop("owner")) data["owner"] = u except User.DoesNotExist: return userNotFound404 c = Channel(**data) c.save() self.handle_webhook(c) return Response( { "text": "Channel saved.", "method": "POST", "saved": ChannelCMSSerializer(c).data, }, 200, )
0.002081
def forwardCheck(self, variables, domains, assignments, _unassigned=Unassigned): """ Helper method for generic forward checking Currently, this method acts only when there's a single unassigned variable. @param variables: Variables affected by that constraint, in the same order provided by the user @type variables: sequence @param domains: Dictionary mapping variables to their domains @type domains: dict @param assignments: Dictionary mapping assigned variables to their current assumed value @type assignments: dict @return: Boolean value stating if this constraint is currently broken or not @rtype: bool """ unassignedvariable = _unassigned for variable in variables: if variable not in assignments: if unassignedvariable is _unassigned: unassignedvariable = variable else: break else: if unassignedvariable is not _unassigned: # Remove from the unassigned variable domain's all # values which break our variable's constraints. domain = domains[unassignedvariable] if domain: for value in domain[:]: assignments[unassignedvariable] = value if not self(variables, domains, assignments): domain.hideValue(value) del assignments[unassignedvariable] if not domain: return False return True
0.001748
def run(self, message): """Internal instance method run by worker process to actually run the task callable.""" the_callable = self.func_from_info() try: task_message = dict( task=self, channel_message=message, ) the_callable(task_message) finally: if self.end_running < self.next_run: self.enabled=False Channel(KILL_TASK_CHANNEL).send({'id': self.pk}) return if self.iterations == 0: return else: self.iterations -= 1 if self.iterations == 0: self.enabled = False Channel(KILL_TASK_CHANNEL).send({'id':self.pk}) self.save()
0.00615
def static_dag(job, uuid, rg_line, inputs): """ Prefer this here as it allows us to pull the job functions from other jobs without rewrapping the job functions back together. bwa_inputs: Input arguments to be passed to BWA. adam_inputs: Input arguments to be passed to ADAM. gatk_preprocess_inputs: Input arguments to be passed to GATK preprocessing. gatk_adam_call_inputs: Input arguments to be passed to GATK haplotype caller for the result of ADAM preprocessing. gatk_gatk_call_inputs: Input arguments to be passed to GATK haplotype caller for the result of GATK preprocessing. """ # get work directory work_dir = job.fileStore.getLocalTempDir() inputs.cpu_count = cpu_count() inputs.maxCores = sys.maxint args = {'uuid': uuid, 's3_bucket': inputs.s3_bucket, 'sequence_dir': inputs.sequence_dir, 'dir_suffix': inputs.dir_suffix} # get head BWA alignment job function and encapsulate it inputs.rg_line = rg_line inputs.output_dir = 's3://{s3_bucket}/alignment{dir_suffix}'.format(**args) bwa = job.wrapJobFn(download_reference_files, inputs, [[uuid, ['s3://{s3_bucket}/{sequence_dir}/{uuid}_1.fastq.gz'.format(**args), 's3://{s3_bucket}/{sequence_dir}/{uuid}_2.fastq.gz'.format(**args)]]]).encapsulate() # get head ADAM preprocessing job function and encapsulate it adam_preprocess = job.wrapJobFn(static_adam_preprocessing_dag, inputs, 's3://{s3_bucket}/alignment{dir_suffix}/{uuid}.bam'.format(**args), 's3://{s3_bucket}/analysis{dir_suffix}/{uuid}'.format(**args), suffix='.adam').encapsulate() # Configure options for Toil Germline pipeline. This function call only runs the preprocessing steps. gatk_preprocessing_inputs = copy.deepcopy(inputs) gatk_preprocessing_inputs.suffix = '.gatk' gatk_preprocessing_inputs.preprocess = True gatk_preprocessing_inputs.preprocess_only = True gatk_preprocessing_inputs.output_dir = 's3://{s3_bucket}/analysis{dir_suffix}'.format(**args) # get head GATK preprocessing job function and encapsulate it gatk_preprocess = job.wrapJobFn(run_gatk_germline_pipeline, GermlineSample(uuid, 's3://{s3_bucket}/alignment{dir_suffix}/{uuid}.bam'.format(**args), None, # Does not require second URL or RG_Line None), gatk_preprocessing_inputs).encapsulate() # Configure options for Toil Germline pipeline for preprocessed ADAM BAM file. adam_call_inputs = inputs adam_call_inputs.suffix = '.adam' adam_call_inputs.sorted = True adam_call_inputs.preprocess = False adam_call_inputs.run_vqsr = False adam_call_inputs.joint_genotype = False adam_call_inputs.output_dir = 's3://{s3_bucket}/analysis{dir_suffix}'.format(**args) # get head GATK haplotype caller job function for the result of ADAM preprocessing and encapsulate it gatk_adam_call = job.wrapJobFn(run_gatk_germline_pipeline, GermlineSample(uuid, 's3://{s3_bucket}/analysis{dir_suffix}/{uuid}/{uuid}.adam.bam'.format(**args), None, None), adam_call_inputs).encapsulate() # Configure options for Toil Germline pipeline for preprocessed GATK BAM file. 
gatk_call_inputs = copy.deepcopy(inputs) gatk_call_inputs.sorted = True gatk_call_inputs.preprocess = False gatk_call_inputs.run_vqsr = False gatk_call_inputs.joint_genotype = False gatk_call_inputs.output_dir = 's3://{s3_bucket}/analysis{dir_suffix}'.format(**args) # get head GATK haplotype caller job function for the result of GATK preprocessing and encapsulate it gatk_gatk_call = job.wrapJobFn(run_gatk_germline_pipeline, GermlineSample(uuid, 'S3://{s3_bucket}/analysis{dir_suffix}/{uuid}/{uuid}.gatk.bam'.format(**args), None, None), gatk_call_inputs).encapsulate() # wire up dag if not inputs.skip_alignment: job.addChild(bwa) if (inputs.pipeline_to_run == "adam" or inputs.pipeline_to_run == "both"): if inputs.skip_preprocessing: job.addChild(gatk_adam_call) else: if inputs.skip_alignment: job.addChild(adam_preprocess) else: bwa.addChild(adam_preprocess) adam_preprocess.addChild(gatk_adam_call) if (inputs.pipeline_to_run == "gatk" or inputs.pipeline_to_run == "both"): if inputs.skip_preprocessing: job.addChild(gatk_gatk_call) else: if inputs.skip_alignment: job.addChild(gatk_preprocess) else: bwa.addChild(gatk_preprocess) gatk_preprocess.addChild(gatk_gatk_call)
0.00385
def data_group_association(self, xid):
    """Return group dict array following all associations.

    Args:
        xid (str): The xid of the group to retrieve associations.

    Returns:
        list: A list of group dicts.
    """
    groups = []
    group_data = None

    # get group data from one of the arrays
    if self.groups.get(xid) is not None:
        group_data = self.groups.get(xid)
        del self.groups[xid]
    elif self.groups_shelf.get(xid) is not None:
        group_data = self.groups_shelf.get(xid)
        del self.groups_shelf[xid]

    if group_data is not None:
        # convert any obj into dict and process file data
        group_data = self.data_group_type(group_data)
        groups.append(group_data)

        # recursively get associations
        for assoc_xid in group_data.get('associatedGroupXid', []):
            groups.extend(self.data_group_association(assoc_xid))

    return groups
0.001957
def plot_qq_exp(fignum, I, title, subplot=False):
    """
    plots data against an exponential distribution in 0=>90.

    Parameters
    _________
    fignum : matplotlib figure number
    I : data
    title : plot title
    subplot : boolean, if True plot as subplot with 1 row, two columns with fignum the plot number
    """
    if subplot == True:
        plt.subplot(1, 2, fignum)
    else:
        plt.figure(num=fignum)
    X, Y, dpos, dneg = [], [], 0., 0.
    rad = old_div(np.pi, 180.)
    xsum = 0
    for i in I:
        theta = (90. - i) * rad
        X.append(1. - np.cos(theta))
        xsum += X[-1]
    X.sort()
    n = float(len(X))
    kappa = old_div((n - 1.), xsum)
    for i in range(len(X)):
        p = old_div((float(i) - 0.5), n)
        Y.append(-np.log(1. - p))
        f = 1. - np.exp(-kappa * X[i])
        ds = old_div(float(i), n) - f
        if dpos < ds:
            dpos = ds
        ds = f - old_div((float(i) - 1.), n)
        if dneg < ds:
            dneg = ds
    if dneg > dpos:
        ds = dneg
    else:
        ds = dpos
    Me = (ds - (old_div(0.2, n))) * (np.sqrt(n) + 0.26 + (old_div(0.5, (np.sqrt(n)))))  # Eq. 5.15 from Fisher et al. (1987)
    plt.plot(Y, X, 'ro')
    bounds = plt.axis()
    plt.axis([0, bounds[1], 0., bounds[3]])
    notestr = 'N: ' + '%i' % (n)
    plt.text(.1 * bounds[1], .9 * bounds[3], notestr)
    notestr = 'Me: ' + '%7.3f' % (Me)
    plt.text(.1 * bounds[1], .8 * bounds[3], notestr)
    if Me > 1.094:
        notestr = "Not Exponential"
    else:
        notestr = "Exponential (95%)"
    plt.text(.1 * bounds[1], .7 * bounds[3], notestr)
    plt.title(title)
    plt.xlabel('Exponential Quantile')
    plt.ylabel('Data Quantile')
    return Me, 1.094
0.002822
def do_page_truncate(self, args: List[str]):
    """Read in a text file and display its output in a pager, truncating long lines if they don't fit.

    Truncated lines can still be accessed by scrolling to the right using the arrow keys.

    Usage:  page_chop <file_path>
    """
    if not args:
        self.perror('page_truncate requires a path to a file as an argument', traceback_war=False)
        return
    self.page_file(args[0], chop=True)
0.010373
def default_select(identifier, all_entry_points):  # pylint: disable=inconsistent-return-statements
    """
    Raise an exception when we have ambiguous entry points.
    """
    if len(all_entry_points) == 0:
        raise PluginMissingError(identifier)
    elif len(all_entry_points) == 1:
        return all_entry_points[0]
    elif len(all_entry_points) > 1:
        raise AmbiguousPluginError(all_entry_points)
0.004773
def transform_header(mtype_name):
    '''Add header to json output to wrap around distribution data.
    '''
    head_dict = OrderedDict()
    head_dict["m-type"] = mtype_name
    head_dict["components"] = defaultdict(OrderedDict)
    return head_dict
0.003953
def MAE(x1, x2=-1):
    """
    Mean absolute error - this function accepts two series of data or directly one series with error.

    **Args:**

    * `x1` - first data series or error (1d array)

    **Kwargs:**

    * `x2` - second series (1d array) if first series was not error directly,\\
      then this should be the second series

    **Returns:**

    * `e` - MAE of error (float) obtained directly from `x1`, \\
      or as a difference of `x1` and `x2`
    """
    e = get_valid_error(x1, x2)
    return np.sum(np.abs(e)) / float(len(e))
0.001792
def peak_interval(data, alpha=_alpha, npoints=_npoints):
    """
    Identify interval using Gaussian kernel density estimator.
    """
    peak = kde_peak(data,npoints)
    x = np.sort(data.flat); n = len(x)
    # The number of entries in the interval
    window = int(np.rint((1.0-alpha)*n))
    # The start, stop, and width of all possible intervals
    starts = x[:n-window]; ends = x[window:]
    widths = ends - starts
    # Just the intervals containing the peak
    select = (peak >= starts) & (peak <= ends)
    widths = widths[select]
    if len(widths) == 0:
        raise ValueError('Too few elements for interval calculation')
    min_idx = np.argmin(widths)
    lo = x[min_idx]
    hi = x[min_idx+window]
    return interval(peak,lo,hi)
0.008
def _nonempty_project(string):
    """
    Argparse validator for ensuring a workspace is provided
    """
    value = str(string)
    if len(value) == 0:
        msg = "No project provided and no default project configured"
        raise argparse.ArgumentTypeError(msg)
    return value
0.003484
def load_tmp_dh(self, dhfile):
    """
    Load parameters for Ephemeral Diffie-Hellman

    :param dhfile: The file to load EDH parameters from (``bytes`` or
        ``unicode``).

    :return: None
    """
    dhfile = _path_string(dhfile)

    bio = _lib.BIO_new_file(dhfile, b"r")
    if bio == _ffi.NULL:
        _raise_current_error()
    bio = _ffi.gc(bio, _lib.BIO_free)

    dh = _lib.PEM_read_bio_DHparams(bio, _ffi.NULL, _ffi.NULL, _ffi.NULL)
    dh = _ffi.gc(dh, _lib.DH_free)
    _lib.SSL_CTX_set_tmp_dh(self._context, dh)
0.003384
def check_the_end_flag(self, state_key):
    '''
    Check the end flag.

    If this return value is `True`, the learning is end.

    Args:
        state_key:    The key of state in `self.t`.

    Returns:
        bool
    '''
    # As a rule, the learning can not be stopped.
    x, y = state_key
    end_point_tuple = np.where(self.__map_arr == self.__end_point_label)
    end_point_x_arr, end_point_y_arr = end_point_tuple
    if x == end_point_x_arr[0] and y == end_point_y_arr[0]:
        return True
    else:
        return False
0.004918
def fetch_items(self, category, **kwargs): """Fetch questions from the Kitsune url :param category: the category of items to fetch :param kwargs: backend arguments :returns: a generator of items """ offset = kwargs['offset'] logger.info("Looking for questions at url '%s' using offset %s", self.url, str(offset)) nquestions = 0 # number of questions processed tquestions = 0 # number of questions from API data equestions = 0 # number of questions dropped by errors # Always get complete pages so the first item is always # the first one in the page page = int(offset / KitsuneClient.ITEMS_PER_PAGE) page_offset = page * KitsuneClient.ITEMS_PER_PAGE # drop questions from page before the offset drop_questions = offset - page_offset current_offset = offset questions_page = self.client.get_questions(offset) while True: try: raw_questions = next(questions_page) except StopIteration: break except requests.exceptions.HTTPError as e: # Continue with the next page if it is a 500 error if e.response.status_code == 500: logger.exception(e) logger.error("Problem getting Kitsune questions. " "Loosing %i questions. Going to the next page.", KitsuneClient.ITEMS_PER_PAGE) equestions += KitsuneClient.ITEMS_PER_PAGE current_offset += KitsuneClient.ITEMS_PER_PAGE questions_page = self.client.get_questions(current_offset) continue else: # If it is another error just propagate the exception raise e try: questions_data = json.loads(raw_questions) tquestions = questions_data['count'] questions = questions_data['results'] except (ValueError, KeyError) as ex: logger.error(ex) cause = ("Bad JSON format for mozilla_questions: %s" % (raw_questions)) raise ParseError(cause=cause) for question in questions: if drop_questions > 0: # Remove extra questions due to page base retrieval drop_questions -= 1 continue question['offset'] = current_offset current_offset += 1 question['answers_data'] = [] for raw_answers in self.client.get_question_answers(question['id']): answers = json.loads(raw_answers)['results'] question['answers_data'] += answers yield question nquestions += 1 logger.debug("Questions: %i/%i", nquestions + offset, tquestions) logger.info("Total number of questions: %i (%i total)", nquestions, tquestions) logger.info("Questions with errors dropped: %i", equestions)
0.001889
def delete(self, key):
    """
    Remove a key from the cache.
    """
    if key in self.cache:
        self.cache.pop(key, None)
0.013245
def ip_allocate(self, public=False):
    """
    Allocates a new :any:`IPAddress` for this Instance.  Additional public
    IPs require justification, and you may need to open a :any:`SupportTicket`
    before you can add one.  You may only have, at most, one private IP per
    Instance.

    :param public: If the new IP should be public or private.  Defaults to private.
    :type public: bool

    :returns: The new IPAddress
    :rtype: IPAddress
    """
    result = self._client.post(
        "{}/ips".format(Instance.api_endpoint),
        model=self,
        data={
            "type": "ipv4",
            "public": public,
        })

    if not 'address' in result:
        raise UnexpectedResponseError('Unexpected response allocating IP!',
                                      json=result)

    i = IPAddress(self._client, result['address'], result)
    return i
0.004053
def toarray(self):
    """
    Returns the contents as a local array.

    Will likely cause memory problems for large objects.
    """
    rdd = self._rdd if self._ordered else self._rdd.sortByKey()
    x = rdd.values().collect()
    return asarray(x).reshape(self.shape)
0.006667
def _validate_list(self, input_list, schema_list, path_to_root, object_title=''): ''' a helper method for recursively validating items in a list :return: input_list ''' # construct rules for list and items rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root) list_rules = self.keyMap[rules_path_to_root] initial_key = rules_path_to_root + '[0]' item_rules = self.keyMap[initial_key] # construct list error report template list_error = { 'object_title': object_title, 'model_schema': self.schema, 'input_criteria': list_rules, 'failed_test': 'value_datatype', 'input_path': path_to_root, 'error_value': 0, 'error_code': 4001 } # validate list rules if 'min_size' in list_rules.keys(): if len(input_list) < list_rules['min_size']: list_error['failed_test'] = 'min_size' list_error['error_value'] = len(input_list) list_error['error_code'] = 4031 raise InputValidationError(list_error) if 'max_size' in list_rules.keys(): if len(input_list) > list_rules['max_size']: list_error['failed_test'] = 'max_size' list_error['error_value'] = len(input_list) list_error['error_code'] = 4032 raise InputValidationError(list_error) # construct item error report template item_error = { 'object_title': object_title, 'model_schema': self.schema, 'input_criteria': item_rules, 'failed_test': 'value_datatype', 'input_path': initial_key, 'error_value': None, 'error_code': 4001 } # validate datatype of items for i in range(len(input_list)): input_path = path_to_root + '[%s]' % i item = input_list[i] item_error['input_path'] = input_path try: item_index = self._datatype_classes.index(item.__class__) except: item_error['error_value'] = item.__class__.__name__ raise InputValidationError(item_error) item_type = self._datatype_names[item_index] item_error['error_value'] = item if item_rules['value_datatype'] == 'null': pass else: if item_type != item_rules['value_datatype']: raise InputValidationError(item_error) # call appropriate validation sub-routine for datatype of item if item_type == 'boolean': input_list[i] = self._validate_boolean(item, input_path, object_title) elif item_type == 'number': input_list[i] = self._validate_number(item, input_path, object_title) elif item_type == 'string': input_list[i] = self._validate_string(item, input_path, object_title) elif item_type == 'map': input_list[i] = self._validate_dict(item, schema_list[0], input_path, object_title) elif item_type == 'list': input_list[i] = self._validate_list(item, schema_list[0], input_path, object_title) # validate unique values in list if 'unique_values' in list_rules.keys(): if len(set(input_list)) < len(input_list): list_error['failed_test'] = 'unique_values' list_error['error_value'] = input_list list_error['error_code'] = 4033 raise InputValidationError(list_error) # TODO: validate top-level item values against identical to reference # TODO: run lambda function and call validation url return input_list
0.003103
def accessible_to(self, user):
    """
    returns all the items that are accessible to the specified user
    if user is not authenticated will return public items

    :param user: an user instance
    """
    if user.is_superuser:
        try:
            queryset = self.get_queryset()
        except AttributeError:
            queryset = self
    elif user.is_authenticated():
        # get user group (higher id)
        group = user.groups.all().order_by('-id')[0]
        queryset = self.filter(access_level__lte=ACCESS_LEVELS.get(group.name))
    else:
        queryset = self.filter(access_level__lte=ACCESS_LEVELS.get('public'))

    return queryset
0.006793
def create_notes_folder(self, title, parentid=""):
    """Create new folder

    :param title: The title of the folder to create
    :param parentid: The UUID of the parent folder
    """
    if self.standard_grant_type is not "authorization_code":
        raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")

    response = self._req('/notes/folders/create', post_data={
        'title' : title,
        'parentid' : parentid
    })

    return response
0.008651
def delete_target_group(name, region=None, key=None, keyid=None, profile=None):
    '''
    Delete target group.

    name
        (string) - Target Group Name or Amazon Resource Name (ARN).

    returns
        (bool) - True on success, False on failure.

    CLI example:

    .. code-block:: bash

        salt myminion boto_elbv2.delete_target_group arn:aws:elasticloadbalancing:us-west-2:644138682826:targetgroup/learn1give1-api/414788a16b5cf163
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)

    if not target_group_exists(name, region, key, keyid, profile):
        return True

    try:
        if name.startswith('arn:aws:elasticloadbalancing'):
            conn.delete_target_group(TargetGroupArn=name)
            log.info('Deleted target group %s', name)
        else:
            tg_info = conn.describe_target_groups(Names=[name])
            if len(tg_info['TargetGroups']) != 1:
                return False
            arn = tg_info['TargetGroups'][0]['TargetGroupArn']
            conn.delete_target_group(TargetGroupArn=arn)
            log.info('Deleted target group %s ARN %s', name, arn)
        return True
    except ClientError as error:
        log.error('Failed to delete target group %s', name,
                  exc_info_on_loglevel=logging.DEBUG)
    return False
0.001399
def isInside(self, point, tol=0.0001):
    """
    Return True if point is inside a polydata closed surface.
    """
    poly = self.polydata(True)
    points = vtk.vtkPoints()
    points.InsertNextPoint(point)
    pointsPolydata = vtk.vtkPolyData()
    pointsPolydata.SetPoints(points)
    sep = vtk.vtkSelectEnclosedPoints()
    sep.SetTolerance(tol)
    sep.CheckSurfaceOff()
    sep.SetInputData(pointsPolydata)
    sep.SetSurfaceData(poly)
    sep.Update()
    return sep.IsInside(0)
0.00365
def qteSetWidgetSignature(self, widgetSignatures: (str, tuple, list)):
    """
    Specify the widget signatures with which this macro is compatible.

    Qtmacs uses this information at run time to determine if this
    macro is compatible with a particular widget, as specified by the
    widget's signature.

    Note that this function overwrites all previously set values.

    |Args|

    * ``*widgetSignatures`` (**str, tuple, list**): widget signatures
      as a string, or tuple/list of strings.

    |Returns|

    * **None**

    |Raises|

    * **QtmacsArgumentError** if at least one argument has an invalid type.
    """
    # Convert the argument to a tuple if it is not already a tuple
    # or list.
    if not isinstance(widgetSignatures, (tuple, list)):
        widgetSignatures = widgetSignatures,

    # Ensure that all arguments in the tuple/list are strings.
    for idx, val in enumerate(widgetSignatures):
        if not isinstance(val, str):
            args = ('widgetSignatures', 'str', inspect.stack()[0][3])
            raise QtmacsArgumentError(*args)

    # Store the compatible widget signatures as a tuple (of strings).
    self._qteWidgetSignatures = tuple(widgetSignatures)
0.001517
def push_func(self, cuin, callback):
    """Push a function for dfp.

    :param cuin: str,unicode: Callback Unique Identifier Name.
    :param callback: callable: Corresponding to the cuin to perform a function.
    :raises: DFPError,NotCallableError: raises an exception

    .. versionadded:: 2.3.0
    """
    if cuin and isinstance(cuin, string_types) and callable(callback):
        if cuin in self._dfp_funcs:
            raise DFPError("The cuin already exists")
        else:
            self._dfp_funcs[cuin] = callback
    else:
        if not callable(callback):
            raise NotCallableError("The cuin %s cannot be called back" % cuin)
        raise DFPError("Invalid parameter")
0.00527
def _at_exit(self):
    """
    Resets terminal to normal configuration
    """
    if self.process_exit:
        try:
            term = self.term
            if self.set_scroll:
                term.reset()
            else:
                term.move_to(0, term.height)
            self.term.feed()
        except ValueError:  # Possibly closed file handles
            pass
0.004608
def connect(self, *, network=None, from_backup=None):
    """
    In DBState, a device can be reconnected to BACnet using:
        device.connect(network=bacnet) (bacnet = BAC0.connect())
    """
    if network and from_backup:
        raise WrongParameter("Please provide network OR from_backup")

    elif network:
        self.properties.network = network
        try:
            name = self.properties.network.read(
                "{} device {} objectName".format(
                    self.properties.address, self.properties.device_id
                )
            )

            segmentation = self.properties.network.read(
                "{} device {} segmentationSupported".format(
                    self.properties.address, self.properties.device_id
                )
            )

            if not self.segmentation_supported or segmentation not in (
                "segmentedTransmit",
                "segmentedBoth",
            ):
                segmentation_supported = False
                self._log.debug("Segmentation not supported")
            else:
                segmentation_supported = True

            if name:
                if segmentation_supported:
                    self.new_state(RPMDeviceConnected)
                else:
                    self.new_state(RPDeviceConnected)
                # self.db.close()

        except NoResponseFromController:
            self._log.error("Unable to connect, keeping DB mode active")

    elif from_backup:
        self.properties.db_name = from_backup.split(".")[0]
        self._init_state()
0.001125
def apply_host_template(self, host_ids, start_roles):
    """
    Apply a host template identified by name on the specified hosts and
    optionally start them.

    @param host_ids: List of host ids.
    @param start_roles: Whether to start the created roles or not.
    @return: An ApiCommand object.
    """
    return apply_host_template(self._get_resource_root(), self.name,
                               self.clusterRef.clusterName, host_ids, start_roles)
0.004651
def verify_state(self):
    """
    Verify if session was not yet opened. If it is, open it
    and call connections C{connectionMade}
    """
    # If we're in CONNECTING state - send 'o' message to the client
    if self.state == SESSION_STATE.CONNECTING:
        self.handler.send_pack(proto.CONNECT)

    # Call parent implementation
    super(Session, self).verify_state()
0.005038
def commit(self):
    """
    Put the document into the new state.
    """
    if self.textAfter is None:
        # If this is the first 'commit' call then do not make
        # any changes but store the current document state
        # and its style.
        line, col = self.qteWidget.getNumLinesAndColumns()
        text, style = self.qteWidget.SCIGetStyledText((0, 0, line, col))
        self.styleAfter = style
        self.textAfter = text.decode('utf-8')
    else:
        # Put the document into the 'after' state.
        self.baseClass.setText(self.textAfter)
        self.qteWidget.SCISetStylingEx(0, 0, self.styleAfter)
        self.placeCursor(*self.origPosition)
0.002717
def filter_queryset(self, value, queryset):
    """
    Filter the queryset to all instances matching the given attribute.
    """
    filter_kwargs = {self.field_name: value}
    return queryset.filter(**filter_kwargs)
0.008368
def CopyFileInZip(from_zip, from_name, to_zip, to_name=None):
    """Read a file from a ZipFile and write it to a new ZipFile."""
    data = from_zip.read(from_name)
    if to_name is None:
        to_name = from_name
    to_zip.writestr(to_name, data)
0.020833
def find_and_reserve_fcp(self, assigner_id):
    """reserve the fcp to assigner_id

    The function to reserve a fcp for user
    1. Check whether assigner_id has a fcp already
       if yes, make the reserve of that record to 1
    2. No fcp, then find a fcp and reserve it

    fcp will be returned, or None indicate no fcp
    """
    fcp_list = self.db.get_from_assigner(assigner_id)
    if not fcp_list:
        new_fcp = self.db.find_and_reserve()
        if new_fcp is None:
            LOG.info("no more fcp to be allocated")
            return None

        LOG.debug("allocated %s fcp for %s assigner" %
                  (new_fcp, assigner_id))
        return new_fcp
    else:
        # we got it from db, let's reuse it
        old_fcp = fcp_list[0][0]
        self.db.reserve(fcp_list[0][0])
        return old_fcp
0.002191
def choice(self, subscribers, message):
    """
    Choose a random connection, favoring those that are reliable from
    subscriber pool to deliver specified message.

    @param subscribers: Collection of subscribed connections to destination.
    @type subscribers: C{list} of L{coilmq.server.StompConnection}

    @param message: The message to be delivered.
    @type message: L{stompclient.frame.Frame}

    @return: A random subscriber from the list or None if list is empty.
    @rtype: L{coilmq.server.StompConnection}
    """
    if not subscribers:
        return None

    reliable_subscribers = [
        s for s in subscribers if s.reliable_subscriber]

    if reliable_subscribers:
        return random.choice(reliable_subscribers)
    else:
        return random.choice(subscribers)
0.005741
def dropNode(self, node):
    """Drop a node from the network

    :param node: node to drop
    :type node: Node
    """
    conn = self._connections.pop(node, None)
    if conn is not None:
        # Calling conn.disconnect() immediately triggers the onDisconnected callback if the connection isn't already disconnected, so this is necessary to prevent the automatic reconnect.
        self._preventConnectNodes.add(node)
        conn.disconnect()
        self._preventConnectNodes.remove(node)
    if isinstance(node, TCPNode):
        self._nodes.discard(node)
        self._nodeAddrToNode.pop(node.address, None)
    else:
        self._readonlyNodes.discard(node)
    self._lastConnectAttempt.pop(node, None)
0.003807
def assert_valid_explicit_coords(variables, dims, explicit_coords):
    """Validate explicit coordinate names/dims.

    Raise a MergeError if an explicit coord shares a name with a dimension
    but is comprised of arbitrary dimensions.
    """
    for coord_name in explicit_coords:
        if coord_name in dims and variables[coord_name].dims != (coord_name,):
            raise MergeError(
                'coordinate %s shares a name with a dataset dimension, but is '
                'not a 1D variable along that dimension. This is disallowed '
                'by the xarray data model.' % coord_name)
0.001642
def external_table(self):
    """
    schema.external provides a view of the external hash table for the schema

    :return: external table
    """
    if self._external is None:
        self._external = ExternalTable(self.connection, self.database)
    return self._external
0.009901
def _process_response(self, request, response):
    """Log user operation."""
    log_format = self._get_log_format(request)
    if not log_format:
        return response

    params = self._get_parameters_from_request(request)
    # log a message displayed to user
    messages = django_messages.get_messages(request)
    result_message = None
    if messages:
        result_message = ', '.join('%s: %s' % (message.tags, message)
                                   for message in messages)
    elif 'action' in request.POST:
        result_message = request.POST['action']
    params['message'] = result_message
    params['http_status'] = response.status_code

    self.OPERATION_LOG.info(log_format, params)

    return response
0.002497
def create_http_monitor(self, topics, transport_url, transport_token=None, transport_method='PUT', connect_timeout=0, response_timeout=0, batch_size=1, batch_duration=0, compression='none', format_type='json'): """Creates a HTTP Monitor instance in Device Cloud for a given list of topics :param topics: a string list of topics (e.g. ['DeviceCore[U]', 'FileDataCore']). :param transport_url: URL of the customer web server. :param transport_token: Credentials for basic authentication in the following format: username:password :param transport_method: HTTP method to use for sending data: PUT or POST. The default is PUT. :param connect_timeout: A value of 0 means use the system default of 5000 (5 seconds). :param response_timeout: A value of 0 means use the system default of 5000 (5 seconds). :param batch_size: How many Msgs received before sending data. :param batch_duration: How long to wait before sending batch if it does not exceed batch_size. :param compression: Compression value (i.e. 'gzip'). :param format_type: What format server should send data in (i.e. 'xml' or 'json'). Returns an object of the created Monitor """ monitor_xml = """\ <Monitor> <monTopic>{topics}</monTopic> <monBatchSize>{batch_size}</monBatchSize> <monFormatType>{format_type}</monFormatType> <monTransportType>http</monTransportType> <monTransportUrl>{transport_url}</monTransportUrl> <monTransportToken>{transport_token}</monTransportToken> <monTransportMethod>{transport_method}</monTransportMethod> <monConnectTimeout>{connect_timeout}</monConnectTimeout> <monResponseTimeout>{response_timeout}</monResponseTimeout> <monCompression>{compression}</monCompression> </Monitor> """.format( topics=','.join(topics), transport_url=transport_url, transport_token=transport_token, transport_method=transport_method, connect_timeout=connect_timeout, response_timeout=response_timeout, batch_size=batch_size, batch_duration=batch_duration, format_type=format_type, compression=compression, ) monitor_xml = textwrap.dedent(monitor_xml) response = self._conn.post("/ws/Monitor", monitor_xml) location = ET.fromstring(response.text).find('.//location').text monitor_id = int(location.split('/')[-1]) return HTTPDeviceCloudMonitor(self._conn, monitor_id)
0.004044
def iterdirty(self):
    '''Ordered iterator over dirty elements.'''
    return iter(chain(itervalues(self._new), itervalues(self._modified)))
0.013158
def str_cat(x, other):
    """Concatenate two string columns on a row-by-row basis.

    :param expression other: The expression of the other column to be concatenated.
    :returns: an expression containing the concatenated columns.

    Example:

    >>> import vaex
    >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']
    >>> df = vaex.from_arrays(text=text)
    >>> df
      #  text
      0  Something
      1  very pretty
      2  is coming
      3  our
      4  way.

    >>> df.text.str.cat(df.text)
    Expression = str_cat(text, text)
    Length: 5 dtype: str (expression)
    ---------------------------------
    0  SomethingSomething
    1  very prettyvery pretty
    2  is comingis coming
    3  ourour
    4  way.way.
    """
    sl1 = _to_string_sequence(x)
    sl2 = _to_string_sequence(other)
    sl = sl1.concat(sl2)
    return column.ColumnStringArrow.from_string_sequence(sl)
0.002099
def validate_zone(zone):
    """Checks that the given zone contains the required fields"""
    if not has_valid_id(zone):
        raise InvalidZone("%s must contain a valid 'id' attribute" % zone.__name__)
    if not has_valid_name(zone):
        raise InvalidZone("%s must contain a valid 'name' attribute" % zone.__name__)
0.009202
def main():
    """
    Discards all pairs of sentences which can't be decoded by latin-1 encoder.

    It aims to filter out sentences with rare unicode glyphs and pairs which
    are most likely not valid English-German sentences.

    Examples of discarded sentences:
        ✿★★★Hommage au king de la pop ★★★✿ ✿★★★Que son âme repos...

        Для их осуществления нам, прежде всего, необходимо преодолеть
        возражения рыночных фундаменталистов, которые хотят ликвидировать или
        уменьшить роль МВФ.

        practised as a scientist in various medical departments of the
        ⇗Medical University of Hanover , the ⇗University of Ulm , and the
        ⇗RWTH Aachen (rheumatology, pharmacology, physiology, pathology,
        microbiology, immunology and electron-microscopy).

        The same shift】 and press 【】 【alt out with a smaller diameter circle.

        Brought to you by ABMSUBS ♥leira(Coordinator/Translator)
        ♥chibichan93(Timer/Typesetter) ♥ja...

        Some examples: &0u - ☺ &0U - ☻ &tel - ☏ &PI - ¶ &SU - ☼ &cH- - ♥
        &M2=♫ &sn - ﺵ SGML maps SGML to unicode.
    """
    args = parse_args()
    c = Counter()
    skipped = 0
    valid = 0
    data1 = []
    data2 = []

    with open(args.file1) as f1, open(args.file2) as f2:
        for idx, lines in enumerate(zip(f1, f2)):
            line1, line2 = lines
            if idx % 100000 == 1:
                print('Processed {} lines'.format(idx))
            try:
                line1.encode('latin1')
                line2.encode('latin1')
            except UnicodeEncodeError:
                skipped += 1
            else:
                data1.append(line1)
                data2.append(line2)
                valid += 1
                c.update(line1)

    ratio = valid / (skipped + valid)
    print('Skipped: {}, Valid: {}, Valid ratio {}'.format(skipped, valid, ratio))
    print('Character frequency:', c)

    save_output(args.file1, data1)
    save_output(args.file2, data2)
0.001001
def run_webserver():
    ''' Run web server '''
    host = "0.0.0.0"
    port = CONFIG.getint('Server Parameters', 'port')
    print "Serving on ", "http://" + host + ":" + str(port)
    app.config['TEMPLATES_AUTO_RELOAD'] = True
    app.jinja_env.auto_reload = True
    app.run(debug=True, host=host, port=port)
0.00639
def show_tree_cache(self, label, current_node=None): ''' Show tree and cache info with color represent _status Optionally accpet current_node arg to highlight the current node we are in ''' import os import tempfile import subprocess assert DEBUG, "Please use dr tree visualization functions in debug mode" cache_path = os.path.abspath('profiles') def string_for(self, my_name): color_mapping = {'new' : 'grey', 'pending':'red', 'cached':'yellow', 'done': 'green'} if hasattr(self, 'label'): my_name = self.label my_name = '%s (%s)' % (my_name, str(self.__class__.__name__)) result = [] if not hasattr(self, 'dterms'): return result for dterm in self.dterms: if hasattr(self, dterm): dtval = getattr(self, dterm) if hasattr(dtval, 'dterms') or hasattr(dtval, 'terms'): child_label = getattr(dtval, 'label') if hasattr(dtval, 'label') else dterm child_label = '%s (%s)' % (child_label, str(dtval.__class__.__name__)) src = 'aaa%d' % (id(self)) dst = 'aaa%d' % (id(dtval)) s = '' color = color_mapping[dtval._status] if hasattr(dtval, '_status') else 'grey' if dtval == current_node: color = 'blue' if isinstance(dtval, Concatenate) and len(dtval.dr_cached) > 0: s = 'dr_cached\n' for k, v in dtval.dr_cached.items(): if v is not None: issparse = sp.issparse(v) size = v.size if issparse: size = v.shape[0] * v.shape[1] nonzero = len(v.data) else: nonzero = np.count_nonzero(v) s += '\nsparse: %s\nsize: %d\nnonzero: %d\n' % (issparse, size, nonzero) # if dtval.called_dr_wrt: # # dtval.called_dr_wrt = False # color = 'brown3' # else: # color = 'azure1' elif len(dtval._cache['drs']) > 0: s = '_cache\n' for k, v in dtval._cache['drs'].items(): if v is not None: issparse = sp.issparse(v) size = v.size if issparse: size = v.shape[0] * v.shape[1] nonzero = len(v.data) else: nonzero = np.count_nonzero(v) s += '\nsparse: %s\nsize: %d\nnonzero: %d\n' % (issparse, size, nonzero) if hasattr(dtval, '_cache_info'): s += '\ncache hit:%s\n' % dtval._cache_info[k] # if hasattr(dtval,'called_dr_wrt') and dtval.called_dr_wrt: # # dtval.called_dr_wrt = False # color = 'brown3' # else: # color = 'azure1' result += ['%s -> %s;' % (src, dst)] # Do not overwrite src #result += ['%s [label="%s"];' % (src, my_name)] result += ['%s [label="%s\n%s\n", color=%s, style=filled];' % (dst, child_label, s, color)] result += string_for(getattr(self, dterm), dterm) return result dot_file_contents = 'digraph G {\n%s\n}' % '\n'.join(list(set(string_for(self, 'root')))) dot_file_name = os.path.join(cache_path, label) png_file_name = os.path.join(cache_path, label+'.png') with open(dot_file_name, 'w') as dot_file: with open(png_file_name, 'w') as png_file: dot_file.write(dot_file_contents) dot_file.flush() png_file = tempfile.NamedTemporaryFile(suffix='.png', delete=False) subprocess.call(['dot', '-Tpng', '-o', png_file.name, dot_file.name]) import webbrowser webbrowser.open('file://' + png_file.name) self.loop_children_do(self.reset_flag)
0.005881
def set_address(addr):
    """Sets the address for the next operation."""
    # Send DNLOAD with first byte=0x21 and page address
    buf = struct.pack("<BI", 0x21, addr)
    __dev.ctrl_transfer(0x21, __DFU_DNLOAD, 0, __DFU_INTERFACE, buf, __TIMEOUT)

    # Execute last command
    if get_status() != __DFU_STATE_DFU_DOWNLOAD_BUSY:
        raise Exception("DFU: set address failed")

    # Check command state
    if get_status() != __DFU_STATE_DFU_DOWNLOAD_IDLE:
        raise Exception("DFU: set address failed")
0.001942
def configure(self, options):
    """
    Update the plugin's settings.

    Args:
        options (dict): A key-value mapping of options.

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    self.client.api.configure_plugin(self.name, options)
    self.reload()
0.005195
def closeEvent(self, event):
    """Emits a signal to update start values on components"""
    self.visibilityChanged.emit(0)
    model = self.paramList.model()
    model.hintRequested.disconnect()
    model.rowsInserted.disconnect()
    model.rowsRemoved.disconnect()
0.006849
def register_workflow_type(domain=None, name=None, version=None, description=None, defaultTaskStartToCloseTimeout=None, defaultExecutionStartToCloseTimeout=None, defaultTaskList=None, defaultTaskPriority=None, defaultChildPolicy=None, defaultLambdaRole=None): """ Registers a new workflow type and its configuration settings in the specified domain. The retention period for the workflow history is set by the RegisterDomain action. Access Control You can use IAM policies to control this action's access to Amazon SWF resources as follows: If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows . See also: AWS API Documentation :example: response = client.register_workflow_type( domain='string', name='string', version='string', description='string', defaultTaskStartToCloseTimeout='string', defaultExecutionStartToCloseTimeout='string', defaultTaskList={ 'name': 'string' }, defaultTaskPriority='string', defaultChildPolicy='TERMINATE'|'REQUEST_CANCEL'|'ABANDON', defaultLambdaRole='string' ) :type domain: string :param domain: [REQUIRED] The name of the domain in which to register the workflow type. :type name: string :param name: [REQUIRED] The name of the workflow type. The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (u0000-u001f | u007f - u009f). Also, it must not contain the literal string quotarnquot. :type version: string :param version: [REQUIRED] The version of the workflow type. Note The workflow type consists of the name and version, the combination of which must be unique within the domain. To get a list of all currently registered workflow types, use the ListWorkflowTypes action. The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (u0000-u001f | u007f - u009f). Also, it must not contain the literal string quotarnquot. :type description: string :param description: Textual description of the workflow type. :type defaultTaskStartToCloseTimeout: string :param defaultTaskStartToCloseTimeout: If set, specifies the default maximum duration of decision tasks for this workflow type. This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution decision. The duration is specified in seconds; an integer greater than or equal to 0. The value 'NONE' can be used to specify unlimited duration. :type defaultExecutionStartToCloseTimeout: string :param defaultExecutionStartToCloseTimeout: If set, specifies the default maximum duration for executions of this workflow type. You can override this default when starting an execution through the StartWorkflowExecution action or StartChildWorkflowExecution decision. The duration is specified in seconds; an integer greater than or equal to 0. Unlike some of the other timeout parameters in Amazon SWF, you cannot specify a value of 'NONE' for defaultExecutionStartToCloseTimeout ; there is a one-year max limit on the time that a workflow execution can run. Exceeding this limit will always cause the workflow execution to time out. 
:type defaultTaskList: dict :param defaultTaskList: If set, specifies the default task list to use for scheduling decision tasks for executions of this workflow type. This default is used only if a task list is not provided when starting the execution through the StartWorkflowExecution action or StartChildWorkflowExecution decision. name (string) -- [REQUIRED]The name of the task list. :type defaultTaskPriority: string :param defaultTaskPriority: The default task priority to assign to the workflow type. If not assigned, then '0' will be used. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority. For more information about setting task priority, see Setting Task Priority in the Amazon Simple Workflow Developer Guide . :type defaultChildPolicy: string :param defaultChildPolicy: If set, specifies the default policy to use for the child workflow executions when a workflow execution of this type is terminated, by calling the TerminateWorkflowExecution action explicitly or due to an expired timeout. This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution decision. The supported child policies are: TERMINATE: the child executions will be terminated. REQUEST_CANCEL: a request to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event. ABANDON: no action will be taken. The child executions will continue to run. :type defaultLambdaRole: string :param defaultLambdaRole: The ARN of the default IAM role to use when a workflow execution of this type invokes AWS Lambda functions. This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution and ContinueAsNewWorkflowExecution decision. :returns: domain (string) -- [REQUIRED] The name of the domain in which to register the workflow type. name (string) -- [REQUIRED] The name of the workflow type. The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (u0000-u001f | u007f - u009f). Also, it must not contain the literal string quotarnquot. version (string) -- [REQUIRED] The version of the workflow type. Note The workflow type consists of the name and version, the combination of which must be unique within the domain. To get a list of all currently registered workflow types, use the ListWorkflowTypes action. The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (u0000-u001f | u007f - u009f). Also, it must not contain the literal string quotarnquot. description (string) -- Textual description of the workflow type. defaultTaskStartToCloseTimeout (string) -- If set, specifies the default maximum duration of decision tasks for this workflow type. This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution decision. The duration is specified in seconds; an integer greater than or equal to 0. The value "NONE" can be used to specify unlimited duration. 
defaultExecutionStartToCloseTimeout (string) -- If set, specifies the default maximum duration for executions of this workflow type. You can override this default when starting an execution through the StartWorkflowExecution action or StartChildWorkflowExecution decision. The duration is specified in seconds; an integer greater than or equal to 0. Unlike some of the other timeout parameters in Amazon SWF, you cannot specify a value of "NONE" for defaultExecutionStartToCloseTimeout ; there is a one-year max limit on the time that a workflow execution can run. Exceeding this limit will always cause the workflow execution to time out. defaultTaskList (dict) -- If set, specifies the default task list to use for scheduling decision tasks for executions of this workflow type. This default is used only if a task list is not provided when starting the execution through the StartWorkflowExecution action or StartChildWorkflowExecution decision. name (string) -- [REQUIRED]The name of the task list. defaultTaskPriority (string) -- The default task priority to assign to the workflow type. If not assigned, then "0" will be used. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority. For more information about setting task priority, see Setting Task Priority in the Amazon Simple Workflow Developer Guide . defaultChildPolicy (string) -- If set, specifies the default policy to use for the child workflow executions when a workflow execution of this type is terminated, by calling the TerminateWorkflowExecution action explicitly or due to an expired timeout. This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution decision. The supported child policies are: TERMINATE: the child executions will be terminated. REQUEST_CANCEL: a request to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event. ABANDON: no action will be taken. The child executions will continue to run. defaultLambdaRole (string) -- The ARN of the default IAM role to use when a workflow execution of this type invokes AWS Lambda functions. This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution and ContinueAsNewWorkflowExecution decision. """ pass
0.006552
def toFormMarkup(self, action_url, form_tag_attrs=None, submit_text=u"Continue"): """Generate HTML form markup that contains the values in this message, to be HTTP POSTed as x-www-form-urlencoded UTF-8. @param action_url: The URL to which the form will be POSTed @type action_url: str @param form_tag_attrs: Dictionary of attributes to be added to the form tag. 'accept-charset' and 'enctype' have defaults that can be overridden. If a value is supplied for 'action' or 'method', it will be replaced. @type form_tag_attrs: {unicode: unicode} @param submit_text: The text that will appear on the submit button for this form. @type submit_text: unicode @returns: A string containing (X)HTML markup for a form that encodes the values in this Message object. @rtype: str or unicode """ if ElementTree is None: raise RuntimeError('This function requires ElementTree.') assert action_url is not None form = ElementTree.Element(u'form') if form_tag_attrs: for name, attr in form_tag_attrs.iteritems(): form.attrib[name] = attr form.attrib[u'action'] = oidutil.toUnicode(action_url) form.attrib[u'method'] = u'post' form.attrib[u'accept-charset'] = u'UTF-8' form.attrib[u'enctype'] = u'application/x-www-form-urlencoded' for name, value in self.toPostArgs().iteritems(): attrs = {u'type': u'hidden', u'name': oidutil.toUnicode(name), u'value': oidutil.toUnicode(value)} form.append(ElementTree.Element(u'input', attrs)) submit = ElementTree.Element(u'input', {u'type':'submit', u'value':oidutil.toUnicode(submit_text)}) form.append(submit) return ElementTree.tostring(form, encoding='utf-8')
0.003035
def bar(df, figsize=(24, 10), fontsize=16, labels=None, log=False, color='dimgray', inline=False, filter=None, n=0, p=0, sort=None): """ A bar chart visualization of the nullity of the given DataFrame. :param df: The input DataFrame. :param log: Whether or not to display a logorithmic plot. Defaults to False (linear). :param filter: The filter to apply to the heatmap. Should be one of "top", "bottom", or None (default). :param n: The cap on the number of columns to include in the filtered DataFrame. :param p: The cap on the percentage fill of the columns in the filtered DataFrame. :param sort: The sort to apply to the heatmap. Should be one of "ascending", "descending", or None (default). :param figsize: The size of the figure to display. :param fontsize: The figure's font size. This default to 16. :param labels: Whether or not to display the column names. Would need to be turned off on particularly large displays. Defaults to True. :param color: The color of the filled columns. Default to the RGB multiple `(0.25, 0.25, 0.25)`. :return: If `inline` is False, the underlying `matplotlib.figure` object. Else, nothing. """ nullity_counts = len(df) - df.isnull().sum() df = nullity_filter(df, filter=filter, n=n, p=p) df = nullity_sort(df, sort=sort) plt.figure(figsize=figsize) (nullity_counts / len(df)).plot(kind='bar', figsize=figsize, fontsize=fontsize, log=log, color=color) ax1 = plt.gca() axes = [ax1] # Start appending elements, starting with a modified bottom x axis. if labels or (labels is None and len(df.columns) <= 50): ax1.set_xticklabels(ax1.get_xticklabels(), rotation=45, ha='right', fontsize=fontsize) # Create the numerical ticks. ax2 = ax1.twinx() axes.append(ax2) if not log: ax1.set_ylim([0, 1]) ax2.set_yticks(ax1.get_yticks()) ax2.set_yticklabels([int(n*len(df)) for n in ax1.get_yticks()], fontsize=fontsize) else: # For some reason when a logarithmic plot is specified `ax1` always contains two more ticks than actually # appears in the plot. The fix is to ignore the first and last entries. Also note that when a log scale # is used, we have to make it match the `ax1` layout ourselves. ax2.set_yscale('log') ax2.set_ylim(ax1.get_ylim()) ax2.set_yticklabels([int(n*len(df)) for n in ax1.get_yticks()], fontsize=fontsize) else: ax1.set_xticks([]) # Create the third axis, which displays columnar totals above the rest of the plot. ax3 = ax1.twiny() axes.append(ax3) ax3.set_xticks(ax1.get_xticks()) ax3.set_xlim(ax1.get_xlim()) ax3.set_xticklabels(nullity_counts.values, fontsize=fontsize, rotation=45, ha='left') ax3.grid(False) for ax in axes: ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) ax.spines['bottom'].set_visible(False) ax.spines['left'].set_visible(False) ax.xaxis.set_ticks_position('none') ax.yaxis.set_ticks_position('none') if inline: plt.show() else: return ax1
0.005571
def set_timestamp(self,timestamp=None):
    """
    Set the timestamp of the linguistic processor, set to None for the current time
    @type timestamp: string
    @param timestamp: version of the linguistic processor
    """
    if timestamp is None:
        import time
        timestamp = time.strftime('%Y-%m-%dT%H:%M:%S%Z')

    self.node.set('timestamp',timestamp)
0.012376
def get_contrib_names(self, contrib):
    """
    Returns an appropriate Name and File-As-Name for a contrib element.

    This code was refactored out of nav_contributors and
    package_contributors to provide a single definition point for a common
    job. This is a useful utility that may be well-employed for other
    publishers as well.
    """
    collab = contrib.find('collab')
    anon = contrib.find('anonymous')
    if collab is not None:
        proper_name = serialize(collab, strip=True)
        file_as_name = proper_name
    elif anon is not None:
        proper_name = 'Anonymous'
        file_as_name = proper_name
    else:
        name = contrib.find('name')
        surname = name.find('surname').text
        given = name.find('given-names')
        if given is not None:
            if given.text:  # Sometimes these tags are empty
                proper_name = ' '.join([surname, given.text])
                #File-as name is <surname>, <given-initial-char>
                file_as_name = ', '.join([surname, given.text[0]])
            else:
                proper_name = surname
                file_as_name = proper_name
        else:
            proper_name = surname
            file_as_name = proper_name
    return proper_name, file_as_name
0.002146
def get_tags_from_job(user, job_id):
    """Retrieve all tags attached to a job."""
    job = v1_utils.verify_existence_and_get(job_id, _TABLE)
    if not user.is_in_team(job['team_id']) and not user.is_read_only_user():
        raise dci_exc.Unauthorized()
    JTT = models.JOIN_JOBS_TAGS
    query = (sql.select([models.TAGS])
             .select_from(JTT.join(models.TAGS))
             .where(JTT.c.job_id == job_id))
    rows = flask.g.db_conn.execute(query)

    return flask.jsonify({'tags': rows,
                          '_meta': {'count': rows.rowcount}})
0.001842
def annotate(*, start_msg: Optional[str] = None, end_msg: Optional[str] = None,
             start_no_nl: bool = False) -> types.AnyFunction:
    """A decorator meant for decorating functions that are decorated with the
    `animate` decorator. It prints a message to stdout before and/or after the
    function has finished.

    .. DANGER::

        This decorator can also be used standalone, but you should NOT
        decorate a function that is decorated with `annotate` with `animate`.
        That is to say, the decorator order must be like this:

        .. code-block:: python

            @annotate
            @animate
            def some_function()
                pass
    """
    return core.Annotate(start_msg=start_msg, end_msg=end_msg,
                         start_no_nl=start_no_nl)
0.004963