text: string, lengths 78 to 104k
score: float64, range 0 to 0.18
def get_belbio_conf_files(): """Get belbio configuration from files """ home = os.path.expanduser("~") cwd = os.getcwd() belbio_conf_fp, belbio_secrets_fp = "", "" env_conf_dir = os.getenv("BELBIO_CONF", "").rstrip("/") conf_paths = [ f"{cwd}/belbio_conf.yaml", f"{cwd}/belbio_conf.yml", f"{env_conf_dir}/belbio_conf.yaml", f"{env_conf_dir}/belbio_conf.yml", f"{home}/.belbio/conf", ] secret_paths = [ f"{cwd}/belbio_secrets.yaml", f"{cwd}/belbio_secrets.yml", f"{env_conf_dir}/belbio_secrets.yaml", f"{env_conf_dir}/belbio_secrets.yml", f"{home}/.belbio/secrets", ] for fn in conf_paths: if os.path.exists(fn): belbio_conf_fp = fn break else: log.error( "No BELBio configuration file found - please add one (see http://bel.readthedocs.io/en/latest/configuration.html)" ) for fn in secret_paths: if os.path.exists(fn): belbio_secrets_fp = fn break return (belbio_conf_fp, belbio_secrets_fp)
0.001778
def post_create_app(cls, app, **settings): """Register the errorhandler for the AppException to the passed in App. Args: app (fleaker.base.BaseApplication): A Flask application that extends the Fleaker Base Application, such that the hooks are implemented. Kwargs: register_errorhandler (bool): A boolean indicating if we want to automatically register an errorhandler for the :class:`AppException` exception class after we create this App. Pass ``False`` to prevent registration. Default is ``True``. Returns: fleaker.base.BaseApplication: Returns the app it was given. """ register_errorhandler = settings.pop('register_errorhandler', True) if register_errorhandler: AppException.register_errorhandler(app) return app
0.002162
def env(self): """Env vars for kernels""" # Add our PYTHONPATH to the kernel pathlist = CONF.get('main', 'spyder_pythonpath', default=[]) default_interpreter = CONF.get('main_interpreter', 'default') pypath = add_pathlist_to_PYTHONPATH([], pathlist, ipyconsole=True, drop_env=False) # Environment variables that we need to pass to our sitecustomize umr_namelist = CONF.get('main_interpreter', 'umr/namelist') if PY2: original_list = umr_namelist[:] for umr_n in umr_namelist: try: umr_n.encode('utf-8') except UnicodeDecodeError: umr_namelist.remove(umr_n) if original_list != umr_namelist: CONF.set('main_interpreter', 'umr/namelist', umr_namelist) env_vars = { 'SPY_EXTERNAL_INTERPRETER': not default_interpreter, 'SPY_UMR_ENABLED': CONF.get('main_interpreter', 'umr/enabled'), 'SPY_UMR_VERBOSE': CONF.get('main_interpreter', 'umr/verbose'), 'SPY_UMR_NAMELIST': ','.join(umr_namelist), 'SPY_RUN_LINES_O': CONF.get('ipython_console', 'startup/run_lines'), 'SPY_PYLAB_O': CONF.get('ipython_console', 'pylab'), 'SPY_BACKEND_O': CONF.get('ipython_console', 'pylab/backend'), 'SPY_AUTOLOAD_PYLAB_O': CONF.get('ipython_console', 'pylab/autoload'), 'SPY_FORMAT_O': CONF.get('ipython_console', 'pylab/inline/figure_format'), 'SPY_BBOX_INCHES_O': CONF.get('ipython_console', 'pylab/inline/bbox_inches'), 'SPY_RESOLUTION_O': CONF.get('ipython_console', 'pylab/inline/resolution'), 'SPY_WIDTH_O': CONF.get('ipython_console', 'pylab/inline/width'), 'SPY_HEIGHT_O': CONF.get('ipython_console', 'pylab/inline/height'), 'SPY_USE_FILE_O': CONF.get('ipython_console', 'startup/use_run_file'), 'SPY_RUN_FILE_O': CONF.get('ipython_console', 'startup/run_file'), 'SPY_AUTOCALL_O': CONF.get('ipython_console', 'autocall'), 'SPY_GREEDY_O': CONF.get('ipython_console', 'greedy_completer'), 'SPY_JEDI_O': CONF.get('ipython_console', 'jedi_completer'), 'SPY_SYMPY_O': CONF.get('ipython_console', 'symbolic_math'), 'SPY_TESTING': running_under_pytest() or SAFE_MODE, 'SPY_HIDE_CMD': CONF.get('ipython_console', 'hide_cmd_windows') } if self.is_pylab is True: env_vars['SPY_AUTOLOAD_PYLAB_O'] = True env_vars['SPY_SYMPY_O'] = False env_vars['SPY_RUN_CYTHON'] = False if self.is_sympy is True: env_vars['SPY_AUTOLOAD_PYLAB_O'] = False env_vars['SPY_SYMPY_O'] = True env_vars['SPY_RUN_CYTHON'] = False if self.is_cython is True: env_vars['SPY_AUTOLOAD_PYLAB_O'] = False env_vars['SPY_SYMPY_O'] = False env_vars['SPY_RUN_CYTHON'] = True # Add our PYTHONPATH to env_vars env_vars.update(pypath) # Making all env_vars strings for key,var in iteritems(env_vars): if PY2: # Try to convert vars first to utf-8. try: unicode_var = to_text_string(var) except UnicodeDecodeError: # If that fails, try to use the file system # encoding because one of our vars is our # PYTHONPATH, and that contains file system # directories try: unicode_var = to_unicode_from_fs(var) except: # If that also fails, make the var empty # to be able to start Spyder. # See https://stackoverflow.com/q/44506900/438386 # for details. unicode_var = '' env_vars[key] = to_binary_string(unicode_var, encoding='utf-8') else: env_vars[key] = to_text_string(var) return env_vars
0.001128
def absent(name, auth=None): ''' Ensure service does not exist name Name of the service ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} __salt__['keystoneng.setup_clouds'](auth) service = __salt__['keystoneng.service_get'](name=name) if service: if __opts__['test'] is True: ret['result'] = None ret['changes'] = {'id': service.id} ret['comment'] = 'Service will be deleted.' return ret __salt__['keystoneng.service_delete'](name=service) ret['changes']['id'] = service.id ret['comment'] = 'Deleted service' return ret
0.001418
def trace_grid_stack_to_next_plane(self): """Trace this plane's grid_stacks to the next plane, using its deflection angles.""" def minus(grid, deflections): return grid - deflections return self.grid_stack.map_function(minus, self.deflection_stack)
0.01049
async def get_metrics(self): """Get metrics for the unit. :return: Dictionary of metrics for this unit. """ metrics = await self.model.get_metrics(self.tag) return metrics[self.name]
0.008929
def create_lti_session(self, user_id, roles, realname, email, course_id, task_id, consumer_key, outcome_service_url, outcome_result_id, tool_name, tool_desc, tool_url, context_title, context_label): """ Creates an LTI cookieless session. Returns the new session id""" self._destroy_session() # don't forget to destroy the current session (cleans the threaded dict from web.py) self._session.load('') # creates a new cookieless session session_id = self._session.session_id self._session.lti = { "email": email, "username": user_id, "realname": realname, "roles": roles, "task": (course_id, task_id), "outcome_service_url": outcome_service_url, "outcome_result_id": outcome_result_id, "consumer_key": consumer_key, "context_title": context_title, "context_label": context_label, "tool_description": tool_desc, "tool_name": tool_name, "tool_url": tool_url } return session_id
0.005372
def domains(self): """The domains for this app.""" return self._h._get_resources( resource=('apps', self.name, 'domains'), obj=Domain, app=self )
0.010363
def normalize_encoding(encoding, default=DEFAULT_ENCODING): """Normalize the encoding name, replace ASCII w/ UTF-8.""" if encoding is None: return default encoding = encoding.lower().strip() if encoding in ['', 'ascii']: return default try: codecs.lookup(encoding) return encoding except LookupError: return default
0.002639
def __split_name_unit(self, line): """ Split a string that has value and unit as one. :param str line: :return str str: """ vals = [] unit = '' if line != '' or line != ' ': # If there are parenthesis, remove them line = line.replace('(', '').replace(')', '') # When value and units are a range (i.e. '100 m - 200 m'). if re.match(re_name_unit_range, line): m = re.findall(re_name_unit_range, line) if m: for group in m: for item in group: try: val = float(item) vals.append(val) except ValueError: if item: unit = item # Piece the number range back together. if len(vals) == 1: value = vals[0] else: value = str(vals[0]) + ' to ' + str(vals[1]) else: value, unit = self.__name_unit_regex(line) return value, unit
0.001643
async def starter_bus_async(loop = None, **kwargs) : "returns a Connection object for the D-Bus starter bus." return \ Connection \ ( await dbus.Connection.bus_get_async(DBUS.BUS_STARTER, private = False, loop = loop) ) \ .register_additional_standard(**kwargs)
0.0347
def str_lsnode(self, astr_path=""): """ Print/return the set of nodes branching from current node as string """ self.sCore.reset() str_cwd = self.cwd() if len(astr_path): self.cdnode(astr_path) for node in self.snode_current.d_nodes.keys(): self.sCore.write('%s\n' % node) str_ls = self.sCore.strget() if len(astr_path): self.cdnode(str_cwd) return str_ls
0.01006
def pick_q_v1(self): """Assign the actual value of the inlet sequence to the upper joint of the subreach upstream.""" inl = self.sequences.inlets.fastaccess new = self.sequences.states.fastaccess_new new.qjoints[0] = 0. for idx in range(inl.len_q): new.qjoints[0] += inl.q[idx][0]
0.003205
def _send(self, message): """ A helper method that does the actual sending :param SmsMessage message: SmsMessage class instance. :returns: True if message is sent else False :rtype: bool """ params = { 'from': message.from_phone, 'to': ",".join(message.to), 'text': message.body, 'api_key': self.get_api_key(), 'api_secret': self.get_api_secret(), } print(params) logger.debug("POST to %r with body: %r", NEXMO_API_URL, params) return self.parse(NEXMO_API_URL, requests.post(NEXMO_API_URL, data=params))
0.007599
def push_location( self, element_path, # type: Text array_index=None # type: Optional[int] ): # type: (...) -> None """Push an item onto the state's stack of locations.""" location = ProcessorLocation(element_path=element_path, array_index=array_index) self._locations.append(location)
0.013928
def Scanner(function, *args, **kw): """ Public interface factory function for creating different types of Scanners based on the different types of "functions" that may be supplied. TODO: Deprecate this some day. We've moved the functionality inside the Base class and really don't need this factory function any more. It was, however, used by some of our Tool modules, so the call probably ended up in various people's custom modules patterned on SCons code. """ if SCons.Util.is_Dict(function): return Selector(function, *args, **kw) else: return Base(function, *args, **kw)
0.001555
def proximal_gradient(x, f, g, gamma, niter, callback=None, **kwargs): r"""(Accelerated) proximal gradient algorithm for convex optimization. Also known as "Iterative Soft-Thresholding Algorithm" (ISTA). See `[Beck2009]`_ for more information. This solver solves the convex optimization problem:: min_{x in X} f(x) + g(x) where the proximal operator of ``f`` is known and ``g`` is differentiable. Parameters ---------- x : ``f.domain`` element Starting point of the iteration, updated in-place. f : `Functional` The function ``f`` in the problem definition. Needs to have ``f.proximal``. g : `Functional` The function ``g`` in the problem definition. Needs to have ``g.gradient``. gamma : positive float Step size parameter. niter : non-negative int, optional Number of iterations. callback : callable, optional Function called with the current iterate after each iteration. Other Parameters ---------------- lam : float or callable, optional Overrelaxation step size. If callable, it should take an index (starting at zero) and return the corresponding step size. Default: 1.0 Notes ----- The problem of interest is .. math:: \min_{x \in X} f(x) + g(x), where the formal conditions are that :math:`f : X \to \mathbb{R}` is proper, convex and lower-semicontinuous, and :math:`g : X \to \mathbb{R}` is differentiable and :math:`\nabla g` is :math:`1 / \beta`-Lipschitz continuous. Convergence is only guaranteed if the step length :math:`\gamma` satisfies .. math:: 0 < \gamma < 2 \beta and the parameter :math:`\lambda` (``lam``) satisfies .. math:: \sum_{k=0}^\infty \lambda_k (\delta - \lambda_k) = + \infty where :math:`\delta = \min \{1, \beta / \gamma\}`. References ---------- .. _[Beck2009]: http://epubs.siam.org/doi/abs/10.1137/080716542 """ # Get and validate input if x not in f.domain: raise TypeError('`x` {!r} is not in the domain of `f` {!r}' ''.format(x, f.domain)) if x not in g.domain: raise TypeError('`x` {!r} is not in the domain of `g` {!r}' ''.format(x, g.domain)) gamma, gamma_in = float(gamma), gamma if gamma <= 0: raise ValueError('`gamma` must be positive, got {}'.format(gamma_in)) if int(niter) != niter: raise ValueError('`niter` {} not understood'.format(niter)) lam_in = kwargs.pop('lam', 1.0) lam = lam_in if callable(lam_in) else lambda _: float(lam_in) # Get the proximal and gradient f_prox = f.proximal(gamma) g_grad = g.gradient # Create temporary tmp = x.space.element() for k in range(niter): lam_k = lam(k) # x - gamma grad_g (x) tmp.lincomb(1, x, -gamma, g_grad(x)) # Update x x.lincomb(1 - lam_k, x, lam_k, f_prox(tmp)) if callback is not None: callback(x)
0.000326
def _parse_tree(self, node): """ Parse a <image> object """ if 'type' in node.attrib: self.kind = node.attrib['type'] if 'width' in node.attrib: self.width = int(node.attrib['width']) if 'height' in node.attrib: self.height = int(node.attrib['height']) self.url = node.text
0.005731
def save(self, filename, binary=True): """ Writes a structured grid to disk. Parameters ---------- filename : str Filename of grid to be written. The file extension will select the type of writer to use. ".vtk" will use the legacy writer, while ".vts" will select the VTK XML writer. binary : bool, optional Writes as a binary file by default. Set to False to write ASCII. Notes ----- Binary files write much faster than ASCII, but binary files written on one system may not be readable on other systems. Binary can be used only with the legacy writer. """ filename = os.path.abspath(os.path.expanduser(filename)) # Use legacy writer if vtk is in filename if '.vtk' in filename: writer = vtk.vtkStructuredGridWriter() if binary: writer.SetFileTypeToBinary() else: writer.SetFileTypeToASCII() elif '.vts' in filename: writer = vtk.vtkXMLStructuredGridWriter() if binary: writer.SetDataModeToBinary() else: writer.SetDataModeToAscii() else: raise Exception('Extension should be either ".vts" (xml) or' + '".vtk" (legacy)') # Write writer.SetFileName(filename) writer.SetInputData(self) writer.Write()
0.001334
def setVehicleClass(self, vehID, clazz): """setVehicleClass(string, string) -> None Sets the vehicle class for this vehicle. """ self._connection._sendStringCmd( tc.CMD_SET_VEHICLE_VARIABLE, tc.VAR_VEHICLECLASS, vehID, clazz)
0.007407
def _update_weights(self, margins, parameters = {}): """ Run calmar, stores new weights and returns adjusted margins """ data = self._build_calmar_data() assert self.initial_weight_name is not None parameters['initial_weight'] = self.initial_weight_name val_pondfin, lambdasol, updated_margins = calmar( data, margins, **parameters) # Updating only afetr filtering weights self.weight = val_pondfin * self.filter_by + self.weight * (logical_not(self.filter_by)) return updated_margins
0.008621
def open_connection(self, connection_id, info=None): """Opens a new connection. :param connection_id: ID of the connection to open. """ request = requests_pb2.OpenConnectionRequest() request.connection_id = connection_id if info is not None: # Info is a list of repeated pairs, setting a dict directly fails for k, v in info.items(): request.info[k] = v response_data = self._apply(request) response = responses_pb2.OpenConnectionResponse() response.ParseFromString(response_data)
0.0033
def _update_tasks_redundancy(config, task_id, redundancy, limit=300, offset=0): """Update tasks redundancy from a project.""" try: project = find_project_by_short_name(config.project['short_name'], config.pbclient, config.all) if task_id: response = config.pbclient.find_tasks(project.id, id=task_id) check_api_error(response) task = response[0] task.n_answers = redundancy response = config.pbclient.update_task(task) check_api_error(response) msg = "Task.id = %s redundancy has been updated to %s" % (task_id, redundancy) return msg else: limit = limit offset = offset tasks = config.pbclient.get_tasks(project.id, limit, offset) with click.progressbar(tasks, label="Updating Tasks") as pgbar: while len(tasks) > 0: for t in pgbar: t.n_answers = redundancy response = config.pbclient.update_task(t) check_api_error(response) # Check if for the data we have to auto-throttle task update sleep, msg = enable_auto_throttling(config, tasks) # If auto-throttling enabled, sleep for sleep seconds if sleep: # pragma: no cover time.sleep(sleep) offset += limit tasks = config.pbclient.get_tasks(project.id, limit, offset) return "All tasks redundancy have been updated" except exceptions.ConnectionError: return ("Connection Error! The server %s is not responding" % config.server) except (ProjectNotFound, TaskNotFound): raise
0.002537
def translate(self, table): """ S.translate(table) -> str Return a copy of the string S, where all characters have been mapped through the given translation table, which must be a mapping of Unicode ordinals to Unicode ordinals, strings, or None. Unmapped characters are left untouched. Characters mapped to None are deleted. """ l = [] for c in self: if ord(c) in table: val = table[ord(c)] if val is None: continue elif isinstance(val, unicode): l.append(val) else: l.append(chr(val)) else: l.append(c) return ''.join(l)
0.003871
def get_feature_by_query(self, **kwargs): """ Retrieve an enumerated sequence feature This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_feature_by_query(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str locus: locus name or URI :param str term: Sequence Ontology (SO) term name, accession, or URI :param int rank: feature rank, must be at least 1 :param int accession: accession, must be at least 1 :return: Feature If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_feature_by_query_with_http_info(**kwargs) else: (data) = self.get_feature_by_query_with_http_info(**kwargs) return data
0.002459
def as_representer(resource, content_type): """ Adapts the given resource and content type to a representer. :param resource: resource to adapt. :param str content_type: content (MIME) type to obtain a representer for. """ reg = get_current_registry() rpr_reg = reg.queryUtility(IRepresenterRegistry) return rpr_reg.create(type(resource), content_type)
0.002597
def create_vehicle_icon(self, name, colour, follow=False, vehicle_type=None): '''add a vehicle to the map''' from MAVProxy.modules.mavproxy_map import mp_slipmap if vehicle_type is None: vehicle_type = self.vehicle_type_name if name in self.have_vehicle and self.have_vehicle[name] == vehicle_type: return self.have_vehicle[name] = vehicle_type icon = self.mpstate.map.icon(colour + vehicle_type + '.png') self.mpstate.map.add_object(mp_slipmap.SlipIcon(name, (0,0), icon, layer=3, rotation=0, follow=follow, trail=mp_slipmap.SlipTrail()))
0.010448
def add_simple_rnn(self,name, W_h, W_x, b, hidden_size, input_size, activation, input_names, output_names, output_all = False, reverse_input = False): """ Add a simple recurrent layer to the model. Parameters ---------- name: str The name of this layer. W_h: numpy.array Weights of the recurrent layer's hidden state. Must be of shape (hidden_size, hidden_size). W_x: numpy.array Weights of the recurrent layer's input. Must be of shape (hidden_size, input_size). b: numpy.array | None Bias of the recurrent layer's output. If None, bias is ignored. Otherwise it must be of shape (hidden_size, ). hidden_size: int Number of hidden units. This is equal to the number of channels of output shape. input_size: int Number of the number of channels of input shape. activation: str Activation function name. Can be one of the following option: ['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR']. See add_activation for more detailed description. input_names: [str] The input blob name list of this layer, in the order of [x, h_input]. output_name: [str] The output blob name list of this layer, in the order of [y, h_output]. output_all: boolean Whether the recurrent layer should output at every time step. - If False, the output is the result after the final state update. - If True, the output is a sequence, containing outputs at all time steps. reverse_input: boolean Whether the recurrent layer should process the input sequence in the reverse order. - If False, the input sequence order is not reversed. - If True, the input sequence order is reversed. See Also -------- add_activation, add_gru, add_unilstm, add_bidirlstm """ spec = self.spec nn_spec = self.nn_spec # Add a new Layer spec_layer = nn_spec.layers.add() spec_layer.name = name for name in input_names: spec_layer.input.append(name) for name in output_names: spec_layer.output.append(name) spec_layer_params = spec_layer.simpleRecurrent spec_layer_params.reverseInput = reverse_input #set the parameters spec_layer_params.inputVectorSize = input_size spec_layer_params.outputVectorSize = hidden_size if b is not None: spec_layer_params.hasBiasVector = True spec_layer_params.sequenceOutput = output_all activation_f = spec_layer_params.activation _set_recurrent_activation(activation_f, activation) # Write the weights spec_layer_params.weightMatrix.floatValue.extend(map(float, W_x.flatten())) spec_layer_params.recursionMatrix.floatValue.extend(map(float, W_h.flatten())) if b is not None: spec_layer_params.biasVector.floatValue.extend(map(float, b.flatten()))
0.006731
def make_inference_summary_table(workflow, inference_file, output_dir, variable_args=None, name="inference_table", analysis_seg=None, tags=None): """ Sets up the corner plot of the posteriors in the workflow. Parameters ---------- workflow: pycbc.workflow.Workflow The core workflow instance we are populating inference_file: pycbc.workflow.File The file with posterior samples. output_dir: str The directory to store result plots and files. variable_args : list A list of parameters to use instead of [variable_args]. name: str The name in the [executables] section of the configuration file to use. analysis_segs: {None, ligo.segments.Segment} The segment this job encompasses. If None then use the total analysis time from the workflow. tags: {None, optional} Tags to add to the inference executables. Returns ------- pycbc.workflow.FileList A list of result and output files. """ # default values tags = [] if tags is None else tags analysis_seg = workflow.analysis_time \ if analysis_seg is None else analysis_seg # make the directory that will contain the output files makedir(output_dir) # make a node for plotting the posterior as a corner plot node = PlotExecutable(workflow.cp, name, ifos=workflow.ifos, out_dir=output_dir, tags=tags).create_node() # add command line options node.add_input_opt("--input-file", inference_file) node.new_output_file_opt(analysis_seg, ".html", "--output-file") node.add_opt("--parameters", " ".join(variable_args)) # add node to workflow workflow += node return node.output_files
0.002773
def uninstall(self): """ Remove agent's files from remote host """ if self.session: logger.info('Waiting monitoring data...') self.session.terminate() self.session.wait() self.session = None log_filename = "agent_{host}.log".format(host="localhost") data_filename = "agent_{host}.rawdata".format(host="localhost") try: logger.info('Saving monitoring artefacts from localhost') copyfile(self.workdir + "/_agent.log", log_filename) copyfile(self.workdir + "/monitoring.rawdata", data_filename) logger.info('Deleting temp directory: %s', self.workdir) rmtree(self.workdir) except Exception: logger.error("Exception while uninstalling agent", exc_info=True) logger.info("Removing agent from: localhost") return log_filename, data_filename
0.002139
def set(self, address, host_name): """ Assign one or more PTR record to a single IP Address :type address: str :type host_name: list[str] :param address: (str) The IP address to configure :param host_name: (list[str]) The list of strings representing PTR records :return: (bool) True in case of success, False in case of failure """ request = self._call(SetEnqueueSetReverseDns.SetEnqueueSetReverseDns, IP=address, Hosts=host_name) response = request.commit() return response['Success']
0.006969
def main(): """Main function. Mostly parsing the command line arguments. """ parser = argparse.ArgumentParser() parser.add_argument('--backend', choices=['gatttool', 'bluepy', 'pygatt'], default='gatttool') parser.add_argument('-v', '--verbose', action='store_const', const=True) subparsers = parser.add_subparsers(help='sub-command help', ) parser_poll = subparsers.add_parser('poll', help='poll data from a sensor') parser_poll.add_argument('mac', type=valid_miflora_mac) parser_poll.set_defaults(func=poll) parser_scan = subparsers.add_parser('scan', help='scan for devices') parser_scan.set_defaults(func=scan) parser_scan = subparsers.add_parser('backends', help='list the available backends') parser_scan.set_defaults(func=list_backends) args = parser.parse_args() if args.verbose: logging.basicConfig(level=logging.DEBUG) if not hasattr(args, "func"): parser.print_help() sys.exit(0) args.func(args)
0.00297
def mks(val): """ make sure the value is a string, paying mind to python3 vs 2 """ if sys.version_info > (3, 0, 0): if isinstance(val, bytes): sval = str(val, 'utf-8') else: sval = str(val) else: sval = str(val) return sval
0.003378
def _PrintAnalysisStatusUpdateWindow(self, processing_status): """Prints an analysis status update in window mode. Args: processing_status (ProcessingStatus): processing status. """ if self._stdout_output_writer: self._ClearScreen() output_text = 'plaso - {0:s} version {1:s}\n\n'.format( self._tool_name, plaso.__version__) self._output_writer.Write(output_text) self._PrintAnalysisStatusHeader(processing_status) table_view = views.CLITabularTableView(column_names=[ 'Identifier', 'PID', 'Status', 'Memory', 'Events', 'Tags', 'Reports'], column_sizes=[23, 7, 15, 15, 15, 15, 0]) self._AddsAnalysisProcessStatusTableRow( processing_status.foreman_status, table_view) for worker_status in processing_status.workers_status: self._AddsAnalysisProcessStatusTableRow(worker_status, table_view) table_view.Write(self._output_writer) self._output_writer.Write('\n') if processing_status.aborted: self._output_writer.Write( 'Processing aborted - waiting for clean up.\n\n') if self._stdout_output_writer: # We need to explicitly flush stdout to prevent partial status updates. sys.stdout.flush()
0.004874
def run_delete_sm(self, tenant_id, fw_dict, is_fw_virt): """Runs the delete State Machine. Goes through every state function until the end or when one state returns failure. """ # Read the current state from the DB ret = True serv_obj = self.get_service_obj(tenant_id) state = serv_obj.get_state() # Preserve the ordering of the next lines till while new_state = serv_obj.fixup_state(fw_const.FW_DEL_OP, state) serv_obj.store_local_final_result(fw_const.RESULT_FW_DELETE_INIT) if state != new_state: state = new_state serv_obj.store_state(state) while ret: try: ret = self.fabric_fsm[state][1](tenant_id, fw_dict, is_fw_virt=is_fw_virt) except Exception as exc: LOG.error("Exception %(exc)s for state %(state)s", {'exc': str(exc), 'state': fw_const.fw_state_fn_del_dict.get(state)}) ret = False if ret: LOG.info("State %s return successfully", fw_const.fw_state_fn_del_dict.get(state)) if state == fw_const.INIT_STATE: break state = self.get_next_state(state, ret, fw_const.FW_DEL_OP) serv_obj.store_state(state) return ret
0.001391
def render(self, painter, node=None): """ Draws this node based on its style information :param painter | <QPainter> """ policy = self.visibilityPolicy() if policy == XNodeHotspot.VisibilityPolicy.Never: return elif policy == XNodeHotspot.VisibilityPolicy.Hover and \ not self._hovered: return elif policy == XNodeHotspot.VisibilityPolicy.NodeHover and \ not (node and node.isHovered()): return # initialize the look painter.setPen(self.borderColor()) painter.setBrush(self.color()) # draw a circle if self.style() == XNodeHotspot.Style.Circle: painter.drawEllipse(self.rect()) # draw a square elif self.style() == XNodeHotspot.Style.Square: painter.drawRect(self.rect()) # draw an icon elif self.style() == XNodeHotspot.Style.Icon: rect = self.rect() x = int(rect.x()) y = int(rect.y()) w = int(rect.size().width()) h = int(rect.size().height()) icon = self.icon() hicon = self.hoverIcon() if not self.isEnabled(): pixmap = icon.pixmap(w, h, QIcon.Disabled) elif not self._hovered: pixmap = icon.pixmap(w, h) elif hicon: pixmap = hicon.pixmap(w, h) else: pixmap = icon.pixmap(w, h, QIcon.Selected) painter.drawPixmap(x, y, pixmap)
0.00762
def FilterItems(self, filterFn): """Filter items in a ReservoirBucket, using a filtering function. Filtering items from the reservoir bucket must update the internal state variable self._num_items_seen, which is used for determining the rate of replacement in reservoir sampling. Ideally, self._num_items_seen would contain the exact number of items that have ever seen by the ReservoirBucket and satisfy filterFn. However, the ReservoirBucket does not have access to all items seen -- it only has access to the subset of items that have survived sampling (self.items). Therefore, we estimate self._num_items_seen by scaling it by the same ratio as the ratio of items not removed from self.items. Args: filterFn: A function that returns True for items to be kept. Returns: The number of items removed from the bucket. """ with self._mutex: size_before = len(self.items) self.items = list(filter(filterFn, self.items)) size_diff = size_before - len(self.items) # Estimate a correction the number of items seen prop_remaining = len(self.items) / float( size_before) if size_before > 0 else 0 self._num_items_seen = int(round(self._num_items_seen * prop_remaining)) return size_diff
0.006907
def add_project(self, project_id=None, name=None, **kwargs): """ Creates a project or returns an existing project :param project_id: Project ID :param name: Project name :param kwargs: See the documentation of Project """ if project_id not in self._projects: for project in self._projects.values(): if name and project.name == name: raise aiohttp.web.HTTPConflict(text='Project name "{}" already exists'.format(name)) project = Project(project_id=project_id, controller=self, name=name, **kwargs) self._projects[project.id] = project return self._projects[project.id] return self._projects[project_id]
0.005326
def get_json_results(self, response): ''' Parses the request result and returns the JSON object. Handles all errors. ''' try: # return the proper JSON object, or error code if request didn't go through. self.most_recent_json = response.json() json_results = response.json() if response.status_code in [401, 403]: #401 is invalid key, 403 is out of monthly quota. raise PyMsCognitiveWebSearchException("CODE {code}: {message}".format(code=response.status_code,message=json_results["message"]) ) elif response.status_code in [429]: #429 means try again in x seconds. message = json_results['message'] try: # extract time out seconds from response timeout = int(re.search('in (.+?) seconds', message).group(1)) + 1 print("CODE 429, sleeping for {timeout} seconds".format(timeout=str(timeout))) time.sleep(timeout) except (AttributeError, ValueError) as e: if not self.silent_fail: raise PyMsCognitiveWebSearchException("CODE 429. Failed to auto-sleep: {message}".format(code=response.status_code,message=json_results["message"]) ) else: print ("CODE 429. Failed to auto-sleep: {message}. Trying again in 5 seconds.".format(code=response.status_code,message=json_results["message"])) time.sleep(5) except ValueError as vE: if not self.silent_fail: raise PyMsCognitiveWebSearchException("Request returned with code %s, error msg: %s" % (response.status_code, response.text)) else: print ("[ERROR] Request returned with code %s, error msg: %s. \nContinuing in 5 seconds." % (response.status_code, response.text)) time.sleep(5) return json_results
0.012827
def send_multi_value(self, channel: int, values: Union[List[int], bytearray]) -> int: """ Send multiple consecutive bytes to the uDMX :param channel: The starting DMX channel number, 1-512 :param values: any sequence of integer values that can be converted to a bytearray (e.g a list). Each value 0-255. :return: number of bytes actually sent """ SetMultiChannel = 2 if isinstance(values, bytearray): ba = values else: ba = bytearray(values) n = self._send_control_message(SetMultiChannel, value_or_length=len(ba), channel=channel, data_or_length=ba) return n
0.005571
def make_ref(self, branch): """ Make a branch on github :param branch: Name of the branch to create :return: Sha of the branch or self.ProxyError """ master_sha = self.get_ref(self.master_upstream) if not isinstance(master_sha, str): return self.ProxyError( 404, "The default branch from which to checkout is either not available or does not exist", step="make_ref" ) params = { "ref": "refs/heads/{branch}".format(branch=branch), "sha": master_sha } uri = "{api}/repos/{origin}/git/refs".format( api=self.github_api_url, origin=self.origin ) data = self.request("POST", uri, data=params) if data.status_code == 201: data = json.loads(data.content.decode("utf-8")) return data["object"]["sha"] else: decoded_data = json.loads(data.content.decode("utf-8")) return self.ProxyError( data.status_code, (decoded_data, "message"), step="make_ref", context={ "uri": uri, "params": params } )
0.00239
def get_verify_code(self, file_path): """ Fetch the login verification code image and save it to disk :param file_path: file path where the verification code image will be saved """ url = 'https://mp.weixin.qq.com/cgi-bin/verifycode' payload = { 'username': self.__username, 'r': int(random.random() * 10000000000000), } headers = { 'referer': 'https://mp.weixin.qq.com/', } r = requests.get(url, data=payload, headers=headers, stream=True) self.__cookies = '' for cookie in r.cookies: self.__cookies += cookie.name + '=' + cookie.value + ';' with open(file_path, 'wb') as fd: for chunk in r.iter_content(1024): fd.write(chunk)
0.002793
def pre_release(version): """Generates new docs, release announcements and creates a local tag.""" create_branch(version) changelog(version, write_out=True) check_call(["git", "commit", "-a", "-m", f"Preparing release {version}"]) print() print(f"{Fore.GREEN}Please push your branch to your fork and open a PR.")
0.002959
def _deprecated_register_to_python(self, cd, name, converter=None): """Register a conversion from OpenMath to Python This function has two forms. A three-arguments one: :param cd: A content dictionary name :type cd: str :param name: A symbol name :type name: str :param converter: A conversion function, or a Python object :type: Callable, Any Any object of type ``openmath.OMSymbol``, with content dictionary equal to ``cd`` and name equal to ``name`` will be converted using ``converter``. Also, any object of type ``openmath.OMApplication`` whose first child is an ``openmath.OMSymbol`` as above will be converted using ``converter``. If ``converter`` is a callable, it will be called with the OpenMath object as parameter; otherwise ``converter`` will be returned. In the two-argument form :param cd: A subclass of ``OMAny`` :type cd: type :param name: A conversion function :type name: Callable Any object of type ``cd`` will be passed to ``name()``, and the result will be returned. This forms is mainly to override default conversions for basic OpenMath tags (OMInteger, OMString, etc.). It is discouraged to use it for ``OMSymbol`` and ``OMApplication``. """ if converter is None: if isclass(cd) and issubclass(cd, om.OMAny): self._conv_to_py[cd] = name else: raise TypeError('Two-arguments form expects subclass of openmath.OMAny, found %r' % cd) else: if isinstance(cd, str) and isinstance(name, str): self._conv_sym_to_py[(cd, name)] = converter else: raise TypeError('Three-arguments form expects string, found %r' % cd.__class__)
0.003741
def find_events(symbols, d_data, market_sym='$SPX', trigger=drop_below, trigger_kwargs={}): '''Return dataframe of 1's (event happened) and NaNs (no event), 1 column for each symbol''' df_close = d_data['actual_close'] ts_market = df_close[market_sym] print "Finding `{0}` events with kwargs={1} for {2} ticker symbols".format(trigger.func_name, trigger_kwargs, len(symbols)) print 'Trigger docstring says:\n\n{0}\n\n'.format(trigger.func_doc) # Creating an empty dataframe df_events = copy.deepcopy(df_close) df_events = df_events * np.NAN # Time stamps for the event range ldt_timestamps = df_close.index for s_sym in symbols: if s_sym == market_sym: continue for i in range(1, len(ldt_timestamps)): # Calculating the returns for this timestamp kwargs = dict(trigger_kwargs) kwargs['price_today'] = df_close[s_sym].ix[ldt_timestamps[i]] kwargs['price_yest'] = df_close[s_sym].ix[ldt_timestamps[i - 1]] kwargs['return_today'] = (kwargs['price_today'] / (kwargs['price_yest'] or 1.)) - 1 kwargs['market_price_today'] = ts_market.ix[ldt_timestamps[i]] kwargs['market_price_yest'] = ts_market.ix[ldt_timestamps[i - 1]] kwargs['market_return_today'] = (kwargs['market_price_today'] / (kwargs['market_price_yest'] or 1.)) - 1 if trigger(**kwargs): df_events[s_sym].ix[ldt_timestamps[i]] = 1 print 'Found {0} events where priced dropped below {1}.'.format(df_events.sum(axis=1).sum(axis=0), trigger_kwargs['threshold']) return df_events
0.004266
def _symbol_in(self, symbol, name): """Checks whether the specified symbol is part of the name for completion.""" lsymbol = symbol.lower() lname = name.lower() return lsymbol == lname[:len(symbol)] or "_" + lsymbol in lname
0.011765
def replace_local_hyperlinks( text, base_url="https://github.com/project-rig/rig/blob/master/"): """Replace local hyperlinks in RST with absolute addresses using the given base URL. This is used to make links in the long description function correctly outside of the repository (e.g. when published on PyPi). NOTE: This may need adjusting if further syntax is used. """ def get_new_url(url): return base_url + url[2:] # Deal with anonymous URLS for match in re.finditer(r"^__ (?P<url>\./.*)", text, re.MULTILINE): orig_url = match.groupdict()["url"] url = get_new_url(orig_url) text = re.sub("^__ {}".format(orig_url), "__ {}".format(url), text, flags=re.MULTILINE) # Deal with named URLS for match in re.finditer(r"^\.\. _(?P<identifier>[^:]*): (?P<url>\./.*)", text, re.MULTILINE): identifier = match.groupdict()["identifier"] orig_url = match.groupdict()["url"] url = get_new_url(orig_url) text = re.sub( "^\.\. _{}: {}".format(identifier, orig_url), ".. _{}: {}".format(identifier, url), text, flags=re.MULTILINE) # Deal with image URLS for match in re.finditer(r"^\.\. image:: (?P<url>\./.*)", text, re.MULTILINE): orig_url = match.groupdict()["url"] url = get_new_url(orig_url) text = text.replace(".. image:: {}".format(orig_url), ".. image:: {}".format(url)) return text
0.001895
def getdata(self, tree, location, force_string=False): """Gets XML data from a specific element and handles types.""" found = tree.xpath('%s/text()' % location) if not found: return None else: data = found[0] if force_string: return data if data == 'True': return True elif data == 'False': return False else: try: return int(data) except ValueError: try: return float(data) except ValueError: # It's a string return data
0.002954
def phonemes(self,set=None): """Generator yielding all phonemes (in a particular set if specified). For retrieving one specific morpheme by index, use morpheme() instead""" for layer in self.select(PhonologyLayer): for p in layer.select(Phoneme, set): yield p
0.013201
def exception_handler(exceptionObj): """Function that takes exception Object(<Byte>,<str>) as a parameter and returns the error message<str>""" try: if isinstance(exceptionObj, Exception) and hasattr(exceptionObj, 'args'): if not (hasattr(exceptionObj, 'message') or hasattr(exceptionObj, 'msg')): if len(exceptionObj.args) >= 1: if type(exceptionObj.args[0]) == type(b''): ob = json.loads(exceptionObj.args[0].decode('utf-8')) if type(ob) == type({}) and ob['message']: return ob['message'] else: try: if type(exceptionObj.args[0]) == type('') and exceptionObj.args[0][0] == 'b': ob = json.loads(exceptionObj.args[0][2:-1]) else: ob = json.loads(exceptionObj.args[0]) if type(ob) == type({}) and ob['message']: try: return exception_handler(ob['message']) except Exception as e: return ob['message'] elif type(ob) == type({}) and ob['msg']: try: return exception_handler(ob['msg']) except Exception as e: return ob['msg'] return str(json.loads(exceptionObj.args[0])) except Exception as e: return str(exceptionObj.args[0]) elif hasattr(exceptionObj, 'msg'): return exceptionObj.msg elif hasattr(exceptionObj, 'message'): return exceptionObj.message elif type(exceptionObj) == type(''): try: ob = json.loads(exceptionObj) if type(ob) == type({}): if ob['message']: return ob['message'] elif ob['msg']: return ob['msg'] else: return ob except Exception as e: return exceptionObj except Exception as e: return e
0.004985
def attach(self, to_linode, config=None): """ Attaches this Volume to the given Linode """ result = self._client.post('{}/attach'.format(Volume.api_endpoint), model=self, data={ "linode_id": to_linode.id if issubclass(type(to_linode), Base) else to_linode, "config": None if not config else config.id if issubclass(type(config), Base) else config, }) if not 'id' in result: raise UnexpectedResponseError('Unexpected response when attaching volume!', json=result) self._populate(result) return True
0.012678
def set_random_state(state): """Force-set the state of factory.fuzzy's random generator.""" randgen.state_set = True randgen.setstate(state) faker.generator.random.setstate(state)
0.005102
def request(self, method, url, access_token=None, **kwargs): """ Send a request to the WeChat server :param method: HTTP request method :param url: request URL :param access_token: access token value; retrieved automatically if a conf was passed at initialization, otherwise supply it here :param kwargs: additional request data :return: JSON data returned by the WeChat server """ access_token = self.__conf.access_token if self.__conf is not None else access_token if "params" not in kwargs: kwargs["params"] = { "access_token": access_token } else: kwargs["params"]["access_token"] = access_token if isinstance(kwargs.get("data", ""), dict): body = json.dumps(kwargs["data"], ensure_ascii=False) if isinstance(body, six.text_type): body = body.encode('utf8') kwargs["data"] = body r = requests.request( method=method, url=url, **kwargs ) r.raise_for_status() try: response_json = r.json() except ValueError: # non-JSON response return r headimgurl = response_json.get('headimgurl') if headimgurl: response_json['headimgurl'] = headimgurl.replace('\\', '') self._check_official_error(response_json) return response_json
0.002285
def p_param_arg_exp(self, p): 'param_arg : DOT ID LPAREN expression RPAREN' p[0] = ParamArg(p[2], p[4], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
0.011364
def _get_data_by_field(self, field_number): """Return a data field by field number. This is a useful method to get the values for fields that Ladybug currently doesn't import by default. You can find list of fields by typing EPWFields.fields Args: field_number: a value between 0 to 34 for different available epw fields. Returns: An annual Ladybug list """ if not self.is_data_loaded: self._import_data() # check input data if not 0 <= field_number < self._num_of_fields: raise ValueError("Field number should be between 0-%d" % self._num_of_fields) return self._data[field_number]
0.006916
def count(self, conn, filters): ''' Returns the count of the items that match the provided filters. For the meaning of what the ``filters`` argument means, see the ``.search()`` method docs. ''' pipe, intersect, temp_id = self._prepare(conn, filters) pipe.zcard(temp_id) pipe.delete(temp_id) return pipe.execute()[-2]
0.005128
def _normalize_coerce_to_format_with_lookup(self, v): """ Replace a format with a default """ try: return self.format_lookup.get(v, v) except TypeError: # v is something we can't lookup (like a list) return v
0.007463
def normalise_correlation_coefficient(image_tile_dict, transformed_array, template, normed_tolerance=1): """As above, but for when the correlation coefficient matching method is used """ template_mean = np.mean(template) template_minus_mean = template - template_mean template_norm = np.linalg.norm(template_minus_mean) image_norms = {(x,y):np.linalg.norm(image_tile_dict[(x,y)]- np.mean(image_tile_dict[(x,y)]))*template_norm for (x,y) in image_tile_dict.keys()} match_points = image_tile_dict.keys() # for correlation, then need to transofrm back to get correct value for division h, w = template.shape image_matches_normalised = {match_points[i]:transformed_array[match_points[i][0], match_points[i][1]]/image_norms[match_points[i]] for i in range(len(match_points))} normalised_matches = {key:value for key, value in image_matches_normalised.items() if np.round(value, decimals=3) >= normed_tolerance} return normalised_matches.keys()
0.015213
def path_shift(self, count=1): ''' Shift some levels of PATH_INFO into SCRIPT_NAME and return the moved part. count defaults to 1''' #/a/b/ /c/d --> 'a','b' 'c','d' if count == 0: return '' pathlist = self.path.strip('/').split('/') scriptlist = self.environ.get('SCRIPT_NAME','/').strip('/').split('/') if pathlist and pathlist[0] == '': pathlist = [] if scriptlist and scriptlist[0] == '': scriptlist = [] if count > 0 and count <= len(pathlist): moved = pathlist[:count] scriptlist = scriptlist + moved pathlist = pathlist[count:] elif count < 0 and count >= -len(scriptlist): moved = scriptlist[count:] pathlist = moved + pathlist scriptlist = scriptlist[:count] else: empty = 'SCRIPT_NAME' if count < 0 else 'PATH_INFO' raise AssertionError("Cannot shift. Nothing left from %s" % empty) self['PATH_INFO'] = self.path = '/' + '/'.join(pathlist) \ + ('/' if self.path.endswith('/') and pathlist else '') self['SCRIPT_NAME'] = '/' + '/'.join(scriptlist) return '/'.join(moved)
0.008197
def create_argument_parser(self): """ Create a parser for the command-line options. May be overwritten for adding more configuration options. @return: an argparse.ArgumentParser instance """ parser = argparse.ArgumentParser( fromfile_prefix_chars='@', description= """Execute benchmarks for a given tool with a set of input files. Benchmarks are defined in an XML file given as input. Command-line parameters can additionally be read from a file if file name prefixed with '@' is given as argument. The tool table-generator can be used to create tables for the results. Part of BenchExec: https://github.com/sosy-lab/benchexec/""") parser.add_argument("files", nargs='+', metavar="FILE", help="XML file with benchmark definition") parser.add_argument("-d", "--debug", action="store_true", help="Enable debug output and a debugging helper on signal USR1") parser.add_argument("-r", "--rundefinition", dest="selected_run_definitions", action="append", help="Run only the specified RUN_DEFINITION from the benchmark definition file. " + "This option can be specified several times and can contain wildcards.", metavar="RUN_DEFINITION") parser.add_argument("-t", "--tasks", dest="selected_sourcefile_sets", action="append", help="Run only the tasks from the tasks tag with TASKS as name. " + "This option can be specified several times and can contain wildcards.", metavar="TASKS") parser.add_argument("-n", "--name", dest="name", default=None, help="Set name of benchmark execution to NAME", metavar="NAME") parser.add_argument("-o", "--outputpath", dest="output_path", type=str, default=self.DEFAULT_OUTPUT_PATH, help="Output prefix for the generated results. " + "If the path is a folder files are put into it," + "otherwise it is used as a prefix for the resulting files.") parser.add_argument("-T", "--timelimit", dest="timelimit", default=None, help='Time limit for each run, e.g. "90s" ' '(overwrites time limit and hard time limit from XML file, ' 'use "-1" to disable time limits completely)', metavar="SECONDS") parser.add_argument("-W", "--walltimelimit", dest="walltimelimit", default=None, help='Wall time limit for each run, e.g. "90s" ' '(overwrites wall time limit from XML file, ' 'use "-1" to use CPU time limit plus a few seconds, such value is also used by default)', metavar="SECONDS") parser.add_argument("-M", "--memorylimit", dest="memorylimit", default=None, help="Memory limit, if no unit is given MB are assumed (-1 to disable)", metavar="BYTES") parser.add_argument("-N", "--numOfThreads", dest="num_of_threads", default=None, type=int, help="Run n benchmarks in parallel", metavar="n") parser.add_argument("-c", "--limitCores", dest="corelimit", default=None, metavar="N", help="Limit each run of the tool to N CPU cores (-1 to disable).") parser.add_argument("--allowedCores", dest="coreset", default=None, type=util.parse_int_list, help="Limit the set of cores BenchExec will use for all runs " "(Applied only if the number of CPU cores is limited).", metavar="N,M-K",) parser.add_argument("--user", dest="users", action="append", metavar="USER", help="Execute benchmarks under given user account(s) (needs password-less sudo setup).") parser.add_argument("--no-compress-results", dest="compress_results", action="store_false", help="Do not compress result files.") def parse_filesize_value(value): try: value = int(value) if value == -1: return None logging.warning( 'Value "%s" for logfile size interpreted as MB for backwards compatibility, ' 'specify a unit to make this unambiguous.', value) value = value * _BYTE_FACTOR * _BYTE_FACTOR except ValueError: value = util.parse_memory_value(value) return value parser.add_argument("--maxLogfileSize", dest="maxLogfileSize", type=parse_filesize_value, default=20*_BYTE_FACTOR*_BYTE_FACTOR, metavar="SIZE", help="Shrink logfiles to given size if they are too big. " "(-1 to disable, default value: 20 MB).") parser.add_argument("--filesCountLimit", type=int, metavar="COUNT", help="maximum number of files the tool may write to (checked periodically, counts only files written in container mode or to temporary directories)") parser.add_argument("--filesSizeLimit", type=util.parse_memory_value, metavar="BYTES", help="maximum size of files the tool may write (checked periodically, counts only files written in container mode or to temporary directories)") parser.add_argument("--commit", dest="commit", action="store_true", help="If the output path is a git repository without local changes, " + "add and commit the result files.") parser.add_argument("--message", dest="commit_message", type=str, default="Results for benchmark run", help="Commit message if --commit is used.") parser.add_argument("--startTime", dest="start_time", type=parse_time_arg, default=None, metavar="'YYYY-MM-DD hh:mm'", help='Set the given date and time as the start time of the benchmark.') parser.add_argument("--version", action="version", version="%(prog)s " + __version__) try: from benchexec import containerexecutor except Exception: # This fails e.g. on MacOS X because of missing libc. # We want to keep BenchExec usable for cases where the # localexecutor is replaced by something else. logging.debug("Could not import container feature:", exc_info=1) else: container_args = parser.add_argument_group("optional arguments for run container") container_on_args = container_args.add_mutually_exclusive_group() container_on_args.add_argument("--container", action='store_true', help="force isolation of run in container (future default starting with BenchExec 2.0)") container_on_args.add_argument("--no-container", action='store_true', help="disable use of containers for isolation of runs (current default)") containerexecutor.add_basic_container_args(container_args) return parser
0.009909
def ecc_correct_intra(ecc_manager_intra, ecc_params_intra, field, ecc, enable_erasures=False, erasures_char="\x00", only_erasures=False): """ Correct an intra-field with its corresponding intra-ecc if necessary """ fentry_fields = {"ecc_field": ecc} field_correct = [] # will store each block of the corrected (or already correct) filepath fcorrupted = False # check if field was corrupted fcorrected = True # check if field was corrected (if it was corrupted) errmsg = '' # Decode each block of the filepath for e in entry_assemble(fentry_fields, ecc_params_intra, len(field), '', field): # Check if this block of the filepath is OK, if yes then we just copy it over if ecc_manager_intra.check(e["message"], e["ecc"]): field_correct.append(e["message"]) else: # Else this block is corrupted, we will try to fix it using the ecc fcorrupted = True # Repair the message block and the ecc try: repaired_block, repaired_ecc = ecc_manager_intra.decode(e["message"], e["ecc"], enable_erasures=enable_erasures, erasures_char=erasures_char, only_erasures=only_erasures) except (ReedSolomonError, RSCodecError), exc: # the reedsolo lib may raise an exception when it can't decode. We ensure that we can still continue to decode the rest of the file, and the other files. repaired_block = None repaired_ecc = None errmsg += "- Error: metadata field at offset %i: %s\n" % (entry_pos[0], exc) # Check if the block was successfully repaired: if yes then we copy the repaired block... if repaired_block is not None and ecc_manager_intra.check(repaired_block, repaired_ecc): field_correct.append(repaired_block) else: # ... else it failed, then we copy the original corrupted block and report an error later field_correct.append(e["message"]) fcorrected = False # Join all the blocks into one string to build the final filepath if isinstance(field_correct[0], bytearray): field_correct = [str(x) for x in field_correct] # workaround when using --ecc_algo 3 or 4, because we get a list of bytearrays instead of str field = ''.join(field_correct) # Report errors return (field, fcorrupted, fcorrected, errmsg)
0.009259
def render_cvmfs_pvc(cvmfs_volume): """Render REANA_CVMFS_PVC_TEMPLATE.""" name = CVMFS_REPOSITORIES[cvmfs_volume] rendered_template = dict(REANA_CVMFS_PVC_TEMPLATE) rendered_template['metadata']['name'] = 'csi-cvmfs-{}-pvc'.format(name) rendered_template['spec']['storageClassName'] = "csi-cvmfs-{}".format(name) return rendered_template
0.002762
def mobile_template(template): """ Mark a function as mobile-ready and pass a mobile template if MOBILE. For example:: @mobile_template('a/{mobile/}b.html') def view(template=None): ... if ``request.MOBILE=True`` the template will be `a/mobile/b.html`. if ``request.MOBILE=False`` the template will be `a/b.html`. This function is useful if the mobile view uses the same context but a different template. """ def decorator(f): @functools.wraps(f) def wrapper(*args, **kwargs): ctx = stack.top if ctx is not None and hasattr(ctx, 'request'): request = ctx.request is_mobile = getattr(request, 'MOBILE', None) kwargs['template'] = re.sub(r'{(.+?)}', r'\1' if is_mobile else '', template) return f(*args, **kwargs) return wrapper return decorator
0.000987
def authenticate(cls, client_id, secret): """ Authenticate a client using it's secret :param client_id: the client / service ID :param secret: the client secret :returns: a Service instance """ result = yield views.oauth_client.get(key=[secret, client_id]) if not result['rows']: raise Return() service = yield Service.get(client_id) raise Return(service)
0.004454
def translate_path(self, dep_file, dep_rule): """Translate dep_file from dep_rule into this rule's output path.""" dst_base = dep_file.split(os.path.join(dep_rule.address.repo, dep_rule.address.path), 1)[-1] if self.params['strip_prefix']: dst_base = dep_file.split(self.params['strip_prefix'], 1)[-1] return os.path.join(self.address.repo, self.address.path, self.params['prefix'].lstrip('/'), dst_base.lstrip('/'))
0.003552
def new(filename: str, *, file_attrs: Optional[Dict[str, str]] = None) -> LoomConnection: """ Create an empty Loom file, and return it as a context manager. """ if filename.startswith("~/"): filename = os.path.expanduser(filename) if file_attrs is None: file_attrs = {} # Create the file (empty). # Yes, this might cause an exception, which we prefer to send to the caller f = h5py.File(name=filename, mode='w') f.create_group('/layers') f.create_group('/row_attrs') f.create_group('/col_attrs') f.create_group('/row_graphs') f.create_group('/col_graphs') f.flush() f.close() ds = connect(filename, validate=False) for vals in file_attrs: ds.attrs[vals] = file_attrs[vals] # store creation date currentTime = time.localtime(time.time()) ds.attrs['CreationDate'] = timestamp() ds.attrs["LOOM_SPEC_VERSION"] = loompy.loom_spec_version return ds
0.030963
def assert_keys_have_values(self, caller, *keys): """Check that keys list are all in context and all have values. Args: *keys: Will check each of these keys in context caller: string. Calling function name - just used for informational messages Raises: KeyNotInContextError: Key doesn't exist KeyInContextHasNoValueError: context[key] is None AssertionError: if *keys is None """ for key in keys: self.assert_key_has_value(key, caller)
0.003509
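A hedged sketch assuming this method lives on a pypyr-style Context (a dict subclass); the key names are made up::

    from pypyr.context import Context

    context = Context({'out_path': '/tmp/out', 'mode': None})
    context.assert_keys_have_values('mystep', 'out_path')           # passes
    context.assert_keys_have_values('mystep', 'out_path', 'mode')   # raises KeyInContextHasNoValueError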
def libvlc_video_get_aspect_ratio(p_mi): '''Get current video aspect ratio. @param p_mi: the media player. @return: the video aspect ratio or NULL if unspecified (the result must be released with free() or L{libvlc_free}()). ''' f = _Cfunctions.get('libvlc_video_get_aspect_ratio', None) or \ _Cfunction('libvlc_video_get_aspect_ratio', ((1,),), string_result, ctypes.c_void_p, MediaPlayer) return f(p_mi)
0.006565
def load_verify_locations(self, cafile, capath=None):
    """
    Let SSL know where we can find trusted certificates for the certificate
    chain.  Note that the certificates have to be in PEM format.

    If capath is passed, it must be a directory prepared using the
    ``c_rehash`` tool included with OpenSSL.  Either, but not both, of
    *cafile* or *capath* may be :data:`None`.

    :param cafile: In which file we can find the certificates (``bytes`` or
        ``unicode``).
    :param capath: In which directory we can find the certificates
        (``bytes`` or ``unicode``).

    :return: None
    """
    if cafile is None:
        cafile = _ffi.NULL
    else:
        cafile = _path_string(cafile)

    if capath is None:
        capath = _ffi.NULL
    else:
        capath = _path_string(capath)

    load_result = _lib.SSL_CTX_load_verify_locations(
        self._context, cafile, capath
    )
    if not load_result:
        _raise_current_error()
0.001873
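A hedged sketch of calling load_verify_locations through pyOpenSSL's Context, where this method lives; the CA bundle path is a placeholder assumption::

    from OpenSSL import SSL

    ctx = SSL.Context(SSL.TLSv1_2_METHOD)
    # Trust a PEM bundle (hypothetical path); capath stays None.
    ctx.load_verify_locations(cafile=b"/etc/ssl/certs/ca-bundle.pem")
    ctx.set_verify(SSL.VERIFY_PEER, lambda conn, cert, errnum, depth, ok: ok)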
def check_infile_and_wp(curinf, curwp):
    """Check the existence of the given file and directory path.

       1. Raise a runtime exception if the input file does not exist and no workspace is specified.
       2. If ``curwp`` is None, set it to the base folder of ``curinf``.
    """
    if not os.path.exists(curinf):
        if curwp is None:
            TauDEM.error('You must specify either the workspace or the '
                         'full path of the input file!')
        curinf = curwp + os.sep + curinf
        curinf = os.path.abspath(curinf)
        if not os.path.exists(curinf):
            TauDEM.error('Input file parameter %s does not exist!' % curinf)
    else:
        curinf = os.path.abspath(curinf)
    if curwp is None:
        curwp = os.path.dirname(curinf)
    return curinf, curwp
0.004711
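A short usage sketch for check_infile_and_wp; the file and workspace names are hypothetical::

    # Case 1: relative file name plus an explicit workspace.
    infile, ws = check_infile_and_wp('dem.tif', '/data/taudem_ws')

    # Case 2: absolute file path, workspace inferred from its directory.
    infile, ws = check_infile_and_wp('/data/taudem_ws/dem.tif', None)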
def rerun(version="3.7.0"): """ Rerun last example code block with specified version of python. """ from commandlib import Command Command(DIR.gen.joinpath("py{0}".format(version), "bin", "python"))( DIR.gen.joinpath("state", "examplepythoncode.py") ).in_dir(DIR.gen.joinpath("state")).run()
0.003086
def bulkCmd(snmpDispatcher, authData, transportTarget, nonRepeaters, maxRepetitions, *varBinds, **options): """Initiate SNMP GETBULK query over SNMPv2c. Based on passed parameters, prepares SNMP GETBULK packet (:RFC:`1905#section-4.2.3`) and schedules its transmission by I/O framework at a later point of time. Parameters ---------- snmpDispatcher: :py:class:`~pysnmp.hlapi.v1arch.asyncore.SnmpDispatcher` Class instance representing SNMP dispatcher. authData: :py:class:`~pysnmp.hlapi.v1arch.CommunityData` or :py:class:`~pysnmp.hlapi.v1arch.UsmUserData` Class instance representing SNMP credentials. transportTarget: :py:class:`~pysnmp.hlapi.v1arch.asyncore.UdpTransportTarget` or :py:class:`~pysnmp.hlapi.v1arch.asyncore.Udp6TransportTarget` Class instance representing transport type along with SNMP peer address. nonRepeaters: int One MIB variable is requested in response for the first `nonRepeaters` MIB variables in request. maxRepetitions: int `maxRepetitions` MIB variables are requested in response for each of the remaining MIB variables in the request (e.g. excluding `nonRepeaters`). Remote SNMP dispatcher may choose lesser value than requested. \*varBinds: :py:class:`~pysnmp.smi.rfc1902.ObjectType` One or more class instances representing MIB variables to place into SNMP request. Other Parameters ---------------- \*\*options : Request options: * `lookupMib` - load MIB and resolve response MIB variables at the cost of slightly reduced performance. Default is `True`. * `cbFun` (callable) - user-supplied callable that is invoked to pass SNMP response data or error to user at a later point of time. Default is `None`. * `cbCtx` (object) - user-supplied object passing additional parameters to/from `cbFun`. Default is `None`. Notes ----- User-supplied `cbFun` callable must have the following call signature: * snmpDispatcher (:py:class:`~pysnmp.hlapi.v1arch.snmpDispatcher`): Class instance representing SNMP dispatcher. * stateHandle (int): Unique request identifier. Can be used for matching multiple ongoing requests with received responses. * errorIndication (str): True value indicates SNMP dispatcher error. * errorStatus (str): True value indicates SNMP PDU error. * errorIndex (int): Non-zero value refers to `varBinds[errorIndex-1]` * varBindTable (tuple): A sequence of sequences (e.g. 2-D array) of variable-bindings represented as :class:`tuple` or :py:class:`~pysnmp.smi.rfc1902.ObjectType` class instances representing a table of MIB variables returned in SNMP response, with up to ``maxRepetitions`` rows, i.e. ``len(varBindTable) <= maxRepetitions``. For ``0 <= i < len(varBindTable)`` and ``0 <= j < len(varBinds)``, ``varBindTable[i][j]`` represents: - For non-repeaters (``j < nonRepeaters``), the first lexicographic successor of ``varBinds[j]``, regardless the value of ``i``, or an :py:class:`~pysnmp.smi.rfc1902.ObjectType` instance with the :py:obj:`~pysnmp.proto.rfc1905.endOfMibView` value if no such successor exists; - For repeaters (``j >= nonRepeaters``), the ``i``-th lexicographic successor of ``varBinds[j]``, or an :py:class:`~pysnmp.smi.rfc1902.ObjectType` instance with the :py:obj:`~pysnmp.proto.rfc1905.endOfMibView` value if no such successor exists. See :rfc:`3416#section-4.2.3` for details on the underlying ``GetBulkRequest-PDU`` and the associated ``GetResponse-PDU``, such as specific conditions under which the server may truncate the response, causing ``varBindTable`` to have less than ``maxRepetitions`` rows. * `cbCtx` (object): Original user-supplied object. 
Returns ------- stateHandle : int Unique request identifier. Can be used for matching received responses with ongoing requests. Raises ------ PySnmpError Or its derivative indicating that an error occurred while performing SNMP operation. Examples -------- >>> from pysnmp.hlapi.v1arch.asyncore import * >>> >>> def cbFun(snmpDispatcher, stateHandle, errorIndication, >>> errorStatus, errorIndex, varBinds, cbCtx): >>> print(errorIndication, errorStatus, errorIndex, varBinds) >>> >>> snmpDispatcher = snmpDispatcher() >>> >>> stateHandle = bulkCmd( >>> snmpDispatcher, >>> CommunityData('public'), >>> UdpTransportTarget(('demo.snmplabs.com', 161)), >>> 0, 2, >>> ('1.3.6.1.2.1.1', None), >>> cbFun=cbFun >>> ) >>> >>> snmpDispatcher.transportDispatcher.runDispatcher() """ def _cbFun(snmpDispatcher, stateHandle, errorIndication, rspPdu, _cbCtx): if not cbFun: return if errorIndication: cbFun(errorIndication, pMod.Integer(0), pMod.Integer(0), None, cbCtx=cbCtx, snmpDispatcher=snmpDispatcher, stateHandle=stateHandle) return errorStatus = pMod.apiBulkPDU.getErrorStatus(rspPdu) errorIndex = pMod.apiBulkPDU.getErrorIndex(rspPdu) varBindTable = pMod.apiBulkPDU.getVarBindTable(reqPdu, rspPdu) errorIndication, nextVarBinds = pMod.apiBulkPDU.getNextVarBinds( varBindTable[-1], errorIndex=errorIndex ) if options.get('lookupMib'): varBindTable = [ VB_PROCESSOR.unmakeVarBinds(snmpDispatcher.cache, vbs) for vbs in varBindTable ] nextStateHandle = pMod.getNextRequestID() nextVarBinds = cbFun(errorIndication, errorStatus, errorIndex, varBindTable, cbCtx=cbCtx, snmpDispatcher=snmpDispatcher, stateHandle=stateHandle, nextStateHandle=nextStateHandle, nextVarBinds=nextVarBinds) if not nextVarBinds: return pMod.apiBulkPDU.setRequestID(reqPdu, nextStateHandle) pMod.apiBulkPDU.setVarBinds(reqPdu, nextVarBinds) return snmpDispatcher.sendPdu(authData, transportTarget, reqPdu, cbFun=_cbFun) if authData.mpModel < 1: raise error.PySnmpError('GETBULK PDU is only supported in SNMPv2c and SNMPv3') lookupMib, cbFun, cbCtx = [options.get(x) for x in ('lookupMib', 'cbFun', 'cbCtx')] if lookupMib: varBinds = VB_PROCESSOR.makeVarBinds(snmpDispatcher.cache, varBinds) pMod = api.PROTOCOL_MODULES[authData.mpModel] reqPdu = pMod.GetBulkRequestPDU() pMod.apiBulkPDU.setDefaults(reqPdu) pMod.apiBulkPDU.setNonRepeaters(reqPdu, nonRepeaters) pMod.apiBulkPDU.setMaxRepetitions(reqPdu, maxRepetitions) pMod.apiBulkPDU.setVarBinds(reqPdu, varBinds) return snmpDispatcher.sendPdu(authData, transportTarget, reqPdu, cbFun=_cbFun)
0.001957
def destroy(self): """ Destroy the client """ if self.client: #: Stop listening self.client.setWebView(self.widget, None) del self.client super(AndroidWebView, self).destroy()
0.008197
def subset(args): """ %prog subset pairsfile ksfile1 ksfile2 ... -o pairs.ks Subset some pre-calculated ks ka values (in ksfile) according to pairs in tab delimited pairsfile/anchorfile. """ p = OptionParser(subset.__doc__) p.add_option("--noheader", action="store_true", help="don't write ksfile header line [default: %default]") p.add_option("--block", action="store_true", help="preserve block structure in input [default: %default]") p.set_stripnames() p.set_outfile() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) pairsfile, ksfiles = args[0], args[1:] noheader = opts.noheader block = opts.block if block: noheader = True outfile = opts.outfile ksvals = {} for ksfile in ksfiles: ksvals.update(dict((line.name, line) for line in \ KsFile(ksfile, strip_names=opts.strip_names))) fp = open(pairsfile) fw = must_open(outfile, "w") if not noheader: print(fields, file=fw) i = j = 0 for row in fp: if row[0] == '#': if block: print(row.strip(), file=fw) continue a, b = row.split()[:2] name = ";".join((a, b)) if name not in ksvals: name = ";".join((b, a)) if name not in ksvals: j += 1 print("\t".join((a, b, ".", ".")), file=fw) continue ksline = ksvals[name] if block: print("\t".join(str(x) for x in (a, b, ksline.ks)), file=fw) else: ksline.name = ";".join((a, b)) print(ksline, file=fw) i += 1 fw.close() logging.debug("{0} pairs not found in ksfiles".format(j)) logging.debug("{0} ks records written to `{1}`".format(i, outfile)) return outfile
0.001572
def normalize_data_values(type_string, data_value): """Decodes utf-8 bytes to strings for abi string values. eth-abi v1 returns utf-8 bytes for string values. This can be removed once eth-abi v2 is required. """ _type = parse_type_string(type_string) if _type.base == "string": if _type.arrlist is not None: return tuple((normalize_to_text(value) for value in data_value)) else: return normalize_to_text(data_value) return data_value
0.001992
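A sketch of the decode behaviour described in the docstring of normalize_data_values, assuming parse_type_string and normalize_to_text behave like the usual eth-abi/eth-utils helpers (utf-8 decode of bytes)::

    # Scalar string values are decoded from utf-8 bytes to text ...
    assert normalize_data_values("string", b"hello") == "hello"
    # ... string arrays are decoded element-wise and returned as a tuple ...
    assert normalize_data_values("string[2]", (b"a", b"b")) == ("a", "b")
    # ... and non-string types pass through untouched.
    assert normalize_data_values("uint256", 7) == 7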
def has_change_permission(self): """ Returns a boolean if the current user has permission to change the current object being viewed/edited. """ has_permission = False if self.user is not None: # We check for the object level permission here, even though by default the Django # admin doesn't. If the Django ModelAdmin is extended to allow object level # permissions - then this will work as expected. permission_name = '{}.change_{}'.format(self.opts.app_label, self.opts.model_name) has_permission = ( self.user.has_perm(permission_name) or self.user.has_perm(permission_name, obj=self.obj) ) return has_permission
0.007722
def getvalue(self, v):
    """ Return a list of quantities making up the measures' value.

    :param v: a measure
    """
    if not is_measure(v):
        raise TypeError('Incorrect input type for getvalue()')
    import re
    rx = re.compile(r"m\d+")
    out = []
    for key in sorted(v.keys()):
        if re.match(rx, key):
            out.append(dq.quantity(v.get(key)))
    return out
0.006211
def append_arrays(many, single): """Append an array to another padding with NaNs for constant length. Parameters ---------- many : array_like of rank (j, k) values appended to a copy of this array. This may be a 1-D or 2-D array. single : array_like of rank l values to append. This should be a 1-D array. Returns ------- append : :class:`numpy.ndarray` 2-D array with rank (j + 1, max(k, l)) with missing values padded with :class:`numpy.nan` """ assert np.ndim(single) == 1 # Check if the values need to be padded to for equal length diff = single.shape[0] - many.shape[0] if diff < 0: single = np.pad(single, (0, -diff), 'constant', constant_values=np.nan) elif diff > 0: many = np.pad(many, ((0, diff), ), 'constant', constant_values=np.nan) else: # No padding needed pass return np.c_[many, single]
0.001062
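A small worked example for append_arrays showing the NaN padding and the column append; the values are arbitrary::

    import numpy as np

    many = np.array([[1.0], [2.0], [3.0]])   # shape (3, 1)
    single = np.array([4.0, 5.0])            # shape (2,)
    out = append_arrays(many, single)
    # single is padded with one NaN to length 3, then appended as a new column:
    # [[ 1.,  4.],
    #  [ 2.,  5.],
    #  [ 3., nan]]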
def get_most_recent_bike() -> Optional['Bike']: """ Gets the most recently cached bike from the database. :return: The bike that was cached most recently. """ try: return Bike.select().order_by(Bike.cached_date.desc()).get() except pw.DoesNotExist: return None
0.006024
def show_busy(self):
    """Show the progress bar, hide the question group box and enable the busy cursor."""
    self.progress_bar.show()
    self.question_group.setEnabled(False)
    self.question_group.setVisible(False)
    enable_busy_cursor()
    self.repaint()
    qApp.processEvents()
    self.busy = True
0.006231
def check_sub_path_create(sub_path):
    """
    Check whether a given sub path exists under the current path, and create it if it does not exist.

    :param:
        * sub_path: (string) name of the sub path one level below the current path

    :return:
        * return type (tuple), two values

            * (True, False): the path already exists, no creation needed
            * (False, True): the path did not exist and was created successfully

    Example::

        print('--- check_sub_path_create demo ---')

        # define the sub path name
        sub_path = 'demo_sub_dir'

        # check whether the sub path exists under the current path, create it if it does not
        print('check sub path:', sub_path)
        result = check_sub_path_create(sub_path)
        print(result)

        print('---')

    Output::

        --- check_sub_path_create demo ---
        check sub path: demo_sub_dir
        (True, False)
        ---

    """
    # get the current path
    temp_path = pathlib.Path()
    cur_path = temp_path.resolve()

    # build the path that contains sub_path
    path = cur_path / pathlib.Path(sub_path)

    # check whether the path with sub_path exists
    if path.exists():
        # return True: the path exists, False: no creation needed
        return True, False
    else:
        path.mkdir(parents=True)
        # return False: the path did not exist, True: the path has been created
        return False, True
0.003756
def unreduce_array(array, shape, axis, keepdims): """Reverse summing over a dimension, NumPy implementation. Args: array: The array that was reduced. shape: The original shape of the array before reduction. axis: The axis or axes that were summed. keepdims: Whether these axes were kept as singleton axes. Returns: An array with axes broadcast to match the shape of the original array. """ # NumPy uses a special default value for keepdims, which is equivalent to # False. if axis is not None and (not keepdims or keepdims is numpy._NoValue): # pylint: disable=protected-access if isinstance(axis, int): axis = axis, for ax in sorted(axis): array = numpy.expand_dims(array, ax) return numpy.broadcast_to(array, shape)
0.011598
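A small worked example for unreduce_array, undoing a sum over axis 1::

    import numpy as np

    x = np.arange(6.0).reshape(2, 3)
    s = x.sum(axis=1)                                   # shape (2,)
    full = unreduce_array(s, x.shape, axis=1, keepdims=False)
    # s is expanded to shape (2, 1) and broadcast back to (2, 3):
    # [[ 3.,  3.,  3.],
    #  [12., 12., 12.]]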
def smoothed_hazards_(self, bandwidth=1):
    """
    Smooth the hazard function with an Epanechnikov kernel, using the given sigma/bandwidth.
    """
    timeline = self._index.values
    return pd.DataFrame(
        np.dot(epanechnikov_kernel(timeline[:, None], timeline, bandwidth), self.hazards_.values),
        columns=self.hazards_.columns,
        index=timeline,
    )
0.009828
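A heavily hedged usage sketch: this method looks like it belongs to lifelines' AalenAdditiveFitter (per-covariate hazards_ columns plus a timeline index), so the example assumes that fitter and lifelines' bundled regression dataset; neither assumption comes from the source above::

    from lifelines import AalenAdditiveFitter
    from lifelines.datasets import load_regression_dataset

    df = load_regression_dataset()
    aaf = AalenAdditiveFitter(coef_penalizer=1.0)
    aaf.fit(df, duration_col="T", event_col="E")
    smoothed = aaf.smoothed_hazards_(bandwidth=2.0)  # one smoothed column per covariate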
def set_wrapped(self, wrapped): """This will decide what wrapped is and set .wrapped_func or .wrapped_class accordingly :param wrapped: either a function or class """ self.wrapped = wrapped functools.update_wrapper(self, self.wrapped, updated=()) self.wrapped_func = False self.wrapped_class = False if inspect.isroutine(wrapped): self.wrapped_func = True elif isinstance(wrapped, type): self.wrapped_class = True
0.005769
def reconstruct_url(environ, port): """Reconstruct the remote url from the given WSGI ``environ`` dictionary. :param environ: the WSGI environment :type environ: :class:`collections.MutableMapping` :returns: the remote url to proxy :rtype: :class:`basestring` """ # From WSGI spec, PEP 333 url = environ.get('PATH_INFO', '') if not url.startswith(('http://', 'https://')): url = '%s://%s%s' % ( environ['wsgi.url_scheme'], environ['HTTP_HOST'], url ) # Fix ;arg=value in url if '%3B' in url: url, arg = url.split('%3B', 1) url = ';'.join([url, arg.replace('%3D', '=')]) # Stick query string back in try: query_string = environ['QUERY_STRING'] except KeyError: pass else: url += '?' + query_string parsed = urlparse(url) replaced = parsed._replace(netloc="localhost:{}".format(port)) url = urlunparse(replaced) environ['reconstructed_url'] = url return url
0.000971
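A worked example for reconstruct_url with a hypothetical WSGI environ::

    environ = {
        "PATH_INFO": "/api/items",
        "wsgi.url_scheme": "http",
        "HTTP_HOST": "example.com",
        "QUERY_STRING": "page=2",
    }
    print(reconstruct_url(environ, 8080))
    # -> http://localhost:8080/api/items?page=2  (also stored in environ['reconstructed_url'])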
def host(self, host): """ Return the configuration of a specific host as a dictionary. Dictionary always contains lowercase versions of the attribute names. Parameters ---------- host : the host to return values for. Returns ------- dict of key value pairs, excluding "Host", empty map if host is not found. """ if host in self.hosts_: vals = defaultdict(list) for k, value in [(x.key.lower(), x.value) for x in self.lines_ if x.host == host and x.key.lower() != "host"]: vals[k].append(value) flatten = lambda x: x[0] if len(x) == 1 else x return {k: flatten(v) for k, v in vals.items()} return {}
0.005063
def start_record(): """ Install an httplib wrapper that records but does not modify calls. """ global record, playback, current if record: raise StateError("Already recording.") if playback: raise StateError("Currently playing back.") record = True current = ReplayData() install(RecordingHTTPConnection, RecordingHTTPSConnection)
0.002618
def get_provider_info(self, user_descriptor): """GetProviderInfo. [Preview API] :param str user_descriptor: :rtype: :class:`<GraphProviderInfo> <azure.devops.v5_0.graph.models.GraphProviderInfo>` """ route_values = {} if user_descriptor is not None: route_values['userDescriptor'] = self._serialize.url('user_descriptor', user_descriptor, 'str') response = self._send(http_method='GET', location_id='1e377995-6fa2-4588-bd64-930186abdcfa', version='5.0-preview.1', route_values=route_values) return self._deserialize('GraphProviderInfo', response)
0.006944
def _filter_seqs(fn):
    """Convert names of sequences to unique ids"""
    out_file = op.splitext(fn)[0] + "_unique.fa"
    idx = 0
    if not file_exists(out_file):
        with open(out_file, 'w') as out_handle:
            with open(fn) as in_handle:
                for line in in_handle:
                    if line.startswith("@") or line.startswith(">"):
                        fixed_name = _make_unique(line.strip(), idx)
                        seq = next(in_handle).strip()
                        counts = _get_freq(fixed_name)
                        if len(seq) < 26 and (counts > 1 or counts == 0):
                            idx += 1
                            print(fixed_name, file=out_handle, end="\n")
                            print(seq, file=out_handle, end="\n")
                        if line.startswith("@"):
                            next(in_handle)
                            next(in_handle)
    return out_file
0.001052
def parse(self, fo, width, seed=None): """ Convert Posmo output to motifs Parameters ---------- fo : file-like File object containing Posmo output. Returns ------- motifs : list List of Motif instances. """ motifs = [] lines = [fo.readline() for x in range(6)] while lines[0]: matrix = [[float(x) for x in line.strip().split("\t")] for line in lines[2:]] matrix = [[matrix[x][y] for x in range(4)] for y in range(len(matrix[0]))] m = Motif(matrix) m.trim(0.1) m.id = lines[0].strip().split(" ")[-1] motifs.append(m) lines = [fo.readline() for x in range(6)] for i,motif in enumerate(motifs): if seed: motif.id = "%s_w%s.%s_%s" % (self.name, width, seed, i + 1) else: motif.id = "%s_w%s_%s" % (self.name, width, i + 1) motif.trim(0.25) return motifs
0.007512
def _lu_solve_assertions(lower_upper, perm, rhs, validate_args): """Returns list of assertions related to `lu_solve` assumptions.""" assertions = _lu_reconstruct_assertions(lower_upper, perm, validate_args) message = 'Input `rhs` must have at least 2 dimensions.' if rhs.shape.ndims is not None: if rhs.shape.ndims < 2: raise ValueError(message) elif validate_args: assertions.append( tf.compat.v1.assert_rank_at_least(rhs, rank=2, message=message)) message = '`lower_upper.shape[-1]` must equal `rhs.shape[-1]`.' if (tf.compat.dimension_value(lower_upper.shape[-1]) is not None and tf.compat.dimension_value(rhs.shape[-2]) is not None): if lower_upper.shape[-1] != rhs.shape[-2]: raise ValueError(message) elif validate_args: assertions.append( tf.compat.v1.assert_equal( tf.shape(input=lower_upper)[-1], tf.shape(input=rhs)[-2], message=message)) return assertions
0.013361
def get_candidates(self, docs=None, split=0, sort=False): """Return a list of lists of the candidates associated with this extractor. Each list of the return will contain the candidates for one of the candidate classes associated with the CandidateExtractor. :param docs: If provided, return candidates from these documents from all splits. :type docs: list, tuple of ``Documents``. :param split: If docs is None, then return all the candidates from this split. :type split: int :param sort: If sort is True, then return all candidates sorted by stable_id. :type sort: bool :return: Candidates for each candidate_class. :rtype: List of lists of ``Candidates``. """ result = [] if docs: docs = docs if isinstance(docs, (list, tuple)) else [docs] # Get cands from all splits for candidate_class in self.candidate_classes: cands = ( self.session.query(candidate_class) .filter(candidate_class.document_id.in_([doc.id for doc in docs])) .order_by(candidate_class.id) .all() ) if sort: cands = sorted( cands, key=lambda x: " ".join( [x[i][0].get_stable_id() for i in range(len(x))] ), ) result.append(cands) else: for candidate_class in self.candidate_classes: # Filter by candidate_ids in a particular split sub_query = ( self.session.query(Candidate.id) .filter(Candidate.split == split) .subquery() ) cands = ( self.session.query(candidate_class) .filter(candidate_class.id.in_(sub_query)) .order_by(candidate_class.id) .all() ) if sort: cands = sorted( cands, key=lambda x: " ".join( [x[i][0].get_stable_id() for i in range(len(x))] ), ) result.append(cands) return result
0.002042
def universe(self): """ Data universe available at the current time. Universe contains the data passed in when creating a Backtest. Use this data to determine strategy logic. """ # avoid windowing every time # if calling and on same date return # cached value if self.now == self._last_chk: return self._funiverse else: self._last_chk = self.now self._funiverse = self._universe.loc[:self.now] return self._funiverse
0.003676
def get_admin_urls_for_registration(self):
    """
    Utilised by Wagtail's 'register_admin_urls' hook to register urls for
    the views that this class offers.
    """
    urls = super(OrderModelAdmin, self).get_admin_urls_for_registration()
    urls = urls + (
        url(self.url_helper.get_action_url_pattern('detail'),
            self.detail_view,
            name=self.url_helper.get_action_url_name('detail')),
    )
    return urls
0.004115
def get_tunnel_info_input_filter_type_filter_by_cfg_src_config_src(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_tunnel_info = ET.Element("get_tunnel_info") config = get_tunnel_info input = ET.SubElement(get_tunnel_info, "input") filter_type = ET.SubElement(input, "filter-type") filter_by_cfg_src = ET.SubElement(filter_type, "filter-by-cfg-src") config_src = ET.SubElement(filter_by_cfg_src, "config-src") config_src.text = kwargs.pop('config_src') callback = kwargs.pop('callback', self._callback) return callback(config)
0.004594
def daily_txns_with_bar_data(transactions, market_data): """ Sums the absolute value of shares traded in each name on each day. Adds columns containing the closing price and total daily volume for each day-ticker combination. Parameters ---------- transactions : pd.DataFrame Prices and amounts of executed trades. One row per trade. - See full explanation in tears.create_full_tear_sheet market_data : pd.Panel Contains "volume" and "price" DataFrames for the tickers in the passed positions DataFrames Returns ------- txn_daily : pd.DataFrame Daily totals for transacted shares in each traded name. price and volume columns for close price and daily volume for the corresponding ticker, respectively. """ transactions.index.name = 'date' txn_daily = pd.DataFrame(transactions.assign( amount=abs(transactions.amount)).groupby( ['symbol', pd.TimeGrouper('D')]).sum()['amount']) txn_daily['price'] = market_data['price'].unstack() txn_daily['volume'] = market_data['volume'].unstack() txn_daily = txn_daily.reset_index().set_index('date') return txn_daily
0.000831
def files(self): """ Get a generator that yields all files in the directory. """ filelist_p = new_gp_object("CameraList") lib.gp_camera_folder_list_files(self._cam._cam, self.path.encode(), filelist_p, self._cam._ctx) for idx in range(lib.gp_list_count(filelist_p)): fname = get_string(lib.gp_list_get_name, filelist_p, idx) yield File(name=fname, directory=self, camera=self._cam) lib.gp_list_free(filelist_p)
0.003891
def required_attributes(element, *attributes): """Check element for required attributes. Raise ``NotValidXmlException`` on error. :param element: ElementTree element :param attributes: list of attributes names to check :raises NotValidXmlException: if some argument is missing """ if not reduce(lambda still_valid, param: still_valid and param in element.attrib, attributes, True): raise NotValidXmlException(msg_err_missing_attributes(element.tag, *attributes))
0.008081
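A short sketch for required_attributes using a throwaway ElementTree element::

    import xml.etree.ElementTree as ET

    job = ET.fromstring('<job id="42" owner="alice"/>')
    required_attributes(job, "id", "owner")       # all present, returns silently
    required_attributes(job, "id", "priority")    # raises NotValidXmlException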
def safe_makedir(dname): """Make a directory if it doesn't exist, handling concurrent race conditions. """ if not dname: return dname num_tries = 0 max_tries = 5 while not os.path.exists(dname): # we could get an error here if multiple processes are creating # the directory at the same time. Grr, concurrency. try: os.makedirs(dname) except OSError: if num_tries > max_tries: raise num_tries += 1 time.sleep(2) return dname
0.003597
def get_assessment_taken_form(self, *args, **kwargs): """Pass through to provider AssessmentTakenAdminSession.get_assessment_taken_form_for_update""" # Implemented from kitosid template for - # osid.resource.ResourceAdminSession.get_resource_form_for_update # This method might be a bit sketchy. Time will tell. if isinstance(args[-1], list) or 'assessment_taken_record_types' in kwargs: return self.get_assessment_taken_form_for_create(*args, **kwargs) else: return self.get_assessment_taken_form_for_update(*args, **kwargs)
0.0067