Dataset columns: text (string, lengths 78 to 104k) · score (float64, range 0 to 0.18)
def stack_trace(depth=None):
    """
    returns a print friendly stack trace at the current frame,
    without aborting the application.

    :param depth: The depth of the stack trace. if omitted, the entire
        stack will be printed.

    usage::

        print stack_trace(10)
    """
    frames = inspect.stack()[2:]
    if depth:
        frames = frames[:depth]
    result = StringIO()
    result.write("----------------------------------------------------\n")
    for (frame, file, line, context, code, status) in frames:
        result.write("In %s from %s\n%s %s" % (context, file, line, "\n".join(code)))
    result.write("----------------------------------------------------\n")
    return result.getvalue()
0.009272
def get_queue_attributes(self, queue, attribute='All', callback=None):
    """
    Gets one or all attributes of a Queue.

    :type queue: A Queue object
    :param queue: The SQS queue whose attributes are being retrieved

    :type attribute: str
    :param attribute: The specific attribute requested. If not supplied,
        the default is to return all attributes. Valid attributes are:

        ApproximateNumberOfMessages|
        ApproximateNumberOfMessagesNotVisible|
        VisibilityTimeout|
        CreatedTimestamp|
        LastModifiedTimestamp|
        Policy

    :rtype: :class:`boto.sqs.attributes.Attributes`
    :return: An Attributes object containing request value(s).
    """
    params = {'AttributeName': attribute}
    return self.get_object('GetQueueAttributes', params, Attributes,
                           queue.id, callback=callback)
0.005479
def prompt(name, default=None):
    """
    Grab user input from command line.

    :param name: prompt text
    :param default: default value if no input provided.
    """
    prompt = name + (default and ' [%s]' % default or '')
    prompt += name.endswith('?') and ' ' or ': '
    while True:
        rv = raw_input(prompt)
        if rv:
            return rv
        if default is not None:
            return default
0.002364
def send_keys(self, text: str = 'cerium') -> None:
    '''Simulates typing keys.'''
    for char in text:
        if '\u4e00' <= char <= '\u9fff':
            raise CharactersException(
                f'Text cannot contain non-English characters, such as {char!r}.')
    text = re.escape(text)
    self._execute('-s', self.device_sn, 'shell', 'input', 'text', text)
0.007212
def encrypt(self, mesg):
    '''
    Wrap a message with a sequence number and encrypt it.

    Args:
        mesg: The message to encrypt.

    Returns:
        bytes: The encrypted message.
    '''
    seqn = next(self._tx_sn)
    rv = self._tx_tinh.enc(s_msgpack.en((seqn, mesg)))
    return rv
0.005988
def write_py2k_header(file_list):
    """Write Python 2 shebang and add encoding cookie if needed."""
    if not isinstance(file_list, list):
        file_list = [file_list]

    python_re = re.compile(br"^(#!.*\bpython)(.*)([\r\n]+)$")
    coding_re = re.compile(br"coding[:=]\s*([-\w.]+)")
    new_line_re = re.compile(br"([\r\n]+)$")
    version_3 = LooseVersion('3')

    for file in file_list:
        if not os.path.getsize(file):
            continue

        rewrite_needed = False
        python_found = False
        coding_found = False
        lines = []

        f = open(file, 'rb')
        try:
            while len(lines) < 2:
                line = f.readline()
                match = python_re.match(line)
                if match:
                    python_found = True
                    version = LooseVersion(match.group(2).decode() or '2')
                    try:
                        version_test = version >= version_3
                    except TypeError:
                        version_test = True
                    if version_test:
                        line = python_re.sub(br"\g<1>2\g<3>", line)
                        rewrite_needed = True
                elif coding_re.search(line):
                    coding_found = True
                lines.append(line)
            if not coding_found:
                match = new_line_re.search(lines[0])
                newline = match.group(1) if match else b"\n"
                line = b"# -*- coding: utf-8 -*-" + newline
                lines.insert(1 if python_found else 0, line)
                rewrite_needed = True
            if rewrite_needed:
                lines += f.readlines()
        finally:
            f.close()

        if rewrite_needed:
            f = open(file, 'wb')
            try:
                f.writelines(lines)
            finally:
                f.close()
0.000532
def sort_pattern(self):
    """Extract sort pattern from operations."""
    if not self._sort_pattern:
        # trigger evaluation of operation
        if self.operation in ['query', 'getmore']:
            self._sort_pattern = self._find_pattern('orderby: ')
    return self._sort_pattern
0.006309
def _delete(self, pk):
    """
    Delete function logic; override to implement different logic.
    Deletes the record with primary_key = pk.

    :param pk: record primary key to delete
    """
    item = self.datamodel.get(pk, self._base_filters)
    if not item:
        abort(404)
    try:
        self.pre_delete(item)
    except Exception as e:
        flash(str(e), "danger")
    else:
        if self.datamodel.delete(item):
            self.post_delete(item)
        flash(*self.datamodel.message)
        self.update_redirect()
0.003165
def _get_remote_video_url(self, remote_node, session_id):
    """Get grid-extras url to download videos

    :param remote_node: remote node name
    :param session_id: test session id
    :returns: grid-extras url to download videos
    """
    url = '{}/video'.format(self._get_remote_node_url(remote_node))
    timeout = time.time() + 5  # 5 seconds from now

    # Request the videos list until timeout or until the video url is found
    video_url = None
    while time.time() < timeout:
        response = requests.get(url).json()
        try:
            video_url = response['available_videos'][session_id]['download_url']
            break
        except KeyError:
            time.sleep(1)
    return video_url
0.003851
def hist2d(h, axes=None, colorbar=False, **kwargs):
    """
    Draw a 2D matplotlib histogram plot from a 2D ROOT histogram.

    Parameters
    ----------

    h : Hist2D
        A rootpy Hist2D

    axes : matplotlib Axes instance, optional (default=None)
        The axes to plot on. If None then use the global current axes.

    colorbar : Boolean, optional (default=False)
        If True, include a colorbar in the produced plot

    kwargs : additional keyword arguments, optional
        Additional keyword arguments are passed directly to
        matplotlib's hist2d function.

    Returns
    -------
    Returns the value from matplotlib's hist2d function.
    """
    if axes is None:
        axes = plt.gca()
    X, Y = np.meshgrid(list(h.x()), list(h.y()))
    x = X.ravel()
    y = Y.ravel()
    z = np.array(h.z()).T
    # returns of hist2d: (counts, xedges, yedges, Image)
    return_values = axes.hist2d(x, y, weights=z.ravel(),
                                bins=(list(h.xedges()), list(h.yedges())),
                                **kwargs)
    if colorbar:
        mappable = return_values[-1]
        plt.colorbar(mappable, ax=axes)
    return return_values
0.000843
def _get_memmap(self):
    """Get the memory map for the SEVIRI data"""
    with open(self.filename) as fp:
        data_dtype = self._get_data_dtype()
        hdr_size = native_header.itemsize

        return np.memmap(fp, dtype=data_dtype,
                         shape=(self.mda['number_of_lines'],),
                         offset=hdr_size, mode="r")
0.005181
def raw(text):
    """Returns a raw string representation of text"""
    new_string = ''
    for char in text:
        try:
            new_string += escape_dict[char]
        except KeyError:
            new_string += char
    return new_string
0.004082
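A minimal usage sketch of raw() above. The module-level escape_dict is not shown in the snippet, so the mapping here is a hypothetical stand-in:

escape_dict = {'\n': '\\n', '\t': '\\t'}  # hypothetical; real module defines its own

print(raw('a\tb\nc'))  # prints: a\tb\nc  (control characters made visible)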
def activate(self):
    """Activate the Router."""
    if lib.EnvActivateRouter(self._env, self._name.encode()) == 0:
        raise RuntimeError("Unable to activate router %s" % self._name)
0.00995
def time(self, time):
    """Add a request for a specific time to the query.

    This modifies the query in-place, but returns `self` so that multiple
    queries can be chained together on one line.

    This replaces any existing temporal queries that have been set.

    Parameters
    ----------
    time : datetime.datetime
        The time to request

    Returns
    -------
    self : DataQuery
        Returns self for chaining calls
    """
    self._set_query(self.time_query, time=self._format_time(time))
    return self
0.005042
async def genSchema(self, name, version, attrNames) -> Schema:
    """
    Generates and submits Schema.

    :param name: schema name
    :param version: schema version
    :param attrNames: a list of attributes the schema contains
    :return: submitted Schema
    """
    schema = Schema(name, version, attrNames, self.issuerId)
    return await self.wallet.submitSchema(schema)
0.004796
def first_ipv4(self) -> Optional[AddressInfo]:
    '''The first IPv4 address.'''
    for info in self._address_infos:
        if info.family == socket.AF_INET:
            return info
0.01005
def start(self):
    """Create the Interchange process and connect to it."""
    self.outgoing_q = zmq_pipes.TasksOutgoing(
        "127.0.0.1", self.interchange_port_range)
    self.incoming_q = zmq_pipes.ResultsIncoming(
        "127.0.0.1", self.interchange_port_range)

    self.is_alive = True

    self._queue_management_thread = None
    self._start_queue_management_thread()
    self._start_local_queue_process()

    logger.debug("Created management thread: {}"
                 .format(self._queue_management_thread))

    if self.provider:
        # debug_opts = "--debug" if self.worker_debug else ""
        l_cmd = self.launch_cmd.format(  # debug=debug_opts,
            task_url=self.worker_task_url,
            workers_per_node=self.workers_per_node,
            logdir="{}/{}".format(self.run_dir, self.label))
        self.launch_cmd = l_cmd
        logger.debug("Launch command: {}".format(self.launch_cmd))

        self._scaling_enabled = self.provider.scaling_enabled
        logger.debug(
            "Starting LowLatencyExecutor with provider:\n%s", self.provider)
        if hasattr(self.provider, 'init_blocks'):
            try:
                for i in range(self.provider.init_blocks):
                    block = self.provider.submit(
                        self.launch_cmd, 1, self.workers_per_node)
                    logger.debug("Launched block {}:{}".format(i, block))
                    if not block:
                        raise(ScalingFailed(self.provider.label,
                                            "Attempts to provision nodes via provider has failed"))
                    self.blocks.extend([block])
            except Exception as e:
                logger.error("Scaling out failed: {}".format(e))
                raise e
    else:
        self._scaling_enabled = False
        logger.debug("Starting LowLatencyExecutor with no provider")
0.001945
def compute_rewards(self, scores):
    """Compute the velocity of the k+1 most recent scores.

    The velocity is the average distance between scores. Return a list
    with those k velocities padded out with zeros so that the count
    remains the same.
    """
    # take the k + 1 most recent scores so we can get k velocities
    recent_scores = scores[:-self.k - 2:-1]
    velocities = [recent_scores[i] - recent_scores[i + 1]
                  for i in range(len(recent_scores) - 1)]
    # pad the list out with zeros, so the length of the list is maintained
    zeros = (len(scores) - self.k) * [0]
    return velocities + zeros
0.004367
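A quick standalone check of the slicing logic above, with a literal k = 3 in place of self.k:

scores = [1, 3, 6, 10, 15]
k = 3
recent_scores = scores[:-k - 2:-1]   # last k+1 scores, newest first: [15, 10, 6, 3]
velocities = [recent_scores[i] - recent_scores[i + 1]
              for i in range(len(recent_scores) - 1)]
print(velocities)                             # [5, 4, 3]
print(velocities + (len(scores) - k) * [0])   # [5, 4, 3, 0, 0] -- same length as scores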
def file(ctx, data_dir, data_file):
    """Use the File SWAG Backend"""
    if not ctx.file:
        ctx.data_file = data_file
    if not ctx.data_dir:
        ctx.data_dir = data_dir
    ctx.type = 'file'
0.004831
def _create_tables(self):
    """
    Set up the subdomain db's tables
    """
    cursor = self.conn.cursor()

    create_cmd = """CREATE TABLE IF NOT EXISTS {} (
        fully_qualified_subdomain TEXT NOT NULL,
        domain TEXT NOT NULL,
        sequence INTEGER NOT NULL,
        owner TEXT NOT NULL,
        zonefile_hash TEXT NOT NULL,
        signature TEXT NOT NULL,
        block_height INTEGER NOT NULL,
        parent_zonefile_hash TEXT NOT NULL,
        parent_zonefile_index INTEGER NOT NULL,
        zonefile_offset INTEGER NOT NULL,
        txid TEXT NOT NULL,
        missing TEXT NOT NULL,
        accepted INTEGER NOT NULL,
        resolver TEXT,
        PRIMARY KEY(fully_qualified_subdomain,parent_zonefile_index));
        """.format(self.subdomain_table)

    db_query_execute(cursor, create_cmd, ())

    # set up a queue as well
    queue_con = queuedb_open(self.queue_path)
    queue_con.close()
0.00209
def spread(nodes, n):
    """Distribute master instances across different nodes.

    {
        "192.168.0.1": [node1, node2],
        "192.168.0.2": [node3, node4],
        "192.168.0.3": [node5, node6]
    } => [node1, node3, node5]
    """
    target = []
    while len(target) < n and nodes:
        for ip, node_group in list(nodes.items()):
            if not node_group:
                nodes.pop(ip)
                continue
            target.append(node_group.pop(0))
            if len(target) >= n:
                break
    return target
0.001832
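A usage sketch of spread(), using plain strings to stand in for node objects. Note that it mutates the input dict by popping nodes off each group:

nodes = {
    "192.168.0.1": ["node1", "node2"],
    "192.168.0.2": ["node3", "node4"],
    "192.168.0.3": ["node5", "node6"],
}
print(spread(nodes, 3))  # ['node1', 'node3', 'node5'] -- one master per host first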
def get_best_span(span_start_logits: torch.Tensor,
                  span_end_logits: torch.Tensor) -> torch.Tensor:
    """
    This acts the same as the static method ``BidirectionalAttentionFlow.get_best_span()``
    in ``allennlp/models/reading_comprehension/bidaf.py``. We keep it here so that users can
    directly import this function without the class.

    We call the inputs "logits" - they could either be unnormalized logits or normalized log
    probabilities.  A log_softmax operation is a constant shifting of the entire logit
    vector, so taking an argmax over either one gives the same result.
    """
    if span_start_logits.dim() != 2 or span_end_logits.dim() != 2:
        raise ValueError("Input shapes must be (batch_size, passage_length)")
    batch_size, passage_length = span_start_logits.size()
    device = span_start_logits.device
    # (batch_size, passage_length, passage_length)
    span_log_probs = span_start_logits.unsqueeze(2) + span_end_logits.unsqueeze(1)
    # Only the upper triangle of the span matrix is valid; the lower triangle has entries
    # where the span ends before it starts.
    span_log_mask = torch.triu(torch.ones((passage_length, passage_length),
                                          device=device)).log()
    valid_span_log_probs = span_log_probs + span_log_mask

    # Here we take the span matrix and flatten it, then find the best span using argmax.  We
    # can recover the start and end indices from this flattened list using simple modular
    # arithmetic.
    # (batch_size, passage_length * passage_length)
    best_spans = valid_span_log_probs.view(batch_size, -1).argmax(-1)
    span_start_indices = best_spans // passage_length
    span_end_indices = best_spans % passage_length
    return torch.stack([span_start_indices, span_end_indices], dim=-1)
0.005525
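A small self-contained check of the flatten/argmax recovery above, for one passage of length 4 (assumes get_best_span is in scope):

import torch

start_logits = torch.tensor([[0.1, 2.0, 0.3, 0.2]])  # best start at index 1
end_logits = torch.tensor([[0.0, 0.1, 1.5, 0.4]])    # best end at index 2
# Flattened argmax lands at 1 * 4 + 2 = 6; 6 // 4 = 1 and 6 % 4 = 2 recover the span.
print(get_best_span(start_logits, end_logits))  # tensor([[1, 2]])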
def InferUserAndSubjectFromUrn(self):
    """Infers user name and subject urn from self.urn."""
    _, cron_str, cron_job_name, user, _ = self.urn.Split(5)

    if cron_str != "cron":
        raise access_control.UnauthorizedAccess(
            "Approval object has invalid urn %s." % self.urn,
            requested_access=self.token.requested_access)

    return (user, aff4.ROOT_URN.Add("cron").Add(cron_job_name))
0.004866
def tables(self, db_id, schema, substr, force_refresh='false'):
    """Endpoint to fetch the list of tables for given database"""
    db_id = int(db_id)
    force_refresh = force_refresh.lower() == 'true'
    schema = utils.js_string_to_python(schema)
    substr = utils.js_string_to_python(substr)
    database = db.session.query(models.Database).filter_by(id=db_id).one()

    if schema:
        table_names = database.all_table_names_in_schema(
            schema=schema, force=force_refresh,
            cache=database.table_cache_enabled,
            cache_timeout=database.table_cache_timeout)
        view_names = database.all_view_names_in_schema(
            schema=schema, force=force_refresh,
            cache=database.table_cache_enabled,
            cache_timeout=database.table_cache_timeout)
    else:
        table_names = database.all_table_names_in_database(
            cache=True, force=False, cache_timeout=24 * 60 * 60)
        view_names = database.all_view_names_in_database(
            cache=True, force=False, cache_timeout=24 * 60 * 60)
    table_names = security_manager.accessible_by_user(database, table_names, schema)
    view_names = security_manager.accessible_by_user(database, view_names, schema)

    if substr:
        table_names = [tn for tn in table_names if substr in tn]
        view_names = [vn for vn in view_names if substr in vn]

    if not schema and database.default_schemas:
        def get_schema(tbl_or_view_name):
            return tbl_or_view_name.split('.')[0] if '.' in tbl_or_view_name else None

        user_schema = g.user.email.split('@')[0]
        valid_schemas = set(database.default_schemas + [user_schema])

        table_names = [tn for tn in table_names if get_schema(tn) in valid_schemas]
        view_names = [vn for vn in view_names if get_schema(vn) in valid_schemas]

    max_items = config.get('MAX_TABLE_NAMES') or len(table_names)
    total_items = len(table_names) + len(view_names)
    max_tables = len(table_names)
    max_views = len(view_names)
    if total_items and substr:
        max_tables = max_items * len(table_names) // total_items
        max_views = max_items * len(view_names) // total_items

    table_options = [{'value': tn, 'label': tn}
                     for tn in table_names[:max_tables]]
    table_options.extend([{'value': vn, 'label': '[view] {}'.format(vn)}
                          for vn in view_names[:max_views]])
    payload = {
        'tableLength': len(table_names) + len(view_names),
        'options': table_options,
    }
    return json_success(json.dumps(payload))
0.002519
def add_bpmn_xml(self, bpmn, svg=None, filename=None):
    """
    Add the given lxml representation of the BPMN file to the parser's set.

    :param svg: Optionally, provide the text data for the SVG of the BPMN
        file
    :param filename: Optionally, provide the source filename.
    """
    xpath = xpath_eval(bpmn)
    processes = xpath('.//bpmn:process')
    for process in processes:
        process_parser = self.PROCESS_PARSER_CLASS(
            self, process, svg, filename=filename, doc_xpath=xpath)
        if process_parser.get_id() in self.process_parsers:
            raise ValidationException(
                'Duplicate process ID', node=process, filename=filename)
        if process_parser.get_name() in self.process_parsers_by_name:
            raise ValidationException(
                'Duplicate process name', node=process, filename=filename)
        self.process_parsers[process_parser.get_id()] = process_parser
        self.process_parsers_by_name[
            process_parser.get_name()] = process_parser
0.001792
def _make_nodes(self, cwd=None):
    """
    Cast generated nodes to be Arcana nodes
    """
    for i, node in NipypeMapNode._make_nodes(self, cwd=cwd):
        # "Cast" the NiPype node to an Arcana Node and set Arcana Node
        # parameters
        node.__class__ = self.node_cls
        node._environment = self._environment
        node._versions = self._versions
        node._wall_time = self._wall_time
        node._annotations = self._annotations
        yield i, node
0.003824
def imageinfo(self, files):
    """
    Returns imageinfo query string
    """
    files = '|'.join([safequote(x) for x in files])
    self.set_status('imageinfo', files)
    return self.IMAGEINFO.substitute(
        WIKI=self.uri, ENDPOINT=self.endpoint, FILES=files)
0.006211
def update(self, pop: Union[pd.DataFrame, pd.Series]):
    """Update the simulation's state to match ``pop``

    Parameters
    ----------
    pop :
        The data which should be copied into the simulation's state. If ``pop`` is
        a DataFrame only those columns included in the view's columns will be used.
        If ``pop`` is a Series it must have a name that matches one of the view's
        columns unless the view only has one column in which case the Series will
        be assumed to refer to that regardless of its name.
    """
    if not pop.empty:
        if isinstance(pop, pd.Series):
            if pop.name in self._columns:
                affected_columns = [pop.name]
            elif len(self._columns) == 1:
                affected_columns = self._columns
            else:
                raise PopulationError('Cannot update with a Series unless the series name '
                                      'equals a column name or there is only a single '
                                      'column in the view')
        else:
            affected_columns = set(pop.columns)

        affected_columns = set(affected_columns).intersection(self._columns)
        state_table = self.manager.get_population(True)
        if not self.manager.growing:
            affected_columns = set(affected_columns).intersection(state_table.columns)

        for c in affected_columns:
            if c in state_table:
                v = state_table[c].values
                if isinstance(pop, pd.Series):
                    v2 = pop.values
                else:
                    v2 = pop[c].values
                v[pop.index] = v2

                if v.dtype != v2.dtype:
                    # This happens when the population is being grown because extending
                    # the index forces columns that don't have a natural null type
                    # to become 'object'
                    if not self.manager.growing:
                        raise PopulationError('Component corrupting population table. '
                                              'Old column type: {} New column type: {}'
                                              .format(v.dtype, v2.dtype))
                    v = v.astype(v2.dtype)
            else:
                if isinstance(pop, pd.Series):
                    v = pop.values
                else:
                    v = pop[c].values
            self.manager._population[c] = v
0.005096
def view_meta_admonition(admonition_name, name=None):
    """List all found admonitions from all the rst files found in directory.

    view_meta_admonition is called by the 'meta' url:
    /__XXXXXXX__
    where XXXXXXX represents an admonition name, like:

    * todo
    * warning
    * danger
    * ...

    .. note:: this function may work for any docutils node, not only admonition

    Keyword Arguments:
        :admonition_name: (str) -- name of the admonition
    """
    print("meta admo: %s - %s" % (admonition_name, name))
    admonition = None
    if admonition_name == 'todo':
        admonition = todo
    elif admonition_name == 'done':
        admonition = done
    elif hasattr(nodes, admonition_name):
        admonition = getattr(nodes, admonition_name)
    else:
        return abort(404)
    doc2_content = ""
    doc2_output, doc2_pub = docutils.core.publish_programmatically(
        source_class=io.StringInput, source=doc2_content,
        source_path=None,
        destination_class=io.StringOutput,
        destination=None, destination_path=None,
        reader=None, reader_name='standalone',
        parser=None, parser_name='restructuredtext',
        writer=AttowikiWriter(), writer_name=None,
        settings=None, settings_spec=None,
        settings_overrides=None,
        config_section=None,
        enable_exit_status=False)
    section1 = nodes.section("{0}_list_file".format(admonition_name))
    doc2_pub.reader.document.append(section1)
    title1 = nodes.title("{0} LIST".format(admonition_name.upper()),
                         "{0} LIST".format(admonition_name.upper()))
    doc2_pub.reader.document.append(title1)
    if name is None:
        rst_files = [filename[2:-4] for filename in sorted(glob.glob("./*.rst"))]
        rst_files.reverse()
    else:
        rst_files = [filename[2:-4]
                     for filename in sorted(glob.glob("./{0}.rst".format(name)))]
    for file in rst_files:
        file_title = False
        file_handle = open(file + '.rst', 'r')
        file_content = file_handle.read()
        file_handle.close()
        file_content = file_content.decode('utf-8')
        output, pub = docutils.core.publish_programmatically(
            source_class=io.StringInput, source=file_content,
            source_path=None,
            destination_class=io.StringOutput,
            destination=None, destination_path=None,
            reader=None, reader_name='standalone',
            parser=None, parser_name='restructuredtext',
            writer=None, writer_name='html',
            settings=None, settings_spec=None,
            settings_overrides=None,
            config_section=None,
            enable_exit_status=False)
        my_settings = pub.get_settings()
        parser = docutils.parsers.rst.Parser()
        document = docutils.utils.new_document('test', my_settings)
        parser.parse(file_content, document)
        for node in document.traverse(admonition):
            if not file_title:
                file_title = True
                # new section
                section2 = nodes.section(file)
                doc2_pub.reader.document.append(section2)
                # add link to the originating file
                paragraph = nodes.paragraph()
                file_target = nodes.target(ids=[file],
                                           names=[file],
                                           refuri="/" + file)
                file_ref = nodes.reference(file, file,
                                           name=file,
                                           refuri="/" + file)
                paragraph.append(nodes.Text("in "))
                paragraph.append(file_ref)
                paragraph.append(file_target)
                paragraph.append(nodes.Text(":"))
                doc2_pub.reader.document.append(paragraph)
                # doc2_pub.reader.document.append(file_target)
            doc2_pub.reader.document.append(node)
    doc2_pub.apply_transforms()
    doc2_pub.writer.write(doc2_pub.document, doc2_pub.destination)
    doc2_pub.writer.assemble_parts()
    if name is None:
        display_file_name = '__{0}__'.format(admonition_name)
        extended_name = None
    else:
        display_file_name = '{0}'.format(name)
        extended_name = '__{0}__'.format(admonition_name)
    return template('page',
                    type="view",
                    name=display_file_name,
                    extended_name=extended_name,
                    is_repo=check_repo(),
                    history=[],
                    gitref=None,
                    content=doc2_pub.writer.parts['html_body'])
0.000213
def PathList(self, pathlist):
    """
    Returns the cached _PathList object for the specified pathlist,
    creating and caching a new object as necessary.
    """
    pathlist = self._PathList_key(pathlist)
    try:
        memo_dict = self._memo['PathList']
    except KeyError:
        memo_dict = {}
        self._memo['PathList'] = memo_dict
    else:
        try:
            return memo_dict[pathlist]
        except KeyError:
            pass

    result = _PathList(pathlist)
    memo_dict[pathlist] = result
    return result
0.003273
def is_colliding(self, other):
    """Check to see if two circles are colliding."""
    if isinstance(other, BoundingCircle):
        # Calculate the distance between the two circles.
        distance = Vector2.distance(self.coords, other.coords)
        # Check to see if the sum of their radii is greater than
        # or equal to the distance.
        radi_sum = self.radius + other.radius
        if distance <= radi_sum:
            # There has been a collision.
            ## print "Distance: ", distance, "\nRadi Sum: ", radi_sum
            ## print "Self Coords: ", self.coords, "\nOther Coords: ", other.coords
            return True
    # No collision.
    return False
0.016552
def pythonize(self, val):
    """If value is a list with a single distinct element, return that
    element; do nothing otherwise.

    :param val: value to convert
    :return: converted value
    """
    if isinstance(val, list) and len(set(val)) == 1:
        # If we have a list with a unique value just use it
        return val[0]
    # Well, can't choose to remove something.
    return val
0.004348
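A usage sketch, assuming obj is an instance of the class defining pythonize():

obj.pythonize(['30'])        # -> '30'   (single-valued list collapsed)
obj.pythonize(['a', 'b'])    # -> ['a', 'b']  (left unchanged)
obj.pythonize('30')          # -> '30'   (not a list; left unchanged)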
def _check_bullets(lines, **kwargs):
    """Check that the bullet point list is well formatted.

    Each bullet point shall have one space before and after it. The bullet
    character is the "*" and there is no space before it but one after it,
    meaning the next lines are starting with two blank spaces to respect the
    indentation.

    :param lines: all the lines of the message
    :type lines: list
    :param max_lengths: maximum length of any line. (Default 72)
    :return: errors as in (code, line number, *args)
    :rtype: list
    """
    max_length = kwargs.get("max_length", 72)
    labels = {l for l, _ in kwargs.get("commit_msg_labels", tuple())}

    def _strip_ticket_directives(line):
        return re.sub(r'( \([^)]*\)){1,}$', '', line)

    errors = []
    missed_lines = []
    skipped = []

    for (i, line) in enumerate(lines[1:]):
        if line.startswith('*'):
            dot_found = False
            if len(missed_lines) > 0:
                errors.append(("M130", i + 2))
            if lines[i].strip() != '':
                errors.append(("M120", i + 2))
            if _strip_ticket_directives(line).endswith('.'):
                dot_found = True

            label = _re_bullet_label.search(line)
            if label and label.group('label') not in labels:
                errors.append(("M122", i + 2, label.group('label')))

            for (j, indented) in enumerate(lines[i + 2:]):
                if indented.strip() == '':
                    break
                if not re.search(r"^ {2}\S", indented):
                    errors.append(("M121", i + j + 3))
                else:
                    skipped.append(i + j + 1)
                    stripped_line = _strip_ticket_directives(indented)
                    if stripped_line.endswith('.'):
                        dot_found = True
                    elif stripped_line.strip():
                        dot_found = False

            if not dot_found:
                errors.append(("M123", i + 2))
        elif i not in skipped and line.strip():
            missed_lines.append((i + 2, line))

        if len(line) > max_length:
            errors.append(("M190", i + 2, max_length, len(line)))

    return errors, missed_lines
0.000891
def gettext_lazy(message, domain=DEFAULT_DOMAIN):
    """Mark a message as translatable, but delay the translation until the message is used.

    Sometimes, there are some messages that need to be translated, but the translation
    can't be done at the point the message itself is written. For example, the names of
    the fields in a Model can't be translated at the point they are written, otherwise
    the translation would be done when the file is imported, long before a user even
    connects. To avoid this, `gettext_lazy` should be used. For example:

    .. code-block:: python

        from zengine.lib.translation import gettext_lazy, InstalledLocale
        from pyoko import model, fields
        class User(model.Model):
            name = fields.String(gettext_lazy('User Name'))

        print(User.name.title)
        'User Name'
        InstalledLocale.install_language('tr')
        print(User.name.title)
        'Kullanıcı Adı'

    Args:
        message (basestring, unicode): The input message.
        domain (basestring): The domain of the message. Defaults to 'messages', which
            is the domain where all application messages should be located.

    Returns:
        unicode: The translated message, with the translation itself being delayed
            until the text is actually used.
    """
    return LazyProxy(gettext, message, domain=domain, enable_cache=False)
0.006351
def on_mouse(self, event):
    '''handle mouse events'''
    state = self.state
    pos = event.GetPosition()
    if event.Leaving():
        self.mouse_pos = None
    else:
        self.mouse_pos = pos
    self.update_position()

    if hasattr(event, 'ButtonIsDown'):
        any_button_down = event.ButtonIsDown(wx.MOUSE_BTN_ANY)
        left_button_down = event.ButtonIsDown(wx.MOUSE_BTN_LEFT)
        right_button_down = event.ButtonIsDown(wx.MOUSE_BTN_RIGHT)
    else:
        left_button_down = event.leftIsDown
        right_button_down = event.rightIsDown
        any_button_down = left_button_down or right_button_down

    if any_button_down or event.ButtonUp():
        # send any event with a mouse button to the parent
        latlon = self.coordinates(pos.x, pos.y)
        selected = self.selected_objects(pos)
        state.event_queue.put(SlipMouseEvent(latlon, event, selected))
        if event.RightDown():
            state.popup_object = None
            state.popup_latlon = None
            if len(selected) > 0:
                obj = state.layers[selected[0].layer][selected[0].objkey]
                if obj.popup_menu is not None:
                    state.popup_object = obj
                    state.popup_latlon = latlon
                    self.show_popup(obj, pos)
                    state.popup_started = True
            if not state.popup_started and state.default_popup is not None:
                state.popup_latlon = latlon
                self.show_default_popup(pos)
                state.popup_started = True

    if not right_button_down:
        state.popup_started = False

    if event.LeftDown() or event.RightDown():
        self.mouse_down = pos
        self.last_click_pos = self.click_pos
        self.click_pos = self.coordinates(pos.x, pos.y)

    if event.Dragging() and left_button_down:
        # drag map to new position
        newpos = pos
        if self.mouse_down and newpos:
            dx = (self.mouse_down.x - newpos.x)
            dy = -(self.mouse_down.y - newpos.y)
            pdist = math.sqrt(dx**2 + dy**2)
            if pdist > state.drag_step:
                bearing = math.degrees(math.atan2(dx, dy))
                distance = (state.ground_width / float(state.width)) * pdist
                newlatlon = mp_util.gps_newpos(state.lat, state.lon,
                                               bearing, distance)
                (state.lat, state.lon) = newlatlon
                self.mouse_down = newpos
                self.redraw_map()
0.001116
def setup_options():
    """Sets Streamlink options."""
    if args.hls_live_edge:
        streamlink.set_option("hls-live-edge", args.hls_live_edge)
    if args.hls_segment_attempts:
        streamlink.set_option("hls-segment-attempts", args.hls_segment_attempts)
    if args.hls_playlist_reload_attempts:
        streamlink.set_option("hls-playlist-reload-attempts", args.hls_playlist_reload_attempts)
    if args.hls_segment_threads:
        streamlink.set_option("hls-segment-threads", args.hls_segment_threads)
    if args.hls_segment_timeout:
        streamlink.set_option("hls-segment-timeout", args.hls_segment_timeout)
    if args.hls_segment_ignore_names:
        streamlink.set_option("hls-segment-ignore-names", args.hls_segment_ignore_names)
    if args.hls_segment_key_uri:
        streamlink.set_option("hls-segment-key-uri", args.hls_segment_key_uri)
    if args.hls_timeout:
        streamlink.set_option("hls-timeout", args.hls_timeout)
    if args.hls_audio_select:
        streamlink.set_option("hls-audio-select", args.hls_audio_select)
    if args.hls_start_offset:
        streamlink.set_option("hls-start-offset", args.hls_start_offset)
    if args.hls_duration:
        streamlink.set_option("hls-duration", args.hls_duration)
    if args.hls_live_restart:
        streamlink.set_option("hls-live-restart", args.hls_live_restart)
    if args.hds_live_edge:
        streamlink.set_option("hds-live-edge", args.hds_live_edge)
    if args.hds_segment_attempts:
        streamlink.set_option("hds-segment-attempts", args.hds_segment_attempts)
    if args.hds_segment_threads:
        streamlink.set_option("hds-segment-threads", args.hds_segment_threads)
    if args.hds_segment_timeout:
        streamlink.set_option("hds-segment-timeout", args.hds_segment_timeout)
    if args.hds_timeout:
        streamlink.set_option("hds-timeout", args.hds_timeout)
    if args.http_stream_timeout:
        streamlink.set_option("http-stream-timeout", args.http_stream_timeout)
    if args.ringbuffer_size:
        streamlink.set_option("ringbuffer-size", args.ringbuffer_size)
    if args.rtmp_proxy:
        streamlink.set_option("rtmp-proxy", args.rtmp_proxy)
    if args.rtmp_rtmpdump:
        streamlink.set_option("rtmp-rtmpdump", args.rtmp_rtmpdump)
    if args.rtmp_timeout:
        streamlink.set_option("rtmp-timeout", args.rtmp_timeout)
    if args.stream_segment_attempts:
        streamlink.set_option("stream-segment-attempts", args.stream_segment_attempts)
    if args.stream_segment_threads:
        streamlink.set_option("stream-segment-threads", args.stream_segment_threads)
    if args.stream_segment_timeout:
        streamlink.set_option("stream-segment-timeout", args.stream_segment_timeout)
    if args.stream_timeout:
        streamlink.set_option("stream-timeout", args.stream_timeout)
    if args.ffmpeg_ffmpeg:
        streamlink.set_option("ffmpeg-ffmpeg", args.ffmpeg_ffmpeg)
    if args.ffmpeg_verbose:
        streamlink.set_option("ffmpeg-verbose", args.ffmpeg_verbose)
    if args.ffmpeg_verbose_path:
        streamlink.set_option("ffmpeg-verbose-path", args.ffmpeg_verbose_path)
    if args.ffmpeg_video_transcode:
        streamlink.set_option("ffmpeg-video-transcode", args.ffmpeg_video_transcode)
    if args.ffmpeg_audio_transcode:
        streamlink.set_option("ffmpeg-audio-transcode", args.ffmpeg_audio_transcode)

    streamlink.set_option("subprocess-errorlog", args.subprocess_errorlog)
    streamlink.set_option("subprocess-errorlog-path", args.subprocess_errorlog_path)
    streamlink.set_option("locale", args.locale)
0.003055
def macro_def(self, node, frame):
    """Dump the macro definition for the def created by macro_body."""
    arg_tuple = ', '.join(repr(x.name) for x in node.args)
    name = getattr(node, 'name', None)
    if len(node.args) == 1:
        arg_tuple += ','
    self.write('Macro(environment, macro, %r, (%s), (' % (name, arg_tuple))
    for arg in node.defaults:
        self.visit(arg, frame)
        self.write(', ')
    self.write('), %r, %r, %r)' % (
        bool(frame.accesses_kwargs),
        bool(frame.accesses_varargs),
        bool(frame.accesses_caller)
    ))
0.003096
def start(self, io_loop):
    """
    Run the ``before_run`` callbacks and queue the ``on_start`` callbacks.

    :param tornado.ioloop.IOLoop io_loop: loop to start the app on.
    """
    for callback in self.before_run_callbacks:
        try:
            callback(self.tornado_application, io_loop)
        except Exception:
            self.logger.error('before_run callback %r cancelled start',
                              callback, exc_info=1)
            self.stop(io_loop)
            raise

    for callback in self.on_start_callbacks:
        io_loop.spawn_callback(callback, self.tornado_application, io_loop)
0.00295
def ecdsa_sign_compact(msg32, seckey):
    """
    Takes the same message and seckey as _ecdsa_sign_recoverable.
    Returns an unsigned char array of length 65 containing the signed message.
    """
    # Assign 65 bytes to output
    output64 = ffi.new("unsigned char[65]")
    # ffi definition of recid
    recid = ffi.new("int *")

    lib.secp256k1_ecdsa_recoverable_signature_serialize_compact(
        ctx,
        output64,
        recid,
        _ecdsa_sign_recoverable(msg32, seckey)
    )

    # Assign recid to the last byte in the output array
    r = ffi.buffer(output64)[:64] + struct.pack("B", recid[0])
    assert len(r) == 65, len(r)
    return r
0.002985
def log_reject(self, block_id, vtxindex, op, op_data):
    """
    Log a rejected operation
    """
    debug_op = self.sanitize_op(op_data)
    if 'history' in debug_op:
        del debug_op['history']

    log.debug("REJECT %s at (%s, %s) data: %s",
              op_get_opcode_name(op), block_id, vtxindex,
              ", ".join(["%s='%s'" % (k, debug_op[k]) for k in sorted(debug_op.keys())]))

    return
0.029545
def setLib(self, lib):
    """
    Copy the lib items into our font.
    """
    for name, item in lib.items():
        self.font.lib[name] = item
0.013333
def decimal_default(obj):
    """Properly parse out the Decimal datatypes into proper int/float types."""
    if isinstance(obj, decimal.Decimal):
        if obj % 1:
            return float(obj)
        return int(obj)
    raise TypeError
0.004167
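decimal_default() is shaped to be plugged into json.dumps as its `default` hook, which is called for any object the encoder cannot serialize on its own:

import decimal
import json

data = {'price': decimal.Decimal('9.99'), 'qty': decimal.Decimal('3')}
print(json.dumps(data, default=decimal_default))  # {"price": 9.99, "qty": 3}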
def compute_threshold(smoothed_errors, error_buffer, sd_limit=12.0):
    """Helper method for `extract_anomalies` method.

    Calculates the epsilon (threshold) for anomalies.
    """
    mu = np.mean(smoothed_errors)
    sigma = np.std(smoothed_errors)

    max_epsilon = 0
    sd_threshold = sd_limit

    # The threshold is determined dynamically by testing multiple Zs.
    # z is drawn from an ordered set of positive values representing the
    # number of standard deviations above mean(smoothed_errors).
    # Here we iterate in increments of 0.5 on the range that the NASA paper
    # found to be good.
    for z in np.arange(2.5, sd_limit, 0.5):
        epsilon = mu + (sigma * z)
        below_epsilon, below_indices, above_epsilon = [], [], []

        for i in range(len(smoothed_errors)):
            e = smoothed_errors[i]
            if e < epsilon:
                # save to compute delta mean and delta std
                # these are important for epsilon calculation
                below_epsilon.append(e)
                below_indices.append(i)
            if e > epsilon:
                # above_epsilon values are anomalies
                for j in range(0, error_buffer):
                    if (i + j) not in above_epsilon and (i + j) < len(smoothed_errors):
                        above_epsilon.append(i + j)
                    if (i - j) not in above_epsilon and (i - j) >= 0:
                        above_epsilon.append(i - j)

        if len(above_epsilon) == 0:
            continue

        # generate sequences
        above_epsilon = sorted(list(set(above_epsilon)))
        groups = [list(group) for group in mit.consecutive_groups(above_epsilon)]
        above_sequences = [(g[0], g[-1]) for g in groups if not g[0] == g[-1]]

        mean_perc_decrease = (mu - np.mean(below_epsilon)) / mu
        sd_perc_decrease = (sigma - np.std(below_epsilon)) / sigma
        epsilon = (mean_perc_decrease + sd_perc_decrease) /\
            (len(above_sequences)**2 + len(above_epsilon))

        # update the largest epsilon we've seen so far
        if epsilon > max_epsilon:
            sd_threshold = z
            max_epsilon = epsilon

    # sd_threshold can be multiplied by sigma to get epsilon
    return max_epsilon, sd_threshold
0.001764
def cat(self, i1, i2, format='html'):
    """Display the DataFrame from row i1 till i2

    For format, see https://pypi.org/project/tabulate/

    :param int i1: Start row
    :param int i2: End row.
    :param str format: Format to use, e.g. 'html', 'plain', 'latex'
    """
    from IPython import display
    if format == 'html':
        output = self._as_html_table(i1, i2)
        display.display(display.HTML(output))
    else:
        output = self._as_table(i1, i2, format=format)
        print(output)
0.003552
def classifySPoutput(targetOutputColumns, outputColumns):
    """
    Classify the SP output

    @param targetOutputColumns (list) The target outputs, corresponding to
                                      different classes

    @param outputColumns (array) The current output

    @return classLabel (int) classification outcome
    """
    numTargets, numDims = targetOutputColumns.shape
    overlap = np.zeros((numTargets,))
    for i in range(numTargets):
        overlap[i] = percentOverlap(outputColumns, targetOutputColumns[i, :])
    classLabel = np.argmax(overlap)
    return classLabel
0.012389
def call_all(sequence, method_name, *args, **kwargs):
    """Call a method on each element of a sequence, in parallel.

    Returns:
        list of results
    """
    kwargs = kwargs.copy()
    kwargs['block'] = False
    results = []
    for obj in sequence:
        results.append(methodcall(obj, method_name, *args, **kwargs))
    for i in range(len(results)):
        results[i] = convert_result(results[i])
    return results
0.002331
def translate(term=None, phrase=None, api_key=GIPHY_PUBLIC_KEY,
              strict=False, rating=None):
    """
    Shorthand for creating a Giphy api wrapper with the given api key
    and then calling the translate method.
    """
    return Giphy(api_key=api_key, strict=strict).translate(
        term=term, phrase=phrase, rating=rating)
0.002915
def main(cls):
    """
    Command-line entry point for running a PySOA server. The chain of method calls is as follows::

        cls.main
          |
          -> cls.initialize => new_cls
          -> new_cls.__init__ => self
          -> self.run
              |
              -> self.setup
              -> loop: self.handle_next_request while not self.shutting_down
                          |
                          -> transport.receive_request_message
                          -> self.perform_idle_actions (if no request)
                          -> self.perform_pre_request_actions
                          -> self.process_job
                              |
                              -> middleware(self.execute_job)
                          -> transport.send_response_message
                          -> self.perform_post_request_actions
    """
    parser = argparse.ArgumentParser(
        description='Server for the {} SOA service'.format(cls.service_name),
    )
    parser.add_argument(
        '-d', '--daemon',
        action='store_true',
        help='run the server process as a daemon',
    )
    if not cls.use_django:
        # If Django mode is turned on, we use the Django settings framework to get our settings, so the caller
        # needs to set DJANGO_SETTINGS_MODULE. Otherwise, the caller must pass in the -s/--settings argument.
        parser.add_argument(
            '-s', '--settings',
            help='The settings module to use',
            required=True,
        )
    cmd_options, _ = parser.parse_known_args(sys.argv[1:])

    # Load settings from the given file (or use Django and grab from its settings)
    if cls.use_django:
        # noinspection PyUnresolvedReferences
        if not django_settings:
            raise ImportError(
                'Could not import Django. You must install Django if you enable Django support in your service.'
            )
        try:
            settings = cls.settings_class(django_settings.SOA_SERVER_SETTINGS)
        except AttributeError:
            raise ValueError('Cannot find `SOA_SERVER_SETTINGS` in the Django settings.')
    else:
        try:
            settings_module = importlib.import_module(cmd_options.settings)
        except ImportError as e:
            raise ValueError('Cannot import settings module `%s`: %s' % (cmd_options.settings, e))
        try:
            settings_dict = getattr(settings_module, 'SOA_SERVER_SETTINGS')
        except AttributeError:
            try:
                settings_dict = getattr(settings_module, 'settings')
            except AttributeError:
                raise ValueError(
                    "Cannot find `SOA_SERVER_SETTINGS` or `settings` variable in settings module `{}`.".format(
                        cmd_options.settings,
                    )
                )
        settings = cls.settings_class(settings_dict)

    PySOALogContextFilter.set_service_name(cls.service_name)

    # Set up logging
    logging.config.dictConfig(settings['logging'])

    # Optionally daemonize
    if cmd_options.daemon:
        pid = os.fork()
        if pid > 0:
            print('PID={}'.format(pid))
            sys.exit()

    # Set up server and signal handling
    server = cls.initialize(settings)(settings)

    # Start server event loop
    server.run()
0.003592
def children(self, value):
    """
    Setter for **self.__children** attribute.

    :param value: Attribute value.
    :type value: list
    """
    if value is not None:
        assert type(value) is list, \
            "'{0}' attribute: '{1}' type is not 'list'!".format("children", value)
        for element in value:
            assert issubclass(element.__class__, AbstractNode), \
                "'{0}' attribute: '{1}' is not a '{2}' subclass!".format(
                    "children", element, AbstractNode.__class__.__name__)
    self.__children = value
0.006579
def _get_dbal_column_type(self, type_):
    """
    Get the dbal column type.

    :param type_: The fluent type
    :type type_: str

    :rtype: str
    """
    type_ = type_.lower()

    if type_ == "enum":
        return "string"

    return super(SQLiteSchemaGrammar, self)._get_dbal_column_type(type_)
0.005764
def populateFromRow(self, peerRecord):
    """
    This method accepts a model record and sets class variables.
    """
    self.setUrl(peerRecord.url) \
        .setAttributesJson(peerRecord.attributes)
    return self
0.00823
def _data_scan(self, data_id=None, executor='resolwe.flow.executors.local', **kwargs):
    """Scan for new Data objects and execute them.

    :param data_id: Optional id of Data object which (+ its children) should
        be scanned. If it is not given, all resolving objects are processed.
    :param executor: The fully qualified name of the executor to use for all
        :class:`~resolwe.flow.models.Data` objects discovered in this pass.
    """
    def process_data_object(data):
        """Process a single data object."""
        # Lock for update. Note that we want this transaction to be as short as possible in
        # order to reduce contention and avoid deadlocks. This is why we do not lock all
        # resolving objects for update, but instead only lock one object at a time. This
        # allows managers running in parallel to process different objects.
        data = Data.objects.select_for_update().get(pk=data.pk)
        if data.status != Data.STATUS_RESOLVING:
            # The object might have already been processed while waiting for the lock to be
            # obtained. In this case, skip the object.
            return

        dep_status = dependency_status(data)

        if dep_status == Data.STATUS_ERROR:
            data.status = Data.STATUS_ERROR
            data.process_error.append("One or more inputs have status ERROR")
            data.process_rc = 1
            data.save()
            return

        elif dep_status != Data.STATUS_DONE:
            return

        if data.process.run:
            try:
                execution_engine = data.process.run.get('language', None)
                # Evaluation by the execution engine may spawn additional data objects and
                # perform other queries on the database. Queries of all possible execution
                # engines need to be audited for possibilities of deadlocks in case any
                # additional locks are introduced. Currently, we only take an explicit lock on
                # the currently processing object.
                program = self.get_execution_engine(execution_engine).evaluate(data)
            except (ExecutionError, InvalidEngineError) as error:
                data.status = Data.STATUS_ERROR
                data.process_error.append("Error in process script: {}".format(error))
                data.save()
                return

            # Set allocated resources:
            resource_limits = data.process.get_resource_limits()
            data.process_memory = resource_limits['memory']
            data.process_cores = resource_limits['cores']
        else:
            # If there is no run section, then we should not try to run anything. But the
            # program must not be set to None as then the process will be stuck in waiting
            # state.
            program = ''

        if data.status != Data.STATUS_DONE:
            # The data object may already be marked as done by the execution engine. In this
            # case we must not revert the status to STATUS_WAITING.
            data.status = Data.STATUS_WAITING
        data.save(render_name=True)

        # Actually run the object only if there was nothing wrong with the transaction.
        transaction.on_commit(
            # Make sure the closure gets the right values here, since they're
            # changed in the loop.
            lambda d=data, p=program: self._data_execute(d, p, executor)
        )

    logger.debug(__("Manager processing communicate command triggered by Data with id {}.", data_id))

    if is_testing():
        # NOTE: This is a work-around for Django issue #10827
        # (https://code.djangoproject.com/ticket/10827), same as in
        # TestCaseHelpers._pre_setup(). Because the worker is running
        # independently, it must clear the cache on its own.
        ContentType.objects.clear_cache()

    # Ensure settings overrides apply
    self.discover_engines(executor=executor)

    try:
        queryset = Data.objects.filter(status=Data.STATUS_RESOLVING)
        if data_id is not None:
            # Scan only the given data object and its children.
            queryset = queryset.filter(Q(parents=data_id) | Q(id=data_id)).distinct()

        for data in queryset:
            try:
                with transaction.atomic():
                    process_data_object(data)

                    # All data objects created by the execution engine are committed after this
                    # point and may be processed by other managers running in parallel. At the
                    # same time, the lock for the current data object is released.
            except Exception as error:  # pylint: disable=broad-except
                logger.exception(__(
                    "Unhandled exception in _data_scan while processing data object {}.",
                    data.pk
                ))

                # Unhandled error while processing a data object. We must set its
                # status to STATUS_ERROR to prevent the object from being retried
                # on next _data_scan run. We must perform this operation without
                # using the Django ORM as using the ORM may be the reason the error
                # occurred in the first place.
                error_msg = "Internal error: {}".format(error)
                process_error_field = Data._meta.get_field('process_error')  # pylint: disable=protected-access
                max_length = process_error_field.base_field.max_length
                if len(error_msg) > max_length:
                    error_msg = error_msg[:max_length - 3] + '...'

                try:
                    with connection.cursor() as cursor:
                        cursor.execute(
                            """
                                UPDATE {table}
                                SET
                                    status = %(status)s,
                                    process_error = process_error || (%(error)s)::varchar[]
                                WHERE id = %(id)s
                            """.format(
                                table=Data._meta.db_table  # pylint: disable=protected-access
                            ),
                            {
                                'status': Data.STATUS_ERROR,
                                'error': [error_msg],
                                'id': data.pk
                            }
                        )
                except Exception as error:  # pylint: disable=broad-except
                    # If the object's state cannot be changed due to some database-related
                    # issue, at least skip the object for this run.
                    logger.exception(__(
                        "Unhandled exception in _data_scan while trying to emit error for {}.",
                        data.pk
                    ))
    except IntegrityError as exp:
        logger.error(__("IntegrityError in manager {}", exp))
        return
0.004524
def replace_ext(file_path, new_ext):
    """
    >>> replace_ext('one/two/three.four.doc', '.html')
    'one/two/three.four.html'
    >>> replace_ext('one/two/three.four.DOC', '.html')
    'one/two/three.four.html'
    >>> replace_ext('one/two/three.four.DOC', 'html')
    'one/two/three.four.html'
    """
    if not new_ext.startswith(os.extsep):
        new_ext = os.extsep + new_ext
    index = file_path.rfind(os.extsep)
    return file_path[:index] + new_ext
0.002155
def validate_groupby_func(name, args, kwargs, allowed=None):
    """
    'args' and 'kwargs' should be empty, except for allowed
    kwargs because all of their necessary parameters are explicitly
    listed in the function signature
    """
    if allowed is None:
        allowed = []

    kwargs = set(kwargs) - set(allowed)

    if len(args) + len(kwargs) > 0:
        raise UnsupportedFunctionCall((
            "numpy operations are not valid "
            "with groupby. Use .groupby(...)."
            "{func}() instead".format(func=name)))
0.001812
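A usage sketch of the validator above. The import path for the exception is an assumption based on pandas' public errors module:

from pandas.errors import UnsupportedFunctionCall  # assumed import path

validate_groupby_func('std', (), {'ddof': 1}, allowed=['ddof'])  # passes silently
try:
    validate_groupby_func('mean', (), {'dtype': float})
except UnsupportedFunctionCall as err:
    print(err)  # numpy operations are not valid with groupby...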
def prior_dates(*args, **kwargs):
    """Get the prior distribution of calibrated radiocarbon dates"""
    try:
        chron = args[0]
    except IndexError:
        chron = kwargs['coredates']

    d_r = np.array(kwargs['d_r'])
    d_std = np.array(kwargs['d_std'])
    t_a = np.array(kwargs['t_a'])
    t_b = np.array(kwargs['t_b'])

    try:
        normal_distr = kwargs['normal_distr']
    except KeyError:
        normal_distr = None

    cc_int = kwargs['cc']
    ccdict = {0: 'ConstCal', 1: 'IntCal3', 2: 'Marine13', 3: 'SHCal13', 4: 'ConstCal'}
    # There is a better way to do this.
    if 'cc1' in kwargs:
        ccdict[1] = str(kwargs['cc1'])
    if 'cc2' in kwargs:
        ccdict[2] = str(kwargs['cc2'])
    if 'cc3' in kwargs:
        ccdict[3] = str(kwargs['cc3'])
    if 'cc4' in kwargs:
        ccdict[4] = str(kwargs['cc4'])

    cc = []
    for i in cc_int:
        i = int(i)
        cc.append(fetch_calibcurve(ccdict[i]))

    d, p = calibrate_dates(chron, calib_curve=cc, d_r=d_r, d_std=d_std,
                           t_a=t_a, t_b=t_b, normal_distr=normal_distr)
    return d, p
0.0016
def getHighOrderSequenceChunk(it, switchover=1000, w=40, n=2048):
    """
    Given an iteration index, returns a list of vectors to be appended to the
    input stream, as well as a string label identifying the sequence. This
    version generates a bunch of high order sequences. The first element
    always provides sufficient context to predict the rest of the elements.

    After switchover iterations, it will generate a different set of
    sequences.
    """
    if it % 10 == 3:
        s = numpy.random.randint(5)
        if it <= switchover:
            if s == 0:
                label = "XABCDE"
            elif s == 1:
                label = "YCBEAF"
            elif s == 2:
                label = "GHIJKL"
            elif s == 3:
                label = "WABCMN"
            else:
                label = "ZDBCAE"
        else:
            if s == 0:
                label = "XCBEAF"
            elif s == 1:
                label = "YABCDE"
            elif s == 2:
                label = "GABCMN"
            elif s == 3:
                label = "WHIJKL"
            else:
                label = "ZDHICF"
        vecs = letterSequence(label)
    else:
        vecs = [getRandomVector(w, n)]
        label = "."
    return vecs, label
0.036929
def _get_session(self, session):
    """Creates a new session with basic auth, unless one was provided, and sets headers.

    :param session: (optional) Session to re-use

    :return:
        - :class:`requests.Session` object
    """
    if not session:
        logger.debug('(SESSION_CREATE) User: %s' % self._user)
        s = requests.Session()
        s.auth = HTTPBasicAuth(self._user, self._password)
    else:
        logger.debug('(SESSION_CREATE) Object: %s' % session)
        s = session

    s.headers.update(
        {
            'content-type': 'application/json',
            'accept': 'application/json',
            'User-Agent': 'pysnow/%s' % pysnow.__version__
        }
    )

    return s
0.003783
def mnemonic(self, index, verbose=False):
    """Give mnemonic representation of meaning.

    verbose compresses strings of x's
    """
    if index < 16:
        return ['last', '2last', '3last', '4last',
                'last-1', 'last+1', 'last-2', 'last+2', 'last-3', 'last+3',
                '2last-1', '2last+1', '2last-2', '2last+2', '2last-3', '2last+3'
                ][index]
    if index < 16 + self.NDIRECT:
        return str(index - 16)
    # construct strings like "1xx01-15"
    index -= self.NDIRECT + 16
    hcode = index >> self.NPOSTFIX
    lcode = index & (1 << self.NPOSTFIX) - 1
    if self.NPOSTFIX:
        formatString = '1{0}{1}{2:0{3}b}{4:+d}'
    else:
        formatString = '1{0}{1}{4:+d}'
    return formatString.format(
        hcode & 1,
        'x' * (2 + hcode >> 1) if hcode < 13 or verbose else '[{}*x]'.format(2 + hcode >> 1),
        lcode,
        self.NPOSTFIX,
        self.NDIRECT + 1 - (4 << self.NPOSTFIX))
0.018614
def walk(textRoot, currentTag, level, prefix=None, postfix=None, unwrapUntilPara=False):
    '''
    .. note::

       This method does not cover all possible input doxygen types!  This means that
       when an unsupported / unrecognized doxygen tag appears in the xml listing, the
       **raw xml will appear on the file page being documented**.  This traverser is
       greedily designed to work for what testing revealed as the *bare minimum*
       required.  **Please** see the :ref:`Doxygen ALIASES <doxygen_aliases>` section
       for how to bypass invalid documentation coming from Exhale.

    Recursive traverser method to parse the input parsed xml tree and convert the nodes
    into raw reStructuredText from the input doxygen format.  **Not all doxygen markup
    types are handled**.  The current supported doxygen xml markup tags are:

    - ``para``
    - ``orderedlist``
    - ``itemizedlist``
    - ``verbatim`` (specifically: ``embed:rst:leading-asterisk``)
    - ``formula``
    - ``ref``
    - ``emphasis`` (e.g., using `em`_)
    - ``computeroutput`` (e.g., using `c`_)
    - ``bold`` (e.g., using `b`_)

    .. _em: http://www.doxygen.nl/manual/commands.html#cmdem
    .. _c:  http://www.doxygen.nl/manual/commands.html#cmdc
    .. _b:  http://www.doxygen.nl/manual/commands.html#cmdb

    The goal of this method is to "explode" input ``xml`` data into raw reStructuredText
    to put at the top of the file pages.  Wielding beautiful soup, this essentially
    means that you need to expand every non ``para`` tag into a ``para``.  So if an
    ordered list appears in the xml, then the raw listing must be built up from the
    child nodes.  After this is finished, though, the :meth:`bs4.BeautifulSoup.get_text`
    method will happily remove all remaining ``para`` tags to produce the final
    reStructuredText **provided that** the original "exploded" tags (such as the
    ordered list definition and its ``listitem`` children) have been *removed* from
    the soup.

    **Parameters**
        ``textRoot`` (:class:`~exhale.graph.ExhaleRoot`)
            The text root object that is calling this method.  This parameter is
            necessary in order to retrieve / convert the doxygen ``\\ref SomeClass``
            tag and link it to the appropriate node page.  The ``textRoot`` object is
            not modified by executing this method.

        ``currentTag`` (:class:`bs4.element.Tag`)
            The current xml tag being processed, either to have its contents directly
            modified or unraveled.

        ``level`` (int)
            .. warning::

               This variable does **not** represent "recursion depth" (as one would
               typically see with a variable like this)!

            The **block** level of indentation currently being parsed.  Because we are
            parsing a tree in order to generate raw reStructuredText code, we need to
            maintain a notion of "block level".  This means tracking when there are
            nested structures such as a list within a list:

            .. code-block:: rst

               1. This is an outer ordered list.

                   - There is a nested unordered list.
                   - It is a child of the outer list.

               2. This is another item in the outer list.

            The outer ordered (numbers ``1`` and ``2``) list is at indentation level
            ``0``, and the inner unordered (``-``) list is at indentation level ``1``.
            Meaning that level is used as

            .. code-block:: py

               indent = "    " * level
               # ... later ...
               some_text = "\\n{indent}{text}".format(indent=indent, text=some_text)

            to indent the ordered / unordered lists accordingly.
    '''
    if not currentTag:
        return

    if prefix:
        currentTag.insert_before(prefix)
    if postfix:
        currentTag.insert_after(postfix)

    children = currentTag.findChildren(recursive=False)
    indent = "    " * level
    if currentTag.name == "orderedlist":
        idx = 1
        for child in children:
            walk(textRoot, child, level + 1, "\n{0}{1}. ".format(indent, idx), None, True)
            idx += 1
            child.unwrap()
        currentTag.unwrap()
    elif currentTag.name == "itemizedlist":
        for child in children:
            walk(textRoot, child, level + 1, "\n{0}- ".format(indent), None, True)
            child.unwrap()
        currentTag.unwrap()
    elif currentTag.name == "verbatim":
        # TODO: find relevant section in breathe.sphinxrenderer and include the versions
        #       for both leading /// as well as just plain embed:rst.
        leading_asterisk = "embed:rst:leading-asterisk\n*"
        if currentTag.string.startswith(leading_asterisk):
            cont = currentTag.string.replace(leading_asterisk, "")
            cont = textwrap.dedent(cont.replace("\n*", "\n"))
            currentTag.string = cont
    elif currentTag.name == "formula":
        currentTag.string = ":math:`{0}`".format(currentTag.string[1:-1])
    elif currentTag.name == "ref":
        signal = None
        if "refid" not in currentTag.attrs:
            signal = "No 'refid' in `ref` tag attributes of file documentation. Attributes were: {0}".format(
                currentTag.attrs
            )
        else:
            refid = currentTag.attrs["refid"]
            if refid not in textRoot.node_by_refid:
                signal = "Found unknown 'refid' of [{0}] in file level documentation.".format(refid)
            else:
                currentTag.string = ":ref:`{0}`".format(textRoot.node_by_refid[refid].link_name)
        if signal:
            # << verboseBuild
            utils.verbose_log(signal, utils.AnsiColors.BOLD_YELLOW)
    elif currentTag.name == "emphasis":
        currentTag.string = "*{0}*".format(currentTag.string)
    elif currentTag.name == "computeroutput":
        currentTag.string = "``{0}``".format(currentTag.string)
    elif currentTag.name == "bold":
        currentTag.string = "**{0}**".format(currentTag.string)
    else:
        ctr = 0
        for child in children:
            c_prefix = None
            c_postfix = None
            if ctr > 0 and child.name == "para":
                c_prefix = "\n{0}".format(indent)
            walk(textRoot, child, level, c_prefix, c_postfix)
            ctr += 1
0.005154
def get_default_config(self):
    """
    Returns the default collector settings
    """
    config = super(DropwizardCollector, self).get_default_config()
    config.update({
        'host': '127.0.0.1',
        'port': 8081,
        'path': 'dropwizard',
    })
    return config
0.006006
def cmd(send, msg, args):
    """Searches tumblr

    Syntax: {command} <blogname> <--submit content|--random>
    """
    parser = arguments.ArgParser(args['config'])
    parser.add_argument('blogname', action=arguments.TumblrParser)
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--submit', nargs='*')
    group.add_argument('--random', action='store_true')

    try:
        cmdargs = parser.parse_args(msg)
    except arguments.ArgumentException as e:
        send(str(e))
        return
    if cmdargs.random:
        apikey = args['config']['api']['tumblrconsumerkey']
        # First, get the number of posts
        response = get('https://api.tumblr.com/v2/blog/%s/posts' % cmdargs.blogname,
                       params={'api_key': apikey, 'type': 'text'}).json()
        postcount = response['response']['total_posts']
        if postcount <= 1:
            send("No text posts found.")
            return
        # No random post functionality and we can only get 20 posts per API call,
        # so pick a random offset to get the random post
        offset = randint(0, postcount - 1)
        response = get('https://api.tumblr.com/v2/blog/%s/posts' % cmdargs.blogname,
                       params={
                           'api_key': apikey,
                           'offset': offset,
                           'limit': 1,
                           'type': 'text',
                           'filter': 'text'
                       }).json()
        entry = response['response']['posts'][0]['body']
        # Account for possibility of multiple lines
        lines = entry.splitlines()
        for line in lines:
            send(line)
    elif cmdargs.submit:
        if not cmdargs.submit:
            send('Post what?')
            return
        if isinstance(cmdargs.submit, list):
            cmdargs.submit = ' '.join(cmdargs.submit)
        if args['is_admin']:
            send(post_tumblr(args['config'], cmdargs.blogname, cmdargs.submit)[0])
        else:
            row = Tumblrs(post=cmdargs.submit, submitter=args['nick'],
                          nick=args['nick'], blogname=cmdargs.blogname)
            args['db'].add(row)
            args['db'].flush()
            send("New Tumblr Post: %s -- %s, Submitted by %s"
                 % (cmdargs.submit, cmdargs.blogname, args['nick']),
                 target=args['config']['core']['ctrlchan'])
            send("Issue submitted for approval.", target=args['nick'])
    else:
        send("Did not get an argument (choices are --random, --submit)")
0.002808
def make_quad(points, size_u, size_v):
    """ Converts linear sequence of input points into a quad structure.

    :param points: list of points to be ordered
    :type points: list, tuple
    :param size_v: number of elements in a row
    :type size_v: int
    :param size_u: number of elements in a column
    :type size_u: int
    :return: re-ordered points
    :rtype: list
    """
    # Start with generating a zig-zag shape in row direction and then take its reverse
    new_points = make_zigzag(points, size_v)
    new_points.reverse()

    # Start generating a zig-zag shape in col direction
    forward = True
    for row in range(0, size_v):
        temp = []
        for col in range(0, size_u):
            temp.append(points[row + (col * size_v)])
        if forward:
            forward = False
        else:
            forward = True
            temp.reverse()
        new_points += temp

    return new_points
0.002157
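# A minimal usage sketch of the column-major indexing make_quad relies on; the
# grid size and point labels here are hypothetical, and the zig-zag stage is
# omitted since make_zigzag is not shown in this entry.
points = ['p0', 'p1', 'p2', 'p3', 'p4', 'p5']
size_u, size_v = 2, 3
for row in range(size_v):
    # points[row + col * size_v] picks, for a fixed row, one point per column
    print([points[row + col * size_v] for col in range(size_u)])
# -> ['p0', 'p3'], then ['p1', 'p4'], then ['p2', 'p5']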
def _check_for_crypto_done(self):
    # type: (Downloader) -> None
    """Check queue for crypto done
    :param Downloader self: this
    """
    cv = self._crypto_offload.done_cv
    while not self.termination_check:
        result = None
        cv.acquire()
        while True:
            result = self._crypto_offload.pop_done_queue()
            if result is None:
                # use cv timeout due to possible non-wake while running
                cv.wait(0.1)
                # check for terminating conditions
                if self.termination_check:
                    break
            else:
                break
        cv.release()
        if result is not None:
            try:
                final_path, offsets = result
                with self._transfer_lock:
                    dd = self._dd_map[final_path]
                self._finalize_chunk(dd, offsets)
            except KeyError:
                # this can happen if all of the last integrity
                # chunks are processed at once
                pass
0.002577
def _create_doc(self):
    '''
    Create the root element of the image description document.

    :return: the new ``image`` root element
    '''
    root = etree.Element('image')
    root.set('schemaversion', '6.3')
    root.set('name', self.name)
    return root
0.008889
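# A small standalone sketch of the same document construction, assuming lxml
# is available; the name value is a hypothetical stand-in for self.name.
from lxml import etree

root = etree.Element('image')
root.set('schemaversion', '6.3')
root.set('name', 'example-image')  # hypothetical name
print(etree.tostring(root, pretty_print=True).decode())
# <image schemaversion="6.3" name="example-image"/>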
def multiply(df, new_column, column_1, column_2):
    """
    DEPRECATED - use `formula` instead
    """
    return _basic_math_operation(df, new_column, column_1, column_2, op='mul')
0.005435
def sortDictList(dictList, **kwargs):
    '''
    students = [
        {'name': 'john', 'class': 'A', 'year': 15},
        {'name': 'jane', 'class': 'B', 'year': 12},
        {'name': 'dave', 'class': 'B', 'year': 10}
    ]
    rslt = sortDictList(students, cond_keys=['name', 'class', 'year'])
    pobj(rslt)
    rslt = sortDictList(students, cond_keys=['name', 'year', 'class'])
    pobj(rslt)
    rslt = sortDictList(students, cond_keys=['class', 'name', 'year'])
    pobj(rslt)
    rslt = sortDictList(students, cond_keys=['class', 'year', 'name'])
    pobj(rslt)
    rslt = sortDictList(students, cond_keys=['year', 'name', 'class'])
    pobj(rslt)
    '''
    def default_eq_func(value1, value2):
        return value1 == value2

    def default_gt_func(value1, value2):
        return value1 > value2

    def default_lt_func(value1, value2):
        return value1 < value2

    eq_func = kwargs.get('eq_func', default_eq_func)
    gt_func = kwargs.get('gt_func', default_gt_func)
    lt_func = kwargs.get('lt_func', default_lt_func)
    reverse = kwargs.get('reverse', False)
    keys = kwargs['cond_keys']

    def cmp_dict(d1, d2):
        # Compare on each key in turn; the first non-equal key decides
        for key in keys:
            if eq_func(d1[key], d2[key]):
                continue
            return 1 if gt_func(d1[key], d2[key]) else -1
        return 0

    ndl = sorted(dictList, key=functools.cmp_to_key(cmp_dict), reverse=reverse)
    return ndl
0.007274
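# Usage sketch taken from the docstring above; assumes sortDictList and its
# functools import are in scope.
students = [
    {'name': 'john', 'class': 'A', 'year': 15},
    {'name': 'jane', 'class': 'B', 'year': 12},
    {'name': 'dave', 'class': 'B', 'year': 10},
]
# Sort by class first, then year, then name:
rslt = sortDictList(students, cond_keys=['class', 'year', 'name'])
# john (class A) comes first, then dave (B, 10), then jane (B, 12)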
def start_shell(local_ns: Dict = None, banner: str = ''):
    """Create and immediately drop into a Python shell.

    If IPython version 5 or greater is available it will be used instead
    of the built-in python shell.

    :param local_ns: An optional dict containing the global namespace of
        the newly created shell.
    :param banner: An optional banner to render when terminal starts.
    """
    if IPYTHON_SHELL_AVAILABLE:
        # Don't try to stop IPython from displaying its banner, since
        # it's different in every major version
        terminal = embed.InteractiveShellEmbed(user_ns={})
        terminal.mainloop(local_ns=local_ns)
    else:
        code.interact(banner=banner, local=local_ns)
0.006812
def build(self, **kwargs):
    """
    Build an image and return it. Similar to the ``docker build``
    command. Either ``path`` or ``fileobj`` must be set.

    If you have a tar file for the Docker build context (including a
    Dockerfile) already, pass a readable file-like object to ``fileobj``
    and also pass ``custom_context=True``. If the stream is compressed
    also, set ``encoding`` to the correct value (e.g ``gzip``).

    If you want to get the raw output of the build, use the
    :py:meth:`~docker.api.build.BuildApiMixin.build` method in the
    low-level API.

    Args:
        path (str): Path to the directory containing the Dockerfile
        fileobj: A file object to use as the Dockerfile. (Or a file-like
            object)
        tag (str): A tag to add to the final image
        quiet (bool): Whether to return the status
        nocache (bool): Don't use the cache when set to ``True``
        rm (bool): Remove intermediate containers. The ``docker build``
            command now defaults to ``--rm=true``, but we have kept the old
            default of `False` to preserve backward compatibility
        timeout (int): HTTP timeout
        custom_context (bool): Optional if using ``fileobj``
        encoding (str): The encoding for a stream. Set to ``gzip`` for
            compressing
        pull (bool): Downloads any updates to the FROM image in Dockerfiles
        forcerm (bool): Always remove intermediate containers, even after
            unsuccessful builds
        dockerfile (str): path within the build context to the Dockerfile
        buildargs (dict): A dictionary of build arguments
        container_limits (dict): A dictionary of limits applied to each
            container created by the build process. Valid keys:

            - memory (int): set memory limit for build
            - memswap (int): Total memory (memory + swap), -1 to disable
                swap
            - cpushares (int): CPU shares (relative weight)
            - cpusetcpus (str): CPUs in which to allow execution, e.g.,
                ``"0-3"``, ``"0,1"``
        shmsize (int): Size of `/dev/shm` in bytes. The size must be
            greater than 0. If omitted the system uses 64MB
        labels (dict): A dictionary of labels to set on the image
        cache_from (list): A list of images used for build cache
            resolution
        target (str): Name of the build-stage to build in a multi-stage
            Dockerfile
        network_mode (str): networking mode for the run commands during
            build
        squash (bool): Squash the resulting images layers into a single
            layer.
        extra_hosts (dict): Extra hosts to add to /etc/hosts in building
            containers, as a mapping of hostname to IP address.
        platform (str): Platform in the format ``os[/arch[/variant]]``.
        isolation (str): Isolation technology used during build.
            Default: `None`.
        use_config_proxy (bool): If ``True``, and if the docker client
            configuration file (``~/.docker/config.json`` by default)
            contains a proxy configuration, the corresponding environment
            variables will be set in the container being built.

    Returns:
        (tuple): The first item is the :py:class:`Image` object for the
            image that was built. The second item is a generator of the
            build logs as JSON-decoded objects.

    Raises:
        :py:class:`docker.errors.BuildError`
            If there is an error during the build.
        :py:class:`docker.errors.APIError`
            If the server returns any other error.
        ``TypeError``
            If neither ``path`` nor ``fileobj`` is specified.
    """
    resp = self.client.api.build(**kwargs)
    if isinstance(resp, six.string_types):
        return self.get(resp)
    last_event = None
    image_id = None
    result_stream, internal_stream = itertools.tee(json_stream(resp))
    for chunk in internal_stream:
        if 'error' in chunk:
            raise BuildError(chunk['error'], result_stream)
        if 'stream' in chunk:
            match = re.search(
                r'(^Successfully built |sha256:)([0-9a-f]+)$',
                chunk['stream']
            )
            if match:
                image_id = match.group(2)
        last_event = chunk
    if image_id:
        return (self.get(image_id), result_stream)
    raise BuildError(last_event or 'Unknown', result_stream)
0.000414
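# A typical call through the docker SDK's high-level API; the build path and
# tag are hypothetical examples.
import docker

client = docker.from_env()
# Build from a directory containing a Dockerfile
image, build_logs = client.images.build(path='./myapp', tag='myapp:latest', rm=True)
for chunk in build_logs:
    if 'stream' in chunk:
        print(chunk['stream'], end='')
print(image.id)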
def from_conv_part_data(conv_part_data, self_user_id):
    """Construct user from ``ConversationParticipantData`` message.

    Args:
        conv_part_data: ``ConversationParticipantData`` message.
        self_user_id (~hangups.user.UserID or None): The ID of the current
            user. If ``None``, assume ``conv_part_data`` is the current user.

    Returns:
        :class:`~hangups.user.User` object.
    """
    user_id = UserID(chat_id=conv_part_data.id.chat_id,
                     gaia_id=conv_part_data.id.gaia_id)
    return User(user_id, conv_part_data.fallback_name, None, None, [],
                (self_user_id == user_id) or (self_user_id is None))
0.002805
def _getLogger(cls, logLevel=None):
    """ Gets a logger for the given class in this module """
    logger = logging.getLogger(
        ".".join(['com.numenta', _MODULE_NAME, cls.__name__]))
    if logLevel is not None:
        logger.setLevel(logLevel)
    return logger
0.019231
def read_config(config_file=CONFIG_FILE_DEFAULT, override_url=None):
    '''
    Read configuration file, perform sanity check and return
    configuration dictionary used by other functions.
    '''
    config = ConfigParser()
    config.read_dict(DEFAULT_SETTINGS)
    try:
        config.readfp(open(config_file))
        logger.debug("Using config file at " + config_file)
    except IOError:
        logger.error(
            "Could not find {0}, running with defaults.".format(config_file))
    if not logger.handlers:
        # Before doing anything else, configure logging
        # Handlers might be already registered in repeated test suite runs
        # In production, this should never happen
        if config.getboolean("Logging", "to_file"):
            handler = logging.FileHandler(config.get("Logging", "file"))
        else:
            handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter(
            config.get("Logging", "format")))
        logger.addHandler(handler)
        logger.setLevel(config.get("Logging", "level"))
    if override_url:
        config['Server']['url'] = override_url
    return config
0.001742
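# A minimal sketch of the defaults-then-file pattern used by read_config; the
# section, option, and file names here are hypothetical.
from configparser import ConfigParser

config = ConfigParser()
config.read_dict({'Server': {'url': 'http://localhost'}})  # defaults first
config.read('settings.ini')  # read() silently skips missing files
print(config.get('Server', 'url'))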
def find_link(self, href_pattern, make_absolute=True):
    """
    Find link in response body which href value matches ``href_pattern``.

    Returns found url or None.
    """
    if make_absolute:
        self.tree.make_links_absolute(self.doc.url)
    if isinstance(href_pattern, six.text_type):
        raise GrabMisuseError('Method `find_link` accepts only '
                              'byte-string argument')
    href_pattern = make_unicode(href_pattern)
    for elem, _, link, _ in self.tree.iterlinks():
        if elem.tag == 'a' and href_pattern in link:
            return link
    return None
0.003008
def _textToTable(self, text, separator='\t'):
    """
    format csv, [[...]], ((..)) strings to a 2d table
    """
    table = None
    if text.startswith('[[') or text.startswith('(('):
        try:
            # maybe it's already formatted as a list e.g. "[['1','2'],[...]]"
            # check it:
            t = eval(text)
            # has to be a 2d-list:
            if isinstance(t, list) and isinstance(t[0], list):
                table = t
        except SyntaxError:
            # not a valid list
            pass
    if not table:
        # create the list from the clipboard-text
        # therefore the text has to be formatted like this:
        # "1\t2\t3\n4\t5\t6\n"
        table = text.split('\n')
        n = 0
        while n < len(table):
            sline = table[n].split(separator)
            if sline != ['']:
                table[n] = sline
            else:
                table.pop(n)
                n -= 1
            n += 1
    return table
0.00365
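# For illustration, the tab-separated branch of _textToTable reduces to this
# one-liner on a sample clipboard string:
text = "1\t2\t3\n4\t5\t6\n"
table = [line.split('\t') for line in text.split('\n') if line != '']
# -> [['1', '2', '3'], ['4', '5', '6']], the same shape _textToTable produces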
def _output_format_sector(func, override=None):
    """ Decorator in charge of giving the output its right format, either
    json or pandas (replacing the % for usable floats, range 0-1.0)

    Keyword Arguments:
        func: The function to be decorated
        override: Override the internal format of the call, default None
    Returns:
        A decorator for the format sector api call
    """
    @wraps(func)
    def _format_wrapper(self, *args, **kwargs):
        json_response, data_key, meta_data_key = func(
            self, *args, **kwargs)
        if isinstance(data_key, list):
            # Replace the percentage strings with floats
            data = {key: {k: self.percentage_to_float(v)
                          for k, v in json_response[key].items()}
                    for key in data_key}
        else:
            data = json_response[data_key]
        # TODO: Fix orientation in a better way
        meta_data = json_response[meta_data_key]
        # Allow the caller to override the output parameter
        if override is None:
            output_format = self.output_format.lower()
        elif override.lower() in ('json', 'pandas'):
            output_format = override.lower()
        else:
            raise ValueError('Format: {} is not supported'.format(override))
        # Choose output format
        if output_format == 'json':
            return data, meta_data
        elif output_format == 'pandas':
            data_pandas = pandas.DataFrame.from_dict(data, orient='columns')
            # Rename columns to have a nicer name
            col_names = [re.sub(r'\d+.', '', name).strip(' ')
                         for name in list(data_pandas)]
            data_pandas.columns = col_names
            return data_pandas, meta_data
        else:
            raise ValueError('Format: {} is not supported'.format(
                output_format))
    return _format_wrapper
0.001487
def save_grade_system(self, grade_system_form, *args, **kwargs):
    """Pass through to provider GradeSystemAdminSession.update_grade_system"""
    # Implemented from kitosid template for -
    # osid.resource.ResourceAdminSession.update_resource
    if grade_system_form.is_for_update():
        return self.update_grade_system(grade_system_form, *args, **kwargs)
    else:
        return self.create_grade_system(grade_system_form, *args, **kwargs)
0.006276
def set_cmd_env_var(value):
    """Decorator that sets the temple command env var to value"""
    def func_decorator(function):
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            previous_cmd_env_var = os.getenv(temple.constants.TEMPLE_ENV_VAR)
            os.environ[temple.constants.TEMPLE_ENV_VAR] = value
            try:
                ret_val = function(*args, **kwargs)
            finally:
                if previous_cmd_env_var is None:
                    del os.environ[temple.constants.TEMPLE_ENV_VAR]
                else:
                    os.environ[temple.constants.TEMPLE_ENV_VAR] = previous_cmd_env_var
            return ret_val
        return wrapper
    return func_decorator
0.002721
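# The same save/restore idiom as above, written as a generic decorator with a
# hypothetical variable name so the pattern can be seen in isolation.
import functools
import os

def with_env_var(name, value):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            previous = os.getenv(name)
            os.environ[name] = value
            try:
                return func(*args, **kwargs)
            finally:
                # Restore the previous state, even if func raised
                if previous is None:
                    del os.environ[name]
                else:
                    os.environ[name] = previous
        return wrapper
    return decorator

@with_env_var('MY_TOOL_MODE', 'update')
def run():
    print(os.environ['MY_TOOL_MODE'])  # -> update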
def addToNetwork(grph, nds, count, weighted, nodeType, nodeInfo, fullInfo,
                 coreCitesDict, coreValues, detailedValues, addCR,
                 recordToCite=True, headNd=None):
    """Adds the citations _nds_ to _grph_, according to the rules given by
    _nodeType_, _fullInfo_, etc.

    _headNd_ is the citation of the Record
    """
    if headNd is not None:
        hID = makeID(headNd, nodeType)
        if nodeType == 'full' or nodeType == 'original':
            hYear = getattr(headNd, "year")
        if hID not in grph:
            nodeName, nodeDat = makeNodeTuple(headNd, hID, nodeInfo, fullInfo,
                                              nodeType, count, coreCitesDict,
                                              coreValues, detailedValues, addCR)
            grph.add_node(nodeName, **nodeDat)
    else:
        hID = None
    idList = []
    yearList = []
    for n in nds:
        nID = makeID(n, nodeType)
        if nodeType == 'full' or nodeType == 'original':
            try:
                nYear = getattr(n, "year")
            except AttributeError:
                nYear = None
            yearList.append(nYear)
        if nID not in grph:
            nodeName, nodeDat = makeNodeTuple(n, nID, nodeInfo, fullInfo,
                                              nodeType, count, coreCitesDict,
                                              coreValues, detailedValues, addCR)
            grph.add_node(nodeName, **nodeDat)
        elif count:
            grph.node[nID]['count'] += 1
        idList.append(nID)
    addedEdges = []
    if hID:
        for i in range(len(idList)):
            nID = idList[i]
            if nodeType == 'full' or nodeType == 'original':
                nYear = yearList[i]
                try:
                    yearDiff = abs(hYear - nYear)
                except TypeError:
                    yearDiff = None
                if weighted:
                    try:
                        if recordToCite:
                            grph[hID][nID]['weight'] += 1
                        else:
                            grph[nID][hID]['weight'] += 1
                    except KeyError:
                        if recordToCite:
                            grph.add_edge(hID, nID, weight=1, yearDiff=yearDiff)
                        else:
                            grph.add_edge(nID, hID, weight=1, yearDiff=yearDiff)
                elif nID not in grph[hID]:
                    addedEdges.append((hID, nID))
            elif weighted:
                try:
                    if recordToCite:
                        grph[hID][nID]['weight'] += 1
                    else:
                        grph[nID][hID]['weight'] += 1
                except KeyError:
                    if recordToCite:
                        grph.add_edge(hID, nID, weight=1)
                    else:
                        grph.add_edge(nID, hID, weight=1)
            elif nID not in grph[hID]:
                addedEdges.append((hID, nID, {'yearDiff': yearDiff}))
    elif len(idList) > 1:
        for i, outerID in enumerate(idList):
            for innerID in idList[i + 1:]:
                if weighted:
                    try:
                        grph[outerID][innerID]['weight'] += 1
                    except KeyError:
                        grph.add_edge(outerID, innerID, weight=1)
                elif innerID not in grph[outerID]:
                    addedEdges.append((outerID, innerID))
    grph.add_edges_from(addedEdges)
0.004577
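# The weight-increment idiom at the heart of addToNetwork, as a standalone
# sketch (assumes networkx; the node labels are hypothetical).
import networkx as nx

grph = nx.DiGraph()
for src, dst in [('a', 'b'), ('a', 'b'), ('a', 'c')]:
    try:
        grph[src][dst]['weight'] += 1  # edge exists: bump the weight
    except KeyError:
        grph.add_edge(src, dst, weight=1)  # first sighting: create it
print(grph['a']['b']['weight'])  # 2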
def decode(string, encoding=None, errors=None):
    """Decode from specified encoding.

    ``encoding`` defaults to the preferred encoding.
    ``errors`` defaults to the preferred error handler.
    """
    if encoding is None:
        encoding = getpreferredencoding()
    if errors is None:
        errors = getpreferrederrors()
    return string.decode(encoding, errors)
0.002667
def get(self, path, query, **options):
    """Parses GET request options and dispatches a request."""
    api_options = self._parse_api_options(options, query_string=True)
    query_options = self._parse_query_options(options)
    parameter_options = self._parse_parameter_options(options)
    # options in the query take precedence
    query = _merge(query_options, api_options, parameter_options, query)
    return self.request('get', path, params=query, **options)
0.004016
def sum_of_squares(obs, pred):
    """
    Sum of squares between observed and predicted data

    Parameters
    ----------
    obs : iterable
        Observed data
    pred : iterable
        Predicted data

    Returns
    -------
    float
        Sum of squares

    Notes
    -----
    The length of observed and predicted data must match.
    """
    return np.sum((np.array(obs) - np.array(pred)) ** 2)
0.002427
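# A quick check of sum_of_squares, assuming numpy is imported as np as above:
obs = [1.0, 2.0, 3.0]
pred = [1.5, 2.0, 2.0]
# (1 - 1.5)**2 + (2 - 2)**2 + (3 - 2)**2 = 1.25
print(sum_of_squares(obs, pred))  # 1.25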
def screenshot(filename="screenshot.png"):
    """
    Save a screenshot of the current rendering window.
    """
    if not settings.plotter_instance.window:
        colors.printc('~bomb screenshot(): Rendering window is not present, skip.', c=1)
        return
    w2if = vtk.vtkWindowToImageFilter()
    w2if.ShouldRerenderOff()
    w2if.SetInput(settings.plotter_instance.window)
    w2if.ReadFrontBufferOff()  # read from the back buffer
    w2if.Update()
    pngwriter = vtk.vtkPNGWriter()
    pngwriter.SetFileName(filename)
    pngwriter.SetInputConnection(w2if.GetOutputPort())
    pngwriter.Write()
0.003289
def derive_ordering(self):
    """
    Returns what field should be used for ordering (using a prepended '-'
    to indicate descending sort).

    If the default order of the queryset should be used, returns None.
    """
    if '_order' in self.request.GET:
        return self.request.GET['_order']
    elif self.default_order:
        return self.default_order
    else:
        return None
0.006993
def time_to_repeats(self, bins, integration_time):
    """Convert integration time to number of repeats"""
    return math.ceil((self.device.sample_rate * integration_time) / bins)
0.010638
def get_current_head(graph):
    """
    Get the current database head revision, if any.
    """
    session = new_session(graph)
    try:
        result = session.execute("SELECT version_num FROM alembic_version")
    except ProgrammingError:
        return None
    else:
        return result.scalar()
    finally:
        session.close()
0.002924
def _reduce_datetimes(row):
    """Receives a row, converts datetimes to strings."""
    row = list(row)
    for i in range(len(row)):
        if hasattr(row[i], 'isoformat'):
            row[i] = row[i].isoformat()
    return tuple(row)
0.004184
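# Because _reduce_datetimes duck-types on isoformat(), date, time, and
# datetime values all serialize. For example:
from datetime import datetime

row = (1, datetime(2024, 1, 2, 3, 4, 5), 'x')
print(_reduce_datetimes(row))  # (1, '2024-01-02T03:04:05', 'x')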
def close(self):
    """
    Close the pickle file, and the zip archive file. The single zip archive
    file can now be shipped around to be loaded by the unpickler.
    """
    if self.file is None:
        return
    # Close the pickle file.
    self.file.close()
    self.file = None
    for f in self.mark_for_delete:
        error = [False]

        def register_error(*args):
            error[0] = True

        _shutil.rmtree(f, onerror=register_error)
        if error[0]:
            _atexit.register(_shutil.rmtree, f, ignore_errors=True)
0.006462
def add_eval(self, agent, e, fr=None):
    """Add or change agent's evaluation of the artifact with given framing
    information.

    :param agent: Name of the agent which did the evaluation.
    :param float e: Evaluation for the artifact.
    :param object fr: Framing information for the evaluation.
    """
    self._evals[agent.name] = e
    self._framings[agent.name] = fr
0.004854
def prediction(self, input_data='', mode='test_data'):
    '''
    Make a prediction: takes test data as input and returns the prediction.
    '''
    prediction = {}
    if self.status != 'train':
        print("Please load train data and initialize W, then train W first.")
        return prediction
    if input_data == '':
        print("Please input test data for prediction.")
        return prediction
    if mode == 'future_data':
        data = input_data.split()
        input_data_x = [float(v) for v in data]
        input_data_x = utility.DatasetLoader.feature_transform(
            np.array(input_data_x).reshape(1, -1),
            self.feature_transform_mode,
            self.feature_transform_degree
        )
        input_data_x = np.ravel(input_data_x)
        prediction = self.score_function(input_data_x, self.W)
        return {"input_data_x": input_data_x, "input_data_y": None, "prediction": prediction}
    else:
        data = input_data.split()
        input_data_x = [float(v) for v in data[:-1]]
        input_data_x = utility.DatasetLoader.feature_transform(
            np.array(input_data_x).reshape(1, -1),
            self.feature_transform_mode,
            self.feature_transform_degree
        )
        input_data_x = np.ravel(input_data_x)
        input_data_y = float(data[-1])
        prediction = self.score_function(input_data_x, self.W)
        return {"input_data_x": input_data_x, "input_data_y": input_data_y, "prediction": prediction}
0.002466
def get_program(self, program_resource_name: str) -> Dict:
    """Returns the previously created quantum program.

    Args:
        program_resource_name: A string of the form
            `projects/project_id/programs/program_id`.

    Returns:
        A dictionary containing the metadata and the program.
    """
    return self.service.projects().programs().get(
        name=program_resource_name).execute()
0.004444
def sources_list(self, *args):
    """Display a list of all registered events"""
    from pprint import pprint
    sources = {}
    sources.update(self.authorized_events)
    sources.update(self.anonymous_events)
    for source in sources:
        pprint(source)
0.006803
def cb_histogram(fastq, umi_histogram):
    ''' Counts the number of reads for each cellular barcode

    Expects formatted fastq files.
    '''
    annotations = detect_fastq_annotations(fastq)
    re_string = construct_transformed_regex(annotations)
    parser_re = re.compile(re_string)
    cb_counter = collections.Counter()
    umi_counter = collections.Counter()
    for read in read_fastq(fastq):
        match = parser_re.search(read).groupdict()
        cb = match['CB']
        cb_counter[cb] += 1
        if umi_histogram:
            umi = match['MB']
            umi_counter[(cb, umi)] += 1
    for bc, count in cb_counter.most_common():
        sys.stdout.write('{}\t{}\n'.format(bc, count))
    if umi_histogram:
        with open(umi_histogram, "w") as umi_handle:
            for cbumi, count in umi_counter.most_common():
                umi_handle.write('{}\t{}\t{}\n'.format(cbumi[0], cbumi[1], count))
0.00216
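# The counting core of cb_histogram, minus the fastq parsing; the barcode
# values here are hypothetical.
import collections

cb_counter = collections.Counter(['ACGT', 'ACGT', 'TTTT'])
for bc, count in cb_counter.most_common():
    print('{}\t{}'.format(bc, count))
# ACGT    2
# TTTT    1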
def voxel_loop(self):
    '''Iterator that loops through each voxel and yields the coords and
    time series as a tuple.'''
    # Probably not the most efficient approach, but it is straightforward:
    for x in range(len(self.data)):
        for y in range(len(self.data[x])):
            for z in range(len(self.data[x][y])):
                yield ((x, y, z), self.data[x][y][z])
0.015306
def stats(self):
    """Lists some metrics of the capturing system:
        Tasks processed: the total number of reentrant tasks processed,
            which includes retry attempts.
        Events processed: number of events captured and processed.
        Tasks stored: actual number of unique tasks processed.
        Workers stored: number of unique workers already seen.
    """
    stats = self._stub.get_stats(clearly_pb2.Empty())
    print(Colors.DIM('Processed:'),
          '\ttasks', Colors.RED(stats.task_count),
          '\tevents', Colors.RED(stats.event_count))
    print(Colors.DIM('Stored:'),
          '\ttasks', Colors.RED(stats.len_tasks),
          '\tworkers', Colors.RED(stats.len_workers))
0.002594
def is_same_host(host1, host2):
    """ Returns true if host1 == host2 OR they map to the same host (using DNS) """
    try:
        if host1 == host2:
            return True
        else:
            ips1 = get_host_ips(host1)
            ips2 = get_host_ips(host2)
            return len(set(ips1) & set(ips2)) > 0
    except Exception as ex:
        log_exception(ex)
        return False
0.002532
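# get_host_ips is not shown in this entry; a plausible standard-library
# implementation (an assumption, not the original helper) would be:
import socket

def get_host_ips(host):
    # gethostbyname_ex returns (hostname, aliases, ip_list)
    return socket.gethostbyname_ex(host)[2]

# is_same_host('localhost', '127.0.0.1') then intersects the two IP sets.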
def close(self, code: int = None, reason: str = None) -> None:
    """Closes this Web Socket.

    Once the close handshake is successful the socket will be closed.

    ``code`` may be a numeric status code, taken from the values defined in
    `RFC 6455 section 7.4.1
    <https://tools.ietf.org/html/rfc6455#section-7.4.1>`_.
    ``reason`` may be a textual message about why the connection is
    closing. These values are made available to the client, but are
    not otherwise interpreted by the websocket protocol.

    .. versionchanged:: 4.0
       Added the ``code`` and ``reason`` arguments.
    """
    if self.ws_connection:
        self.ws_connection.close(code, reason)
        self.ws_connection = None
0.002577
def configure_vlan(self, vid, commands):
    """ Configures the specified Vlan using commands

    Args:
        vid (str): The VLAN ID to configure
        commands: The list of commands to configure

    Returns:
        True if the commands completed successfully
    """
    commands = make_iterable(commands)
    commands.insert(0, 'vlan %s' % vid)
    return self.configure(commands)
0.004662
def xdr(self):
    """Get the base64 encoded XDR string representing this
    :class:`TransactionEnvelope`.
    """
    te = Xdr.StellarXDRPacker()
    te.pack_TransactionEnvelope(self.to_xdr_object())
    te = base64.b64encode(te.get_buffer())
    return te
0.006969
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties used to return even more
        filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, default behaviour is
        to search under the inventory rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to
        the container. If that is the case, the traversal spec needs to be
        None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting point for the
    # filter is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )
    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )
    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )
    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy the object view
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
0.001274
def geckoboard_rag_widget(request):
    """
    Searches the GET variables for metric UIDs, and displays them in a
    RAG widget.
    """
    params = get_gecko_params(request)
    max_date = datetime.now() - timedelta(days=params['days_back'])
    metrics = Metric.objects.filter(uid__in=params['uids'])
    results = [(metric.latest_count(frequency=params['frequency'],
                                    count=not params['cumulative'],
                                    cumulative=params['cumulative'],
                                    max_date=max_date),
                metric.title) for metric in metrics]
    return tuple(results)
0.008961
def call_inkscape(args_strings, inkscape_binpath=None):
    """Call the inkscape CLI with the given arguments and return its exit value.

    Parameters
    ----------
    args_strings: list of str

    inkscape_binpath: str

    Returns
    -------
    return_value
        Inkscape command CLI call return value.
    """
    log.debug('Looking for the binary file for inkscape.')
    if inkscape_binpath is None:
        inkscape_binpath = get_inkscape_binpath()
    if inkscape_binpath is None or not os.path.exists(inkscape_binpath):
        raise IOError(
            'Inkscape binary has not been found. Please check configuration.'
        )
    return call_command(inkscape_binpath, args_strings)
0.001439
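# call_command is not shown in this entry; a minimal subprocess-based
# stand-in (a sketch, not the library's actual helper) could look like this:
import subprocess

def call_command(binpath, args_strings):
    # Run the binary with its arguments and return the exit code.
    return subprocess.call([binpath] + list(args_strings))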