Columns: text (string, lengths 78 to 104k), score (float64, range 0 to 0.18)
def tuplecount(text): '''Converts the word-count dictionary for ``text`` into a list of (word, count) tuples sorted by count, descending.''' worddict = wordcount(text) countlist = [] for key in worddict.keys(): countlist.append((key, worddict[key])) countlist = list(reversed(sorted(countlist, key=lambda x: x[1]))) return countlist
0.016949
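A minimal usage sketch for tuplecount above. The wordcount helper it calls is not shown in the snippet, so a hypothetical stand-in is assumed here:

    def wordcount(text):
        # Hypothetical stand-in: map each whitespace-separated word to its count.
        counts = {}
        for word in text.lower().split():
            counts[word] = counts.get(word, 0) + 1
        return counts

    # tuplecount("the cat and the hat")[0] == ('the', 2); the order among
    # equal-count words depends on dict ordering and is not guaranteed.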
def sync_clock(self): """ This will send all the needed messages to sync the clock """ self.send(velbus.SetRealtimeClock()) self.send(velbus.SetDate()) self.send(velbus.SetDaylightSaving())
0.008475
def wait_for_tasks(self, raise_if_error=True): """ Wait for the running tasks launched from the sessions. Note that it also waits for tasks that are started from other tasks' callbacks, like on_finished. :param raise_if_error: if True, raise all possible encountered errors using :class:`TaskErrors`. Else the errors are returned as a list. """ errors = [] tasks_seen = TaskCache() while True: for session in self.values(): errs = session.wait_for_tasks(raise_if_error=False) errors.extend(errs) # look for tasks created after the wait (in callbacks of # tasks from different sessions) tasks = [] for session in self.values(): tasks.extend(session.tasks()) # if none, then just break - else loop to wait for them if not any(t for t in tasks if t not in tasks_seen): break if raise_if_error and errors: raise TaskErrors(errors) return errors
0.0018
def compute_K_numerical(dataframe, settings=None, keep_dir=None): """Use a finite-element modeling code to infer geometric factors for meshes with topography or irregular electrode spacings. Parameters ---------- dataframe : pandas.DataFrame the data frame that contains the data settings : dict The settings required to compute the geometric factors. See the example below for more information on the required content. keep_dir : path if not None, copy modeling dir here Returns ------- K : :class:`numpy.ndarray` K factors (are also directly written to the dataframe) Examples -------- :: settings = { 'rho': 100, 'elem': 'elem.dat', 'elec': 'elec.dat', 'sink_node': '100', '2D': False, } """ inversion_code = reda.rcParams.get('geom_factor.inversion_code', 'crtomo') if inversion_code == 'crtomo': import reda.utils.geom_fac_crtomo as geom_fac_crtomo if keep_dir is not None: keep_dir = os.path.abspath(keep_dir) K = geom_fac_crtomo.compute_K( dataframe, settings, keep_dir) else: raise Exception( 'Inversion code {0} not implemented for K computation'.format( inversion_code )) return K
0.000724
def error(self, status_code, request, message=None): """Handle error response. :param int status_code: HTTP status code. :param request: Request object that triggered the error. :param message: Optional custom error message. :return: Raw error response. """ status_code_text = HTTP_STATUS_CODES.get(status_code, 'http error') status_error_tag = status_code_text.lower().replace(' ', '_') custom_response_map = { 404: self.make_error_response( status_error_tag, 'The requested URL {} was not found on this service.'.format( request.path ) ), 400: self.make_error_response(status_error_tag, message), 405: self.make_error_response( status_error_tag, 'The requested URL {} does not allow HTTP method {}.'.format( request.path, request.method ) ), } return self._raw_response( status_code, custom_response_map.get( status_code, self.make_error_response( status_error_tag, message or status_code_text ) ) )
0.001715
def save_html(p, *vsheets): 'Save vsheets as HTML tables in a single file' with open(p.resolve(), 'w', encoding='ascii', errors='xmlcharrefreplace') as fp: for sheet in vsheets: fp.write('<h2 class="sheetname">{sheetname}</h2>\n'.format(sheetname=html.escape(sheet.name))) fp.write('<table id="{sheetname}">\n'.format(sheetname=html.escape(sheet.name))) # headers fp.write('<tr>') for col in sheet.visibleCols: contents = html.escape(col.name) fp.write('<th>{colname}</th>'.format(colname=contents)) fp.write('</tr>\n') # rows for r in Progress(sheet.rows, 'saving'): fp.write('<tr>') for col in sheet.visibleCols: fp.write('<td>') fp.write(html.escape(col.getDisplayValue(r))) fp.write('</td>') fp.write('</tr>\n') fp.write('</table>') status('%s save finished' % p)
0.003846
def parse_substitution_from_list(list_rep): """ Parse a substitution from the list representation in the config file. """ # We are expecting [pattern, replacement [, is_multiline]] if type(list_rep) is not list: raise SyntaxError('Substitution must be a list') if len(list_rep) < 2: raise SyntaxError('Substitution must be a list of at least size 2') pattern = list_rep[0] replacement = list_rep[1] # By default, substitutions are not multiline. is_multiline = False if len(list_rep) > 2: is_multiline = list_rep[2] if type(is_multiline) is not bool: raise SyntaxError('is_multiline must be a boolean') result = substitute.Substitution(pattern, replacement, is_multiline) return result
0.001287
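For illustration, the list forms parse_substitution_from_list above accepts, assuming substitute.Substitution simply stores its three arguments:

    parse_substitution_from_list(['foo', 'bar'])        # defaults to non-multiline
    parse_substitution_from_list([r'^\s+$', '', True])  # explicit multiline flag
    # parse_substitution_from_list('foo')  # would raise SyntaxError: not a list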
def stop(self, kill=False, timeout=15): """ terminate process and wipe out the temp work directory, but only if we actually started it""" super(AbstractDcsController, self).stop(kill=kill, timeout=timeout) if self._work_directory: shutil.rmtree(self._work_directory)
0.009934
def _from_dict(cls, _dict): """Initialize a QueryNoticesResponse object from a json dictionary.""" args = {} if 'matching_results' in _dict: args['matching_results'] = _dict.get('matching_results') if 'results' in _dict: args['results'] = [ QueryNoticesResult._from_dict(x) for x in (_dict.get('results')) ] if 'aggregations' in _dict: args['aggregations'] = [ QueryAggregation._from_dict(x) for x in (_dict.get('aggregations')) ] if 'passages' in _dict: args['passages'] = [ QueryPassages._from_dict(x) for x in (_dict.get('passages')) ] if 'duplicates_removed' in _dict: args['duplicates_removed'] = _dict.get('duplicates_removed') return cls(**args)
0.003425
def fence_point_encode(self, target_system, target_component, idx, count, lat, lng): ''' A fence point. Used to set a point when from GCS -> MAV. Also used to return a point from MAV -> GCS target_system : System ID (uint8_t) target_component : Component ID (uint8_t) idx : point index (first point is 1, 0 is for return point) (uint8_t) count : total number of points (for sanity checking) (uint8_t) lat : Latitude of point (float) lng : Longitude of point (float) ''' return MAVLink_fence_point_message(target_system, target_component, idx, count, lat, lng)
0.008294
def cdna_codon_sequence_after_deletion_or_substitution_frameshift( sequence_from_start_codon, cds_offset, trimmed_cdna_ref, trimmed_cdna_alt): """ Logic for any frameshift which isn't an insertion. We have insertions as a special case since our base-inclusive indexing means something different for insertions: cds_offset = base before insertion Whereas in this case: cds_offset = first reference base affected by a variant Returns index of first modified codon and sequence from that codon onward. """ mutated_codon_index = cds_offset // 3 # get the sequence starting from the first modified codon until the end # of the transcript. sequence_after_mutated_codon = \ sequence_from_start_codon[mutated_codon_index * 3:] # the variant's ref nucleotides should start either 0, 1, or 2 nucleotides # into `sequence_after_mutated_codon` offset_into_mutated_codon = cds_offset % 3 sequence_from_mutated_codon = substitute( sequence=sequence_after_mutated_codon, offset=offset_into_mutated_codon, ref=trimmed_cdna_ref, alt=trimmed_cdna_alt) return mutated_codon_index, sequence_from_mutated_codon
0.000803
def _decode_sense_packet(self, version, packet): """Decode a sense packet into the list of sensors.""" data = self._sense_packet_to_data(packet) offset = 4 i = 0 datalen = len(data) - offset - 6 temp_count = int(datalen / 2) temp = [] for i in range(temp_count): temp_index = i * 2 + offset temp.append(self._decode_temp(data[temp_index], data[temp_index + 1])) self._debug(PROP_LOGLEVEL_DEBUG, "T: " + str(temp)) for sensor in self._sense_sensor: if (sensor.sensor_type == PROP_SENSOR_TEMPERATURE): sensor.value = temp[sensor.index] elif (sensor.sensor_type == PROP_SENSOR_RAW): sensor.value = packet self._debug(PROP_LOGLEVEL_DEBUG, str(self))
0.003659
def run_config(repl, config_file='~/.ptpython/config.py'): """ Execute REPL config file. :param repl: `PythonInput` instance. :param config_file: Path of the configuration file. """ assert isinstance(repl, PythonInput) assert isinstance(config_file, six.text_type) # Expand tildes. config_file = os.path.expanduser(config_file) def enter_to_continue(): six.moves.input('\nPress ENTER to continue...') # Check whether this file exists. if not os.path.exists(config_file): print('Impossible to read %r' % config_file) enter_to_continue() return # Run the config file in an empty namespace. try: namespace = {} with open(config_file, 'rb') as f: code = compile(f.read(), config_file, 'exec') six.exec_(code, namespace, namespace) # Now we should have a 'configure' method in this namespace. We call this # method with the repl as an argument. if 'configure' in namespace: namespace['configure'](repl) except Exception: traceback.print_exc() enter_to_continue()
0.006076
def cleanup(self): """Cleans up this result so that all its pointers reference memory controlled *outside* of the shared library loaded with ctypes. """ #First we *copy* the arrays that we currently have pointers to. This is not #the optimal solution; however, because of limitations in ctypes, we don't #know anything better at the moment. for key in self.result: self.result[key] = self.result[key].copy() #Now deallocate the pointers managed by Fortran in the shared library so that #any subsequent calls to the executable that generated this result creates #new instances in memory. for key in list(self._finalizers.keys()): self._finalizers[key].clean()
0.016861
def get_unit_id(unit_name): """ Return the unit id to the unit 'unit_name' """ unit_name = unit_name.lower() attribute = 'uniqueIdentifier' response = LDAP_search( pattern_search='(cn={})'.format(unit_name), attribute=attribute ) unit_id = "" try: for element in response: if 'dn' in element and element['dn'].startswith('ou={},'.format(unit_name)): unit_id = element['attributes'][attribute][0] except Exception: raise EpflLdapException("The unit named '{}' was not found".format(unit_name)) finally: if not unit_id: raise EpflLdapException("The unit named '{}' was not found".format(unit_name)) return unit_id
0.005398
def extended_status_request(self): """Send status request for group/button.""" self._status_received = False user_data = Userdata({'d1': self.group, 'd2': 0x00}) cmd = ExtendedSend(self._address, COMMAND_EXTENDED_GET_SET_0X2E_0X00, userdata=user_data) cmd.set_checksum() self._send_method(cmd, self._status_message_received, True)
0.004329
def os_open(document): """Open document by the default handler of the OS, could be a url opened by a browser, a text file by an editor etc""" osname = platform.system().lower() if osname == "darwin": os.system("open \"" + document + "\"") if osname == "linux": cmd = "xdg-open \"" + document + "\"&" os.system(cmd) if osname == "windows": os.system("start \"" + document + "\"")
0.004651
def travis(branch: str): """ Performs necessary checks to ensure that the travis build is one that should create releases. :param branch: The branch the environment should be running against. """ assert os.environ.get('TRAVIS_BRANCH') == branch assert os.environ.get('TRAVIS_PULL_REQUEST') == 'false'
0.00304
def remoteLocation_resolve(self, d_remote): """ Resolve the remote path location :param d_remote: the "remote" specification :return: a string representation of the remote path """ b_status = False str_remotePath = "" if 'path' in d_remote.keys(): str_remotePath = d_remote['path'] b_status = True if 'key' in d_remote.keys(): d_ret = self.storage_resolveBasedOnKey(key = d_remote['key']) if d_ret['status']: b_status = True str_remotePath = d_ret['path'] return { 'status': b_status, 'path': str_remotePath }
0.015007
def lock(self): """Lock thread. Requires that the currently authenticated user has the modposts oauth scope or has user/password authentication as a mod of the subreddit. :returns: The json response from the server. """ url = self.reddit_session.config['lock'] data = {'id': self.fullname} return self.reddit_session.request_json(url, data=data)
0.004854
def _equivalent(self, char, prev, next, implicitA): """ Transliterate a Latin character equivalent to Devanagari. Add VIRAMA for ligatures. Convert standalone to dependent vowels. """ result = [] if not char.isVowel: result.append(char.chr) if char.isConsonant and ((next is not None and next.isConsonant) or next is None): result.append(DevanagariCharacter._VIRAMA) else: if prev is None or not prev.isConsonant: result.append(char.chr) else: if char._dependentVowel is not None: result.append(char._dependentVowel) return result
0.013038
def to_kirbidir(self, directory_path): """ Converts all credential objects in the CCACHE object to the kirbi file format used by mimikatz. The kirbi file format supports one credential per file, so prepare for a lot of files being generated. directory_path: str the directory to write the kirbi files to """ kf_abs = os.path.abspath(directory_path) for cred in self.credentials: kirbi, filename = cred.to_kirbi() filename = '%s.kirbi' % filename.replace('..','!') filepath = os.path.join(kf_abs, filename) with open(filepath, 'wb') as o: o.write(kirbi.dump())
0.032149
def _add_item(self, item, indent_amt): """Add an item to the line. Reflow the line to get the best formatting after the item is inserted. The bracket depth indicates if the item is being inserted inside of a container or not. """ if self._prev_item and self._prev_item.is_string and item.is_string: # Place consecutive string literals on separate lines. self._lines.append(self._LineBreak()) self._lines.append(self._Indent(indent_amt)) item_text = unicode(item) if self._lines and self._bracket_depth: # Adding the item into a container. self._prevent_default_initializer_splitting(item, indent_amt) if item_text in '.,)]}': self._split_after_delimiter(item, indent_amt) elif self._lines and not self.line_empty(): # Adding the item outside of a container. if self.fits_on_current_line(len(item_text)): self._enforce_space(item) else: # Line break for the new item. self._lines.append(self._LineBreak()) self._lines.append(self._Indent(indent_amt)) self._lines.append(item) self._prev_item, self._prev_prev_item = item, self._prev_item if item_text in '([{': self._bracket_depth += 1 elif item_text in '}])': self._bracket_depth -= 1 assert self._bracket_depth >= 0
0.001331
def _assemble_flowtable(self, values): """ generate a flowtable from a tuple of descriptors. """ # a list comprehension (rather than map) keeps this indexable on Python 3 values = [[] if x is None else x for x in values] src = values[0] + values[1] dst = values[2] + values[3] thistable = dict() for s in src: thistable[s] = dst return thistable
0.005666
def expand_fc_groups(users): """ If user is a firecloud group, return all members of the group. Caveat is that only group admins may do this. """ groups = None for user in users: fcgroup = None if '@' not in user: fcgroup = user elif user.lower().endswith('@firecloud.org'): if groups is None: r = fapi.get_groups() fapi._check_response_code(r, 200) groups = {group['groupEmail'].lower():group['groupName'] \ for group in r.json() if group['role'] == 'Admin'} if user.lower() not in groups: if fcconfig.verbosity: eprint("You do not have access to the members of {}".format(user)) yield user continue else: fcgroup = groups[user.lower()] else: yield user continue r = fapi.get_group(fcgroup) fapi._check_response_code(r, 200) fcgroup_data = r.json() for admin in fcgroup_data['adminsEmails']: yield admin for member in fcgroup_data['membersEmails']: yield member
0.003311
def make_charlist(self): """Parses the complete text and stores format for each character.""" def worker_output(worker, output, error): """Worker finished callback.""" self._charlist = output if error is None and output: self._allow_highlight = True self.rehighlight() self._allow_highlight = False text = to_text_string(self.document().toPlainText()) tokens = self._lexer.get_tokens(text) # Before starting a new worker process make sure to end previous # incarnations self._worker_manager.terminate_all() worker = self._worker_manager.create_python_worker( self._make_charlist, tokens, self._tokmap, self.formats, ) worker.sig_finished.connect(worker_output) worker.start()
0.002174
def startswith(haystack, prefix): """ py3 comp startswith :param haystack: :param prefix: :return: """ if haystack is None: return None if sys.version_info[0] < 3: return haystack.startswith(prefix) return to_bytes(haystack).startswith(to_bytes(prefix))
0.003257
def create_project(self, project_name, project_des): """ Create a project. An unsuccessful operation will raise a LogException. :type project_name: string :param project_name: the Project name :type project_des: string :param project_des: the description of a project :return: CreateProjectResponse :raise: LogException """ params = {} body = {"projectName": project_name, "description": project_des} body = six.b(json.dumps(body)) headers = {'Content-Type': 'application/json', 'x-log-bodyrawsize': str(len(body))} resource = "/" (resp, header) = self._send("POST", project_name, body, resource, params, headers) return CreateProjectResponse(header, resp)
0.00738
def username(anon, obj, field, val): """ Generates a random username """ return anon.faker.user_name(field=field)
0.007752
def permutation_from_disjoint_cycles(cycles, offset=0): """Reconstruct a permutation image tuple from a list of disjoint cycles :param cycles: sequence of disjoint cycles :type cycles: list or tuple :param offset: Offset to subtract from the resulting permutation image points :type offset: int :return: permutation image tuple :rtype: tuple """ perm_length = sum(map(len, cycles)) res_perm = list(range(perm_length)) for c in cycles: p1 = c[0] - offset for p2 in c[1:]: p2 = p2 - offset res_perm[p1] = p2 p1 = p2 res_perm[p1] = c[0] - offset #close cycle assert sorted(res_perm) == list(range(perm_length)) return tuple(res_perm)
0.005391
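A worked call for permutation_from_disjoint_cycles above; the expected tuple was traced through the loop by hand:

    # Cycles (0 1) and (2 3 4): 0->1, 1->0, 2->3, 3->4, 4->2
    assert permutation_from_disjoint_cycles([(0, 1), (2, 3, 4)]) == (1, 0, 3, 4, 2)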
def wasModified(self): """ Check to see if this module has been modified on disk since the last time it was cached. @return: True if it has been modified, False if not. """ self.filePath.restat() mtime = self.filePath.getmtime() if mtime >= self.lastModified: return True else: return False
0.005168
def message(self, msg): """Send a message to third party applications """ for broker in self.message_brokers: try: broker(msg) except Exception as exc: utils.error(exc)
0.008065
def raise_expired_not_yet_valid(certificate): """ Raises a TLSVerificationError due to certificate being expired, or not yet being valid :param certificate: An asn1crypto.x509.Certificate object :raises: TLSVerificationError """ validity = certificate['tbs_certificate']['validity'] not_after = validity['not_after'].native not_before = validity['not_before'].native now = datetime.now(timezone.utc) if not_before > now: formatted_before = not_before.strftime('%Y-%m-%d %H:%M:%SZ') message = 'Server certificate verification failed - certificate not valid until %s' % formatted_before elif not_after < now: formatted_after = not_after.strftime('%Y-%m-%d %H:%M:%SZ') message = 'Server certificate verification failed - certificate expired %s' % formatted_after raise TLSVerificationError(message, certificate)
0.003282
def getTlvProperties(cardConnection, featureList=None, controlCode=None): """ return the GET_TLV_PROPERTIES structure @param cardConnection: L{CardConnection} object @param featureList: feature list as returned by L{getFeatureRequest()} @param controlCode: control code for L{FEATURE_GET_TLV_PROPERTIES} @rtype: dict @return: a dict """ if controlCode is None: if featureList is None: featureList = getFeatureRequest(cardConnection) controlCode = hasFeature(featureList, FEATURE_GET_TLV_PROPERTIES) if controlCode is None: return {'raw': []} response = cardConnection.control(controlCode, []) return parseTlvProperties(response)
0.001412
def blocks(self, start=None, stop=None): """ Yields blocks starting from ``start``. :param int start: Starting block :param int stop: Stop at this block """ # Let's find out how often blocks are generated! block_interval = self.config().get("STEEMIT_BLOCK_INTERVAL") if not start: start = self.get_current_block_num() # We are going to loop indefinitely while True: # Get the current head block head_block = self.get_current_block_num() # Blocks from start until head block for blocknum in range(start, head_block + 1): # Get full block block = self.steem.rpc.get_block(blocknum) block.update({"block_num": blocknum}) yield block # Set new start start = head_block + 1 if stop and start > stop: break # Sleep for one block time.sleep(block_interval)
0.002366
def find_usage(self): """ Determine the current usage for each limit of this service, and update corresponding Limit via :py:meth:`~.AwsLimit._add_current_usage`. """ logger.debug("Checking usage for service %s", self.service_name) for lim in self.limits.values(): lim._reset_usage() elb_usage = self._find_usage_elbv1() alb_usage = self._find_usage_elbv2() logger.debug('ELBs in use: %d, ALBs in use: %d', elb_usage, alb_usage) self.limits['Active load balancers']._add_current_usage( (elb_usage + alb_usage), aws_type='AWS::ElasticLoadBalancing::LoadBalancer', ) self._have_usage = True logger.debug("Done checking usage.")
0.002587
def _prepare_text(self, text): """Returns `text` with each constituent token wrapped in HTML markup for later match annotation. :param text: text to be marked up :type text: `str` :rtype: `str` """ # Remove characters that should be escaped for XML input (but # which cause problems when escaped, since they become # tokens). text = re.sub(r'[<>&]', '', text) pattern = r'({})'.format(self._tokenizer.pattern) return re.sub(pattern, self._base_token_markup, text)
0.003571
def make_choices(*args): """Convert a 1-D sequence into a 2-D sequence of tuples for use in a Django field choices attribute >>> make_choices(range(3)) ((0, u'0'), (1, u'1'), (2, u'2')) >>> make_choices(dict(enumerate('abcd'))) ((0, u'a'), (1, u'b'), (2, u'c'), (3, u'd')) >>> make_choices('hello') (('hello', u'hello'),) >>> make_choices('hello', 'world') == make_choices(['hello', 'world']) == (('hello', u'hello'), ('world', u'world')) True """ if not args: return tuple() if isinstance(args[0], (list, tuple)): return make_choices(*tuple(args[0])) elif isinstance(args[0], collections.Mapping): return tuple((k, unicode(v)) for (k, v) in args[0].iteritems()) elif all(isinstance(arg, (int, float, Decimal, basestring)) for arg in args): return tuple((k, unicode(k)) for k in args)
0.004582
def gevent_run(app, port=5000, log=None, error_log=None, address='', monkey_patch=True, start=True, **kwargs): # pragma: no cover """Run your app in gevent.wsgi.WSGIServer :param app: wsgi application, ex. Microservice instance :param port: int, listen port, default 5000 :param address: str, listen address, default: "" :param log: logger instance, default app.logger :param error_log: logger instance, default app.logger :param monkey_patch: boolean, use gevent.monkey.patch_all() for patching standard modules, default: True :param start: boolean, if True, server will be start (server.serve_forever()) :param kwargs: other params for WSGIServer(**kwargs) :return: server """ if log is None: log = app.logger if error_log is None: error_log = app.logger if monkey_patch: from gevent import monkey monkey.patch_all() from gevent.wsgi import WSGIServer http_server = WSGIServer((address, port), app, log=log, error_log=error_log, **kwargs) if start: http_server.serve_forever() return http_server
0.003457
def add_f08_to_env(env): """Add Builders and construction variables for f08 to an Environment.""" try: F08Suffixes = env['F08FILESUFFIXES'] except KeyError: F08Suffixes = ['.f08'] try: F08PPSuffixes = env['F08PPFILESUFFIXES'] except KeyError: F08PPSuffixes = [] DialectAddToEnv(env, "F08", F08Suffixes, F08PPSuffixes, support_module = 1)
0.007229
def shutdown(self): """Shutdown internal workers by pushing terminate signals.""" if not self._shutdown: # send shutdown signal to the fetcher and join data queue first # Remark: loop_fetcher needs to be joined prior to the workers. # otherwise, the fetcher may fail at getting data self._data_queue.put((None, None)) self._fetcher.join() # send shutdown signal to all worker processes for _ in range(self._num_workers): self._key_queue.put((None, None)) # force shut down any alive worker processes for w in self._workers: if w.is_alive(): w.terminate() self._shutdown = True
0.002567
def covariance(x, y=None, sample_axis=0, event_axis=-1, keepdims=False, name=None): """Sample covariance between observations indexed by `event_axis`. Given `N` samples of scalar random variables `X` and `Y`, covariance may be estimated as ```none Cov[X, Y] := N^{-1} sum_{n=1}^N (X_n - Xbar) Conj{(Y_n - Ybar)} Xbar := N^{-1} sum_{n=1}^N X_n Ybar := N^{-1} sum_{n=1}^N Y_n ``` For vector-variate random variables `X = (X1, ..., Xd)`, `Y = (Y1, ..., Yd)`, one is often interested in the covariance matrix, `C_{ij} := Cov[Xi, Yj]`. ```python x = tf.random_normal(shape=(100, 2, 3)) y = tf.random_normal(shape=(100, 2, 3)) # cov[i, j] is the sample covariance between x[:, i, j] and y[:, i, j]. cov = tfp.stats.covariance(x, y, sample_axis=0, event_axis=None) # cov_matrix[i, m, n] is the sample covariance of x[:, i, m] and y[:, i, n] cov_matrix = tfp.stats.covariance(x, y, sample_axis=0, event_axis=-1) ``` Notice we divide by `N` (the numpy default), which does not create `NaN` when `N = 1`, but is slightly biased. Args: x: A numeric `Tensor` holding samples. y: Optional `Tensor` with same `dtype` and `shape` as `x`. Default value: `None` (`y` is effectively set to `x`). sample_axis: Scalar or vector `Tensor` designating axis holding samples, or `None` (meaning all axis hold samples). Default value: `0` (leftmost dimension). event_axis: Scalar or vector `Tensor`, or `None` (scalar events). Axis indexing random events, whose covariance we are interested in. If a vector, entries must form a contiguous block of dims. `sample_axis` and `event_axis` should not intersect. Default value: `-1` (rightmost axis holds events). keepdims: Boolean. Whether to keep the sample axis as singletons. name: Python `str` name prefixed to Ops created by this function. Default value: `None` (i.e., `'covariance'`). Returns: cov: A `Tensor` of same `dtype` as the `x`, and rank equal to `rank(x) - len(sample_axis) + 2 * len(event_axis)`. Raises: AssertionError: If `x` and `y` are found to have different shape. ValueError: If `sample_axis` and `event_axis` are found to overlap. ValueError: If `event_axis` is found to not be contiguous. """ with tf.compat.v1.name_scope( name, 'covariance', values=[x, y, event_axis, sample_axis]): x = tf.convert_to_tensor(value=x, name='x') # Covariance *only* uses the centered versions of x (and y). x -= tf.reduce_mean(input_tensor=x, axis=sample_axis, keepdims=True) if y is None: y = x else: y = tf.convert_to_tensor(value=y, name='y', dtype=x.dtype) # If x and y have different shape, sample_axis and event_axis will likely # be wrong for one of them! x.shape.assert_is_compatible_with(y.shape) y -= tf.reduce_mean(input_tensor=y, axis=sample_axis, keepdims=True) if event_axis is None: return tf.reduce_mean( input_tensor=x * tf.math.conj(y), axis=sample_axis, keepdims=keepdims) if sample_axis is None: raise ValueError( 'sample_axis was None, which means all axis hold events, and this ' 'overlaps with event_axis ({})'.format(event_axis)) event_axis = _make_positive_axis(event_axis, tf.rank(x)) sample_axis = _make_positive_axis(sample_axis, tf.rank(x)) # If we get lucky and axis is statically defined, we can do some checks. if _is_list_like(event_axis) and _is_list_like(sample_axis): if set(event_axis).intersection(sample_axis): raise ValueError( 'sample_axis ({}) and event_axis ({}) overlapped'.format( sample_axis, event_axis)) if (np.diff(sorted(event_axis)) > 1).any(): raise ValueError( 'event_axis must be contiguous. Found: {}'.format(event_axis)) batch_axis = list( sorted( set(range(x.shape.ndims)).difference(sample_axis + event_axis))) else: batch_axis, _ = tf.compat.v1.setdiff1d( tf.range(0, tf.rank(x)), tf.concat((sample_axis, event_axis), 0)) event_axis = tf.convert_to_tensor( value=event_axis, name='event_axis', dtype=tf.int32) sample_axis = tf.convert_to_tensor( value=sample_axis, name='sample_axis', dtype=tf.int32) batch_axis = tf.convert_to_tensor( value=batch_axis, name='batch_axis', dtype=tf.int32) # Permute x/y until shape = B + E + S perm_for_xy = tf.concat((batch_axis, event_axis, sample_axis), 0) x_permed = tf.transpose(a=x, perm=perm_for_xy) y_permed = tf.transpose(a=y, perm=perm_for_xy) batch_ndims = tf.size(input=batch_axis) batch_shape = tf.shape(input=x_permed)[:batch_ndims] event_ndims = tf.size(input=event_axis) event_shape = tf.shape(input=x_permed)[batch_ndims:batch_ndims + event_ndims] sample_shape = tf.shape(input=x_permed)[batch_ndims + event_ndims:] sample_ndims = tf.size(input=sample_shape) n_samples = tf.reduce_prod(input_tensor=sample_shape) n_events = tf.reduce_prod(input_tensor=event_shape) # Flatten sample_axis into one long dim. x_permed_flat = tf.reshape( x_permed, tf.concat((batch_shape, event_shape, [n_samples]), 0)) y_permed_flat = tf.reshape( y_permed, tf.concat((batch_shape, event_shape, [n_samples]), 0)) # Do the same for event_axis. x_permed_flat = tf.reshape( x_permed, tf.concat((batch_shape, [n_events], [n_samples]), 0)) y_permed_flat = tf.reshape( y_permed, tf.concat((batch_shape, [n_events], [n_samples]), 0)) # After matmul, cov.shape = batch_shape + [n_events, n_events] cov = tf.matmul( x_permed_flat, y_permed_flat, adjoint_b=True) / tf.cast( n_samples, x.dtype) # Insert some singletons to make # cov.shape = batch_shape + event_shape**2 + [1,...,1] # This is just like x_permed.shape, except the sample_axis is all 1's, and # the [n_events] became event_shape**2. cov = tf.reshape( cov, tf.concat( ( batch_shape, # event_shape**2 used here because it is the same length as # event_shape, and has the same number of elements as one # batch of covariance. event_shape**2, tf.ones([sample_ndims], tf.int32)), 0)) # Permuting by the argsort inverts the permutation, making # cov.shape have ones in the position where there were samples, and # [n_events * n_events] in the event position. cov = tf.transpose(a=cov, perm=tf.math.invert_permutation(perm_for_xy)) # Now expand event_shape**2 into event_shape + event_shape. # We here use (for the first time) the fact that we require event_axis to be # contiguous. e_start = event_axis[0] e_len = 1 + event_axis[-1] - event_axis[0] cov = tf.reshape( cov, tf.concat((tf.shape(input=cov)[:e_start], event_shape, event_shape, tf.shape(input=cov)[e_start + e_len:]), 0)) # tf.squeeze requires python ints for axis, not Tensor. This is enough to # require our axis args to be constants. if not keepdims: squeeze_axis = tf.where(sample_axis < e_start, sample_axis, sample_axis + e_len) cov = _squeeze(cov, axis=squeeze_axis) return cov
0.002662
def pause_tube(self, tube, delay=3600): """Pause a tube for some number of seconds, preventing it from issuing jobs. :param delay: Time to pause for, in seconds :type delay: int There is no way to permanently pause a tube; passing 0 for delay actually un-pauses the tube. .. seealso:: :func:`unpause_tube()` """ with self._sock_ctx() as socket: delay = int(delay) self._send_message('pause-tube {0} {1}'.format(tube, delay), socket) return self._receive_word(socket, b'PAUSED')
0.008562
def download_fundflow(self, bill_date, account_type='Basic', tar_type=None): """ Download the fund flow statement https://pay.weixin.qq.com/wiki/doc/api/jsapi.php?chapter=9_18&index=7 :param bill_date: Date of the statement to download :param account_type: Funding source account of the statement. Basic: basic account, Operation: operation account, Fees: fees account :param tar_type: Optional. The fixed value GZIP returns the statement as a .gzip archive; if omitted, the statement is returned as a data stream. """ if isinstance(bill_date, (datetime, date)): bill_date = bill_date.strftime('%Y%m%d') data = { 'appid': self.appid, 'bill_date': bill_date, 'account_type': account_type, 'sign_type': 'HMAC-SHA256' } if tar_type is not None: data['tar_type'] = tar_type return self._post('pay/downloadfundflow', data=data)
0.003219
def _R2deriv(self,R,z,phi=0.,t=0.): """ NAME: _R2deriv PURPOSE: evaluate the second radial derivative for this potential INPUT: R - Galactocentric cylindrical radius z - vertical height phi - azimuth t - time OUTPUT: the second radial derivative HISTORY: 2011-10-09 - Written - Bovy (IAS) """ return 1./(R**2.+(self._a+nu.sqrt(z**2.+self._b2))**2.)**1.5 \ -3.*R**2./(R**2.+(self._a+nu.sqrt(z**2.+self._b2))**2.)**2.5
0.012069
def combine_lists_reducer( key: str, merged_list: list, component: COMPONENT ) -> list: """ Reducer function to combine the lists for the specified key into a single, flat list :param key: The key on the COMPONENT instances to operate upon :param merged_list: The accumulated list of values populated by previous calls to this reducer function :param component: The COMPONENT instance from which to append values to the merged_list :return: The updated merged_list with the values for the COMPONENT added onto it """ merged_list.extend(getattr(component, key)) return merged_list
0.001427
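A usage sketch for combine_lists_reducer above with functools.reduce. The COMPONENT type is not shown in the snippet, so a namedtuple stands in for any object exposing a list attribute:

    import functools
    from collections import namedtuple

    Component = namedtuple('Component', ['tags'])  # hypothetical stand-in
    components = [Component(tags=['a', 'b']), Component(tags=['c'])]

    merged = functools.reduce(
        functools.partial(combine_lists_reducer, 'tags'), components, [])
    assert merged == ['a', 'b', 'c']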
def get_urgent(self, sensors): """ Determine if any sensors should set the urgent flag. """ if self.urgent_on not in ('warning', 'critical'): raise Exception("urgent_on must be one of (warning, critical)") for sensor in sensors: if self.urgent_on == 'warning' and sensor.is_warning(): return True elif self.urgent_on == 'critical' and sensor.is_critical(): return True return False
0.004158
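A minimal sketch for exercising get_urgent above, assuming sensors only need to expose is_warning() and is_critical() and that the host object carries an urgent_on attribute:

    class StubSensor:  # hypothetical test double
        def __init__(self, warning=False, critical=False):
            self._warning, self._critical = warning, critical

        def is_warning(self):
            return self._warning

        def is_critical(self):
            return self._critical

    # With urgent_on == 'warning':
    # get_urgent([StubSensor(), StubSensor(warning=True)]) -> True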
def split_namedpipe_cls(pair1_file, pair2_file, data): """Create a commandline suitable for use as a named pipe with reads in a given region. """ if "align_split" in data: start, end = [int(x) for x in data["align_split"].split("-")] else: start, end = None, None if pair1_file.endswith(".sdf"): assert not pair2_file, pair2_file return rtg.to_fastq_apipe_cl(pair1_file, start, end) else: out = [] for in_file in pair1_file, pair2_file: if in_file: assert _get_grabix_index(in_file), "Need grabix index for %s" % in_file out.append("<(grabix grab {in_file} {start} {end})".format(**locals())) else: out.append(None) return out
0.005141
def get_compound_amount(self, compound): """ Determine the mole amount of the specified compound. :returns: Amount. [kmol] """ index = self.material.get_compound_index(compound) return stoich.amount(compound, self._compound_masses[index])
0.006944
def execute(context=None, lens=None, commands=(), load_path=None): ''' Execute Augeas commands .. versionadded:: 2014.7.0 CLI Example: .. code-block:: bash salt '*' augeas.execute /files/etc/redis/redis.conf \\ commands='["set bind 0.0.0.0", "set maxmemory 1G"]' context The Augeas context lens The Augeas lens to use commands The Augeas commands to execute .. versionadded:: 2016.3.0 load_path A colon-separated list of directories that modules should be searched in. This is in addition to the standard load path and the directories in AUGEAS_LENS_LIB. ''' ret = {'retval': False} arg_map = { 'set': (1, 2), 'setm': (2, 3), 'move': (2,), 'insert': (3,), 'remove': (1,), } def make_path(path): ''' Return correct path ''' if not context: return path if path.lstrip('/'): if path.startswith(context): return path path = path.lstrip('/') return os.path.join(context, path) else: return context load_path = _check_load_paths(load_path) flags = _Augeas.NO_MODL_AUTOLOAD if lens and context else _Augeas.NONE aug = _Augeas(flags=flags, loadpath=load_path) if lens and context: aug.add_transform(lens, re.sub('^/files', '', context)) aug.load() for command in commands: try: # first part up to space is always the # command name (i.e.: set, move) cmd, arg = command.split(' ', 1) if cmd not in METHOD_MAP: ret['error'] = 'Command {0} is not supported (yet)'.format(cmd) return ret method = METHOD_MAP[cmd] nargs = arg_map[method] parts = salt.utils.args.shlex_split(arg) if len(parts) not in nargs: err = '{0} takes {1} args: {2}'.format(method, nargs, parts) raise ValueError(err) if method == 'set': path = make_path(parts[0]) value = parts[1] if len(parts) == 2 else None args = {'path': path, 'value': value} elif method == 'setm': base = make_path(parts[0]) sub = parts[1] value = parts[2] if len(parts) == 3 else None args = {'base': base, 'sub': sub, 'value': value} elif method == 'move': path = make_path(parts[0]) dst = parts[1] args = {'src': path, 'dst': dst} elif method == 'insert': label, where, path = parts if where not in ('before', 'after'): raise ValueError( 'Expected "before" or "after", not {0}'.format(where)) path = make_path(path) args = { 'path': path, 'label': label, 'before': where == 'before'} elif method == 'remove': path = make_path(parts[0]) args = {'path': path} except ValueError as err: log.error(err) # if command.split fails arg will not be set if 'arg' not in locals(): arg = command ret['error'] = 'Invalid formatted command, ' \ 'see debug log for details: {0}'.format(arg) return ret args = salt.utils.data.decode(args, to_str=True) log.debug('%s: %s', method, args) func = getattr(aug, method) func(**args) try: aug.save() ret['retval'] = True except IOError as err: ret['error'] = six.text_type(err) if lens and not lens.endswith('.lns'): ret['error'] += '\nLenses are normally configured as "name.lns". ' \ 'Did you mean "{0}.lns"?'.format(lens) aug.close() return ret
0.000489
def ec2_instances(): "Use the EC2 API to get a list of all machines" region = boto.ec2.get_region(REGION) reservations = region.connect().get_all_instances() instances = [] for reservation in reservations: instances += reservation.instances return instances
0.00346
def julian_datetime(julianday, milisecond=0): """Return datetime from days since 1/1/4713 BC and ms since midnight. Convert Julian dates according to MetaMorph. >>> julian_datetime(2451576, 54362783) datetime.datetime(2000, 2, 2, 15, 6, 2, 783) """ if julianday <= 1721423: # no datetime before year 1 return None a = julianday + 1 if a > 2299160: alpha = math.trunc((a - 1867216.25) / 36524.25) a += 1 + alpha - alpha // 4 b = a + (1524 if a > 1721423 else 1158) c = math.trunc((b - 122.1) / 365.25) d = math.trunc(365.25 * c) e = math.trunc((b - d) / 30.6001) day = b - d - math.trunc(30.6001 * e) month = e - (1 if e < 13.5 else 13) year = c - (4716 if month > 2.5 else 4715) hour, milisecond = divmod(milisecond, 1000 * 60 * 60) minute, milisecond = divmod(milisecond, 1000 * 60) second, milisecond = divmod(milisecond, 1000) return datetime.datetime(year, month, day, hour, minute, second, milisecond)
0.000952
def path(self, which=None): """Extend ``nailgun.entity_mixins.Entity.path``. The format of the returned path depends on the value of ``which``: incremental_update /content_view_versions/incremental_update promote /content_view_versions/<id>/promote ``super`` is called otherwise. """ if which in ('incremental_update', 'promote'): prefix = 'base' if which == 'incremental_update' else 'self' return '{0}/{1}'.format( super(ContentViewVersion, self).path(prefix), which ) return super(ContentViewVersion, self).path(which)
0.002937
def get_imports(project, pydefined): """A shortcut for getting the `ImportInfo`\s used in a scope""" pymodule = pydefined.get_module() module = module_imports.ModuleImports(project, pymodule) if pymodule == pydefined: return [stmt.import_info for stmt in module.imports] return module.get_used_imports(pydefined)
0.005882
def getSampleTypes(self, active_only=True): """Return all sampletypes """ catalog = api.get_tool("bika_setup_catalog") query = { "portal_type": "SampleType", # N.B. The `sortable_title` index sorts case sensitive. Since there # is no sort key for sample types, it makes more sense to sort # them alphabetically in the selection "sort_on": "title", "sort_order": "ascending" } results = catalog(query) if active_only: results = filter(api.is_active, results) sampletypes = map( lambda brain: (brain.UID, brain.Title), results) return DisplayList(sampletypes)
0.002721
def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ engine = create_engine(get_url()) connection = engine.connect() context.configure( connection=connection, target_metadata=target_metadata, version_table="alembic_ziggurat_foundations_version", transaction_per_migration=True, ) try: with context.begin_transaction(): context.run_migrations() finally: connection.close()
0.001721
def validate( message, get_certificate=lambda url: urlopen(url).read(), certificate_url_regex=DEFAULT_CERTIFICATE_URL_REGEX, max_age=DEFAULT_MAX_AGE ): """ Validate a decoded SNS message. Parameters: message: Decoded SNS message. get_certificate: Function that receives a URL, and returns the certificate from that URL as a string. The default doesn't implement caching. certificate_url_regex: Regex that validates the signing certificate URL. Default value checks it's hosted on an AWS-controlled domain, in the format "https://sns.<data-center>.amazonaws.com/" max_age: Maximum age of an SNS message before it fails validation, expressed as a `datetime.timedelta`. Defaults to one hour, the max. lifetime of an SNS message. """ # Check the signing certicate URL. SigningCertURLValidator(certificate_url_regex).validate(message) # Check the message age. if not isinstance(max_age, datetime.timedelta): raise ValueError("max_age must be None or a timedelta object") MessageAgeValidator(max_age).validate(message) # Passed the basic checks, let's download the cert. # We've validated the URL, so aren't worried about a malicious server. certificate = get_certificate(message["SigningCertURL"]) # Check the cryptographic signature. SignatureValidator(certificate).validate(message)
0.000664
def get_rdns_name(rdns): """ Gets the rdns String name :param rdns: RDNS object :type rdns: cryptography.x509.RelativeDistinguishedName :return: RDNS name """ name = '' for rdn in rdns: for attr in rdn._attributes: if len(name) > 0: name = name + ',' if attr.oid in OID_NAMES: name = name + OID_NAMES[attr.oid] else: name = name + attr.oid._name name = name + '=' + attr.value return name
0.001894
def serialize_encrypted_data_key(encrypted_data_key): """Serializes an encrypted data key. .. versionadded:: 1.3.0 :param encrypted_data_key: Encrypted data key to serialize :type encrypted_data_key: aws_encryption_sdk.structures.EncryptedDataKey :returns: Serialized encrypted data key :rtype: bytes """ encrypted_data_key_format = ( ">" # big endian "H" # key provider ID length "{provider_id_len}s" # key provider ID "H" # key info length "{provider_info_len}s" # key info "H" # encrypted data key length "{enc_data_key_len}s" # encrypted data key ) return struct.pack( encrypted_data_key_format.format( provider_id_len=len(encrypted_data_key.key_provider.provider_id), provider_info_len=len(encrypted_data_key.key_provider.key_info), enc_data_key_len=len(encrypted_data_key.encrypted_data_key), ), len(encrypted_data_key.key_provider.provider_id), to_bytes(encrypted_data_key.key_provider.provider_id), len(encrypted_data_key.key_provider.key_info), to_bytes(encrypted_data_key.key_provider.key_info), len(encrypted_data_key.encrypted_data_key), encrypted_data_key.encrypted_data_key, )
0.00077
def _newRemoteException(ErrorType): '''create a new RemoteExceptionType from a given errortype''' RemoteErrorBaseType = _RemoteExceptionMeta('', (ErrorType,), {}) class RemoteException(RemoteErrorBaseType): BaseExceptionType = ErrorType def __init__(self, thrownError, tracebackString): self.thrownError = thrownError self.tracebackString = tracebackString RemoteErrorBaseType.__init__(self, *thrownError.args) loadError = staticmethod(_loadError) def __str__(self): return '\n%s\n%s' % (self.tracebackString, self.thrownError) def __reduce__(self): args = (ErrorType, self.thrownError, self.tracebackString) return self.loadError, args RemoteException.__name__ = 'Remote' + ErrorType.__name__ return RemoteException
0.002347
def read(path, encoding="utf-8"): """Read string from text file. """ is_gzip = is_gzip_file(path) with open(path, "rb") as f: if is_gzip: # wbits = MAX_WBITS | 32 lets zlib auto-detect a zlib or gzip header return zlib.decompress(f.read(), zlib.MAX_WBITS | 32).decode(encoding) else: return f.read().decode(encoding)
0.003534
def debug(func): """Print the function name and arguments for debugging.""" @wraps(func) def wrapper(*args, **kwargs): print("{} args: {} kwargs: {}".format(func.__name__, args, kwargs)) return func(*args, **kwargs) return wrapper
0.003802
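For illustration, applying the debug decorator above:

    @debug
    def add(a, b):
        return a + b

    add(2, 3)  # prints "add args: (2, 3) kwargs: {}" and returns 5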
def _get_property_for(self, p, indexed=True, depth=0): """Internal helper to get the Property for a protobuf-level property.""" parts = p.name().split('.') if len(parts) <= depth: # Apparently there's an unstructured value here. # Assume it is a None written for a missing value. # (It could also be that a schema change turned an unstructured # value into a structured one. In that case, too, it seems # better to return None than to return an unstructured value, # since the latter doesn't match the current schema.) return None next = parts[depth] prop = self._properties.get(next) if prop is None: prop = self._fake_property(p, next, indexed) return prop
0.012245
def parse_raw_token(self, raw_token): "Parse token and secret from raw token response." if raw_token is None: return (None, None) # Load as json first then parse as query string try: token_data = json.loads(raw_token) except ValueError: qs = parse_qs(raw_token) token = qs.get('access_token', [None])[0] else: token = token_data.get('access_token', None) return (token, None)
0.004073
def boolean(cls, true_code, false_code=None): """Callback to validate a response code. The returned callback checks whether a given response has a ``status_code`` that is considered good (``true_code``) and raise an appropriate error if not. The optional ``false_code`` allows for a non-successful status code to return False instead of throwing an error. This is used, for example in relationship mutation to indicate that the relationship was not modified. Args: true_code(int): The http status code to consider as a success Keyword Args: false_code(int): The http status code to consider a failure Returns: A function that given a response returns ``True`` if the response's status code matches the given code. Raises a :class:`HeliumError` if the response code does not match. """ def func(response): if response is not None: status_code = response.status if status_code == true_code: return True if false_code is not None and status_code == false_code: return False raise error_for(response) return func
0.001505
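A hedged usage sketch for the boolean factory above; the class it lives on is not shown in the snippet, so Validator is a purely illustrative name:

    # Treat 200 as success and 404 as "relationship not modified" (False).
    check = Validator.boolean(200, false_code=404)
    # check(response) -> True on 200, False on 404, raises HeliumError otherwise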
def print( self, x: int, y: int, string: str, fg: Optional[Tuple[int, int, int]] = None, bg: Optional[Tuple[int, int, int]] = None, bg_blend: int = tcod.constants.BKGND_SET, alignment: int = tcod.constants.LEFT, ) -> None: """Print a string on a console with manual line breaks. `x` and `y` are the starting tile, with ``0,0`` as the upper-left corner of the console. You can use negative numbers if you want to start printing relative to the bottom-right corner, but this behavior may change in future versions. `string` is a Unicode string which may include color control characters. Strings which are too long will be truncated until the next newline character ``"\\n"``. `fg` and `bg` are the foreground text color and background tile color respectfully. This is a 3-item tuple with (r, g, b) color values from 0 to 255. These parameters can also be set to `None` to leave the colors unchanged. `bg_blend` is the blend type used by libtcod. `alignment` can be `tcod.LEFT`, `tcod.CENTER`, or `tcod.RIGHT`. .. versionadded:: 8.5 .. versionchanged:: 9.0 `fg` and `bg` now default to `None` instead of white-on-black. """ x, y = self._pythonic_index(x, y) string_ = string.encode("utf-8") # type: bytes lib.console_print( self.console_c, x, y, string_, len(string_), (fg,) if fg is not None else ffi.NULL, (bg,) if bg is not None else ffi.NULL, bg_blend, alignment, )
0.001729
def _find_header_row(self): """ Evaluate all rows and determine header position, based on greatest number of 'th' tagged elements """ th_max = 0 header_idx = 0 for idx, tr in enumerate(self._tr_nodes): th_count = len(tr.contents.filter_tags(matches=ftag('th'))) if th_count > th_max: th_max = th_count header_idx = idx if not th_max: return self._log('found header at row %d (%d <th> elements)' % \ (header_idx, th_max)) header_row = self._tr_nodes.pop(header_idx) return header_row.contents.filter_tags(matches=ftag('th'))
0.005698
def show_output_dynamic(fsync_output_dynamic): """! @brief Shows output dynamic (output of each oscillator) during simulation. @param[in] fsync_output_dynamic (fsync_dynamic): Output dynamic of the fSync network. @see show_output_dynamics """ pyclustering.utils.draw_dynamics(fsync_output_dynamic.time, fsync_output_dynamic.output, x_title = "t", y_title = "amplitude")
0.030172
def _import_public_names(module): "Import public names from module into this module, like import *" self = sys.modules[__name__] for name in module.__all__: if hasattr(self, name): # don't overwrite existing names continue setattr(self, name, getattr(module, name))
0.003155
def build(self, builder): """Build XML by appending to builder""" params = dict(OID=self.oid) params["mdsol:ProjectType"] = self.project_type builder.start("Study", params) # Ask children if self.global_variables is not None: self.global_variables.build(builder) if self.basic_definitions is not None: self.basic_definitions.build(builder) if self.metadata_version is not None: self.metadata_version.build(builder) builder.end("Study")
0.00365
def _prm_write_into_pytable(self, tablename, data, hdf5_group, fullname, **kwargs): """Stores data as pytable. :param tablename: Name of the data table :param data: Data to store :param hdf5_group: Group node where to store data in hdf5 file :param fullname: Full name of the `data_to_store`s original container, only needed for throwing errors. """ datasize = data.shape[0] try: # Get a new pytables description from the data and create a new table description_dict, data_type_dict = self._prm_make_description(data, fullname) description_dicts = [{}] if len(description_dict) > ptpa.MAX_COLUMNS: # For optimization we want to store the original data types into another table # and split the tables into several ones new_table_group = self._hdf5file.create_group(where=hdf5_group, name=tablename, filters=self._all_get_filters(kwargs.copy())) count = 0 for innerkey in description_dict: val = description_dict[innerkey] if count == ptpa.MAX_COLUMNS: description_dicts.append({}) count = 0 description_dicts[-1][innerkey] = val count += 1 setattr(new_table_group._v_attrs, HDF5StorageService.STORAGE_TYPE, HDF5StorageService.TABLE) setattr(new_table_group._v_attrs, HDF5StorageService.SPLIT_TABLE, 1) hdf5_group = new_table_group else: description_dicts = [description_dict] for idx, descr_dict in enumerate(description_dicts): if idx == 0: tblname = tablename else: tblname = tablename + '_%d' % idx table = self._hdf5file.create_table(where=hdf5_group, name=tblname, description=descr_dict, title=tblname, expectedrows=datasize, filters=self._all_get_filters(kwargs.copy())) row = table.row for n in range(datasize): # Fill the columns with data, note if the parameter was extended nstart!=0 for key in descr_dict: row[key] = data[key][n] row.append() # Remember the original types of the data for perfect recall if idx == 0 and len(description_dict) <= ptpa.MAX_COLUMNS: # We only have a single table and # we can store the original data types as attributes for field_name in data_type_dict: type_description = data_type_dict[field_name] self._all_set_attr(table, field_name, type_description) setattr(table._v_attrs, HDF5StorageService.STORAGE_TYPE, HDF5StorageService.TABLE) table.flush() self._hdf5file.flush() if len(description_dict) > ptpa.MAX_COLUMNS: # We have potentially many split tables and the data types are # stored into an additional table for performance reasons tblname = tablename + '__' + HDF5StorageService.STORAGE_TYPE field_names, data_types = list(zip(*data_type_dict.items())) data_type_table_dict = {'field_name': field_names, 'data_type': data_types} descr_dict, _ = self._prm_make_description(data_type_table_dict, fullname) table = self._hdf5file.create_table(where=hdf5_group, name=tblname, description=descr_dict, title=tblname, expectedrows=len(field_names), filters=self._all_get_filters(kwargs)) row = table.row for n in range(len(field_names)): # Fill the columns with data for key in data_type_table_dict: row[key] = data_type_table_dict[key][n] row.append() setattr(table._v_attrs, HDF5StorageService.DATATYPE_TABLE, 1) table.flush() self._hdf5file.flush() except: self._logger.error('Failed storing table `%s` of `%s`.' % (tablename, fullname)) raise
0.005893
def get_all_child_edges(self): """Return tuples for all child GO IDs, containing current GO ID and child GO ID.""" all_child_edges = set() for child in self.children: all_child_edges.add((child.item_id, self.item_id)) all_child_edges |= child.get_all_child_edges() return all_child_edges
0.008671
def present( name, policy_document=None, policy_document_from_pillars=None, path=None, policies=None, policies_from_pillars=None, managed_policies=None, create_instance_profile=True, region=None, key=None, keyid=None, profile=None, delete_policies=True): ''' Ensure the IAM role exists. name Name of the IAM role. policy_document The policy that grants an entity permission to assume the role. (See https://boto.readthedocs.io/en/latest/ref/iam.html#boto.iam.connection.IAMConnection.create_role) policy_document_from_pillars A pillar key that contains a role policy document. The statements defined here will be appended with the policy document statements defined in the policy_document argument. .. versionadded:: 2017.7.0 path The path to the role/instance profile. (See https://boto.readthedocs.io/en/latest/ref/iam.html#boto.iam.connection.IAMConnection.create_role) policies A dict of IAM role policies. policies_from_pillars A list of pillars that contain role policy dicts. Policies in the pillars will be merged in the order defined in the list and key conflicts will be handled by later defined keys overriding earlier defined keys. The policies defined here will be merged with the policies defined in the policies argument. If keys conflict, the keys in the policies argument will override the keys defined in policies_from_pillars. managed_policies A list of (AWS or Customer) managed policies to be attached to the role. create_instance_profile A boolean of whether or not to create an instance profile and associate it with this role. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. delete_policies Deletes existing policies that are not in the given list of policies. Default value is ``True``. If ``False`` is specified, existing policies will not be deleted allowing manual modifications on the IAM role to be persistent. .. versionadded:: 2015.8.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Build up _policy_document _policy_document = {} if policy_document_from_pillars: from_pillars = __salt__['pillar.get'](policy_document_from_pillars) if from_pillars: _policy_document['Version'] = from_pillars['Version'] _policy_document.setdefault('Statement', []) _policy_document['Statement'].extend(from_pillars['Statement']) if policy_document: _policy_document['Version'] = policy_document['Version'] _policy_document.setdefault('Statement', []) _policy_document['Statement'].extend(policy_document['Statement']) _ret = _role_present(name, _policy_document, path, region, key, keyid, profile) # Build up _policies if not policies: policies = {} if not policies_from_pillars: policies_from_pillars = [] if not managed_policies: managed_policies = [] _policies = {} for policy in policies_from_pillars: _policy = __salt__['pillar.get'](policy) _policies.update(_policy) _policies.update(policies) ret['changes'] = _ret['changes'] ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret if create_instance_profile: _ret = _instance_profile_present(name, region, key, keyid, profile) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret _ret = _instance_profile_associated(name, region, key, keyid, profile) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret _ret = _policies_present(name, _policies, region, key, keyid, profile, delete_policies) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] _ret = _policies_attached(name, managed_policies, region, key, keyid, profile) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] return ret
0.001343
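Below is a minimal sketch (plain dicts only, no Salt or AWS involved) of how the two policy-document sources above combine: pillar statements are appended first, then the explicitly passed statements, and the last document seen supplies the Version.

# Sketch only: mirrors the statement-merge logic above with plain dicts.
def merge_policy_documents(from_pillars, explicit):
    merged = {}
    for doc in (from_pillars, explicit):
        if doc:
            merged['Version'] = doc['Version']        # last document wins
            merged.setdefault('Statement', []).extend(doc['Statement'])
    return merged

pillar_doc = {'Version': '2012-10-17',
              'Statement': [{'Effect': 'Allow',
                             'Principal': {'Service': 'ec2.amazonaws.com'},
                             'Action': 'sts:AssumeRole'}]}
print(merge_policy_documents(pillar_doc, None))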
def add_summary(self, summary, global_step=None): """Adds a `Summary` protocol buffer to the event file. This method wraps the provided summary in an `Event` protocol buffer and adds it to the event file. Parameters ---------- summary : A `Summary` protocol buffer Optionally serialized as a string. global_step: Number Optional global step value to record with the summary. """ if isinstance(summary, bytes): summ = summary_pb2.Summary() summ.ParseFromString(summary) summary = summ # We strip metadata from values with tags that we have seen before in order # to save space - we just store the metadata on the first value with a # specific tag. for value in summary.value: if not value.metadata: continue if value.tag in self._seen_summary_tags: # This tag has been encountered before. Strip the metadata. value.ClearField("metadata") continue # We encounter a value with a tag we have not encountered previously. And # it has metadata. Remember to strip metadata from future values with this # tag string. self._seen_summary_tags.add(value.tag) event = event_pb2.Event(summary=summary) self._add_event(event, global_step)
0.004129
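A reduced sketch of the space-saving rule above — keep metadata only on the first value seen for each tag — using plain dicts as stand-ins for the protobuf types; nothing here is the real summary/event API.

# Plain-dict stand-ins for protobuf values; not the real summary API.
seen_summary_tags = set()

def strip_repeated_metadata(values):
    for value in values:
        if not value.get('metadata'):
            continue
        if value['tag'] in seen_summary_tags:
            value.pop('metadata')        # tag seen before: strip to save space
        else:
            seen_summary_tags.add(value['tag'])

batch = [{'tag': 'loss', 'metadata': 'plugin-info'},
         {'tag': 'loss', 'metadata': 'plugin-info'}]
strip_repeated_metadata(batch)
print(batch)  # only the first 'loss' value keeps its metadata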
def CheckForQuestionPending(task):
    """
    Check whether the VM needs to answer a question; raise an exception if
    user intervention is required.
    """
    vm = task.info.entity
    if vm is not None and isinstance(vm, vim.VirtualMachine):
        qst = vm.runtime.question
        if qst is not None:
            raise TaskBlocked("Task blocked, User Intervention required")
0.00295
def instanceStarted(self, *args, **kwargs):
    """
    Report an instance starting

    An instance will report in by giving its instance id as well as its
    security token.  The token is checked to ensure that it matches a real,
    existing token, so that random machines cannot check in.  We could
    generate a different token, but that seems like overkill.

    This method is ``stable``
    """
    return self._makeApiCall(self.funcinfo["instanceStarted"], *args, **kwargs)
0.005505
def load_entry_point_group(self, entry_point_group): """Load administration interface from entry point group. :param str entry_point_group: Name of the entry point group. """ for ep in pkg_resources.iter_entry_points(group=entry_point_group): admin_ep = dict(ep.load()) keys = tuple( k in admin_ep for k in ('model', 'modelview', 'view_class')) if keys == (False, False, True): self.register_view( admin_ep.pop('view_class'), *admin_ep.pop('args', []), **admin_ep.pop('kwargs', {}) ) elif keys == (True, True, False): warnings.warn( 'Usage of model and modelview kwargs are deprecated in ' 'favor of view_class, args and kwargs.', PendingDeprecationWarning ) self.register_view( admin_ep.pop('modelview'), admin_ep.pop('model'), admin_ep.pop('session', db.session), **admin_ep ) else: raise Exception( 'Admin entry point dictionary must contain ' 'either "view_class" OR "model" and "modelview" keys.')
0.001466
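For illustration, an entry point target that would satisfy the 'view_class' branch above might look like the following; every name below is invented, not part of any real package.

# Hypothetical entry point target for the loader above; all names invented.
# In setup.py it might be registered as:
#     'my_view = mypackage.admin:my_admin_view'
class MyModelView:  # stand-in for a Flask-Admin view subclass
    pass

my_admin_view = dict(
    view_class=MyModelView,
    args=[],
    kwargs=dict(name='My records', category='My category'),
)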
def save_resource(self, name, resource, pushable=False): 'Saves an object such that it can be used by other tests.' if pushable: self.pushable_resources[name] = resource else: self.resources[name] = resource
0.007813
def get_transport(name): ''' Return the transport class. ''' try: log.debug('Using %s as transport', name) return TRANSPORT_LOOKUP[name] except KeyError: msg = 'Transport {} is not available. Are the dependencies installed?'.format(name) log.error(msg, exc_info=True) raise InvalidTransportException(msg)
0.005495
def _cleanup(self): """Cleanup the stored sessions""" current_time = time.time() timeout = self._config.timeout if current_time - self._last_cleanup_time > timeout: self.store.cleanup(timeout) self._last_cleanup_time = current_time
0.006969
def validate_transaction_schema(tx): """Validate a transaction dict. TX_SCHEMA_COMMON contains properties that are common to all types of transaction. TX_SCHEMA_[TRANSFER|CREATE] add additional constraints on top. """ _validate_schema(TX_SCHEMA_COMMON, tx) if tx['operation'] == 'TRANSFER': _validate_schema(TX_SCHEMA_TRANSFER, tx) else: _validate_schema(TX_SCHEMA_CREATE, tx)
0.002375
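A hedged sketch of the same layered validation using jsonschema directly; the toy schemas below are stand-ins, not BigchainDB's real TX_SCHEMA_* definitions.

import jsonschema

# Toy stand-ins for the real schema constants.
TX_SCHEMA_COMMON = {'type': 'object', 'required': ['operation'],
                    'properties': {'operation': {'enum': ['CREATE', 'TRANSFER']}}}
TX_SCHEMA_TRANSFER = {'type': 'object', 'required': ['inputs']}
TX_SCHEMA_CREATE = {'type': 'object', 'required': ['asset']}

def validate_tx(tx):
    jsonschema.validate(tx, TX_SCHEMA_COMMON)   # constraints common to all txs
    extra = TX_SCHEMA_TRANSFER if tx['operation'] == 'TRANSFER' else TX_SCHEMA_CREATE
    jsonschema.validate(tx, extra)              # operation-specific constraints

validate_tx({'operation': 'CREATE', 'asset': {}})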
def get_institution(self, **kwargs): """Get the dissertation institution.""" qualifier = kwargs.get('qualifier', '') content = kwargs.get('content', '') if qualifier == 'grantor': return content return None
0.007752
def MD_ConfigsPermutate(df_md):
    """Given an MD DataFrame, return an Nx4 array which permutes the
    current injection dipoles.
    """
    g_current_injections = df_md.groupby(['a', 'b'])
    ab = np.array(list(g_current_injections.groups.keys()))
    config_mgr = ConfigManager(nr_of_electrodes=ab.max())
    config_mgr.gen_configs_permutate(ab, silent=True)
    return config_mgr.configs
0.002551
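The core extraction step in isolation: grouping measurement rows by the current-injection pair (a, b) and collecting the unique pairs, shown with plain pandas/numpy on toy data.

import numpy as np
import pandas as pd

# Toy measurement frame; only the grouping step of MD_ConfigsPermutate shown.
df_md = pd.DataFrame({'a': [1, 1, 2], 'b': [2, 2, 3], 'r': [0.5, 0.6, 0.7]})
g_current_injections = df_md.groupby(['a', 'b'])
ab = np.array(list(g_current_injections.groups.keys()))
print(ab)  # [[1 2], [2 3]] -- the unique current-injection dipoles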
def show_message(self):
    """Show the current and latest version numbers so the user can see
    whether an update is available."""
    print(
        'current version: {current_version}\n'
        'latest version : {latest_version}'.format(
            current_version=self.current_version,
            latest_version=self.latest_version))
0.006897
def update_counts(self): "Update counts of fields and wells." # Properties.attrib['TotalCountOfFields'] fields = str(len(self.fields)) self.properties.attrib['TotalCountOfFields'] = fields # Properties.CountOfWellsX/Y wx, wy = (str(x) for x in self.count_of_wells) self.properties.CountOfWellsX = wx self.properties.CountOfWellsY = wy # Properties.attrib['TotalCountOfWells'] wells = str(len(self.wells)) self.properties.attrib['TotalCountOfWells'] = wells # Properties.attrib['TotalAssignedJobs'] self.properties.attrib['TotalAssignedJobs'] = str(self.count_of_assigned_jobs)
0.004386
def delete_consumer(self, consumer=None): """ Remove a specific consumer from a consumer group. :consumer: name of consumer to delete. If not provided, will be the default consumer for this stream. :returns: number of pending messages that the consumer had before being deleted. """ if consumer is None: consumer = self._consumer return self.database.xgroup_delconsumer(self.key, self.group, consumer)
0.006224
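For reference, the underlying Redis command as exposed by redis-py; the stream, group, and consumer names below are made up.

import redis  # assumes redis-py >= 3.0 and a reachable server

r = redis.Redis()
# Remove consumer 'worker-1' from group 'workers' on stream 'events'.
# Like the method above, this returns the consumer's pending-message count.
pending = r.xgroup_delconsumer('events', 'workers', 'worker-1')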
def _get_localzone(_root='/'):
    """Tries to find the local timezone configuration.
    This method prefers finding the timezone name and passing that to pytz,
    over passing in the localtime file, as in the latter case the zoneinfo
    name is unknown.
    The parameter _root makes the function look for files like /etc/localtime
    beneath the _root directory. This is primarily used by the tests.
    In normal usage you call the function without parameters."""

    tzenv = os.environ.get('TZ')
    if tzenv:
        try:
            return _tz_from_env(tzenv)
        except pytz.UnknownTimeZoneError:
            pass

    # Now look for distribution specific configuration files
    # that contain the timezone name.
    for configfile in ('etc/timezone', 'var/db/zoneinfo'):
        tzpath = os.path.join(_root, configfile)
        if os.path.exists(tzpath):
            with open(tzpath, 'rb') as tzfile:
                data = tzfile.read()

                # Issue #3 was that /etc/timezone was a zoneinfo file.
                # That's a misconfiguration, but we need to handle it gracefully.
                # The file is read as bytes, so compare against a bytes literal:
                if data[:5] != b'TZif2':
                    etctz = data.strip().decode()
                    # Get rid of host definitions and comments:
                    if ' ' in etctz:
                        etctz, dummy = etctz.split(' ', 1)
                    if '#' in etctz:
                        etctz, dummy = etctz.split('#', 1)
                    return pytz.timezone(etctz.replace(' ', '_'))

    # CentOS has a ZONE setting in /etc/sysconfig/clock,
    # OpenSUSE has a TIMEZONE setting in /etc/sysconfig/clock and
    # Gentoo has a TIMEZONE setting in /etc/conf.d/clock
    # We look through these files for a timezone:
    zone_re = re.compile(r'\s*ZONE\s*=\s*\"')
    timezone_re = re.compile(r'\s*TIMEZONE\s*=\s*\"')
    end_re = re.compile('\"')

    for filename in ('etc/sysconfig/clock', 'etc/conf.d/clock'):
        tzpath = os.path.join(_root, filename)
        if not os.path.exists(tzpath):
            continue
        with open(tzpath, 'rt') as tzfile:
            data = tzfile.readlines()

        for line in data:
            # Look for the ZONE= setting.
            match = zone_re.match(line)
            if match is None:
                # No ZONE= setting. Look for the TIMEZONE= setting.
                match = timezone_re.match(line)
            if match is not None:
                # Some setting existed
                line = line[match.end():]
                etctz = line[:end_re.search(line).start()]

                # We found a timezone
                return pytz.timezone(etctz.replace(' ', '_'))

    # systemd distributions use symlinks that include the zone name,
    # see manpage of localtime(5) and timedatectl(1)
    tzpath = os.path.join(_root, 'etc/localtime')
    if os.path.exists(tzpath) and os.path.islink(tzpath):
        tzpath = os.path.realpath(tzpath)
        start = tzpath.find("/") + 1
        while start != 0:
            tzpath = tzpath[start:]
            try:
                return pytz.timezone(tzpath)
            except pytz.UnknownTimeZoneError:
                pass
            start = tzpath.find("/") + 1

    # No explicit setting existed. Use localtime
    for filename in ('etc/localtime', 'usr/local/etc/localtime'):
        tzpath = os.path.join(_root, filename)
        if not os.path.exists(tzpath):
            continue
        with open(tzpath, 'rb') as tzfile:
            return pytz.tzfile.build_tzinfo('local', tzfile)

    raise pytz.UnknownTimeZoneError('Can not find any timezone configuration')
0.002212
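Typical use of the function above — note that the returned pytz zone should be attached with localize(), not passed to the datetime constructor.

from datetime import datetime

tz = _get_localzone()                            # e.g. <DstTzInfo 'Europe/Berlin' ...>
now = tz.localize(datetime(2019, 7, 1, 12, 0))   # pytz zones need localize()
print(now.isoformat())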
def do_save(self, line):
    """save [config_file]
    Save session variables to file
    save (without parameters): Save session to default file ~/.dataone_cli.conf
    save <file>: Save session to specified file.
    """
    config_file = self._split_args(line, 0, 1)[0]
    self._command_processor.get_session().save(config_file)
    if config_file is None:
        config_file = (
            self._command_processor.get_session().get_default_pickle_file_path()
        )
    self._print_info_if_verbose("Saved session to file: {}".format(config_file))
0.008347
def start(): ''' Start the saltnado! ''' mod_opts = __opts__.get(__virtualname__, {}) if 'num_processes' not in mod_opts: mod_opts['num_processes'] = 1 if mod_opts['num_processes'] > 1 and mod_opts.get('debug', False) is True: raise Exception(( 'Tornado\'s debug implementation is not compatible with multiprocess. ' 'Either disable debug, or set num_processes to 1.' )) # the kwargs for the HTTPServer kwargs = {} if not mod_opts.get('disable_ssl', False): if 'ssl_crt' not in mod_opts: log.error("Not starting '%s'. Options 'ssl_crt' and " "'ssl_key' are required if SSL is not disabled.", __name__) return None # cert is required, key may be optional # https://docs.python.org/2/library/ssl.html#ssl.wrap_socket ssl_opts = {'certfile': mod_opts['ssl_crt']} if mod_opts.get('ssl_key', False): ssl_opts.update({'keyfile': mod_opts['ssl_key']}) kwargs['ssl_options'] = ssl_opts import tornado.httpserver http_server = tornado.httpserver.HTTPServer(get_application(__opts__), **kwargs) try: http_server.bind(mod_opts['port'], address=mod_opts.get('address'), backlog=mod_opts.get('backlog', 128), ) http_server.start(mod_opts['num_processes']) except Exception: log.error('Rest_tornado unable to bind to port %s', mod_opts['port'], exc_info=True) raise SystemExit(1) try: tornado.ioloop.IOLoop.current().start() except KeyboardInterrupt: raise SystemExit(0)
0.003494
def clean_locale(configuration, locale): """ Strips out the warning from all of a locale's translated po files about being an English source file. Iterates over machine-generated files. """ dirname = configuration.get_messages_dir(locale) if not dirname.exists(): # Happens when we have a supported locale that doesn't exist in Transifex return for filename in dirname.files('*.po'): clean_file(configuration, dirname.joinpath(filename))
0.004057
def fvga(a, i, g, n):
    """
    This function is for the future value of an annuity with growth rate.
    It is the future value of a growing stream of periodic investments.

    a = Periodic Investment (1000)
    i = interest rate as decimal (.0675)
    g = the growth rate (.05)
    n = the number of compound periods (20)

    Example: fvga(1000, .0675, .05, 20)
    """
    return a * (((1 + i) ** n - (1 + g) ** n) / (i - g))
0.004587
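Working the docstring example through: 20 annual deposits starting at 1000 and growing 5% per year, earning 6.75% per period (the printed value is approximate).

fv = fvga(1000, .0675, .05, 20)
# = 1000 * (1.0675**20 - 1.05**20) / (0.0675 - 0.05)
print(round(fv))  # roughly 59400 accumulated at the end of period 20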
def get_fun(fun): ''' Return a dict of the last function called for all minions ''' serv = _get_serv(ret=None) sql = '''select first(id) as fid, first(full_ret) as fret from returns where fun = '{0}' group by fun, id '''.format(fun) data = serv.query(sql) ret = {} if data: points = data[0]['points'] for point in points: ret[point[1]] = salt.utils.json.loads(point[2]) return ret
0.002028
def contrib_phone(contrib_tag):
    """
    Given a contrib tag, look for a phone tag
    """
    phone = None
    if raw_parser.phone(contrib_tag):
        phone = first(raw_parser.phone(contrib_tag)).text
    return phone
0.004464
def UNIFAC_Dortmund_groups(self): r'''Dictionary of Dortmund UNIFAC subgroup: count groups for the Dortmund UNIFAC subgroups, as determined by `DDBST's online service <http://www.ddbst.com/unifacga.html>`_. Examples -------- >>> pprint(Chemical('Cumene').UNIFAC_Dortmund_groups) {1: 2, 9: 5, 13: 1} ''' if self.__UNIFAC_Dortmund_groups: return self.__UNIFAC_Dortmund_groups else: load_group_assignments_DDBST() if self.InChI_Key in DDBST_MODIFIED_UNIFAC_assignments: self.__UNIFAC_Dortmund_groups = DDBST_MODIFIED_UNIFAC_assignments[self.InChI_Key] return self.__UNIFAC_Dortmund_groups else: return None
0.005181
def _set_default_vertex_attributes(self) -> None: """Assign default values on attributes to all vertices.""" self.graph.vs["l2fc"] = 0 self.graph.vs["padj"] = 0.5 self.graph.vs["symbol"] = self.graph.vs["name"] self.graph.vs["diff_expressed"] = False self.graph.vs["up_regulated"] = False self.graph.vs["down_regulated"] = False
0.005208
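For context, python-igraph broadcasts a scalar assigned to a vertex-sequence attribute across all vertices, which is what the helper above relies on; a toy graph shows the effect.

import igraph

g = igraph.Graph.Ring(4)
g.vs["name"] = ["a", "b", "c", "d"]
g.vs["l2fc"] = 0                # scalar broadcasts to every vertex
g.vs["symbol"] = g.vs["name"]   # same trick the helper uses for symbols
print(g.vs["l2fc"])             # [0, 0, 0, 0]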
def add_port_callback(self, port, cb): """Add a callback for data that comes on a specific port""" logger.debug('Adding callback on port [%d] to [%s]', port, cb) self.add_header_callback(cb, port, 0, 0xff, 0x0)
0.008547
def get_config_value(self, section_name, option, default_option="default"): """ Read a value from the configuration, with a default. Args: section_name (str): name of the section in the configuration from which the option should be found. option (str): name of the configuration option. default_option (str): name of the default configuration option whose value should be returned if the requested option is not found. Returns: str: the value from the ini file. """ if self.config is None: self.config = configparser.ConfigParser() self.config.read(self.ini_file_name) if option: try: return self.config.get(section_name, option) except configparser.NoOptionError: log.debug( "Didn't find a configuration option for '%s' section and '%s' option", section_name, option, ) return self.config.get(section_name, default_option)
0.004513
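A self-contained illustration of the option-with-default lookup above; the section name, option names, and file contents are invented.

import configparser

ini = configparser.ConfigParser()
ini.read_string("[github]\ndefault = token-from-default\n")

def lookup(section, option, default_option="default"):
    try:
        return ini.get(section, option)
    except configparser.NoOptionError:
        return ini.get(section, default_option)  # fall back, as above

print(lookup("github", "missing-option"))  # -> token-from-default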
def _hash(self, obj, parent, parents_ids=EMPTY_FROZENSET):
    """The main hash method"""
    try:
        result = self[obj]
    except (TypeError, KeyError):
        pass
    else:
        return result

    result = not_hashed

    if self._skip_this(obj, parent):
        return

    elif obj is None:
        result = 'NONE'

    elif isinstance(obj, strings):
        result = prepare_string_for_hashing(
            obj, ignore_string_type_changes=self.ignore_string_type_changes,
            ignore_string_case=self.ignore_string_case)

    elif isinstance(obj, numbers):
        result = self._prep_number(obj)

    elif isinstance(obj, MutableMapping):
        result = self._prep_dict(obj=obj, parent=parent, parents_ids=parents_ids)

    elif isinstance(obj, tuple):
        result = self._prep_tuple(obj=obj, parent=parent, parents_ids=parents_ids)

    elif isinstance(obj, Iterable):
        result = self._prep_iterable(obj=obj, parent=parent, parents_ids=parents_ids)

    else:
        result = self._prep_obj(obj=obj, parent=parent, parents_ids=parents_ids)

    if result is not_hashed:  # pragma: no cover
        self[UNPROCESSED].append(obj)

    elif result is unprocessed:
        pass

    elif self.apply_hash:
        if isinstance(obj, strings):
            result_cleaned = result
        else:
            result_cleaned = prepare_string_for_hashing(
                result, ignore_string_type_changes=self.ignore_string_type_changes,
                ignore_string_case=self.ignore_string_case)
        result = self.hasher(result_cleaned)

    # It is important to keep the hash of all objects.
    # The hashes will be later used for comparing the objects.
    try:
        self[obj] = result
    except TypeError:
        obj_id = get_id(obj)
        self[obj_id] = result

    return result
0.00398
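The dispatch skeleton above, reduced to its shape: one branch per type family plus a memo so repeated objects hash only once. This is a simplification for illustration, not DeepHash's real implementation.

from collections.abc import Iterable, MutableMapping

memo = {}

def simple_deep_hash(obj):
    key = id(obj)
    if key in memo:                    # already hashed: reuse
        return memo[key]
    if isinstance(obj, str):           # strings before the Iterable branch
        result = hash(obj)
    elif isinstance(obj, MutableMapping):
        result = hash(frozenset(simple_deep_hash(k) ^ simple_deep_hash(v)
                                for k, v in obj.items()))
    elif isinstance(obj, Iterable):
        result = hash(tuple(simple_deep_hash(x) for x in obj))
    else:
        result = hash(obj)
    memo[key] = result                 # keep hashes for later comparisons
    return result

print(simple_deep_hash({'a': [1, 2]}))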
def rowsBeforeItem(self, item, count):
    """
    The inverse of rowsAfterItem.

    @param item: the L{Item} to request rows before.
    @type item: this L{InequalityModel}'s L{itemType} attribute.

    @param count: The maximum number of rows to return.
    @type count: L{int}

    @return: A list of row data, ordered by the current sort column,
    ending immediately before C{item}.
    """
    currentSortAttribute = self.currentSortColumn.sortAttribute()
    value = currentSortAttribute.__get__(item, type(item))
    firstQuery = self.inequalityQuery(
        AND(currentSortAttribute == value,
            self.itemType.storeID < item.storeID),
        count, False)
    results = self.constructRows(firstQuery)
    count -= len(results)
    if count:
        secondQuery = self.inequalityQuery(currentSortAttribute < value,
                                           count, False)
        results.extend(self.constructRows(secondQuery))
    return results[::-1]
0.00188
def _find_image_id(self, image_id):
    """Finds an image id for a given image id or name.

    :param str image_id: name or id of image
    :return: str - identifier of image
    """
    if not self._images:
        connection = self._connect()
        self._images = connection.get_all_images()

    image_id_cloud = None
    for i in self._images:
        if i.id == image_id or i.name == image_id:
            image_id_cloud = i.id
            break

    if image_id_cloud:
        return image_id_cloud
    else:
        raise ImageError(
            "Could not find given image id `%s`" % image_id)
0.003008