Columns: text (string, lengths 78 to 104k; a Python function) and score (float64, 0 to 0.18). Each code sample below is followed by its score on the next line.
def profile_tilt(data, mask):
    """Fit a 2D tilt to `data[mask]`"""
    params = lmfit.Parameters()
    params.add(name="mx", value=0)
    params.add(name="my", value=0)
    params.add(name="off", value=np.average(data[mask]))
    fr = lmfit.minimize(tilt_residual, params, args=(data, mask))
    bg = tilt_model(fr.params, data.shape)
    return bg
0.002849
def get_all_activities(self, autoscale_group, activity_ids=None,
                       max_records=None, next_token=None):
    """
    Get all activities for the given autoscaling group.

    This action supports pagination by returning a token if there are more
    pages to retrieve. To get the next page, call this action again with the
    returned token as the NextToken parameter

    :type autoscale_group: str or
        :class:`boto.ec2.autoscale.group.AutoScalingGroup` object
    :param autoscale_group: The auto scaling group to get activities on.

    :type max_records: int
    :param max_records: Maximum amount of activities to return.

    :rtype: list
    :returns: List of
        :class:`boto.ec2.autoscale.activity.Activity` instances.
    """
    name = autoscale_group
    if isinstance(autoscale_group, AutoScalingGroup):
        name = autoscale_group.name
    params = {'AutoScalingGroupName': name}
    if max_records:
        params['MaxRecords'] = max_records
    if next_token:
        params['NextToken'] = next_token
    if activity_ids:
        self.build_list_params(params, activity_ids, 'ActivityIds')
    return self.get_list('DescribeScalingActivities', params,
                         [('member', Activity)])
0.002946
def _downloads_for_num_days(self, num_days):
    """
    Given a number of days of historical data to look at (starting with
    today and working backwards), return the total number of downloads for
    that time range, and the number of days of data we had (in cases where
    we had less data than requested).

    :param num_days: number of days of data to look at
    :type num_days: int
    :return: 2-tuple of (download total, number of days of data)
    :rtype: tuple
    """
    logger.debug("Getting download total for last %d days", num_days)
    dates = self.cache_dates
    logger.debug("Cache has %d days of data", len(dates))
    if len(dates) > num_days:
        dates = dates[(-1 * num_days):]
    logger.debug("Looking at last %d days of data", len(dates))
    dl_sum = 0
    for cache_date in dates:
        data = self._cache_get(cache_date)
        dl_sum += sum(data['by_version'].values())
    logger.debug("Sum of download counts: %d", dl_sum)
    return dl_sum, len(dates)
0.001843
def setup():
    """ Set up the database """
    try:
        db.bind(**config.database_config)
    except OSError:
        # Attempted to connect to a file-based database where the file didn't
        # exist
        db.bind(**config.database_config, create_db=True)

    rebuild = True
    try:
        db.generate_mapping(create_tables=True)
        with orm.db_session:
            version = GlobalConfig.get(key='schema_version')
            if version and version.int_value != SCHEMA_VERSION:
                logger.info("Existing database has schema version %d",
                            version.int_value)
            else:
                rebuild = False
    except:  # pylint:disable=bare-except
        logger.exception("Error mapping schema")

    if rebuild:
        logger.info("Rebuilding schema")
        try:
            db.drop_all_tables(with_all_data=True)
            db.create_tables()
        except:
            raise RuntimeError("Unable to upgrade schema automatically; please " +
                               "delete the existing database and try again.")

    with orm.db_session:
        if not GlobalConfig.get(key='schema_version'):
            logger.info("setting schema version to %d", SCHEMA_VERSION)
            GlobalConfig(key='schema_version', int_value=SCHEMA_VERSION)
            orm.commit()
0.002933
def get_postgres_encoding(python_encoding: str) -> str:
    """Python to postgres encoding map."""
    encoding = normalize_encoding(python_encoding.lower())
    encoding_ = aliases.aliases[encoding.replace('_', '', 1)].upper()
    pg_encoding = PG_ENCODING_MAP[encoding_.replace('_', '')]
    return pg_encoding
0.003185
def to_kaf(self):
    """
    Converts the coreference layer to KAF
    """
    if self.type == 'NAF':
        for node_coref in self.__get_corefs_nodes():
            node_coref.set('coid', node_coref.get('id'))
            del node_coref.attrib['id']
0.010753
def _begin_connection_action(self, action):
    """Begin a connection attempt

    Args:
        action (ConnectionAction): the action object describing what we are
            connecting to
    """
    conn_id = action.data['connection_id']
    int_id = action.data['internal_id']
    callback = action.data['callback']

    # Make sure we are not reusing an id that is currently connected to something
    if self._get_connection_state(conn_id) != self.Disconnected:
        print(self._connections[conn_id])
        callback(conn_id, self.id, False, 'Connection ID is already in use for another connection')
        return

    if self._get_connection_state(int_id) != self.Disconnected:
        callback(conn_id, self.id, False, 'Internal ID is already in use for another connection')
        return

    conn_data = {
        'state': self.Connecting,
        'microstate': None,
        'conn_id': conn_id,
        'int_id': int_id,
        'callback': callback,
        'timeout': action.timeout,
        'context': action.data['context']
    }

    self._connections[conn_id] = conn_data
    self._int_connections[int_id] = conn_data
0.004
def set_title(self, title=None):
    """Sets the title.

    :param title: the new title
    :type title: ``string``
    :raise: ``InvalidArgument`` -- ``title`` is invalid
    :raise: ``NoAccess`` -- ``Metadata.isReadOnly()`` is ``true``
    :raise: ``NullArgument`` -- ``title`` is ``null``

    *compliance: mandatory -- This method must be implemented.*
    """
    if title is None:
        raise NullArgument()
    metadata = Metadata(**settings.METADATA['title'])
    if metadata.is_read_only():
        raise NoAccess()
    if self._is_valid_input(title, metadata, array=False):
        self._my_map['title']['text'] = title
    else:
        raise InvalidArgument()
0.002692
def _change_secure_boot_settings(self, property, value):
    """Change secure boot settings on the server."""
    system = self._get_host_details()
    # find the BIOS URI
    if ('links' not in system['Oem']['Hp'] or
            'SecureBoot' not in system['Oem']['Hp']['links']):
        msg = (' "SecureBoot" resource or feature is not '
               'supported on this system')
        raise exception.IloCommandNotSupportedError(msg)

    secure_boot_uri = system['Oem']['Hp']['links']['SecureBoot']['href']

    # Change the property required
    new_secure_boot_settings = {}
    new_secure_boot_settings[property] = value

    # perform the patch
    status, headers, response = self._rest_patch(
        secure_boot_uri, None, new_secure_boot_settings)

    if status >= 300:
        msg = self._get_extended_error(response)
        raise exception.IloError(msg)

    # Change the bios setting as a workaround to enable secure boot
    # Can be removed when fixed for Gen9 snap2
    val = self._get_bios_setting('CustomPostMessage')
    val = val.rstrip() if val.endswith(" ") else val + " "
    self._change_bios_setting({'CustomPostMessage': val})
0.001608
def _build_generator_list(network):
    """Builds DataFrames with all generators in MV and LV grids

    Returns
    -------
    :pandas:`pandas.DataFrame<dataframe>`
        A DataFrame with id of and reference to MV generators
    :pandas:`pandas.DataFrame<dataframe>`
        A DataFrame with id of and reference to LV generators
    :pandas:`pandas.DataFrame<dataframe>`
        A DataFrame with id of and reference to aggregated LV generators
    """
    genos_mv = pd.DataFrame(columns=('id', 'obj'))
    genos_lv = pd.DataFrame(columns=('id', 'obj'))
    genos_lv_agg = pd.DataFrame(columns=('la_id', 'id', 'obj'))

    # MV genos
    for geno in network.mv_grid.graph.nodes_by_attribute('generator'):
        genos_mv.loc[len(genos_mv)] = [int(geno.id), geno]
    for geno in network.mv_grid.graph.nodes_by_attribute('generator_aggr'):
        la_id = int(geno.id.split('-')[1].split('_')[-1])
        genos_lv_agg.loc[len(genos_lv_agg)] = [la_id, geno.id, geno]

    # LV genos
    for lv_grid in network.mv_grid.lv_grids:
        for geno in lv_grid.generators:
            genos_lv.loc[len(genos_lv)] = [int(geno.id), geno]

    return genos_mv, genos_lv, genos_lv_agg
0.004633
def _open(file_, mode='r'):
    """Open file object given filenames, open files or even archives."""
    if isinstance(file_, string_types):
        _, ext = path.splitext(file_)
        if ext in {'.bz2', '.gz'}:
            s = tarfile.open(file_)
            return s.extractfile(s.next())
        else:
            return open(file_, mode)
    return file_
0.021605
def seek(self, offset, whence=os.SEEK_SET):
    """Set the file's current offset.

    Note if the new offset is out of bound, it is adjusted to either 0 or EOF.

    Args:
      offset: seek offset as number.
      whence: seek mode. Supported modes are os.SEEK_SET (absolute seek),
        os.SEEK_CUR (seek relative to the current position), and os.SEEK_END
        (seek relative to the end, offset should be negative).

    Raises:
      IOError: When this buffer is closed.
      ValueError: When whence is invalid.
    """
    self._check_open()

    self._buffer.reset()
    self._buffer_future = None

    if whence == os.SEEK_SET:
        self._offset = offset
    elif whence == os.SEEK_CUR:
        self._offset += offset
    elif whence == os.SEEK_END:
        self._offset = self._file_size + offset
    else:
        raise ValueError('Whence mode %s is invalid.' % str(whence))

    self._offset = min(self._offset, self._file_size)
    self._offset = max(self._offset, 0)
    if self._remaining():
        self._request_next_buffer()
0.005775
def preprX(*attributes, address=True, full_name=False, pretty=False, keyless=False, **kwargs): """ `Creates prettier object representations` @*attributes: (#str) instance attributes within the object you wish to display. Attributes can be recursive e.g. |one.two.three| for access to |self.one.two.three| @address: (#bool) |True| to include the memory address @full_name: (#bool) |True| to include the full path to the object vs. the qualified name @pretty: (#bool) |True| to allow bolding and coloring @keyless: (#bool) |True| to display the values of @attributes withotu their attribute names .. class Foo(object): def __init__(self, bar, baz=None): self.bar = bar self.baz = baz __repr__ = prepr('bar', 'baz', address=False) foo = Foo('foobar') repr(foo) .. |<Foo:bar=`foobar`, baz=None>| """ def _format(obj, attribute): try: if keyless: val = getattr_in(obj, attribute) if val is not None: return repr(val) else: return '%s=%s' % (attribute, repr(getattr_in(obj, attribute))) except AttributeError: return None def prep(obj, address=address, full_name=full_name, pretty=pretty, keyless=keyless, **kwargs): if address: address = ":%s" % hex(id(obj)) else: address = "" data = list(filter(lambda x: x is not None, map(lambda a: _format(obj, a), attributes))) if data: data = ':%s' % ', '.join(data) else: data = '' return stdout_encode("<%s%s%s>" % (get_obj_name(obj), data, address)) return prep
0.000514
def write_data(self, chunksize, dropna=False): """ we form the data into a 2-d including indexes,values,mask write chunk-by-chunk """ names = self.dtype.names nrows = self.nrows_expected # if dropna==True, then drop ALL nan rows masks = [] if dropna: for a in self.values_axes: # figure the mask: only do if we can successfully process this # column, otherwise ignore the mask mask = isna(a.data).all(axis=0) if isinstance(mask, np.ndarray): masks.append(mask.astype('u1', copy=False)) # consolidate masks if len(masks): mask = masks[0] for m in masks[1:]: mask = mask & m mask = mask.ravel() else: mask = None # broadcast the indexes if needed indexes = [a.cvalues for a in self.index_axes] nindexes = len(indexes) bindexes = [] for i, idx in enumerate(indexes): # broadcast to all other indexes except myself if i > 0 and i < nindexes: repeater = np.prod( [indexes[bi].shape[0] for bi in range(0, i)]) idx = np.tile(idx, repeater) if i < nindexes - 1: repeater = np.prod([indexes[bi].shape[0] for bi in range(i + 1, nindexes)]) idx = np.repeat(idx, repeater) bindexes.append(idx) # transpose the values so first dimension is last # reshape the values if needed values = [a.take_data() for a in self.values_axes] values = [v.transpose(np.roll(np.arange(v.ndim), v.ndim - 1)) for v in values] bvalues = [] for i, v in enumerate(values): new_shape = (nrows,) + self.dtype[names[nindexes + i]].shape bvalues.append(values[i].reshape(new_shape)) # write the chunks if chunksize is None: chunksize = 100000 rows = np.empty(min(chunksize, nrows), dtype=self.dtype) chunks = int(nrows / chunksize) + 1 for i in range(chunks): start_i = i * chunksize end_i = min((i + 1) * chunksize, nrows) if start_i >= end_i: break self.write_data_chunk( rows, indexes=[a[start_i:end_i] for a in bindexes], mask=mask[start_i:end_i] if mask is not None else None, values=[v[start_i:end_i] for v in bvalues])
0.000763
def calibrate_band_pass_N1(self):
    """ One way to calibrate the band pass is to take the median value
        for every frequency fine channel, and divide by it.
    """
    band_pass = np.median(self.data.squeeze(), axis=0)
    self.data = self.data / band_pass
0.010601
def assert_headers(context):
    """
    :type context: behave.runner.Context
    """
    expected_headers = [(k, v) for k, v in row_table(context).items()]
    request = httpretty.last_request()
    actual_headers = request.headers.items()
    for expected_header in expected_headers:
        assert_in(expected_header, actual_headers)
0.002941
def template_exists_db(self, template):
    """
    Receives a template and checks if it exists in the database
    using the template name and language
    """
    name = utils.camel_to_snake(template[0]).upper()
    language = utils.camel_to_snake(template[3])
    try:
        models.EmailTemplate.objects.get(name=name, language=language)
    except models.EmailTemplate.DoesNotExist:
        return False
    return True
0.004264
def version(serial=None):
    """
    Returns version information for MicroPython running on the connected
    device.

    If such information is not available or the device is not running
    MicroPython, raise a ValueError.

    If any other exception is thrown, the device was running MicroPython but
    there was a problem parsing the output.
    """
    try:
        out, err = execute([
            'import os',
            'print(os.uname())',
        ], serial)
        if err:
            raise ValueError(clean_error(err))
    except ValueError:
        # Re-raise any errors from stderr raised in the try block.
        raise
    except Exception:
        # Raise a value error to indicate unable to find something on the
        # microbit that will return parseable information about the version.
        # It doesn't matter what the error is, we just need to indicate a
        # failure with the expected ValueError exception.
        raise ValueError()
    raw = out.decode('utf-8').strip()
    raw = raw[1:-1]
    items = raw.split(', ')
    result = {}
    for item in items:
        key, value = item.split('=')
        result[key] = value[1:-1]
    return result
0.000845
def read_midc_raw_data_from_nrel(site, start, end): """Request and read MIDC data directly from the raw data api. Parameters ---------- site: string The MIDC station id. start: datetime Start date for requested data. end: datetime End date for requested data. Returns ------- data: Dataframe with DatetimeIndex localized to the station location. Notes ----- Requests spanning an instrumentation change will yield an error. See the MIDC raw data api page `here <https://midcdmz.nrel.gov/apps/data_api_doc.pl?_idtextlist>`_ for more details and considerations. """ args = {'site': site, 'begin': start.strftime('%Y%m%d'), 'end': end.strftime('%Y%m%d')} endpoint = 'https://midcdmz.nrel.gov/apps/data_api.pl?' url = endpoint + '&'.join(['{}={}'.format(k, v) for k, v in args.items()]) return read_midc(url, raw_data=True)
0.001045
async def receive_json(self, content: typing.Dict, **kwargs):
    """
    Called with decoded JSON content.
    """
    # TODO assert format, if does not match return message.
    request_id = content.pop('request_id')
    action = content.pop('action')
    await self.handle_action(action, request_id=request_id, **content)
0.005682
def getAllSystemVariables(self, remote): """Get all system variables from CCU / Homegear""" variables = {} if self.remotes[remote]['username'] and self.remotes[remote]['password']: LOG.debug( "ServerThread.getAllSystemVariables: Getting all System variables via JSON-RPC") session = self.jsonRpcLogin(remote) if not session: return try: params = {"_session_id_": session} response = self._rpcfunctions.jsonRpcPost( self.remotes[remote]['ip'], self.remotes[remote].get('jsonport', DEFAULT_JSONPORT), "SysVar.getAll", params) if response['error'] is None and response['result']: for var in response['result']: key, value = self.parseCCUSysVar(var) variables[key] = value self.jsonRpcLogout(remote, session) except Exception as err: self.jsonRpcLogout(remote, session) LOG.warning( "ServerThread.getAllSystemVariables: Exception: %s" % str(err)) else: try: variables = self.proxies[ "%s-%s" % (self._interface_id, remote)].getAllSystemVariables() except Exception as err: LOG.debug( "ServerThread.getAllSystemVariables: Exception: %s" % str(err)) return variables
0.005358
def fromFile(filename): """ Parses the inputted xml file information and generates a builder for it. :param filename | <str> :return <Builder> || None """ xdata = None ydata = None # try parsing an XML file try: xdata = ElementTree.parse(filename).getroot() except StandardError: xdata = None if xdata is None: # try parsing a yaml file if yaml: with open(filename, 'r') as f: text = f.read() try: ydata = yaml.load(text) except StandardError: return None else: log.warning('Could not process yaml builder!') # load a yaml definition if type(ydata) == dict: typ = ydata.get('type') module = ydata.get('module') builder = Builder.plugin(typ, module) if builder: return builder.fromYaml(ydata, os.path.dirname(filename)) else: log.warning('Could not find builder: {0}'.format(typ)) # load an xml definition elif xdata is not None: typ = xdata.get('type') module = xdata.get('module') builder = Builder.plugin(typ, module) if builder: return builder.fromXml(xdata, os.path.dirname(filename)) else: log.warning('Could not find builder: {0}'.format(typ)) return None
0.002503
def format_auto_patching_settings(result):
    '''
    Formats the AutoPatchingSettings object removing arguments that are empty
    '''
    from collections import OrderedDict
    # Only display parameters that have content
    order_dict = OrderedDict()
    if result.enable is not None:
        order_dict['enable'] = result.enable
    if result.day_of_week is not None:
        order_dict['dayOfWeek'] = result.day_of_week
    if result.maintenance_window_starting_hour is not None:
        order_dict['maintenanceWindowStartingHour'] = result.maintenance_window_starting_hour
    if result.maintenance_window_duration is not None:
        order_dict['maintenanceWindowDuration'] = result.maintenance_window_duration

    return order_dict
0.004038
def process_view(self, request, view_func, view_args, view_kwargs): """ Capture details about the view_func that is about to execute """ try: if ignore_path(request.path): TrackedRequest.instance().tag("ignore_transaction", True) view_name = request.resolver_match._func_path span = TrackedRequest.instance().current_span() if span is not None: span.operation = "Controller/" + view_name Context.add("path", request.path) Context.add("user_ip", RemoteIp.lookup_from_headers(request.META)) if getattr(request, "user", None) is not None: Context.add("username", request.user.get_username()) except Exception: pass
0.003695
def member_present(ip, port, balancer_id, profile, **libcloud_kwargs): ''' Ensure a load balancer member is present :param ip: IP address for the new member :type ip: ``str`` :param port: Port for the new member :type port: ``int`` :param balancer_id: id of a load balancer you want to attach the member to :type balancer_id: ``str`` :param profile: The profile key :type profile: ``str`` ''' existing_members = __salt__['libcloud_loadbalancer.list_balancer_members'](balancer_id, profile) for member in existing_members: if member['ip'] == ip and member['port'] == port: return state_result(True, "Member already present", balancer_id) member = __salt__['libcloud_loadbalancer.balancer_attach_member'](balancer_id, ip, port, profile, **libcloud_kwargs) return state_result(True, "Member added to balancer, id: {0}".format(member['id']), balancer_id, member)
0.004228
def _create_chrome_options(self): """Create and configure a chrome options object :returns: chrome options object """ # Create Chrome options options = webdriver.ChromeOptions() if self.config.getboolean_optional('Driver', 'headless'): self.logger.debug("Running Chrome in headless mode") options.add_argument('--headless') if os.name == 'nt': # Temporarily needed if running on Windows. options.add_argument('--disable-gpu') # Add Chrome preferences, mobile emulation options and chrome arguments self._add_chrome_options(options, 'prefs') self._add_chrome_options(options, 'mobileEmulation') self._add_chrome_arguments(options) return options
0.002535
def copy(self):
    '''Create a copy of the current instance.

    :returns: A safely-editable copy of the current sequence.
    :rtype: coral.DNA
    '''
    # Significant performance improvements by skipping alphabet check
    features_copy = [feature.copy() for feature in self.features]
    copy = type(self)(self.top.seq, circular=self.circular,
                      features=features_copy, name=self.name,
                      bottom=self.bottom.seq, run_checks=False)
    return copy
0.003752
def start(self) -> asyncio.Future: """Starts an RTM Session with Slack. Makes an authenticated call to Slack's RTM API to retrieve a websocket URL and then connects to the message server. As events stream-in we run any associated callbacks stored on the client. If 'auto_reconnect' is specified we retrieve a new url and reconnect any time the connection is lost unintentionally or an exception is thrown. Raises: SlackApiError: Unable to retreive RTM URL from Slack. """ # TODO: Add Windows support for graceful shutdowns. if os.name != "nt": signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT) for s in signals: self._event_loop.add_signal_handler(s, self.stop) future = asyncio.ensure_future(self._connect_and_read(), loop=self._event_loop) if self.run_async or self._event_loop.is_running(): return future return self._event_loop.run_until_complete(future)
0.002844
def _pdf(self, xloc, left, right, cache): """ Probability density function. Example: >>> print(chaospy.Uniform().pdf([-0.5, 0.5, 1.5, 2.5])) [0. 1. 0. 0.] >>> print(chaospy.Pow(chaospy.Uniform(), 2).pdf([-0.5, 0.5, 1.5, 2.5])) [0. 0.70710678 0. 0. ] >>> print(chaospy.Pow(chaospy.Uniform(1, 2), -1).pdf([0.4, 0.6, 0.8, 1.2])) [0. 2.77777778 1.5625 0. ] >>> print(chaospy.Pow(2, chaospy.Uniform()).pdf([-0.5, 0.5, 1.5, 2.5])) [0. 0. 0.96179669 0. ] >>> print(chaospy.Pow(2, chaospy.Uniform(-1, 0)).pdf([0.4, 0.6, 0.8, 1.2])) [0. 2.40449173 1.8033688 0. ] >>> print(chaospy.Pow(2, 3).pdf([7, 8, 9])) [ 0. inf 0.] """ left = evaluation.get_forward_cache(left, cache) right = evaluation.get_forward_cache(right, cache) if isinstance(left, Dist): if isinstance(right, Dist): raise StochasticallyDependentError( "under-defined distribution {} or {}".format(left, right)) elif not isinstance(right, Dist): return numpy.inf else: assert numpy.all(left > 0), "imaginary result" x_ = numpy.where(xloc <= 0, -numpy.inf, numpy.log(xloc + 1.*(xloc<=0))/numpy.log(left+1.*(left == 1))) num_ = numpy.log(left+1.*(left == 1))*xloc num_ = num_ + 1.*(num_==0) out = evaluation.evaluate_density(right, x_, cache=cache)/num_ return out x_ = numpy.sign(xloc)*numpy.abs(xloc)**(1./right -1) xloc = numpy.sign(xloc)*numpy.abs(xloc)**(1./right) pairs = numpy.sign(xloc**right) == 1 out = evaluation.evaluate_density(left, xloc, cache=cache) if numpy.any(pairs): out = out + pairs*evaluation.evaluate_density(left, -xloc, cache=cache) out = numpy.sign(right)*out * x_ / right out[numpy.isnan(out)] = numpy.inf return out
0.005631
def add_apt_key(filename=None, url=None, keyid=None, keyserver='subkeys.pgp.net', update=False): """ Trust packages signed with this public key. Example:: import burlap # Varnish signing key from URL and verify fingerprint) burlap.deb.add_apt_key(keyid='C4DEFFEB', url='http://repo.varnish-cache.org/debian/GPG-key.txt') # Nginx signing key from default key server (subkeys.pgp.net) burlap.deb.add_apt_key(keyid='7BD9BF62') # From custom key server burlap.deb.add_apt_key(keyid='7BD9BF62', keyserver='keyserver.ubuntu.com') # From a file burlap.deb.add_apt_key(keyid='7BD9BF62', filename='nginx.asc' """ if keyid is None: if filename is not None: run_as_root('apt-key add %(filename)s' % locals()) elif url is not None: run_as_root('wget %(url)s -O - | apt-key add -' % locals()) else: raise ValueError('Either filename, url or keyid must be provided as argument') else: if filename is not None: _check_pgp_key(filename, keyid) run_as_root('apt-key add %(filename)s' % locals()) elif url is not None: tmp_key = '/tmp/tmp.burlap.key.%(keyid)s.key' % locals() run_as_root('wget %(url)s -O %(tmp_key)s' % locals()) _check_pgp_key(tmp_key, keyid) run_as_root('apt-key add %(tmp_key)s' % locals()) else: keyserver_opt = '--keyserver %(keyserver)s' % locals() if keyserver is not None else '' run_as_root('apt-key adv %(keyserver_opt)s --recv-keys %(keyid)s' % locals()) if update: update_index()
0.004152
def description(self):
    """:class:`str`: Returns the cog's description, typically the cleaned docstring."""
    try:
        return self.__cog_cleaned_doc__
    except AttributeError:
        self.__cog_cleaned_doc__ = cleaned = inspect.getdoc(self)
        return cleaned
0.010033
def porttree_matches(name):
    '''
    Returns a list containing the matches for a given package name from the
    portage tree. Note that the specific version of the package will not be
    provided for packages that have several versions in the portage tree, but
    rather the name of the package (i.e. "dev-python/paramiko").
    '''
    matches = []
    for category in _porttree().dbapi.categories:
        if _porttree().dbapi.cp_list(category + "/" + name):
            matches.append(category + "/" + name)
    return matches
0.001869
async def check_record(self, record, timeout=60): """Measures the time for a DNS record to become available. Query a provided DNS server multiple times until the reply matches the information in the record or until timeout is reached. Args: record (dict): DNS record as a dict with record properties. timeout (int): Time threshold to query the DNS server. """ start_time = time.time() name, rr_data, r_type, ttl = self._extract_record_data(record) r_type_code = async_dns.types.get_code(r_type) resolvable_record = False retries = 0 sleep_time = 5 while not resolvable_record and \ timeout > retries * sleep_time: retries += 1 resolver_res = await self._resolver.query(name, r_type_code) possible_ans = resolver_res.an resolvable_record = \ await self._check_resolver_ans(possible_ans, name, rr_data, ttl, r_type_code) if not resolvable_record: await asyncio.sleep(sleep_time) if not resolvable_record: logging.info( f'Sending metric record-checker-failed: {record}.') else: final_time = float(time.time() - start_time) success_msg = (f'This record: {record} took {final_time} to ' 'register.') logging.info(success_msg)
0.001324
def set_note_attribute(data):
    """
    add the link of the 'source' in the note
    """
    na = False
    if data.get('link'):
        na = Types.NoteAttributes()
        # add the url
        na.sourceURL = data.get('link')
    # add the object to the note
    return na
0.006211
def json_call(cls, method, url, **kwargs): """ Call a remote api using json format """ # retrieve api key if needed empty_key = kwargs.pop('empty_key', False) send_key = kwargs.pop('send_key', True) return_header = kwargs.pop('return_header', False) try: apikey = cls.get('apirest.key') if not apikey and not empty_key: cls.echo("No apikey found for REST API, please use " "'gandi setup' command") sys.exit(1) if send_key: if 'headers' in kwargs: kwargs['headers'].update({'X-Api-Key': apikey}) else: kwargs['headers'] = {'X-Api-Key': apikey} except MissingConfiguration: if not empty_key: return [] # make the call cls.debug('calling url: %s %s' % (method, url)) cls.debug('with params: %r' % kwargs) try: resp, resp_headers = JsonClient.request(method, url, **kwargs) cls.dump('responded: %r' % resp) if return_header: return resp, resp_headers return resp except APICallFailed as err: cls.echo('An error occured during call: %s' % err.errors) sys.exit(1)
0.001499
def turbulent_Petukhov_Kirillov_Popov(Re=None, Pr=None, fd=None): r'''Calculates internal convection Nusselt number for turbulent flows in pipe according to [2]_ and [3]_ as in [1]_. .. math:: Nu = \frac{(f/8)RePr}{C+12.7(f/8)^{1/2}(Pr^{2/3}-1)}\\ C = 1.07 + 900/Re - [0.63/(1+10Pr)] Parameters ---------- Re : float Reynolds number, [-] Pr : float Prandtl number, [-] fd : float Darcy friction factor [-] Returns ------- Nu : float Nusselt number, [-] Notes ----- Range according to [1]_ is 0.5 < Pr ≤ 10^6 and 4000 ≤ Re ≤ 5*10^6 Examples -------- >>> turbulent_Petukhov_Kirillov_Popov(Re=1E5, Pr=1.2, fd=0.0185) 250.11935088905105 References ---------- .. [1] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat Transfer, 3E. New York: McGraw-Hill, 1998. .. [2] B. S. Petukhov, and V. V. Kirillov, "The Problem of Heat Exchange in the Turbulent Flow of Liquids in Tubes," (Russian) Teploenergetika, (4): 63-68, 1958 .. [3] B. S. Petukhov and V. N. Popov, "Theoretical Calculation of Heat Exchange in Turbulent Flow in Tubes of an Incompressible Fluidwith Variable Physical Properties," High Temp., (111): 69-83, 1963. ''' C = 1.07 + 900./Re - (0.63/(1. + 10.*Pr)) return (fd/8.)*Re*Pr/(C + 12.7*(fd/8.)**0.5*(Pr**(2/3.) - 1.))
0.000695
def export(context, keywords, module, update): """Operate on libraries and exported functions. Query the module name containing the function by default. Windows database must be prepared before using this. """ logging.info(_('Export Mode')) database = context.obj['sense'] none = True if update: exports = OrderedDict() from .executables.pe import PE for filename in keywords: module = split_ext(filename, basename=True)[0] with open(filename, 'rb') as stream: exports.update( {module: PE(stream).get_export_table()}) database.make_export(exports) none = False elif module: for module_name in keywords: funcs = database.query_module_funcs(module_name) if funcs: none = False print(', '.join(map(str, funcs))) else: logging.warning(_('No function for module: %s'), module_name) else: for func_name in keywords: module_name = database.query_func_module(func_name) if module_name: none = False print(repr(module_name)) else: logging.warning(_('No module for function: %s'), func_name) sys.exit(1 if none else 0)
0.000713
def _handle_register_response(self, response):
    """Called when a register response (RegisterInstanceResponse) arrives"""
    if response.status.status != common_pb2.StatusCode.Value("OK"):
        raise RuntimeError("Stream Manager returned a not OK response for register")

    Log.info("We registered ourselves to the Stream Manager")
    self.is_registered = True
    if response.HasField("pplan"):
        Log.info("Handling assignment message from response")
        self._handle_assignment_message(response.pplan)
    else:
        Log.debug("Received a register response with no pplan")
0.010204
def ste(command, nindent, mdir, fpointer, env=None): """ Print STDOUT of a shell command formatted in reStructuredText. This is a simplified version of :py:func:`pmisc.term_echo`. :param command: Shell command (relative to **mdir** if **env** is not given) :type command: string :param nindent: Indentation level :type nindent: integer :param mdir: Module directory, used if **env** is not given :type mdir: string :param fpointer: Output function pointer. Normally is :code:`cog.out` but :code:`print` or other functions can be used for debugging :type fpointer: function object :param env: Environment dictionary. If not provided, the environment dictionary is the key "PKG_BIN_DIR" with the value of the **mdir** :type env: dictionary For example:: .. This is a reStructuredText file snippet .. [[[cog .. import os, sys .. from docs.support.term_echo import term_echo .. file_name = sys.modules['docs.support.term_echo'].__file__ .. mdir = os.path.realpath( .. os.path.dirname( .. os.path.dirname(os.path.dirname(file_name)) .. ) .. ) .. [[[cog ste('build_docs.py -h', 0, mdir, cog.out) ]]] .. code-block:: console $ ${PKG_BIN_DIR}/build_docs.py -h usage: build_docs.py [-h] [-d DIRECTORY] [-n NUM_CPUS] ... $ .. ]]] """ sdir = LDELIM + "PKG_BIN_DIR" + RDELIM command = ( sdir + ("{sep}{cmd}".format(sep=os.path.sep, cmd=command)) if env is None else command ) env = {"PKG_BIN_DIR": mdir} if env is None else env term_echo(command, nindent, env, fpointer)
0.001108
def knot_removal_kv(knotvector, span, r):
    """ Computes the knot vector of the rational/non-rational spline after knot removal.

    Part of Algorithm A5.8 of The NURBS Book by Piegl & Tiller, 2nd Edition.

    :param knotvector: knot vector
    :type knotvector: list, tuple
    :param span: knot span
    :type span: int
    :param r: number of knot removals
    :type r: int
    :return: updated knot vector
    :rtype: list
    """
    # Edge case
    if r < 1:
        return knotvector

    # Create a deep copy of the input knot vector
    kv_updated = deepcopy(knotvector)

    # Shift knots
    for k in range(span + 1, len(knotvector)):
        kv_updated[k - r] = knotvector[k]

    # Slice to get the new knot vector
    kv_updated = kv_updated[0:-r]

    # Return the new knot vector
    return kv_updated
0.002433
def luminosity_within_ellipse_in_units(self, major_axis : dim.Length, unit_luminosity='eps', kpc_per_arcsec=None, exposure_time=None): """Compute the total luminosity of the galaxy's light profiles, within an ellipse of specified major axis. This is performed via integration of each light profile and is centred, oriented and aligned with each light model's individual geometry. See *light_profiles.luminosity_within_ellipse* for details of how this is performed. Parameters ---------- major_axis : float The major-axis radius of the ellipse. unit_luminosity : str The units the luminosity is returned in (eps | counts). exposure_time : float The exposure time of the observation, which converts luminosity from electrons per second units to counts. """ if self.has_light_profile: return sum(map(lambda p: p.luminosity_within_ellipse_in_units(major_axis=major_axis, unit_luminosity=unit_luminosity, kpc_per_arcsec=kpc_per_arcsec, exposure_time=exposure_time), self.light_profiles)) else: return None
0.00873
def execute_async(self, output_options=None, sampling=None, context=None, query_params=None): """ Initiate the query and return a QueryJob. Args: output_options: a QueryOutput object describing how to execute the query sampling: sampling function to use. No sampling is done if None. See bigquery.Sampling context: an optional Context object providing project_id and credentials. If a specific project id or credentials are unspecified, the default ones configured at the global level are used. query_params: a dictionary containing query parameter types and values, passed to BigQuery. Returns: A Job object that can wait on creating a table or exporting to a file If the output is a table, the Job object additionally has run statistics and query results Raises: Exception if query could not be executed. """ # Default behavior is to execute to a table if output_options is None: output_options = QueryOutput.table() # First, execute the query into a table, using a temporary one if no name is specified batch = output_options.priority == 'low' append = output_options.table_mode == 'append' overwrite = output_options.table_mode == 'overwrite' table_name = output_options.table_name context = context or google.datalab.Context.default() api = _api.Api(context) if table_name is not None: table_name = _utils.parse_table_name(table_name, api.project_id) sql = self._expanded_sql(sampling) try: query_result = api.jobs_insert_query(sql, table_name=table_name, append=append, overwrite=overwrite, batch=batch, use_cache=output_options.use_cache, allow_large_results=output_options.allow_large_results, table_definitions=self.data_sources, query_params=query_params) except Exception as e: raise e if 'jobReference' not in query_result: raise Exception('Unexpected response from server') job_id = query_result['jobReference']['jobId'] if not table_name: try: destination = query_result['configuration']['query']['destinationTable'] table_name = (destination['projectId'], destination['datasetId'], destination['tableId']) except KeyError: # The query was in error raise Exception(_utils.format_query_errors(query_result['status']['errors'])) execute_job = _query_job.QueryJob(job_id, table_name, sql, context=context) # If all we need is to execute the query to a table, we're done if output_options.type == 'table': return execute_job # Otherwise, build an async Job that waits on the query execution then carries out # the specific export operation else: export_args = export_kwargs = None if output_options.type == 'file': if output_options.file_path.startswith('gs://'): export_func = execute_job.result().extract export_args = [output_options.file_path] export_kwargs = { 'format': output_options.file_format, 'csv_delimiter': output_options.csv_delimiter, 'csv_header': output_options.csv_header, 'compress': output_options.compress_file } else: export_func = execute_job.result().to_file export_args = [output_options.file_path] export_kwargs = { 'format': output_options.file_format, 'csv_delimiter': output_options.csv_delimiter, 'csv_header': output_options.csv_header } elif output_options.type == 'dataframe': export_func = execute_job.result().to_dataframe export_args = [] export_kwargs = { 'start_row': output_options.dataframe_start_row, 'max_rows': output_options.dataframe_max_rows } # Perform the export operation with the specified parameters export_func = google.datalab.utils.async_function(export_func) return export_func(*export_args, **export_kwargs)
0.007816
def fix_multi_T1w_source_name(in_files):
    """
    Make up a generic source name when there are multiple T1s

    >>> fix_multi_T1w_source_name([
    ...     '/path/to/sub-045_ses-test_T1w.nii.gz',
    ...     '/path/to/sub-045_ses-retest_T1w.nii.gz'])
    '/path/to/sub-045_T1w.nii.gz'
    """
    import os
    from nipype.utils.filemanip import filename_to_list
    base, in_file = os.path.split(filename_to_list(in_files)[0])
    subject_label = in_file.split("_", 1)[0].split("-")[1]
    return os.path.join(base, "sub-%s_T1w.nii.gz" % subject_label)
0.001792
def log_shapes(logger):
    """
    Decorator to log the shapes of input and output dataframes

    It considers all the dataframes passed either as arguments or keyword
    arguments as inputs and all the dataframes returned as outputs.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            input_shapes = _get_dfs_shapes(*args, **kwargs)
            result = func(*args, **kwargs)
            output_shapes = _get_dfs_shapes(result)
            _log_shapes(logger, func.__name__, input_shapes, output_shapes)
            return result
        return wrapper
    return decorator
0.003175
def get_rollup_ttl(self, use_cached=True):
    """Retrieve the rollupTtl for this stream

    The rollupTtl is the time to live (TTL) in seconds for the aggregate
    roll-ups of data points stored in the stream. A roll-up expires after
    the configured amount of time and is automatically deleted.

    :param bool use_cached: If False, the function will always request the latest
        from Device Cloud. If True, the device will not make a request if it
        already has cached data.
    :raises devicecloud.DeviceCloudHttpException: in the case of an unexpected http error
    :raises devicecloud.streams.NoSuchStreamException: if this stream has not yet
        been created
    :return: The rollupTtl associated with this stream in seconds
    :rtype: int or None
    """
    rollup_ttl_text = self._get_stream_metadata(use_cached).get("rollupTtl")
    return int(rollup_ttl_text)
0.009677
def convert_characteristicFaultSource(self, node):
    """
    Convert the given node into a characteristic fault object.

    :param node: a node with tag areaGeometry
    :returns: a :class:`openquake.hazardlib.source.CharacteristicFaultSource`
        instance
    """
    char = source.CharacteristicFaultSource(
        source_id=node['id'],
        name=node['name'],
        tectonic_region_type=node.attrib.get('tectonicRegion'),
        mfd=self.convert_mfdist(node),
        surface=self.convert_surfaces(node.surface),
        rake=~node.rake,
        temporal_occurrence_model=self.get_tom(node))
    return char
0.002825
def WriteEventBody(self, event):
    """Writes the body of an event to the output.

    Args:
      event (EventObject): event.
    """
    # sqlite seems to support milli seconds precision but that seems
    # not to be used by 4n6time
    row = self._GetSanitizedEventValues(event)

    self._cursor.execute(self._INSERT_QUERY, row)
    self._count += 1

    # Commit the current transaction every 10000 inserts.
    if self._count % 10000 == 0:
        self._connection.commit()
        if self._set_status:
            self._set_status('Inserting event: {0:d}'.format(self._count))
0.005208
def _get_query_argument(args, cell, env): """ Get a query argument to a cell magic. The query is specified with args['query']. We look that up and if it is a BQ query object, just return it. If it is a string, build a query object out of it and return that Args: args: the dictionary of magic arguments. cell: the cell contents which can be variable value overrides (if args has a 'query' value) or inline SQL otherwise. env: a dictionary that is used for looking up variable values. Returns: A Query object. """ sql_arg = args.get('query', None) if sql_arg is None: # Assume we have inline SQL in the cell if not isinstance(cell, basestring): raise Exception('Expected a --query argument or inline SQL') return bigquery.Query(cell, env=env) item = google.datalab.utils.commands.get_notebook_item(sql_arg) if isinstance(item, bigquery.Query): return item else: raise Exception('Expected a query object, got %s.' % type(item))
0.010967
def _impopts(self, opts): """ :param opts: a dictionary containing any of the following Proc Import options(datarow, delimiter, getnames, guessingrows): - datarow is a number - delimiter is a character - getnames is a boolean - guessingrows is a numbers or the string 'MAX' .. code-block:: python {'datarow' : 2 'delimiter' : ','' 'getnames' : True 'guessingrows': 20 } :return: str """ optstr = '' if len(opts): for key in opts: if len(str(opts[key])): if key == 'datarow': optstr += 'datarow=' + str(opts[key]) + ';' elif key == 'delimiter': optstr += 'delimiter=' optstr += "'" + '%02x' % ord(opts[key].encode(self._io.sascfg.encoding)) + "'x; " elif key == 'getnames': optstr += 'getnames=' if opts[key]: optstr += 'YES; ' else: optstr += 'NO; ' elif key == 'guessingrows': optstr += 'guessingrows=' if opts[key] == 'MAX': optstr += 'MAX; ' else: optstr += str(opts[key]) + '; ' return optstr
0.002468
def __merge_json_values(current, previous):
    """Merges the values between the current and previous run of the script."""
    for value in current:
        name = value['name']

        # Find the previous value
        previous_value = __find_and_remove_value(previous, value)
        if previous_value is not None:
            flags = value['flags']
            previous_flags = previous_value['flags']
            if flags != previous_flags:
                logging.warning(
                    'Flags for %s are different. Using previous value.', name)
                value['flags'] = previous_flags
        else:
            logging.warning('Value %s is a new value', name)

    for value in previous:
        name = value['name']
        logging.warning(
            'Value %s not present in current run. Appending value.', name)
        current.append(value)
0.001147
def getMetricsColumnLengths(self):
    """ Gets the maximum length of each column """
    displayLen = 0
    descLen = 0
    for m in self.metrics:
        displayLen = max(displayLen, len(m['displayName']))
        descLen = max(descLen, len(m['description']))
    return (displayLen, descLen)
0.005917
def get(self, bucket: str, key: str) -> bytes:
    """
    Retrieves the data for a given object in a given bucket.
    :param bucket: the bucket the object resides in.
    :param key: the key of the object for which metadata is being retrieved.
    :return: the data
    """
    bucket_obj = self._ensure_bucket_loaded(bucket)
    blob_obj = bucket_obj.blob(key)
    try:
        return blob_obj.download_as_string()
    except NotFound:
        raise BlobNotFoundError(f"Could not find gs://{bucket}/{key}")
0.003534
def enterprise_login_required(view): """ View decorator for allowing authenticated user with valid enterprise UUID. This decorator requires enterprise identifier as a parameter `enterprise_uuid`. This decorator will throw 404 if no kwarg `enterprise_uuid` is provided to the decorated view . If there is no enterprise in database against the kwarg `enterprise_uuid` or if the user is not authenticated then it will redirect the user to the enterprise-linked SSO login page. Usage:: @enterprise_login_required() def my_view(request, enterprise_uuid): # Some functionality ... OR class MyView(View): ... @method_decorator(enterprise_login_required) def get(self, request, enterprise_uuid): # Some functionality ... """ @wraps(view) def wrapper(request, *args, **kwargs): """ Wrap the decorator. """ if 'enterprise_uuid' not in kwargs: raise Http404 enterprise_uuid = kwargs['enterprise_uuid'] enterprise_customer = get_enterprise_customer_or_404(enterprise_uuid) # Now verify if the user is logged in. If user is not logged in then # send the user to the login screen to sign in with an # Enterprise-linked IdP and the pipeline will get them back here. if not request.user.is_authenticated: parsed_current_url = urlparse(request.get_full_path()) parsed_query_string = parse_qs(parsed_current_url.query) parsed_query_string.update({ 'tpa_hint': enterprise_customer.identity_provider, FRESH_LOGIN_PARAMETER: 'yes' }) next_url = '{current_path}?{query_string}'.format( current_path=quote(parsed_current_url.path), query_string=urlencode(parsed_query_string, doseq=True) ) return redirect( '{login_url}?{params}'.format( login_url='/login', params=urlencode( {'next': next_url} ) ) ) # Otherwise, they can proceed to the original view. return view(request, *args, **kwargs) return wrapper
0.000429
def get(key, default=-1):
    """Backport support for original codes."""
    if isinstance(key, int):
        return ErrorCode(key)
    if key not in ErrorCode._member_map_:
        extend_enum(ErrorCode, key, default)
    return ErrorCode[key]
0.007463
def fit(self, X, y=None): """Compute the Deterministic Shared Response Model Parameters ---------- X : list of 2D arrays, element i has shape=[voxels_i, samples] Each element in the list contains the fMRI data of one subject. y : not used """ logger.info('Starting Deterministic SRM') # Check the number of subjects if len(X) <= 1: raise ValueError("There are not enough subjects " "({0:d}) to train the model.".format(len(X))) # Check for input data sizes if X[0].shape[1] < self.features: raise ValueError( "There are not enough samples to train the model with " "{0:d} features.".format(self.features)) # Check if all subjects have same number of TRs number_trs = X[0].shape[1] number_subjects = len(X) for subject in range(number_subjects): assert_all_finite(X[subject]) if X[subject].shape[1] != number_trs: raise ValueError("Different number of samples between subjects" ".") # Run SRM self.w_, self.s_ = self._srm(X) return self
0.001592
def get_neighbor_out_filter(neigh_ip_address):
    """Returns a neighbor out_filter for given ip address if exists."""
    core = CORE_MANAGER.get_core_service()
    ret = core.peer_manager.get_by_addr(neigh_ip_address).out_filters
    return ret
0.004065
def worker(): """ Initialize the distributed environment. """ import torch import torch.distributed as dist from torch.multiprocessing import Process import numpy as np print("Initializing distributed pytorch") os.environ['MASTER_ADDR'] = str(args.master_addr) os.environ['MASTER_PORT'] = str(args.master_port) # Use TCP backend. Gloo needs nightly, where it currently fails with # dist.init_process_group('gloo', rank=args.rank, # AttributeError: module 'torch.distributed' has no attribute 'init_process_group' dist.init_process_group('tcp', rank=args.rank, world_size=args.size) tensor = torch.ones(args.size_mb*250*1000)*(args.rank+1) time_list = [] outfile = 'out' if args.rank == 0 else '/dev/null' log = util.FileLogger(outfile) for i in range(args.iters): # print('before: rank ', args.rank, ' has data ', tensor[0]) start_time = time.perf_counter() if args.rank == 0: dist.send(tensor=tensor, dst=1) else: dist.recv(tensor=tensor, src=0) elapsed_time_ms = (time.perf_counter() - start_time)*1000 time_list.append(elapsed_time_ms) # print('after: rank ', args.rank, ' has data ', tensor[0]) rate = args.size_mb/(elapsed_time_ms/1000) log('%03d/%d added %d MBs in %.1f ms: %.2f MB/second' % (i, args.iters, args.size_mb, elapsed_time_ms, rate)) min = np.min(time_list) median = np.median(time_list) log(f"min: {min:8.2f}, median: {median:8.2f}, mean: {np.mean(time_list):8.2f}")
0.017787
def _get_local_image_id(docker_binary, docker_tag):
    """
    Get the image id of the local docker layer with the passed tag
    :param docker_tag: docker tag
    :return: Image id as string or None if tag does not exist
    """
    cmd = [docker_binary, "images", "-q", docker_tag]
    image_id_b = check_output(cmd)
    image_id = image_id_b.decode('utf-8').strip()
    if not image_id:
        raise RuntimeError('Unable to find docker image id matching with tag {}'.format(docker_tag))
    return image_id
0.003914
def __make_thumbnail(self, width, height):
    """
    Create the page's thumbnail
    """
    (w, h) = self.size
    factor = max(
        (float(w) / width),
        (float(h) / height)
    )
    w /= factor
    h /= factor
    return self.get_image((round(w), round(h)))
0.006309
def viz_live_trace(view):
    """
    Given a Manticore trace file, highlight the basic blocks.
    """
    tv = TraceVisualizer(view, None, live=True)
    if tv.workspace is None:
        tv.workspace = get_workspace()
    # update due to singleton in case we are called after a clear
    tv.live_update = True
    tv.visualize()
0.00303
def iter_items(cls, repo, common_path=None):
    """Find all refs in the repository

    :param repo: is the Repo

    :param common_path:
        Optional keyword argument to the path which is to be shared by all
        returned Ref objects.
        Defaults to class specific portion if None assuring that only refs
        suitable for the actual class are returned.

    :return:
        git.SymbolicReference[], each of them is guaranteed to be a symbolic
        ref which is not detached and pointing to a valid ref

        List is lexicographically sorted
        The returned objects represent actual subclasses, such as Head or TagReference"""
    return (r for r in cls._iter_items(repo, common_path)
            if r.__class__ == SymbolicReference or not r.is_detached)
0.006075
def __check_mem(self):
    ''' raise exception on RAM exceeded '''
    mem_free = psutil.virtual_memory().available / 2**20
    self.log.debug("Memory free: %s/%s", mem_free, self.mem_limit)
    if mem_free < self.mem_limit:
        raise RuntimeError(
            "Not enough resources: free memory less "
            "than %sMB: %sMB" % (self.mem_limit, mem_free))
0.005076
def filter(self, source_file, encoding):  # noqa A001
    """Parse XML file."""
    sources = []
    if encoding:
        with codecs.open(source_file, 'r', encoding=encoding) as f:
            src = f.read()
        sources.extend(self._filter(src, source_file, encoding))
    else:
        for content, filename, enc in self.get_content(source_file):
            sources.extend(self._filter(content, source_file, enc))
    return sources
0.004167
def _eval(self, m: EvalParam) -> object:
    """ Evaluate m returning the method / function invocation or value. Kind of like a static method

    :param m: object to evaluate
    :return: return
    """
    if inspect.ismethod(m) or inspect.isroutine(m):
        return m()
    elif inspect.isfunction(m):
        # Pass self only if the function declares at least one parameter
        # (a Signature object itself has no len(); check its parameters).
        return m(self) if len(inspect.signature(m).parameters) > 0 else m()
    else:
        return m
0.006726
def set_banner(self, banner_type, value=None, default=False, disable=False):
    """Configures system banners

    Args:
        banner_type (str): banner to be changed (likely login or motd)
        value (str): value to set for the banner
        default (bool): Controls the use of the default keyword
        disable (bool): Controls the use of the no keyword

    Returns:
        bool: True if the commands completed successfully otherwise False
    """
    command_string = "banner %s" % banner_type
    if default is True or disable is True:
        cmd = self.command_builder(command_string, value=None,
                                   default=default, disable=disable)
        return self.configure(cmd)
    else:
        if not value.endswith("\n"):
            value = value + "\n"
        command_input = dict(cmd=command_string, input=value)
        return self.configure([command_input])
0.003
def search_ip(self, searchterm):
    """Search for ips

    :type searchterm: str
    :rtype: list
    """
    return self.__search(type_attribute=self.__mispiptypes(), value=searchterm)
0.018692
def download_file_insecure(url, target):
    """Use Python to download the file, without connection authentication."""
    src = urlopen(url)
    try:
        # Read all the data in one block.
        data = src.read()
    finally:
        src.close()

    # Write all the data in one block to avoid creating a partial file.
    with open(target, "wb") as dst:
        dst.write(data)
0.002604
def metadata(sceneid, pmin=2, pmax=98, **kwargs): """ Return band bounds and statistics. Attributes ---------- sceneid : str CBERS sceneid. pmin : int, optional, (default: 2) Histogram minimum cut. pmax : int, optional, (default: 98) Histogram maximum cut. kwargs : optional These are passed to 'rio_tiler.utils.raster_get_stats' e.g: histogram_bins=20, dst_crs='epsg:4326' Returns ------- out : dict Dictionary with bounds and bands statistics. """ scene_params = _cbers_parse_scene_id(sceneid) cbers_address = "{}/{}".format(CBERS_BUCKET, scene_params["key"]) bands = scene_params["bands"] ref_band = scene_params["reference_band"] info = {"sceneid": sceneid} addresses = [ "{}/{}_BAND{}.tif".format(cbers_address, sceneid, band) for band in bands ] _stats_worker = partial( utils.raster_get_stats, indexes=[1], nodata=0, overview_level=2, percentiles=(pmin, pmax), **kwargs ) with futures.ThreadPoolExecutor(max_workers=MAX_THREADS) as executor: responses = list(executor.map(_stats_worker, addresses)) info["bounds"] = [r["bounds"] for b, r in zip(bands, responses) if b == ref_band][0] info["statistics"] = { b: v for b, d in zip(bands, responses) for k, v in d["statistics"].items() } return info
0.002793
def send_ready(self):
    """
    Returns true if data can be written to this channel without blocking.
    This means the channel is either closed (so any write attempt would
    return immediately) or there is at least one byte of space in the
    outbound buffer. If there is at least one byte of space in the
    outbound buffer, a `send` call will succeed immediately and return
    the number of bytes actually written.

    :return:
        ``True`` if a `send` call on this channel would immediately succeed
        or fail
    """
    self.lock.acquire()
    try:
        if self.closed or self.eof_sent:
            return True
        return self.out_window_size > 0
    finally:
        self.lock.release()
0.002532
def param_help(self, args): '''show help on a parameter''' if len(args) == 0: print("Usage: param help PARAMETER_NAME") return htree = self.param_help_tree() if htree is None: return for h in args: h = h.upper() if h in htree: help = htree[h] print("%s: %s\n" % (h, help.get('humanName'))) print(help.get('documentation')) try: print("\n") for f in help.field: print("%s : %s" % (f.get('name'), str(f))) except Exception as e: pass try: # The entry "values" has been blatted by a cython # function at this point, so we instead get the # "values" by offset rather than name. children = help.getchildren() vchild = children[0] values = vchild.getchildren() if len(values): print("\nValues: ") for v in values: print("\t%s : %s" % (v.get('code'), str(v))) except Exception as e: print("Caught exception %s" % repr(e)) pass else: print("Parameter '%s' not found in documentation" % h)
0.001368
async def create_authenticator_async(self, connection, debug=False, loop=None, **kwargs): """Create the async AMQP session and the CBS channel with which to negotiate the token. :param connection: The underlying AMQP connection on which to create the session. :type connection: ~uamqp.async_ops.connection_async.ConnectionAsync :param debug: Whether to emit network trace logging events for the CBS session. Default is `False`. Logging events are set at INFO level. :type debug: bool :param loop: A user specified event loop. :type loop: ~asycnio.AbstractEventLoop :rtype: uamqp.c_uamqp.CBSTokenAuth """ self.loop = loop or asyncio.get_event_loop() self._connection = connection self._session = SessionAsync(connection, loop=self.loop, **kwargs) try: self._cbs_auth = c_uamqp.CBSTokenAuth( self.audience, self.token_type, self.token, int(self.expires_at), self._session._session, # pylint: disable=protected-access self.timeout, self._connection.container_id) self._cbs_auth.set_trace(debug) except ValueError: await self._session.destroy_async() raise errors.AMQPConnectionError( "Unable to open authentication session on connection {}.\n" "Please confirm target hostname exists: {}".format( connection.container_id, connection.hostname)) from None return self._cbs_auth
0.001839
def GetHostMemMappedMB(self):
    '''Undocumented.'''
    counter = c_uint()
    ret = vmGuestLib.VMGuestLib_GetHostMemMappedMB(self.handle.value, byref(counter))
    if ret != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(ret)
    return counter.value
0.014388
def hourly_dew_point_values(self, dry_bulb_condition):
    """Get a list of dew points (C) at each hour over the design day.

    args:
        dry_bulb_condition: The dry bulb condition for the day.
    """
    hourly_dew_point = []
    max_dpt = self.dew_point(dry_bulb_condition.dry_bulb_max)
    for db in dry_bulb_condition.hourly_values:
        if db >= max_dpt:
            hourly_dew_point.append(max_dpt)
        else:
            hourly_dew_point.append(db)
    return hourly_dew_point
0.003676
def Parse(self): """Iterator returning a list for each entry in history. We store all the download events in an array (choosing this over visits since there are likely to be less of them). We later interleave them with visit events to get an overall correct time order. Yields: a list of attributes for each entry """ # Query for old style and newstyle downloads storage. query_iter = itertools.chain( self.Query(self.DOWNLOADS_QUERY), self.Query(self.DOWNLOADS_QUERY_2)) results = [] for timestamp, url, path, received_bytes, total_bytes in query_iter: timestamp = self.ConvertTimestamp(timestamp) results.append((timestamp, "CHROME_DOWNLOAD", url, path, received_bytes, total_bytes)) for timestamp, url, title, typed_count in self.Query(self.VISITS_QUERY): timestamp = self.ConvertTimestamp(timestamp) results.append((timestamp, "CHROME_VISIT", url, title, typed_count, "")) results.sort(key=lambda it: it[0]) for it in results: yield it
0.00565
def maybe_2to3(filename, modname=None): """Returns a python3 version of filename.""" need_2to3 = False filename = os.path.abspath(filename) if any(filename.startswith(d) for d in DIRS): need_2to3 = True elif modname is not None and any(modname.startswith(p) for p in PACKAGES): need_2to3 = True if not need_2to3: return filename outfilename = '/_auto2to3_'.join(os.path.split(filename)) if (not os.path.exists(outfilename) or os.stat(filename).st_mtime > os.stat(outfilename).st_mtime): try: with open(filename) as file: contents = file.read() contents = rt.refactor_docstring(contents, filename) tree = rt.refactor_string(contents, filename) except Exception as err: raise ImportError("2to3 couldn't convert %r" % filename) outfile = open(outfilename, 'wb') outfile.write(str(tree).encode('utf8')) outfile.close() return outfilename
0.00199
def dictionary(self, value):
        """Set dictionary, validating the value before storing it."""
        value = value or {}
        if not isinstance(value, dict):
            raise BadArgumentError("dictionary must be dict: {}".format(value))
        # Only assign once validation has passed so a bad value never
        # replaces the previously stored dictionary.
        self._dictionary = value
0.007843
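A hypothetical caller of the setter above (the owning object `cfg` is illustrative, not from the source); with the validate-before-assign order, a bad value raises without clobbering the stored dict:

cfg.dictionary = {"token": "abc"}    # stored as-is
cfg.dictionary = None                # coerced to {}
cfg.dictionary = ["not", "a-dict"]   # raises BadArgumentError; previous value is kept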
def _get_running_workers_names(running_workers: list): """May contain sensitive info (like user ids). Use with care.""" return FormattedText().join( [ FormattedText().newline().normal(" - {name}").start_format().bold(name=worker.name).end_format() for worker in running_workers ] )
0.00831
def get_topic(self): """ Returns the topic to consider. """ if not hasattr(self, 'topic'): self.topic = get_object_or_404( Topic.objects.select_related('forum').all(), pk=self.kwargs['pk'], ) return self.topic
0.010989
def remove_successor(self, successor):
        """!
        @brief Remove successor from the node.
        
        @param[in] successor (cfnode): Successor for removing.
        
        """
        
        # Subtract the successor's clustering feature and detach it from this node.
        self.feature -= successor.feature
        self.successors.remove(successor)
        
        successor.parent = None
0.026549
def _add_goterms_kws(self, go2obj_user, kws_gos): """Add more GOTerms to go2obj_user, if requested and relevant.""" if 'go2color' in kws_gos: for goid in kws_gos['go2color'].keys(): self._add_goterms(go2obj_user, goid)
0.007634
def find_longest_match(self, alo, ahi, blo, bhi): """Find longest matching block in a[alo:ahi] and b[blo:bhi]. Wrapper for the C implementation of this function. """ besti, bestj, bestsize = _cdifflib.find_longest_match(self, alo, ahi, blo, bhi) return _Match(besti, bestj, bestsize)
0.009259
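The wrapper above keeps the stdlib SequenceMatcher semantics; a quick illustration of the same call shape using plain difflib (not the C-backed class):

import difflib

sm = difflib.SequenceMatcher(None, "abxcd", "abcd")
m = sm.find_longest_match(0, 5, 0, 4)
print(m.a, m.b, m.size)   # 0 0 2 -> "ab" is the longest matching block in range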
def _computeArray(self, funcTilde, R, z, phi):
        """
        NAME:
           _computeArray
        PURPOSE:
           evaluate the density or potential for a given array of coordinates
        INPUT:
           funcTilde - must be _rhoTilde or _phiTilde
           R - Cylindrical Galactocentric radius
           z - vertical height
           phi - azimuth
        OUTPUT:
           density or potential evaluated at (R, z, phi)
        HISTORY:
           2016-06-02 - Written - Aladdin
        """
        R = nu.array(R,dtype=float); z = nu.array(z,dtype=float); phi = nu.array(phi,dtype=float);
        shape = (R*z*phi).shape
        if shape == (): return nu.sum(self._compute(funcTilde, R,z,phi))
        R = R*nu.ones(shape); z = z*nu.ones(shape); phi = phi*nu.ones(shape);
        func = nu.zeros(shape, float)

        li = _cartesian(shape)
        for i in range(li.shape[0]):
            j = nu.split(li[i], li.shape[1])
            func[j] = nu.sum(self._compute(funcTilde, R[j][0],z[j][0],phi[j][0]))
        return func
0.020677
def get_instance(self, payload): """ Build an instance of SampleInstance :param dict payload: Payload response from the API :returns: twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance :rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance """ return SampleInstance( self._version, payload, assistant_sid=self._solution['assistant_sid'], task_sid=self._solution['task_sid'], )
0.003876
def process_view(self, request, view_func, view_args, view_kwargs): """ Forwards unauthenticated requests to the admin page to the CAS login URL, as well as calls to django.contrib.auth.views.login and logout. """ if view_func == login: return cas_login(request, *view_args, **view_kwargs) elif view_func == logout: return cas_logout(request, *view_args, **view_kwargs) if settings.CAS_ADMIN_PREFIX: if not request.path.startswith(settings.CAS_ADMIN_PREFIX): return None elif not view_func.__module__.startswith('django.contrib.admin.'): return None try: # use callable for pre-django 2.0 is_authenticated = request.user.is_authenticated() except TypeError: is_authenticated = request.user.is_authenticated if is_authenticated: if request.user.is_staff: return None else: error = ('<h1>Forbidden</h1><p>You do not have staff ' 'privileges.</p>') return HttpResponseForbidden(error) params = urlencode({REDIRECT_FIELD_NAME: request.get_full_path()}) return HttpResponseRedirect(reverse(cas_login) + '?' + params)
0.001509
def csv2yaml(in_file, out_file=None): """Convert a CSV SampleSheet to YAML run_info format. """ if out_file is None: out_file = "%s.yaml" % os.path.splitext(in_file)[0] barcode_ids = _generate_barcode_ids(_read_input_csv(in_file)) lanes = _organize_lanes(_read_input_csv(in_file), barcode_ids) with open(out_file, "w") as out_handle: out_handle.write(yaml.safe_dump(lanes, default_flow_style=False)) return out_file
0.002179
def create_token(secret, data, options=None): """ Generates a secure authentication token. Our token format follows the JSON Web Token (JWT) standard: header.claims.signature Where: 1) "header" is a stringified, base64-encoded JSON object containing version and algorithm information. 2) "claims" is a stringified, base64-encoded JSON object containing a set of claims: Library-generated claims: "iat" -> The issued at time in seconds since the epoch as a number "d" -> The arbitrary JSON object supplied by the user. User-supplied claims (these are all optional): "exp" (optional) -> The expiration time of this token, as a number of seconds since the epoch. "nbf" (optional) -> The "not before" time before which the token should be rejected (seconds since the epoch) "admin" (optional) -> If set to true, this client will bypass all security rules (use this to authenticate servers) "debug" (optional) -> "set to true to make this client receive debug information about security rule execution. "simulate" (optional, internal-only for now) -> Set to true to neuter all API operations (listens / puts will run security rules but not actually write or return data). 3) A signature that proves the validity of this token (see: http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-07) For base64-encoding we use URL-safe base64 encoding. This ensures that the entire token is URL-safe and could, for instance, be placed as a query argument without any encoding (and this is what the JWT spec requires). Args: secret - the Firebase Application secret data - a json serializable object of data to be included in the token options - An optional dictionary of additional claims for the token. Possible keys include: a) "expires" -- A datetime or timestamp (as a number of seconds since the epoch) denoting a time after which this token should no longer be valid. b) "notBefore" -- A datetime or timestamp (as a number of seconds since the epoch) denoting a time before which this token should be rejected by the server. c) "admin" -- Set to true to bypass all security rules (use this for your trusted servers). d) "debug" -- Set to true to enable debug mode (so you can see the results of Rules API operations) e) "simulate" -- (internal-only for now) Set to true to neuter all API operations (listens / puts will run security rules but not actually write or return data) Returns: A signed Firebase Authentication Token Raises: ValueError: if an invalid key is specified in options """ if not isinstance(secret, basestring): raise ValueError("firebase_token_generator.create_token: secret must be a string.") if not options and not data: raise ValueError("firebase_token_generator.create_token: data is empty and no options are set. This token will have no effect on Firebase."); if not options: options = {} is_admin_token = ('admin' in options and options['admin'] == True) _validate_data(data, is_admin_token) claims = _create_options_claims(options) claims['v'] = TOKEN_VERSION claims['iat'] = int(time.time()) claims['d'] = data token = _encode_token(secret, claims) if len(token) > 1024: raise RuntimeError("firebase_token_generator.create_token: generated token is too long.") return token
0.007765
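A hypothetical invocation of create_token; the secret and claims below are placeholders, and the option keys follow the docstring above:

import time

secret = "EXAMPLE_FIREBASE_SECRET"               # placeholder, not a real secret
auth_data = {"uid": "user-1", "isModerator": False}
token = create_token(secret, auth_data,
                     {"admin": False, "expires": int(time.time()) + 3600})
# token is a compact "header.claims.signature" string suitable for client auth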
def update(self, dct): """Update the configuration with new parameters. Must use same kwargs as __init__""" d = self.__dict__.copy() d.update(dct) self.set_attrs(**d)
0.009709
def rating(request): """ Handle a ``RatingForm`` submission and redirect back to its related object. """ response = initial_validation(request, "rating") if isinstance(response, HttpResponse): return response obj, post_data = response url = add_cache_bypass(obj.get_absolute_url().split("#")[0]) response = redirect(url + "#rating-%s" % obj.id) rating_form = RatingForm(request, obj, post_data) if rating_form.is_valid(): rating_form.save() if request.is_ajax(): # Reload the object and return the rating fields as json. obj = obj.__class__.objects.get(id=obj.id) rating_name = obj.get_ratingfield_name() json = {} for f in ("average", "count", "sum"): json["rating_" + f] = getattr(obj, "%s_%s" % (rating_name, f)) response = HttpResponse(dumps(json)) if rating_form.undoing: ratings = set(rating_form.previous) ^ set([rating_form.current]) else: ratings = rating_form.previous + [rating_form.current] set_cookie(response, "yacms-rating", ",".join(ratings)) return response
0.000846
def _mappingGetValueSet(mapping, keys):
    """Return a combined set of values from the mapping.

    :param mapping: dict that maps each key to a set of entries
    :param keys: keys whose entry sets should be combined
    :return: the union of the entry sets for the given keys
    """
    setUnion = set()
    for k in keys:
        setUnion = setUnion.union(mapping[k])
    return setUnion
0.003175
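A tiny illustration of the union semantics, using a hypothetical mapping:

mapping = {"a": {1, 2}, "b": {2, 3}, "c": {4}}
assert _mappingGetValueSet(mapping, ["a", "b"]) == {1, 2, 3}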
def is_connectable(host: str, port: Union[int, str]) -> bool:
    """Tries to connect to the device to see if it is connectable.

    Args:
        host: The host to connect.
        port: The port to connect.

    Returns:
        True or False.
    """
    socket_ = None
    try:
        socket_ = socket.create_connection((host, port), 1)
        result = True
    except OSError:
        # Covers socket.timeout, ConnectionRefusedError and other socket
        # errors so the function returns False instead of raising.
        result = False
    finally:
        if socket_:
            socket_.close()
    return result
0.002028
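A hypothetical probe of a local port (host and port are examples only); with the broader OSError handling above, a refused connection simply yields False:

if is_connectable("127.0.0.1", 5037):
    print("something is listening on 5037")
else:
    print("port 5037 is not reachable")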
def create_snapshot(config='root', snapshot_type='single', pre_number=None,
                    description=None, cleanup_algorithm='number', userdata=None,
                    **kwargs):
    '''
    Creates a snapshot

    config
        Configuration name.
    snapshot_type
        Specifies the type of the new snapshot. Possible values are
        single, pre and post.
    pre_number
        For post snapshots the number of the pre snapshot must be
        provided.
    description
        Description for the snapshot. If not given, the salt job will be used.
    cleanup_algorithm
        Set the cleanup algorithm for the snapshot.

        number
            Deletes old snapshots when a certain number of snapshots
            is reached.
        timeline
            Deletes old snapshots but keeps a number of hourly, daily,
            weekly, monthly and yearly snapshots.
        empty-pre-post
            Deletes pre/post snapshot pairs with empty diffs.
    userdata
        Set userdata for the snapshot (key-value pairs).

    Returns the number of the created snapshot.

    CLI Example:

    .. code-block:: bash

        salt '*' snapper.create_snapshot
    '''
    if not userdata:
        userdata = {}

    jid = kwargs.get('__pub_jid')
    if description is None and jid is not None:
        description = 'salt job {0}'.format(jid)

    if jid is not None:
        userdata['salt_jid'] = jid

    new_nr = None
    try:
        if snapshot_type == 'single':
            new_nr = snapper.CreateSingleSnapshot(config, description,
                                                  cleanup_algorithm, userdata)
        elif snapshot_type == 'pre':
            new_nr = snapper.CreatePreSnapshot(config, description,
                                               cleanup_algorithm, userdata)
        elif snapshot_type == 'post':
            if pre_number is None:
                raise CommandExecutionError(
                    "pre snapshot number 'pre_number' needs to be "
                    "specified for snapshots of the 'post' type")
            new_nr = snapper.CreatePostSnapshot(config, pre_number, description,
                                                cleanup_algorithm, userdata)
        else:
            raise CommandExecutionError(
                "Invalid snapshot type '{0}'".format(snapshot_type))
    except dbus.DBusException as exc:
        raise CommandExecutionError(
            'Error encountered while creating the snapshot: {0}'
            .format(_dbus_exception_to_reason(exc, locals()))
        )
    return new_nr
0.001176
def issuer_type(self, issuer_type): """ Sets the issuer_type of this CertificateIssuerInfo. The type of the certificate issuer. - GLOBAL_SIGN: Certificates are issued by GlobalSign service. The users must provide their own GlobalSign account credentials. - CFSSL_AUTH: Certificates are issued by CFSSL authenticated signing service. The users must provide their own CFSSL host_url and credentials. :param issuer_type: The issuer_type of this CertificateIssuerInfo. :type: str """ if issuer_type is None: raise ValueError("Invalid value for `issuer_type`, must not be `None`") allowed_values = ["GLOBAL_SIGN", "CFSSL_AUTH"] if issuer_type not in allowed_values: raise ValueError( "Invalid value for `issuer_type` ({0}), must be one of {1}" .format(issuer_type, allowed_values) ) self._issuer_type = issuer_type
0.005176
def _fix_up_fields(cls): """Add names to all of the Resource fields. This method will get called on class declaration because of Resource's metaclass. The functionality is based on Google's NDB implementation. `Endpoint` does something similar for `arguments`. """ cls._fields = {} if cls.__module__ == __name__ and cls.__name__ != 'DebugResource': return for name in set(dir(cls)): attr = getattr(cls, name, None) if isinstance(attr, BaseField): if name.startswith('_'): raise TypeError("Resource field %s cannot begin with an " "underscore. Underscore attributes are reserved " "for instance variables that aren't intended to " "propagate out to the HTTP caller." % name) attr._fix_up(cls, name) cls._fields[attr.name] = attr if cls._default_fields is None: cls._default_fields = tuple(cls._fields.keys())
0.004583
def set_reason(self, reason): """ Set the reason of this revocation. If :data:`reason` is ``None``, delete the reason instead. :param reason: The reason string. :type reason: :class:`bytes` or :class:`NoneType` :return: ``None`` .. seealso:: :meth:`all_reasons`, which gives you a list of all supported reasons which you might pass to this method. """ if reason is None: self._delete_reason() elif not isinstance(reason, bytes): raise TypeError("reason must be None or a byte string") else: reason = reason.lower().replace(b' ', b'') reason_code = [r.lower() for r in self._crl_reasons].index(reason) new_reason_ext = _lib.ASN1_ENUMERATED_new() _openssl_assert(new_reason_ext != _ffi.NULL) new_reason_ext = _ffi.gc(new_reason_ext, _lib.ASN1_ENUMERATED_free) set_result = _lib.ASN1_ENUMERATED_set(new_reason_ext, reason_code) _openssl_assert(set_result != _ffi.NULL) self._delete_reason() add_result = _lib.X509_REVOKED_add1_ext_i2d( self._revoked, _lib.NID_crl_reason, new_reason_ext, 0, 0) _openssl_assert(add_result == 1)
0.001535
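A minimal sketch of the pyOpenSSL Revoked API this setter belongs to (serial and reason values are illustrative):

from OpenSSL import crypto

revoked = crypto.Revoked()
revoked.set_serial(b"03ab")
revoked.set_reason(b"keyCompromise")   # must be one of the entries from all_reasons()
print(revoked.get_reason())            # canonical reason text as rendered by OpenSSL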
def to_kraus(self):
        """
        Compute the Kraus operator representation of the estimated process.

        :return: The process as a list of Kraus operators.
        :rtype: List[np.array]
        """
        return [k.data.toarray() for k in qt.to_kraus(self.sop)]
0.007273
def create_graph_from_data(self, data): """Run the algorithm on data. Args: data (pandas.DataFrame): DataFrame containing the data Returns: networkx.DiGraph: Solution given by the algorithm. """ # Building setup w/ arguments. self.arguments['{SCORE}'] = self.score self.arguments['{VERBOSE}'] = str(self.verbose).upper() self.arguments['{BETA}'] = str(self.beta) self.arguments['{OPTIM}'] = str(self.optim).upper() self.arguments['{ALPHA}'] = str(self.alpha) results = self._run_bnlearn(data, verbose=self.verbose) graph = nx.DiGraph() graph.add_edges_from(results) return graph
0.002786
def verify(self, anchors):
        """
        Return True if the CRL is signed by one of the provided
        anchors. False on error (invalid signature, missing anchor, ...)
        """
        cafile = create_temporary_ca_file(anchors)
        if cafile is None:
            return False
        try:
            cmd = self.osslcmdbase + ["-noout", "-CAfile", cafile]
            cmdres = self._apply_ossl_cmd(cmd, self.rawcrl)
        except Exception:
            os.unlink(cafile)
            return False
        os.unlink(cafile)
        return "verify OK" in cmdres
0.005291
def update_id(self, sequence_id=None, force=True):
        """Alter the sequence id, and all of the names and ids derived from it. This
        often needs to be done after an IntegrityError in a multiprocessing run."""

        from ..identity import ObjectNumber

        if sequence_id:
            self.sequence_id = sequence_id

        assert self.d_vid

        if self.id is None or force:
            dataset_id = ObjectNumber.parse(self.d_vid).rev(None)
            self.d_id = str(dataset_id)
            self.id = str(TableNumber(dataset_id, self.sequence_id))

        if self.vid is None or force:
            dataset_vid = ObjectNumber.parse(self.d_vid)
            self.vid = str(TableNumber(dataset_vid, self.sequence_id))
0.005435
def write_file(filename, text): """Write text to a file.""" logging.debug(_('Writing file: %s'), filename) try: with open(filename, 'w') as writable: writable.write(text) except (PermissionError, NotADirectoryError): logging.error(_('Error writing file: %s'), filename) return False return True
0.002857
def from_dict(cls, d, identifier_str=None):
        """Load a `ResolvedContext` from a dict.

        Args:
            d (dict): Dict containing context data.
            identifier_str (str): String identifying the context, this is only
                used to display in an error string if a serialization version
                mismatch is detected.

        Returns:
            `ResolvedContext` object.
        """
        # check serialization version
        def _print_version(value):
            return '.'.join(str(x) for x in value)

        toks = str(d["serialize_version"]).split('.')
        load_ver = tuple(int(x) for x in toks)
        curr_ver = ResolvedContext.serialize_version

        if load_ver[0] > curr_ver[0]:
            msg = ["The context"]
            if identifier_str:
                msg.append("in %s" % identifier_str)
            # _print_version returns a string, so use %s here rather than %d.
            msg.append("was written by a newer version of Rez. The load may "
                       "fail (serialize version %s > %s)"
                       % (_print_version(load_ver), _print_version(curr_ver)))
            print >> sys.stderr, ' '.join(msg)

        # create and init the context
        r = ResolvedContext.__new__(ResolvedContext)
        r.load_path = None
        r.pre_resolve_bindings = None

        r.timestamp = d["timestamp"]
        r.building = d["building"]
        r.caching = d["caching"]
        r.implicit_packages = [PackageRequest(x) for x in d["implicit_packages"]]
        r._package_requests = [PackageRequest(x) for x in d["package_requests"]]
        r.package_paths = d["package_paths"]

        r.rez_version = d["rez_version"]
        r.rez_path = d["rez_path"]
        r.user = d["user"]
        r.host = d["host"]
        r.platform = d["platform"]
        r.arch = d["arch"]
        r.os = d["os"]
        r.created = d["created"]
        r.verbosity = d.get("verbosity", 0)

        r.status_ = ResolverStatus[d["status"]]
        r.failure_description = d["failure_description"]

        r.solve_time = d["solve_time"]
        r.load_time = d["load_time"]

        r.graph_string = d["graph"]
        r.graph_ = None

        r._resolved_packages = []
        for d_ in d["resolved_packages"]:
            variant_handle = d_
            if load_ver < (4, 0):
                # -- SINCE SERIALIZE VERSION 4.0
                from rez.utils.backcompat import convert_old_variant_handle
                variant_handle = convert_old_variant_handle(variant_handle)

            variant = get_variant(variant_handle)
            variant.set_context(r)
            r._resolved_packages.append(variant)

        # -- SINCE SERIALIZE VERSION 1

        r.requested_timestamp = d.get("requested_timestamp", 0)

        # -- SINCE SERIALIZE VERSION 2

        r.parent_suite_path = d.get("parent_suite_path")
        r.suite_context_name = d.get("suite_context_name")

        # -- SINCE SERIALIZE VERSION 3

        r.default_patch_lock = PatchLock[d.get("default_patch_lock", "no_lock")]
        patch_locks = d.get("patch_locks", {})
        # Iterate key/value pairs explicitly; iterating the dict directly would
        # only yield keys and break the (k, v) unpacking.
        r.patch_locks = dict((k, PatchLock[v]) for k, v in patch_locks.iteritems())

        # -- SINCE SERIALIZE VERSION 4.0

        r.from_cache = d.get("from_cache", False)

        # -- SINCE SERIALIZE VERSION 4.1

        data = d.get("package_filter", [])
        r.package_filter = PackageFilterList.from_pod(data)

        # -- SINCE SERIALIZE VERSION 4.2

        data = d.get("package_orderers")
        if data:
            r.package_orderers = [package_order.from_pod(x) for x in data]
        else:
            r.package_orderers = None

        # -- SINCE SERIALIZE VERSION 4.3

        r.num_loaded_packages = d.get("num_loaded_packages", -1)

        # track context usage
        if config.context_tracking_host:
            data = dict((k, v) for k, v in d.iteritems()
                        if k in config.context_tracking_context_fields)
            r._track_context(data, action="sourced")

        return r
0.001263