def _interpolationFunctionFactory(self, spline_order=None, cval=None):
    """Returns a function F(x,y,z) that interpolates any values on the grid.

    _interpolationFunctionFactory(self,spline_order=3,cval=None) --> F

    *cval* is set to :meth:`Grid.grid.min`. *cval* cannot be chosen too
    large or too small or NaN because otherwise the spline interpolation
    breaks down near that region and produces wild oscillations.

    .. Note:: Only correct for equally spaced values (i.e. regular edges
              with constant delta).

    .. SeeAlso:: http://www.scipy.org/Cookbook/Interpolation
    """
    # for scipy >=0.9: should use scipy.interpolate.griddata
    # http://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.griddata.html#scipy.interpolate.griddata
    # (does it work for nD?)
    import scipy.ndimage

    if spline_order is None:
        # must be compatible with whatever :func:`scipy.ndimage.spline_filter` takes.
        spline_order = self.interpolation_spline_order
    if cval is None:
        cval = self.interpolation_cval

    data = self.grid
    if cval is None:
        cval = data.min()
    try:
        # masked arrays, fill with min: should keep spline happy
        _data = data.filled(cval)
    except AttributeError:
        _data = data

    coeffs = scipy.ndimage.spline_filter(_data, order=spline_order)
    x0 = self.origin
    dx = self.delta

    def _transform(cnew, c0, dc):
        return (numpy.atleast_1d(cnew) - c0) / dc

    def interpolatedF(*coordinates):
        """B-spline function over the data grid(x,y,z).

        interpolatedF([x1,x2,...],[y1,y2,...],[z1,z2,...]) -> F[x1,y1,z1],F[x2,y2,z2],...

        Example usage for resampling::

          >>> XX,YY,ZZ = numpy.mgrid[40:75:0.5, 96:150:0.5, 20:50:0.5]
          >>> FF = _interpolationFunction(XX,YY,ZZ)
        """
        _coordinates = numpy.array(
            [_transform(coordinates[i], x0[i], dx[i]) for i in range(len(coordinates))])
        return scipy.ndimage.map_coordinates(coeffs, _coordinates,
                                             prefilter=False, mode='nearest',
                                             cval=cval)
    # mode='wrap' would be ideal but is broken: https://github.com/scipy/scipy/issues/1323
    return interpolatedF
def read(self, filename):
    """Populate the instance from the ccp4 file *filename*."""
    if filename is not None:
        self.filename = filename
    with open(self.filename, 'rb') as ccp4:
        h = self.header = self._read_header(ccp4)
        nentries = h['nc'] * h['nr'] * h['ns']
        # Quick and dirty... slurp it all in one go.
        datafmt = h['bsaflag'] + str(nentries) + self._data_bintype
        a = np.array(struct.unpack(datafmt, ccp4.read(struct.calcsize(datafmt))))
    self.header['filename'] = self.filename
    # TODO: Account for the possibility that y-axis is fastest or
    # slowest index, which unfortunately is possible in CCP4.
    order = 'C' if h['mapc'] == 'z' else 'F'
    self.array = a.reshape(h['nc'], h['nr'], h['ns'], order=order)
    self.delta = self._delta()
    self.origin = np.zeros(3)
    self.rank = 3
def _detect_byteorder(ccp4file):
    """Detect the byteorder of stream `ccp4file` and return format character.

    Try all endianness and alignment options until we find something that
    looks sensible ("MAP " in the four bytes at word 53). (The ``machst``
    field could be used to obtain endianness, but it does not specify
    alignment.)

    .. SeeAlso:: :mod:`struct`
    """
    bsaflag = None
    ccp4file.seek(52 * 4)
    mapbin = ccp4file.read(4)
    for flag in '@=<>':
        mapstr = struct.unpack(flag + '4s', mapbin)[0].decode('utf-8')
        if mapstr.upper() == 'MAP ':
            bsaflag = flag
            break  # Only possible value according to spec.
    else:
        raise TypeError(
            "Cannot decode header --- corrupted or wrong format?")
    ccp4file.seek(0)
    return bsaflag
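A minimal, standalone illustration of the trial-decoding idea used above (hypothetical header word, not tied to any real file): each of struct's byte-order/alignment prefixes is applied to the same raw bytes, and only the correct one recovers a sensible value. Integer fields make the difference visible, since char fields like '4s' decode identically under every flag.

import struct

raw = (1234).to_bytes(4, 'little')   # a hypothetical little-endian header word
for flag in '@=<>':
    value = struct.unpack(flag + 'I', raw)[0]
    # '<' always gives 1234; '@' and '=' match only on little-endian hosts.
    print(flag, value)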
def _read_header(self, ccp4file):
    """Read header bytes"""
    bsaflag = self._detect_byteorder(ccp4file)

    # Parse the top of the header (4-byte words, 1 to 25).
    nheader = struct.calcsize(self._headerfmt)
    names = [r.key for r in self._header_struct]
    bintopheader = ccp4file.read(25 * 4)

    def decode_header(header, bsaflag='@'):
        h = dict(zip(names, struct.unpack(bsaflag + self._headerfmt, header)))
        h['bsaflag'] = bsaflag
        return h

    header = decode_header(bintopheader, bsaflag)
    for rec in self._header_struct:
        if not rec.is_legal_dict(header):
            warnings.warn(
                "Key %s: Illegal value %r" % (rec.key, header[rec.key]))

    # Parse the latter half of the header (4-byte words, 26 to 256).
    if header['lskflg']:
        skewmatrix = np.fromfile(ccp4file, dtype=np.float32, count=9)
        header['skwmat'] = skewmatrix.reshape((3, 3))
        header['skwtrn'] = np.fromfile(ccp4file, dtype=np.float32, count=3)
    else:
        header['skwmat'] = header['skwtrn'] = None
        ccp4file.seek(12 * 4, 1)
    ccp4file.seek(15 * 4, 1)  # Skip future use section.
    ccp4file.seek(4, 1)  # Skip map text, already used above to verify format.
    # TODO: Compare file specified endianness to one obtained above.
    endiancode = struct.unpack(bsaflag + '4b', ccp4file.read(4))
    header['endianness'] = 'little' if endiancode == (0x44, 0x41, 0, 0) else 'big'
    header['arms'] = struct.unpack(bsaflag + 'f', ccp4file.read(4))[0]
    header['nlabl'] = struct.unpack(bsaflag + 'I', ccp4file.read(4))[0]
    if header['nlabl']:
        binlabel = ccp4file.read(80 * header['nlabl'])
        flag = bsaflag + str(80 * header['nlabl']) + 's'
        label = struct.unpack(flag, binlabel)[0]
        header['label'] = label.decode('utf-8').rstrip('\x00')
    else:
        header['label'] = None
    ccp4file.seek(256 * 4)
    # TODO: Parse symmetry records, if any.
    return header
def get_data(self, **kwargs):
    """
    Get the data for a specific device for a specific end date

    Keyword Arguments:
        limit - max 288
        end_date - is Epoch in milliseconds

    :return:
    """
    limit = int(kwargs.get('limit', 288))
    end_date = kwargs.get('end_date', False)

    if end_date and isinstance(end_date, datetime.datetime):
        end_date = self.convert_datetime(end_date)

    if self.mac_address is not None:
        service_address = 'devices/%s' % self.mac_address
        self.api_instance.log('SERVICE ADDRESS: %s' % service_address)

        data = dict(limit=limit)

        # If endDate is left blank (not passed in), the most recent results
        # will be returned.
        if end_date:
            data.update({'endDate': end_date})

        self.api_instance.log('DATA:')
        self.api_instance.log(data)

        return self.api_instance.api_call(service_address, **data)
def get_devices(self):
    """
    Get all devices

    :return: A list of AmbientWeatherStation instances.
    """
    retn = []
    api_devices = self.api_call('devices')

    self.log('DEVICES:')
    self.log(api_devices)

    for device in api_devices:
        retn.append(AmbientWeatherStation(self, device))

    self.log('DEVICE INSTANCE LIST:')
    self.log(retn)

    return retn
def create_url(self, path, params={}, opts={}):
    """
    Create URL with supplied path and `params` parameters dict.

    Parameters
    ----------
    path : str
    params : dict
        Dictionary specifying URL parameters. Non-imgix parameters are
        added to the URL unprocessed. For a complete list of imgix
        supported parameters, visit https://docs.imgix.com/apis/url .
        (default {})
    opts : dict
        Deprecated; use `params` instead. (default {})

    Returns
    -------
    str
        imgix URL
    """
    if opts:
        warnings.warn('`opts` has been deprecated. Use `params` instead.',
                      DeprecationWarning, stacklevel=2)
        params = params or opts

    if self._shard_strategy == SHARD_STRATEGY_CRC:
        crc = zlib.crc32(path.encode('utf-8')) & 0xffffffff
        index = crc % len(self._domains)  # Deterministically choose domain
        domain = self._domains[index]
    elif self._shard_strategy == SHARD_STRATEGY_CYCLE:
        domain = self._domains[self._shard_next_index]
        self._shard_next_index = (
            self._shard_next_index + 1) % len(self._domains)
    else:
        domain = self._domains[0]

    scheme = "https" if self._use_https else "http"
    url_obj = UrlHelper(
        domain,
        path,
        scheme,
        sign_key=self._sign_key,
        include_library_param=self._include_library_param,
        params=params)
    return str(url_obj)
def set_parameter(self, key, value):
    """
    Set a url parameter.

    Parameters
    ----------
    key : str
        If key ends with '64', the value provided will be automatically
        base64 encoded.
    """
    if value is None or isinstance(value, (int, float, bool)):
        value = str(value)

    if key.endswith('64'):
        value = urlsafe_b64encode(value.encode('utf-8'))
        value = value.replace(b('='), b(''))

    self._parameters[key] = value
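To make the '64'-suffix convention above concrete, here is a standard-library-only sketch of the same encoding step (the b() helper in the method above is assumed to come from a six-style compatibility shim):

from base64 import urlsafe_b64encode

def encode_b64_param(value):
    # URL-safe base64 with the '=' padding stripped, as set_parameter does.
    return urlsafe_b64encode(value.encode('utf-8')).replace(b'=', b'')

print(encode_b64_param('Hello, World!'))  # b'SGVsbG8sIFdvcmxkIQ'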
async def rt_connect(self, loop):
    """Start subscription manager for real time data."""
    if self.sub_manager is not None:
        return
    self.sub_manager = SubscriptionManager(
        loop, "token={}".format(self._access_token), SUB_ENDPOINT
    )
    self.sub_manager.start()
async def execute(self, document, variable_values=None):
    """Execute gql."""
    res = await self._execute(document, variable_values)
    if res is None:
        return None
    return res.get("data")
async def _execute(self, document, variable_values=None, retry=2):
    """Execute gql."""
    query_str = print_ast(document)
    payload = {"query": query_str, "variables": variable_values or {}}

    post_args = {
        "headers": {"Authorization": "Bearer " + self._access_token},
        "data": payload,
    }
    try:
        with async_timeout.timeout(self._timeout):
            resp = await self.websession.post(API_ENDPOINT, **post_args)
        if resp.status != 200:
            _LOGGER.error("Error connecting to Tibber, resp code: %s", resp.status)
            return None
        result = await resp.json()
    except aiohttp.ClientError as err:
        _LOGGER.error("Error connecting to Tibber: %s ", err, exc_info=True)
        if retry > 0:
            return await self._execute(document, variable_values, retry - 1)
        raise
    except asyncio.TimeoutError as err:
        _LOGGER.error(
            "Timed out when connecting to Tibber: %s ", err, exc_info=True
        )
        if retry > 0:
            return await self._execute(document, variable_values, retry - 1)
        raise
    errors = result.get("errors")
    if errors:
        _LOGGER.error("Received non-compatible response %s", errors)
    return result
def sync_update_info(self, *_):
    """Update home info."""
    loop = asyncio.get_event_loop()
    task = loop.create_task(self.update_info())
    loop.run_until_complete(task)
async def update_info(self, *_):
    """Update home info async."""
    query = gql(
        """
        {
          viewer {
            name
            homes {
              subscriptions {
                status
              }
              id
            }
          }
        }
        """
    )
    res = await self._execute(query)
    if res is None:
        return
    errors = res.get("errors", [])
    if errors:
        msg = errors[0].get("message", "failed to login")
        _LOGGER.error(msg)
        raise InvalidLogin(msg)

    data = res.get("data")
    if not data:
        return
    viewer = data.get("viewer")
    if not viewer:
        return
    self._name = viewer.get("name")
    homes = viewer.get("homes", [])
    self._home_ids = []
    for _home in homes:
        home_id = _home.get("id")
        self._all_home_ids += [home_id]
        subs = _home.get("subscriptions")
        if subs:
            status = subs[0].get("status", "ended").lower()
            if not home_id or status != "running":
                continue
            self._home_ids += [home_id]
def get_homes(self, only_active=True):
    """Return list of Tibber homes."""
    return [self.get_home(home_id) for home_id in self.get_home_ids(only_active)]
def get_home(self, home_id):
    """Return an instance of TibberHome for given home id."""
    if home_id not in self._all_home_ids:
        _LOGGER.error("Could not find any Tibber home with id: %s", home_id)
        return None
    if home_id not in self._homes.keys():
        self._homes[home_id] = TibberHome(home_id, self)
    return self._homes[home_id]
async def send_notification(self, title, message):
    """Send notification."""
    query = gql(
        """
        mutation{
          sendPushNotification(input: {
            title: "%s",
            message: "%s",
          }){
            successful
            pushedToNumberOfDevices
          }
        }
        """
        % (title, message)
    )
    res = await self.execute(query)
    if not res:
        return False
    noti = res.get("sendPushNotification", {})
    successful = noti.get("successful", False)
    pushed_to_number_of_devices = noti.get("pushedToNumberOfDevices", 0)
    _LOGGER.debug(
        "send_notification: status %s, sent to %s devices",
        successful,
        pushed_to_number_of_devices,
    )
    return successful
async def update_info(self):
    """Update current home info async."""
    query = gql(
        """
        {
          viewer {
            home(id: "%s") {
              appNickname
              features {
                realTimeConsumptionEnabled
              }
              currentSubscription {
                status
              }
              address {
                address1
                address2
                address3
                city
                postalCode
                country
                latitude
                longitude
              }
              meteringPointData {
                consumptionEan
                energyTaxType
                estimatedAnnualConsumption
                gridCompany
                productionEan
                vatType
              }
              owner {
                name
                isCompany
                language
                contactInfo {
                  email
                  mobile
                }
              }
              timeZone
              subscriptions {
                id
                status
                validFrom
                validTo
                statusReason
              }
              currentSubscription {
                priceInfo {
                  current {
                    currency
                  }
                }
              }
            }
          }
        }
        """
        % self._home_id
    )
    self.info = await self._tibber_control.execute(query)
def sync_update_current_price_info(self):
    """Update current price info."""
    loop = asyncio.get_event_loop()
    task = loop.create_task(self.update_current_price_info())
    loop.run_until_complete(task)
async def update_current_price_info(self):
    """Update current price info async."""
    query = gql(
        """
        {
          viewer {
            home(id: "%s") {
              currentSubscription {
                priceInfo {
                  current {
                    energy
                    tax
                    total
                    startsAt
                  }
                }
              }
            }
          }
        }
        """
        % self.home_id
    )
    price_info_temp = await self._tibber_control.execute(query)
    if not price_info_temp:
        _LOGGER.error("Could not find current price info.")
        return
    try:
        home = price_info_temp["viewer"]["home"]
        current_subscription = home["currentSubscription"]
        price_info = current_subscription["priceInfo"]["current"]
    except (KeyError, TypeError):
        _LOGGER.error("Could not find current price info.")
        return
    if price_info:
        self._current_price_info = price_info
def sync_update_price_info(self):
    """Update price info."""
    loop = asyncio.get_event_loop()
    task = loop.create_task(self.update_price_info())
    loop.run_until_complete(task)
async def update_price_info(self):
    """Update price info async."""
    query = gql(
        """
        {
          viewer {
            home(id: "%s") {
              currentSubscription {
                priceInfo {
                  current {
                    energy
                    tax
                    total
                    startsAt
                    level
                  }
                  today {
                    total
                    startsAt
                    level
                  }
                  tomorrow {
                    total
                    startsAt
                    level
                  }
                }
              }
            }
          }
        }
        """
        % self.home_id
    )
    price_info_temp = await self._tibber_control.execute(query)
    if not price_info_temp:
        _LOGGER.error("Could not find price info.")
        return
    self._price_info = {}
    self._level_info = {}
    for key in ["current", "today", "tomorrow"]:
        try:
            home = price_info_temp["viewer"]["home"]
            current_subscription = home["currentSubscription"]
            price_info = current_subscription["priceInfo"][key]
        except (KeyError, TypeError):
            _LOGGER.error("Could not find price info for %s.", key)
            continue
        if key == "current":
            self._current_price_info = price_info
            continue
        for data in price_info:
            self._price_info[data.get("startsAt")] = data.get("total")
            self._level_info[data.get("startsAt")] = data.get("level")
def currency(self):
    """Return the currency."""
    try:
        current_subscription = self.info["viewer"]["home"]["currentSubscription"]
        return current_subscription["priceInfo"]["current"]["currency"]
    except (KeyError, TypeError, IndexError):
        _LOGGER.error("Could not find currency.")
    return ""
def price_unit(self):
    """Return the price unit."""
    currency = self.currency
    consumption_unit = self.consumption_unit
    if not currency or not consumption_unit:
        _LOGGER.error("Could not find price_unit.")
        return " "
    return currency + "/" + consumption_unit
async def rt_subscribe(self, loop, async_callback):
    """Connect to Tibber and subscribe to Tibber rt subscription."""
    if self._subscription_id is not None:
        _LOGGER.error("Already subscribed.")
        return
    await self._tibber_control.rt_connect(loop)
    document = gql(
        """
        subscription{
          liveMeasurement(homeId:"%s"){
            timestamp
            power
            powerProduction
            accumulatedProduction
            accumulatedConsumption
            accumulatedCost
            currency
            minPower
            averagePower
            maxPower
            voltagePhase1
            voltagePhase2
            voltagePhase3
            currentPhase1
            currentPhase2
            currentPhase3
            lastMeterConsumption
            lastMeterProduction
          }
        }
        """
        % self.home_id
    )
    sub_query = print_ast(document)
    self._subscription_id = await self._tibber_control.sub_manager.subscribe(
        sub_query, async_callback
    )
async def rt_unsubscribe(self):
    """Unsubscribe from Tibber rt subscription."""
    if self._subscription_id is None:
        _LOGGER.error("Not subscribed.")
        return
    await self._tibber_control.sub_manager.unsubscribe(self._subscription_id)
def rt_subscription_running(self):
    """Is real time subscription running."""
    return (
        self._tibber_control.sub_manager is not None
        and self._tibber_control.sub_manager.is_running
        and self._subscription_id is not None
    )
async def get_historic_data(self, n_data):
    """Get historic data."""
    query = gql(
        """
        {
          viewer {
            home(id: "%s") {
              consumption(resolution: HOURLY, last: %s) {
                nodes {
                  from
                  totalCost
                  consumption
                }
              }
            }
          }
        }
        """
        % (self.home_id, n_data)
    )
    data = await self._tibber_control.execute(query)
    if not data:
        _LOGGER.error("Could not find the data.")
        return
    data = data["viewer"]["home"]["consumption"]
    if data is None:
        self._data = []
        return
    self._data = data["nodes"]
def sync_get_historic_data(self, n_data):
    """Get historic data (synchronous wrapper around get_historic_data)."""
    loop = asyncio.get_event_loop()
    task = loop.create_task(self.get_historic_data(n_data))
    loop.run_until_complete(task)
    return self._data
def cleanup_none(self):
    """
    Removes the temporary value set for None attributes.
    """
    for (prop, default) in self.defaults.items():
        if getattr(self, prop) == '_None':
            setattr(self, prop, None)
def build_environ(self, sock_file, conn):
    """ Build the execution environment. """
    # Grab the request line
    request = self.read_request_line(sock_file)

    # Copy the Base Environment
    environ = self.base_environ.copy()

    # Grab the headers
    for k, v in self.read_headers(sock_file).items():
        environ[str('HTTP_' + k)] = v

    # Add CGI Variables
    environ['REQUEST_METHOD'] = request['method']
    environ['PATH_INFO'] = request['path']
    environ['SERVER_PROTOCOL'] = request['protocol']
    environ['SERVER_PORT'] = str(conn.server_port)
    environ['REMOTE_PORT'] = str(conn.client_port)
    environ['REMOTE_ADDR'] = str(conn.client_addr)
    environ['QUERY_STRING'] = request['query_string']
    if 'HTTP_CONTENT_LENGTH' in environ:
        environ['CONTENT_LENGTH'] = environ['HTTP_CONTENT_LENGTH']
    if 'HTTP_CONTENT_TYPE' in environ:
        environ['CONTENT_TYPE'] = environ['HTTP_CONTENT_TYPE']

    # Save the request method for later
    self.request_method = environ['REQUEST_METHOD']

    # Add Dynamic WSGI Variables
    if conn.ssl:
        environ['wsgi.url_scheme'] = 'https'
        environ['HTTPS'] = 'on'
    else:
        environ['wsgi.url_scheme'] = 'http'
    if environ.get('HTTP_TRANSFER_ENCODING', '') == 'chunked':
        environ['wsgi.input'] = ChunkedReader(sock_file)
    else:
        environ['wsgi.input'] = sock_file

    return environ
def write(self, data, sections=None):
    """ Write the data to the output socket. """
    if self.error[0]:
        self.status = self.error[0]
        data = b(self.error[1])

    if not self.headers_sent:
        self.send_headers(data, sections)

    if self.request_method != 'HEAD':
        try:
            if self.chunked:
                self.conn.sendall(b('%x\r\n%s\r\n' % (len(data), data)))
            else:
                self.conn.sendall(data)
        except socket.timeout:
            self.closeConnection = True
        except socket.error:
            # But some clients will close the connection before that
            # resulting in a socket error.
            self.closeConnection = True
def start_response(self, status, response_headers, exc_info=None):
    """ Store the HTTP status and headers to be sent when self.write is
    called. """
    if exc_info:
        try:
            if self.headers_sent:
                # Re-raise original exception if headers sent
                # because this violates WSGI specification.
                raise
        finally:
            exc_info = None
    elif self.header_set:
        raise AssertionError("Headers already set!")

    if PY3K and not isinstance(status, str):
        self.status = str(status, 'ISO-8859-1')
    else:
        self.status = status
    # Make sure headers are bytes objects
    try:
        self.header_set = Headers(response_headers)
    except UnicodeDecodeError:
        self.error = ('500 Internal Server Error',
                      'HTTP Headers should be bytes')
        self.err_log.error('Received HTTP Headers from client that contain'
                           ' invalid characters for Latin-1 encoding.')

    return self.write_warning
def CherryPyWSGIServer(bind_addr,
                       wsgi_app,
                       numthreads=10,
                       server_name=None,
                       max=-1,
                       request_queue_size=5,
                       timeout=10,
                       shutdown_timeout=5):
    """ A Cherrypy wsgiserver-compatible wrapper. """
    max_threads = max
    if max_threads < 0:
        max_threads = 0
    return Rocket(bind_addr, 'wsgi', {'wsgi_app': wsgi_app},
                  min_threads=numthreads,
                  max_threads=max_threads,
                  queue_size=request_queue_size,
                  timeout=timeout)
def get_bgp_neighbors(self):
    def generate_vrf_query(vrf_name):
        """
        Helper to provide XML-query for the VRF-type we're interested in.
        """
        if vrf_name == "global":
            rpc_command = '<Get><Operational><BGP><InstanceTable><Instance><Naming>\
<InstanceName>default</InstanceName></Naming><InstanceActive><DefaultVRF>\
<GlobalProcessInfo></GlobalProcessInfo><NeighborTable></NeighborTable></DefaultVRF>\
</InstanceActive></Instance></InstanceTable></BGP></Operational></Get>'
        else:
            rpc_command = '<Get><Operational><BGP><InstanceTable><Instance><Naming>\
<InstanceName>default</InstanceName></Naming><InstanceActive><VRFTable><VRF>\
<Naming>{vrf_name}</Naming><GlobalProcessInfo></GlobalProcessInfo><NeighborTable>\
</NeighborTable></VRF></VRFTable></InstanceActive></Instance></InstanceTable>\
</BGP></Operational></Get>'.format(vrf_name=vrf_name)
        return rpc_command

    # Initial run to figure out what VRFs are available. Decided to get this
    # one from the Configured-section because bulk-getting all instance-data
    # to do the same could get ridiculously heavy.
    # Assuming we're always interested in the DefaultVRF.
    active_vrfs = ["global"]
    rpc_command = '<Get><Operational><BGP><ConfigInstanceTable><ConfigInstance><Naming>\
<InstanceName>default</InstanceName></Naming><ConfigInstanceVRFTable>\
</ConfigInstanceVRFTable></ConfigInstance></ConfigInstanceTable></BGP></Operational></Get>'
    result_tree = ETREE.fromstring(self.device.make_rpc_call(rpc_command))
    for node in result_tree.xpath('.//ConfigVRF'):
        active_vrfs.append(napalm_base.helpers.find_txt(node, 'Naming/VRFName'))

    result = {}
    for vrf in active_vrfs:
        rpc_command = generate_vrf_query(vrf)
        result_tree = ETREE.fromstring(self.device.make_rpc_call(rpc_command))

        this_vrf = {}
        this_vrf['peers'] = {}

        if vrf == "global":
            this_vrf['router_id'] = napalm_base.helpers.convert(
                text_type, napalm_base.helpers.find_txt(
                    result_tree,
                    'Get/Operational/BGP/InstanceTable/Instance/InstanceActive/DefaultVRF'
                    '/GlobalProcessInfo/VRF/RouterID'))
        else:
            this_vrf['router_id'] = napalm_base.helpers.convert(
                text_type, napalm_base.helpers.find_txt(
                    result_tree,
                    'Get/Operational/BGP/InstanceTable/Instance/InstanceActive/VRFTable/VRF'
                    '/GlobalProcessInfo/VRF/RouterID'))

        neighbors = {}
        for neighbor in result_tree.xpath('.//Neighbor'):
            this_neighbor = {}
            this_neighbor['local_as'] = napalm_base.helpers.convert(
                int, napalm_base.helpers.find_txt(neighbor, 'LocalAS'))
            this_neighbor['remote_as'] = napalm_base.helpers.convert(
                int, napalm_base.helpers.find_txt(neighbor, 'RemoteAS'))
            this_neighbor['remote_id'] = napalm_base.helpers.convert(
                text_type, napalm_base.helpers.find_txt(neighbor, 'RouterID'))

            try:
                this_neighbor['description'] = napalm_base.helpers.convert(
                    text_type, napalm_base.helpers.find_txt(neighbor, 'Description'))
            except AttributeError:
                this_neighbor['description'] = u''

            # Compare with == (the original used "is" on a string literal,
            # which is an identity test, and assigned is_enabled three times).
            this_neighbor['is_enabled'] = (
                napalm_base.helpers.find_txt(neighbor, 'ConnectionAdminStatus') == "1")

            if str(napalm_base.helpers.find_txt(neighbor, 'ConnectionState')) == "BGP_ST_ESTAB":
                this_neighbor['is_up'] = True
                this_neighbor['uptime'] = napalm_base.helpers.convert(
                    int, napalm_base.helpers.find_txt(neighbor, 'ConnectionEstablishedTime'))
            else:
                this_neighbor['is_up'] = False
                this_neighbor['uptime'] = -1

            this_neighbor['address_family'] = {}
            if napalm_base.helpers.find_txt(neighbor, 'ConnectionRemoteAddress/AFI') == "IPv4":
                this_afi = "ipv4"
            elif napalm_base.helpers.find_txt(neighbor, 'ConnectionRemoteAddress/AFI') == "IPv6":
                this_afi = "ipv6"
            else:
                this_afi = napalm_base.helpers.find_txt(neighbor, 'ConnectionRemoteAddress/AFI')

            this_neighbor['address_family'][this_afi] = {}

            try:
                this_neighbor['address_family'][this_afi]["received_prefixes"] = \
                    napalm_base.helpers.convert(int, napalm_base.helpers.find_txt(
                        neighbor, 'AFData/Entry/PrefixesAccepted'), 0) + \
                    napalm_base.helpers.convert(int, napalm_base.helpers.find_txt(
                        neighbor, 'AFData/Entry/PrefixesDenied'), 0)
                this_neighbor['address_family'][this_afi]["accepted_prefixes"] = \
                    napalm_base.helpers.convert(int, napalm_base.helpers.find_txt(
                        neighbor, 'AFData/Entry/PrefixesAccepted'), 0)
                this_neighbor['address_family'][this_afi]["sent_prefixes"] = \
                    napalm_base.helpers.convert(int, napalm_base.helpers.find_txt(
                        neighbor, 'AFData/Entry/PrefixesAdvertised'), 0)
            except AttributeError:
                this_neighbor['address_family'][this_afi]["received_prefixes"] = -1
                this_neighbor['address_family'][this_afi]["accepted_prefixes"] = -1
                this_neighbor['address_family'][this_afi]["sent_prefixes"] = -1

            neighbor_ip = napalm_base.helpers.ip(
                napalm_base.helpers.find_txt(
                    neighbor, 'Naming/NeighborAddress/IPV4Address')
                or napalm_base.helpers.find_txt(
                    neighbor, 'Naming/NeighborAddress/IPV6Address')
            )
            neighbors[neighbor_ip] = this_neighbor

        this_vrf['peers'] = neighbors
        result[vrf] = this_vrf
    return result
def aggregate(l):
    """Aggregate a `list` of prefixes.

    Keyword arguments:
    l -- a python list of prefixes

    Example use:
    >>> aggregate(["10.0.0.0/8", "10.0.0.0/24"])
    ['10.0.0.0/8']
    """
    tree = radix.Radix()
    for item in l:
        try:
            tree.add(item)
        except ValueError:
            raise Exception("ERROR: invalid IP prefix: {}".format(item))

    return aggregate_tree(tree).prefixes()
def aggregate_tree(l_tree):
    """Walk a py-radix tree and aggregate it.

    Arguments
    l_tree -- radix.Radix() object
    """

    def _aggregate_phase1(tree):
        # phase1 removes any supplied prefixes which are superfluous because
        # they are already included in another supplied prefix. For example,
        # 2001:67c:208c:10::/64 would be removed if 2001:67c:208c::/48 was
        # also supplied.
        n_tree = radix.Radix()
        for prefix in tree.prefixes():
            if tree.search_worst(prefix).prefix == prefix:
                n_tree.add(prefix)
        return n_tree

    def _aggregate_phase2(tree):
        # phase2 identifies adjacent prefixes that can be combined under a
        # single, shorter-length prefix. For example, 2001:67c:208c::/48 and
        # 2001:67c:208d::/48 can be combined into the single prefix
        # 2001:67c:208c::/47.
        n_tree = radix.Radix()
        for rnode in tree:
            p = text(ip_network(text(rnode.prefix)).supernet())
            r = tree.search_covered(p)
            if len(r) == 2:
                if r[0].prefixlen == r[1].prefixlen == rnode.prefixlen:
                    n_tree.add(p)
                else:
                    n_tree.add(rnode.prefix)
            else:
                n_tree.add(rnode.prefix)
        return n_tree

    l_tree = _aggregate_phase1(l_tree)

    if len(l_tree.prefixes()) == 1:
        return l_tree

    while True:
        r_tree = _aggregate_phase2(l_tree)
        if l_tree.prefixes() == r_tree.prefixes():
            break
        else:
            l_tree = r_tree
            del r_tree

    return l_tree
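The phase-2 adjacency test can be demonstrated with just the standard library: two prefixes merge exactly when they are the two halves of a common supernet. A minimal sketch (no py-radix needed, illustrative helper name):

from ipaddress import ip_network

def can_merge(a, b):
    # Siblings of equal length under the same one-bit-shorter supernet merge.
    a, b = ip_network(a), ip_network(b)
    return a.prefixlen == b.prefixlen and a.supernet() == b.supernet() and a != b

print(can_merge("2001:67c:208c::/48", "2001:67c:208d::/48"))  # True  -> /47
print(can_merge("2001:67c:208d::/48", "2001:67c:208e::/48"))  # False -> different supernets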
def _ordinal_metric(_v1, _v2, i1, i2, n_v):
    """Metric for ordinal data."""
    if i1 > i2:
        i1, i2 = i2, i1
    return (np.sum(n_v[i1:(i2 + 1)]) - (n_v[i1] + n_v[i2]) / 2) ** 2
def _ratio_metric(v1, v2, **_kwargs):
    """Metric for ratio data."""
    return (((v1 - v2) / (v1 + v2)) ** 2) if v1 + v2 != 0 else 0
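A quick numeric check of the ratio metric above (assuming Python 3 division):

print(_ratio_metric(1, 3))   # ((1-3)/(1+3))^2 = 0.25
print(_ratio_metric(2, 2))   # 0.0 -- identical values have zero distance
print(_ratio_metric(0, 0))   # 0   -- the guard avoids division by zero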
def _coincidences(value_counts, value_domain, dtype=np.float64):
    """Coincidence matrix.

    Parameters
    ----------
    value_counts : ndarray, with shape (N, V)
        Number of coders that assigned a certain value to a determined
        unit, where N is the number of units and V is the value count.

    value_domain : array_like, with shape (V,)
        Possible values V the units can take.
        If the level of measurement is not nominal, it must be ordered.

    dtype : data-type
        Result and computation data-type.

    Returns
    -------
    o : ndarray, with shape (V, V)
        Coincidence matrix.
    """
    value_counts_matrices = value_counts.reshape(value_counts.shape + (1,))
    pairable = np.maximum(np.sum(value_counts, axis=1), 2)
    diagonals = np.tile(np.eye(len(value_domain)), (len(value_counts), 1, 1)) \
        * value_counts.reshape((value_counts.shape[0], 1, value_counts.shape[1]))
    unnormalized_coincidences = (value_counts_matrices
                                 * value_counts_matrices.transpose((0, 2, 1))
                                 - diagonals)
    return np.sum(np.divide(unnormalized_coincidences,
                            (pairable - 1).reshape((-1, 1, 1)), dtype=dtype),
                  axis=0)
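To see what a single unit contributes to the coincidence matrix, here is a hand-rolled equivalent of the vectorized computation for one unit with toy counts [2, 1, 0] (two coders chose the first value, one chose the second):

import numpy as np

n_u = np.array([2, 1, 0])                      # per-value counts for one unit
m_u = n_u.sum()                                # number of pairable ratings
contrib = (np.outer(n_u, n_u) - np.diag(n_u)) / (m_u - 1)
print(contrib)
# [[1. 1. 0.]
#  [1. 0. 0.]
#  [0. 0. 0.]]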
def _random_coincidences(value_domain, n, n_v):
    """Random coincidence matrix.

    Parameters
    ----------
    value_domain : array_like, with shape (V,)
        Possible values V the units can take.
        If the level of measurement is not nominal, it must be ordered.

    n : scalar
        Number of pairable values.

    n_v : ndarray, with shape (V,)
        Number of pairable elements for each value.

    Returns
    -------
    e : ndarray, with shape (V, V)
        Random coincidence matrix.
    """
    n_v_column = n_v.reshape(-1, 1)
    return (n_v_column.dot(n_v_column.T)
            - np.eye(len(value_domain)) * n_v_column) / (n - 1)
def _distances(value_domain, distance_metric, n_v):
    """Distances of the different possible values.

    Parameters
    ----------
    value_domain : array_like, with shape (V,)
        Possible values V the units can take.
        If the level of measurement is not nominal, it must be ordered.

    distance_metric : callable
        Callable that return the distance of two given values.

    n_v : ndarray, with shape (V,)
        Number of pairable elements for each value.

    Returns
    -------
    d : ndarray, with shape (V, V)
        Distance matrix for each value pair.
    """
    return np.array([[distance_metric(v1, v2, i1=i1, i2=i2, n_v=n_v)
                      for i2, v2 in enumerate(value_domain)]
                     for i1, v1 in enumerate(value_domain)])
def _reliability_data_to_value_counts(reliability_data, value_domain):
    """Return the value counts given the reliability data.

    Parameters
    ----------
    reliability_data : ndarray, with shape (M, N)
        Reliability data matrix which has the rate the i coder gave to
        the j unit, where M is the number of raters and N is the unit
        count. Missing rates are represented with `np.nan`.

    value_domain : array_like, with shape (V,)
        Possible values the units can take.

    Returns
    -------
    value_counts : ndarray, with shape (N, V)
        Number of coders that assigned a certain value to a determined
        unit, where N is the number of units and V is the value count.
    """
    return np.array([[sum(1 for rate in unit if rate == v)
                      for v in value_domain]
                     for unit in reliability_data.T])
def alpha(reliability_data=None, value_counts=None, value_domain=None,
          level_of_measurement='interval', dtype=np.float64):
    """Compute Krippendorff's alpha.

    See https://en.wikipedia.org/wiki/Krippendorff%27s_alpha for more information.

    Parameters
    ----------
    reliability_data : array_like, with shape (M, N)
        Reliability data matrix which has the rate the i coder gave to
        the j unit, where M is the number of raters and N is the unit
        count. Missing rates are represented with `np.nan`.
        If it's provided then `value_counts` must not be provided.

    value_counts : ndarray, with shape (N, V)
        Number of coders that assigned a certain value to a determined
        unit, where N is the number of units and V is the value count.
        If it's provided then `reliability_data` must not be provided.

    value_domain : array_like, with shape (V,)
        Possible values the units can take.
        If the level of measurement is not nominal, it must be ordered.
        If `reliability_data` is provided, then the default value is the
        ordered list of unique rates that appear.
        Else, the default value is `list(range(V))`.

    level_of_measurement : string or callable
        Steven's level of measurement of the variable.
        It must be one of 'nominal', 'ordinal', 'interval', 'ratio' or a
        callable.

    dtype : data-type
        Result and computation data-type.

    Returns
    -------
    alpha : `dtype`
        Scalar value of Krippendorff's alpha of type `dtype`.

    Examples
    --------
    >>> reliability_data = [[np.nan, np.nan, np.nan, np.nan, np.nan, 3, 4, 1, 2, 1, 1, 3, 3, np.nan, 3],
    ...                     [1, np.nan, 2, 1, 3, 3, 4, 3, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
    ...                     [np.nan, np.nan, 2, 1, 3, 4, 4, np.nan, 2, 1, 1, 3, 3, np.nan, 4]]
    >>> print(round(alpha(reliability_data=reliability_data, level_of_measurement='nominal'), 6))
    0.691358
    >>> print(round(alpha(reliability_data=reliability_data, level_of_measurement='interval'), 6))
    0.810845
    >>> value_counts = np.array([[1, 0, 0, 0],
    ...                          [0, 0, 0, 0],
    ...                          [0, 2, 0, 0],
    ...                          [2, 0, 0, 0],
    ...                          [0, 0, 2, 0],
    ...                          [0, 0, 2, 1],
    ...                          [0, 0, 0, 3],
    ...                          [1, 0, 1, 0],
    ...                          [0, 2, 0, 0],
    ...                          [2, 0, 0, 0],
    ...                          [2, 0, 0, 0],
    ...                          [0, 0, 2, 0],
    ...                          [0, 0, 2, 0],
    ...                          [0, 0, 0, 0],
    ...                          [0, 0, 1, 1]])
    >>> print(round(alpha(value_counts=value_counts, level_of_measurement='nominal'), 6))
    0.691358
    >>> # The following examples were extracted from
    >>> # https://www.statisticshowto.datasciencecentral.com/wp-content/uploads/2016/07/fulltext.pdf, page 8.
    >>> reliability_data = [[1, 2, 3, 3, 2, 1, 4, 1, 2, np.nan, np.nan, np.nan],
    ...                     [1, 2, 3, 3, 2, 2, 4, 1, 2, 5, np.nan, 3.],
    ...                     [np.nan, 3, 3, 3, 2, 3, 4, 2, 2, 5, 1, np.nan],
    ...                     [1, 2, 3, 3, 2, 4, 4, 1, 2, 5, 1, np.nan]]
    >>> print(round(alpha(reliability_data, level_of_measurement='ordinal'), 3))
    0.815
    >>> print(round(alpha(reliability_data, level_of_measurement='ratio'), 3))
    0.797
    """
    if (reliability_data is None) == (value_counts is None):
        raise ValueError("Either reliability_data or value_counts must be provided, but not both.")

    # Don't know if it's a list or numpy array. If it's the latter, the
    # truth value is ambiguous. So, ask for None.
    if value_counts is None:
        if type(reliability_data) is not np.ndarray:
            reliability_data = np.array(reliability_data)

        value_domain = value_domain or np.unique(reliability_data[~np.isnan(reliability_data)])

        value_counts = _reliability_data_to_value_counts(reliability_data, value_domain)
    else:  # elif reliability_data is None
        if value_domain:
            assert value_counts.shape[1] == len(value_domain), \
                "The value domain should be equal to the number of columns of value_counts."
        else:
            value_domain = tuple(range(value_counts.shape[1]))

    distance_metric = _distance_metric(level_of_measurement)

    o = _coincidences(value_counts, value_domain, dtype=dtype)
    n_v = np.sum(o, axis=0)
    n = np.sum(n_v)
    e = _random_coincidences(value_domain, n, n_v)
    d = _distances(value_domain, distance_metric, n_v)
    return 1 - np.sum(o * d) / np.sum(e * d)
def inquire(self):
    """Maps to fortran CDF_Inquire.

    Assigns parameters returned by CDF_Inquire to pysatCDF instance.
    Not intended for regular direct use by user.
    """
    name = copy.deepcopy(self.fname)
    stats = fortran_cdf.inquire(name)

    # break out fortran output into something meaningful
    status = stats[0]
    if status == 0:
        self._num_dims = stats[1]
        self._dim_sizes = stats[2]
        self._encoding = stats[3]
        self._majority = stats[4]
        self._max_rec = stats[5]
        self._num_r_vars = stats[6]
        self._num_z_vars = stats[7]
        self._num_attrs = stats[8]
    else:
        raise IOError(fortran_cdf.statusreporter(status))
def _read_all_z_variable_info(self):
    """Gets all CDF z-variable information, not data though.

    Maps to calls using var_inquire. Gets information on
    data type, number of elements, number of dimensions, etc.
    """
    self.z_variable_info = {}
    self.z_variable_names_by_num = {}

    # call Fortran that grabs all of the basic stats on all of the
    # zVariables in one go.
    info = fortran_cdf.z_var_all_inquire(self.fname, self._num_z_vars,
                                         len(self.fname))
    status = info[0]
    data_types = info[1]
    num_elems = info[2]
    rec_varys = info[3]
    dim_varys = info[4]
    num_dims = info[5]
    dim_sizes = info[6]
    rec_nums = info[7]
    var_nums = info[8]
    var_names = info[9]

    if status == 0:
        for i in np.arange(len(data_types)):
            out = {}
            out['data_type'] = data_types[i]
            out['num_elems'] = num_elems[i]
            out['rec_vary'] = rec_varys[i]
            out['dim_varys'] = dim_varys[i]
            out['num_dims'] = num_dims[i]
            # only looking at first possible extra dimension
            out['dim_sizes'] = dim_sizes[i, :1]
            if out['dim_sizes'][0] == 0:
                out['dim_sizes'][0] += 1
            out['rec_num'] = rec_nums[i]
            out['var_num'] = var_nums[i]
            var_name = ''.join(var_names[i].astype('U'))
            out['var_name'] = var_name.rstrip()
            self.z_variable_info[out['var_name']] = out
            self.z_variable_names_by_num[out['var_num']] = var_name
    else:
        raise IOError(fortran_cdf.statusreporter(status))
def load_all_variables(self):
    """Loads all variables from CDF.

    Note this routine is called automatically upon instantiation.
    """
    self.data = {}
    # need to add r variable names
    file_var_names = self.z_variable_info.keys()

    # collect variable information for each
    # organize it neatly for fortran call
    dim_sizes = []
    rec_nums = []
    data_types = []
    names = []
    for i, name in enumerate(file_var_names):
        dim_sizes.extend(self.z_variable_info[name]['dim_sizes'])
        rec_nums.append(self.z_variable_info[name]['rec_num'])
        data_types.append(self.z_variable_info[name]['data_type'])
        names.append(name.ljust(256))
    dim_sizes = np.array(dim_sizes)
    rec_nums = np.array(rec_nums)
    data_types = np.array(data_types)

    # individually load all variables by each data type
    self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,
                               self.cdf_data_types['real4'],
                               fortran_cdf.get_multi_z_real4)
    self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,
                               self.cdf_data_types['float'],
                               fortran_cdf.get_multi_z_real4)
    self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,
                               self.cdf_data_types['real8'],
                               fortran_cdf.get_multi_z_real8)
    self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,
                               self.cdf_data_types['double'],
                               fortran_cdf.get_multi_z_real8)
    self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,
                               self.cdf_data_types['int4'],
                               fortran_cdf.get_multi_z_int4)
    self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,
                               self.cdf_data_types['uint4'],
                               fortran_cdf.get_multi_z_int4,
                               data_offset=2 ** 32)
    self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,
                               self.cdf_data_types['int2'],
                               fortran_cdf.get_multi_z_int2)
    self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,
                               self.cdf_data_types['uint2'],
                               fortran_cdf.get_multi_z_int2,
                               data_offset=2 ** 16)
    self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,
                               self.cdf_data_types['int1'],
                               fortran_cdf.get_multi_z_int1)
    self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,
                               self.cdf_data_types['uint1'],
                               fortran_cdf.get_multi_z_int1,
                               data_offset=2 ** 8)
    self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,
                               self.cdf_data_types['byte'],
                               fortran_cdf.get_multi_z_int1)
    self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,
                               self.cdf_data_types['epoch'],
                               fortran_cdf.get_multi_z_real8,
                               epoch=True)
    self._call_multi_fortran_z(names, data_types, rec_nums, 2 * dim_sizes,
                               self.cdf_data_types['epoch16'],
                               fortran_cdf.get_multi_z_epoch16,
                               epoch16=True)
    self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,
                               self.cdf_data_types['TT2000'],
                               fortran_cdf.get_multi_z_tt2000,
                               epoch=True)

    # mark data has been loaded
    self.data_loaded = True
def _call_multi_fortran_z(self, names, data_types, rec_nums,
                          dim_sizes, input_type_code, func,
                          epoch=False, data_offset=None, epoch16=False):
    """Calls fortran functions to load CDF variable data

    Parameters
    ----------
    names : list_like
        list of variables names
    data_types : list_like
        list of all loaded data type codes as used by CDF
    rec_nums : list_like
        list of record numbers in CDF file. Provided by variable_info
    dim_sizes :
        list of dimensions as provided by variable_info.
    input_type_code : int
        Specific type code to load
    func : function
        Fortran function via python interface that will be used for
        actual loading.
    epoch : bool
        Flag indicating type is epoch. Translates things to datetime
        standard.
    data_offset :
        Offset value to be applied to data. Required for unsigned
        integers in CDF.
    epoch16 : bool
        Flag indicating type is epoch16. Translates things to datetime
        standard.
    """
    # isolate input type code variables from total supplied types
    idx, = np.where(data_types == input_type_code)

    if len(idx) > 0:
        # read all data of a given type at once
        max_rec = rec_nums[idx].max()
        sub_names = np.array(names)[idx]
        sub_sizes = dim_sizes[idx]
        status, data = func(self.fname, sub_names.tolist(), sub_sizes,
                            sub_sizes.sum(), max_rec, len(sub_names))
        if status == 0:
            # account for quirks of CDF data storage for certain types
            if data_offset is not None:
                data = data.astype(int)
                idx, idy, = np.where(data < 0)
                data[idx, idy] += data_offset
            if epoch:
                # account for difference in seconds between
                # CDF epoch and python's epoch, leap year in there
                # (datetime(1971,1,2) -
                #  datetime(1,1,1)).total_seconds()*1000
                data -= 62167219200000
                data = data.astype('<M8[ms]')
            if epoch16:
                data[0::2, :] -= 62167219200
                data = data[0::2, :] * 1E9 + data[1::2, :] / 1.E3
                data = data.astype('datetime64[ns]')
                sub_sizes /= 2
            # all data of a type has been loaded and tweaked as necessary
            # parse through returned array to break out the individual
            # variables as appropriate
            self._process_return_multi_z(data, sub_names, sub_sizes)
        else:
            raise IOError(fortran_cdf.statusreporter(status))
def _process_return_multi_z(self, data, names, dim_sizes):
    """process and attach data from fortran_cdf.get_multi_*"""
    # process data
    d1 = 0
    d2 = 0
    for name, dim_size in zip(names, dim_sizes):
        d2 = d1 + dim_size
        if dim_size == 1:
            self.data[name.rstrip()] = data[d1, :]
        else:
            self.data[name.rstrip()] = data[d1:d2, :]
        d1 += dim_size
def _read_all_attribute_info(self):
    """Read all attribute properties, g, r, and z attributes"""
    num = copy.deepcopy(self._num_attrs)
    fname = copy.deepcopy(self.fname)
    out = fortran_cdf.inquire_all_attr(fname, num, len(fname))
    status = out[0]
    names = out[1].astype('U')
    scopes = out[2]
    max_gentries = out[3]
    max_rentries = out[4]
    max_zentries = out[5]
    attr_nums = out[6]

    global_attrs_info = {}
    var_attrs_info = {}
    if status == 0:
        for name, scope, gentry, rentry, zentry, num in zip(names, scopes,
                                                            max_gentries,
                                                            max_rentries,
                                                            max_zentries,
                                                            attr_nums):
            name = ''.join(name)
            name = name.rstrip()
            nug = {}
            nug['scope'] = scope
            nug['max_gentry'] = gentry
            nug['max_rentry'] = rentry
            nug['max_zentry'] = zentry
            nug['attr_num'] = num
            flag = (gentry == 0) & (rentry == 0) & (zentry == 0)
            if not flag:
                if scope == 1:
                    global_attrs_info[name] = nug
                elif scope == 2:
                    var_attrs_info[name] = nug

        self.global_attrs_info = global_attrs_info
        self.var_attrs_info = var_attrs_info
    else:
        raise IOError(fortran_cdf.statusreporter(status))
def _read_all_z_attribute_data(self):
    """Read all CDF z-attribute data"""
    self.meta = {}
    # collect attribute info needed to get more info from
    # fortran routines
    max_entries = []
    attr_nums = []
    names = []
    attr_names = []
    names = self.var_attrs_info.keys()
    num_z_attrs = len(names)
    exp_attr_nums = []
    for key in names:
        max_entries.append(self.var_attrs_info[key]['max_zentry'])
        attr_nums.append(self.var_attrs_info[key]['attr_num'])
    attr_nums = np.array(attr_nums)
    max_entries = np.array(max_entries)

    info = fortran_cdf.z_attr_all_inquire(self.fname, attr_nums,
                                          num_z_attrs, max_entries,
                                          self._num_z_vars, len(self.fname))
    status = info[0]
    data_types = info[1]
    num_elems = info[2]
    entry_nums = info[3]

    if status == 0:
        for i, name in enumerate(names):
            self.var_attrs_info[name]['data_type'] = data_types[i]
            self.var_attrs_info[name]['num_elems'] = num_elems[i]
            self.var_attrs_info[name]['entry_num'] = entry_nums[i]
            exp_attr_nums.extend(
                [self.var_attrs_info[name]['attr_num']] * len(entry_nums[i]))
            attr_names.extend([name] * len(entry_nums[i]))
    else:
        raise IOError(fortran_cdf.statusreporter(status))

    # all the info is now packed up
    # need to break it out to make it easier to load via fortran
    # all of this junk
    # attribute id, entry id (zVariable ID), data_type, num_elems
    # should just need to flatten this stuff
    data_types = data_types.flatten()
    num_elems = num_elems.flatten()
    entry_nums = entry_nums.flatten()
    attr_nums = np.array(exp_attr_nums)

    # drop everything that isn't valid
    idx, = np.where(entry_nums > 0)
    data_types = data_types[idx]
    num_elems = num_elems[idx]
    entry_nums = entry_nums[idx]
    attr_nums = attr_nums[idx]
    attr_names = np.array(attr_names)[idx]

    # grab corresponding variable name for each attribute
    var_names = [self.z_variable_names_by_num[i].rstrip() for i in entry_nums]

    # the names that go along with this are already set up in attr_names
    # chunk by data type, grab largest num_elems
    # get data back, shorten to num_elems, add to structure
    self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,
                                    entry_nums, attr_nums, var_names,
                                    self.cdf_data_types['real4'],
                                    fortran_cdf.get_multi_z_attr_real4)
    self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,
                                    entry_nums, attr_nums, var_names,
                                    self.cdf_data_types['float'],
                                    fortran_cdf.get_multi_z_attr_real4)
    self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,
                                    entry_nums, attr_nums, var_names,
                                    self.cdf_data_types['real8'],
                                    fortran_cdf.get_multi_z_attr_real8)
    self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,
                                    entry_nums, attr_nums, var_names,
                                    self.cdf_data_types['double'],
                                    fortran_cdf.get_multi_z_attr_real8)
    self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,
                                    entry_nums, attr_nums, var_names,
                                    self.cdf_data_types['byte'],
                                    fortran_cdf.get_multi_z_attr_int1)
    self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,
                                    entry_nums, attr_nums, var_names,
                                    self.cdf_data_types['int1'],
                                    fortran_cdf.get_multi_z_attr_int1)
    self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,
                                    entry_nums, attr_nums, var_names,
                                    self.cdf_data_types['uint1'],
                                    fortran_cdf.get_multi_z_attr_int1,
                                    data_offset=256)
    self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,
                                    entry_nums, attr_nums, var_names,
                                    self.cdf_data_types['int2'],
                                    fortran_cdf.get_multi_z_attr_int2)
    self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,
                                    entry_nums, attr_nums, var_names,
                                    self.cdf_data_types['uint2'],
                                    fortran_cdf.get_multi_z_attr_int2,
                                    data_offset=65536)
    self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,
                                    entry_nums, attr_nums, var_names,
                                    self.cdf_data_types['int4'],
                                    fortran_cdf.get_multi_z_attr_int4)
    self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,
                                    entry_nums, attr_nums, var_names,
                                    self.cdf_data_types['uint4'],
                                    fortran_cdf.get_multi_z_attr_int4,
                                    data_offset=2 ** 32)
    self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,
                                    entry_nums, attr_nums, var_names,
                                    self.cdf_data_types['char'],
                                    fortran_cdf.get_multi_z_attr_char)
    self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,
                                    entry_nums, attr_nums, var_names,
                                    self.cdf_data_types['uchar'],
                                    fortran_cdf.get_multi_z_attr_char)
def _call_multi_fortran_z_attr(self, names, data_types, num_elems,
                               entry_nums, attr_nums, var_names,
                               input_type_code, func, data_offset=None):
    """Calls Fortran function that reads attribute data.

    data_offset translates unsigned into signed.
    If number read in is negative, offset added.
    """
    # isolate input type code variables
    idx, = np.where(data_types == input_type_code)

    if len(idx) > 0:
        # maximum array dimension
        max_num = num_elems[idx].max()
        sub_num_elems = num_elems[idx]
        sub_names = np.array(names)[idx]
        sub_var_names = np.array(var_names)[idx]
        # zVariable numbers, 'entry' number
        sub_entry_nums = entry_nums[idx]
        # attribute number
        sub_attr_nums = attr_nums[idx]
        status, data = func(self.fname, sub_attr_nums, sub_entry_nums,
                            len(sub_attr_nums), max_num, len(self.fname))
        if (status == 0).all():
            if data_offset is not None:
                data = data.astype(int)
                idx, idy, = np.where(data < 0)
                data[idx, idy] += data_offset
            self._process_return_multi_z_attr(data, sub_names,
                                              sub_var_names, sub_num_elems)
        else:
            # raise ValueError('CDF Error code :', status)
            idx, = np.where(status != 0)
            # raise first error
            raise IOError(fortran_cdf.statusreporter(status[idx][0]))
def _process_return_multi_z_attr(self, data, attr_names, var_names,
                                 sub_num_elems):
    '''process and attach data from fortran_cdf.get_multi_*'''
    # process data
    for i, (attr_name, var_name, num_e) in enumerate(zip(attr_names,
                                                         var_names,
                                                         sub_num_elems)):
        if var_name not in self.meta.keys():
            self.meta[var_name] = {}
        if num_e == 1:
            self.meta[var_name][attr_name] = data[i, 0]
        else:
            if data[i].dtype == '|S1':
                self.meta[var_name][attr_name] = \
                    ''.join(data[i, 0:num_e].astype('U')).rstrip()
            else:
                self.meta[var_name][attr_name] = data[i, 0:num_e]
def to_pysat(self, flatten_twod=True, units_label='UNITS',
             name_label='long_name', fill_label='FILLVAL',
             plot_label='FieldNam', min_label='ValidMin',
             max_label='ValidMax', notes_label='Var_Notes',
             desc_label='CatDesc', axis_label='LablAxis'):
    """
    Exports loaded CDF data into data, meta for pysat module

    Notes
    -----
    The *_labels should be set to the values in the file, if present.
    Note that once the meta object returned from this function is
    attached to a pysat.Instrument object then the *_labels on the
    Instrument are assigned to the newly attached Meta object.

    The pysat Meta object will use data with labels that match the
    patterns in *_labels even if the case does not match.

    Parameters
    ----------
    flatten_twod : bool (True)
        If True, then two dimensional data is flattened across columns.
        Name mangling is used to group data, first column is 'name',
        last column is 'name_end'. In between numbers are appended
        'name_1', 'name_2', etc. All data for a given 2D array may be
        accessed via, data.ix[:,'item':'item_end']
        If False, then 2D data is stored as a series of DataFrames,
        indexed by Epoch. data.ix[0, 'item']
    units_label : str
        Identifier within metadata for units. Defaults to CDAWeb standard.
    name_label : str
        Identifier within metadata for variable name. Defaults to
        'long_name', not normally present within CDAWeb files. If not,
        will use values from the variable name in the file.
    fill_label : str
        Identifier within metadata for Fill Values. Defaults to CDAWeb
        standard.
    plot_label : str
        Identifier within metadata for variable name used when plotting.
        Defaults to CDAWeb standard.
    min_label : str
        Identifier within metadata for minimum variable value.
        Defaults to CDAWeb standard.
    max_label : str
        Identifier within metadata for maximum variable value.
        Defaults to CDAWeb standard.
    notes_label : str
        Identifier within metadata for notes. Defaults to CDAWeb standard.
    desc_label : str
        Identifier within metadata for a variable description.
        Defaults to CDAWeb standard.
    axis_label : str
        Identifier within metadata for axis name used when plotting.
        Defaults to CDAWeb standard.

    Returns
    -------
    pandas.DataFrame, pysat.Meta
        Data and Metadata suitable for attachment to a pysat.Instrument
        object.
    """
    import string
    import pysat
    import pandas

    # copy data
    cdata = self.data.copy()

    # create pysat.Meta object using data above
    # and utilizing the attribute labels provided by the user
    meta = pysat.Meta(pysat.DataFrame.from_dict(self.meta, orient='index'),
                      units_label=units_label, name_label=name_label,
                      fill_label=fill_label, plot_label=plot_label,
                      min_label=min_label, max_label=max_label,
                      notes_label=notes_label, desc_label=desc_label,
                      axis_label=axis_label)

    # account for different possible cases for Epoch, epoch, EPOCH, epOch
    lower_names = [name.lower() for name in meta.keys()]
    for name, true_name in zip(lower_names, meta.keys()):
        if name == 'epoch':
            meta.data.rename(index={true_name: 'Epoch'}, inplace=True)
            epoch = cdata.pop(true_name)
            cdata['Epoch'] = epoch

    # ready to format data, iterate over all of the data names
    # and put into a pandas DataFrame
    two_d_data = []
    drop_list = []
    for name in cdata.keys():
        temp = np.shape(cdata[name])
        # treat 2 dimensional data differently
        if len(temp) == 2:
            if not flatten_twod:
                # put 2D data into a Frame at each time
                # remove data from dict when adding to the DataFrame
                frame = pysat.DataFrame(cdata[name].flatten(),
                                        columns=[name])
                drop_list.append(name)

                step = temp[0]
                new_list = []
                new_index = np.arange(step)
                for i in np.arange(len(epoch)):
                    new_list.append(frame.iloc[i * step:(i + 1) * step, :])
                    new_list[-1].index = new_index
                # new_frame = pandas.DataFrame.from_records(new_list, index=epoch, columns=[name])
                new_frame = pandas.Series(new_list, index=epoch, name=name)
                two_d_data.append(new_frame)
            else:
                # flatten 2D into series of 1D columns
                new_names = [name + '_{i}'.format(i=i)
                             for i in np.arange(temp[0] - 2)]
                new_names.append(name + '_end')
                new_names.insert(0, name)
                # remove data from dict when adding to the DataFrame
                drop_list.append(name)
                frame = pysat.DataFrame(cdata[name].T, index=epoch,
                                        columns=new_names)
                two_d_data.append(frame)
    for name in drop_list:
        _ = cdata.pop(name)

    # all of the data left over is 1D, add as Series
    data = pysat.DataFrame(cdata, index=epoch)
    two_d_data.append(data)
    data = pandas.concat(two_d_data, axis=1)
    data.drop('Epoch', axis=1, inplace=True)
    return data, meta
def _uptime_linux():
    """Returns uptime in seconds or None, on Linux."""
    # With procfs
    try:
        f = open('/proc/uptime', 'r')
        up = float(f.readline().split()[0])
        f.close()
        return up
    except (IOError, ValueError):
        pass

    # Without procfs (really?)
    try:
        libc = ctypes.CDLL('libc.so')
    except AttributeError:
        return None
    except OSError:
        # Debian and derivatives do the wrong thing because /usr/lib/libc.so
        # is a GNU ld script rather than an ELF object. To get around this, we
        # have to be more specific.
        # We don't want to use ctypes.util.find_library because that creates a
        # new process on Linux. We also don't want to try too hard because at
        # this point we're already pretty sure this isn't Linux.
        try:
            libc = ctypes.CDLL('libc.so.6')
        except OSError:
            return None

    if not hasattr(libc, 'sysinfo'):
        # Not Linux.
        return None

    buf = ctypes.create_string_buffer(128)  # 64 suffices on 32-bit, whatever.
    if libc.sysinfo(buf) < 0:
        return None

    up = struct.unpack_from('@l', buf.raw)[0]
    if up < 0:
        up = None
    return up
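As a side note, the procfs branch above can be written with a context manager so the file is closed on every path; a minimal sketch with the same logic (not part of the original module):

def proc_uptime():
    # First field of /proc/uptime is seconds since boot.
    try:
        with open('/proc/uptime') as f:
            return float(f.readline().split()[0])
    except (IOError, ValueError):
        return None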
def _boottime_linux():
    """A way to figure out the boot time directly on Linux."""
    global __boottime
    try:
        f = open('/proc/stat', 'r')
        for line in f:
            if line.startswith('btime'):
                __boottime = int(line.split()[1])

        if datetime is None:
            raise NotImplementedError('datetime module required.')

        return datetime.fromtimestamp(__boottime)
    except (IOError, IndexError):
        return None
def _uptime_amiga():
    """Returns uptime in seconds or None, on AmigaOS."""
    global __boottime
    try:
        __boottime = os.stat('RAM:').st_ctime
        return time.time() - __boottime
    except (NameError, OSError):
        return None
def _uptime_beos():
    """Returns uptime in seconds or None, on BeOS/Haiku."""
    try:
        libroot = ctypes.CDLL('libroot.so')
    except (AttributeError, OSError):
        return None

    if not hasattr(libroot, 'system_time'):
        return None

    libroot.system_time.restype = ctypes.c_int64
    return libroot.system_time() / 1000000.
def _uptime_bsd():
    """Returns uptime in seconds or None, on BSD (including OS X)."""
    global __boottime
    try:
        libc = ctypes.CDLL('libc.so')
    except AttributeError:
        return None
    except OSError:
        # OS X; can't use ctypes.util.find_library because that creates
        # a new process on Linux, which is undesirable.
        try:
            libc = ctypes.CDLL('libc.dylib')
        except OSError:
            return None

    if not hasattr(libc, 'sysctlbyname'):
        # Not BSD.
        return None

    # Determine how much space we need for the response.
    sz = ctypes.c_uint(0)
    libc.sysctlbyname('kern.boottime', None, ctypes.byref(sz), None, 0)
    if sz.value != struct.calcsize('@LL'):
        # Unexpected, let's give up.
        return None

    # For real now.
    buf = ctypes.create_string_buffer(sz.value)
    libc.sysctlbyname('kern.boottime', buf, ctypes.byref(sz), None, 0)
    sec, usec = struct.unpack('@LL', buf.raw)

    # OS X disagrees what that second value is.
    if usec > 1000000:
        usec = 0.

    __boottime = sec + usec / 1000000.
    up = time.time() - __boottime
    if up < 0:
        up = None
    return up
def _uptime_minix():
    """Returns uptime in seconds or None, on MINIX."""
    try:
        f = open('/proc/uptime', 'r')
        up = float(f.read())
        f.close()
        return up
    except (IOError, ValueError):
        return None
def _uptime_plan9():
    """Returns uptime in seconds or None, on Plan 9."""
    # Apparently Plan 9 only has Python 2.2, which I'm not prepared to
    # support. Maybe some Linuxes implement /dev/time, though, someone was
    # talking about it somewhere.
    try:
        # The time file holds one 32-bit number representing the seconds
        # since start of epoch and three 64-bit numbers, representing
        # nanoseconds since start of epoch, clock ticks, and clock
        # frequency.
        #  -- cons(3)
        f = open('/dev/time', 'r')
        s, ns, ct, cf = f.read().split()
        f.close()
        return float(ct) / float(cf)
    except (IOError, ValueError):
        return None
def _uptime_solaris():
    """Returns uptime in seconds or None, on Solaris."""
    global __boottime
    try:
        kstat = ctypes.CDLL('libkstat.so')
    except (AttributeError, OSError):
        return None

    # kstat doesn't have uptime, but it does have boot time.
    # Unfortunately, getting at it isn't perfectly straightforward.
    # First, let's pretend to be kstat.h

    # Constant
    KSTAT_STRLEN = 31  # According to every kstat.h I could find.

    # Data structures
    class anon_union(ctypes.Union):
        # The ``value'' union in kstat_named_t actually has a bunch more
        # members, but we're only using it for boot_time, so we only need
        # the padding and the one we're actually using.
        _fields_ = [('c', ctypes.c_char * 16),
                    ('time', ctypes.c_int)]

    class kstat_named_t(ctypes.Structure):
        _fields_ = [('name', ctypes.c_char * KSTAT_STRLEN),
                    ('data_type', ctypes.c_char),
                    ('value', anon_union)]

    # Function signatures
    kstat.kstat_open.restype = ctypes.c_void_p
    kstat.kstat_lookup.restype = ctypes.c_void_p
    kstat.kstat_lookup.argtypes = [ctypes.c_void_p, ctypes.c_char_p,
                                   ctypes.c_int, ctypes.c_char_p]
    kstat.kstat_read.restype = ctypes.c_int
    kstat.kstat_read.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
                                 ctypes.c_void_p]
    kstat.kstat_data_lookup.restype = ctypes.POINTER(kstat_named_t)
    kstat.kstat_data_lookup.argtypes = [ctypes.c_void_p, ctypes.c_char_p]

    # Now, let's do something useful.

    # Initialise kstat control structure.
    kc = kstat.kstat_open()
    if not kc:
        return None

    # We're looking for unix:0:system_misc:boot_time.
    ksp = kstat.kstat_lookup(kc, 'unix', 0, 'system_misc')
    if ksp and kstat.kstat_read(kc, ksp, None) != -1:
        data = kstat.kstat_data_lookup(ksp, 'boot_time')
        if data:
            __boottime = data.contents.value.time

    # Clean-up.
    kstat.kstat_close(kc)

    if __boottime is not None:
        return time.time() - __boottime

    return None
def _uptime_syllable():
    """Returns uptime in seconds or None, on Syllable."""
    global __boottime
    try:
        __boottime = os.stat('/dev/pty/mst/pty0').st_mtime
        return time.time() - __boottime
    except (NameError, OSError):
        return None
def _uptime_windows():
    """
    Returns uptime in seconds or None, on Windows. Warning: may return
    incorrect answers after 49.7 days on versions older than Vista.
    """
    if hasattr(ctypes, 'windll') and hasattr(ctypes.windll, 'kernel32'):
        lib = ctypes.windll.kernel32
    else:
        try:
            # Windows CE uses the cdecl calling convention.
            lib = ctypes.CDLL('coredll.lib')
        except (AttributeError, OSError):
            return None

    if hasattr(lib, 'GetTickCount64'):
        # Vista/Server 2008 or later.
        lib.GetTickCount64.restype = ctypes.c_uint64
        return lib.GetTickCount64() / 1000.
    if hasattr(lib, 'GetTickCount'):
        # WinCE and Win2k or later; gives wrong answers after 49.7 days.
        lib.GetTickCount.restype = ctypes.c_uint32
        return lib.GetTickCount() / 1000.
    return None
def uptime():
    """Returns uptime in seconds if even remotely possible, or None if not."""
    if __boottime is not None:
        return time.time() - __boottime

    return {'amiga': _uptime_amiga,
            'aros12': _uptime_amiga,
            'beos5': _uptime_beos,
            'cygwin': _uptime_linux,
            'darwin': _uptime_osx,
            'haiku1': _uptime_beos,
            'linux': _uptime_linux,
            'linux-armv71': _uptime_linux,
            'linux2': _uptime_linux,
            'mac': _uptime_mac,
            'minix3': _uptime_minix,
            'riscos': _uptime_riscos,
            'sunos5': _uptime_solaris,
            'syllable': _uptime_syllable,
            'win32': _uptime_windows,
            'wince': _uptime_windows}.get(sys.platform, _uptime_bsd)() or \
        _uptime_bsd() or _uptime_plan9() or _uptime_linux() or \
        _uptime_windows() or _uptime_solaris() or _uptime_beos() or \
        _uptime_amiga() or _uptime_riscos() or _uptime_posix() or \
        _uptime_syllable() or _uptime_mac() or _uptime_osx()
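# A minimal usage sketch, assuming this module is importable as ``uptime``
# (as the upstream package is). The entry point tries the handler mapped to
# sys.platform first, then falls back through every other helper in turn.
import uptime

seconds = uptime.uptime()
if seconds is not None:
    print('up %.0f seconds (~%.1f days)' % (seconds, seconds / 86400.))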
def boottime():
    """Returns boot time if remotely possible, or None if not."""
    global __boottime

    if __boottime is None:
        up = uptime()
        if up is None:
            return None

    if __boottime is None:
        _boottime_linux()

    if datetime is None:
        raise RuntimeError('datetime module required.')

    return datetime.fromtimestamp(__boottime or time.time() - up)
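# Companion sketch, same assumption as above: boottime() returns a naive
# datetime.datetime in local time (or raises RuntimeError if the datetime
# module could not be imported).
import uptime

booted = uptime.boottime()
if booted is not None:
    print('booted at', booted.isoformat())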
def _initfile(path, data="dict"):
    """Initialize an empty JSON file."""
    data = {} if data.lower() == "dict" else []
    # The file will need to be created if it doesn't exist
    if not os.path.exists(path):  # The file doesn't exist
        # Raise exception if the directory that should contain the file
        # doesn't exist
        dirname = os.path.dirname(path)
        if dirname and not os.path.exists(dirname):
            raise IOError(
                ("Could not initialize empty JSON file in non-existent "
                 "directory '{}'").format(os.path.dirname(path))
            )
        # Write an empty file there
        with open(path, "w") as f:
            json.dump(data, f)
        return True
    elif os.path.getsize(path) == 0:  # The file is empty
        with open(path, "w") as f:
            json.dump(data, f)
        return True  # This branch also (re)initialized the file.
    else:  # The file exists and contains content
        return False
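# A minimal sketch of how this helper might be called (the path is an
# arbitrary example): it lazily creates or re-seeds the backing file
# before any reads happen.
import json

path = "/tmp/example.json"
_initfile(path, "dict")  # writes {} if the file is missing or empty
with open(path) as f:
    print(json.load(f))  # -> {}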
def _data(self):
    """A simpler version of data to avoid infinite recursion in some cases.

    Don't use this.
    """
    if self.is_caching:
        return self.cache
    with open(self.path, "r") as f:
        return json.load(f)
def data(self, data):
    """Overwrite the file with new data. You probably shouldn't do
    this yourself, it's easy to screw up your whole file with this."""
    if self.is_caching:
        self.cache = data
    else:
        fcontents = self.file_contents
        with open(self.path, "w") as f:
            try:
                # Write the file. Keep user settings about indentation, etc.
                indent = self.indent if self.pretty else None
                json.dump(data, f, sort_keys=self.sort_keys, indent=indent)
            except Exception as e:
                # Rollback to prevent data loss
                f.seek(0)
                f.truncate()
                f.write(fcontents)
                # And re-raise the exception
                raise e
    self._updateType()
def _updateType(self):
    """Make sure that the class behaves like the data structure that it
    is, so that we don't get a ListFile trying to represent a dict."""
    data = self._data()

    # Change type if needed
    if isinstance(data, dict) and isinstance(self, ListFile):
        self.__class__ = DictFile
    elif isinstance(data, list) and isinstance(self, DictFile):
        self.__class__ = ListFile
def with_data(path, data):
    """Initialize a new file that starts out with some data. Pass data
    as a list, dict, or JSON string.
    """
    # De-jsonize data if necessary
    if isinstance(data, str):
        data = json.loads(data)

    # Make sure this is really a new file
    if os.path.exists(path):
        raise ValueError("File exists, not overwriting data. Set the "
                         "'data' attribute on a normally-initialized "
                         "'livejson.File' instance if you really "
                         "want to do this.")
    else:
        f = File(path)
        f.data = data
        return f
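# Usage sketch, assuming the upstream livejson module layout, where this
# is exposed as a static method on File:
import livejson

f = livejson.File.with_data("/tmp/new.json", {"version": 1})
f["version"] = 2  # subsequent edits are written straight back to disk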
def is_configured(self, project, **kwargs):
    """ Check if plugin is configured. """
    params = self.get_option
    return bool(params('server_host', project) and
                params('server_port', project))
def post_process(self, group, event, is_new, is_sample, **kwargs):
    """ Process error. """
    if not self.is_configured(group.project):
        return

    host = self.get_option('server_host', group.project)
    port = int(self.get_option('server_port', group.project))
    prefix = self.get_option('prefix', group.project)
    hostname = self.get_option('hostname', group.project) or socket.gethostname()
    resolve_age = group.project.get_option('sentry:resolve_age', None)

    now = int(time.time())
    template = '%s.%%s[%s]' % (prefix, group.project.slug)

    level = group.get_level_display()
    label = template % level

    groups = group.project.group_set.filter(status=STATUS_UNRESOLVED)

    if resolve_age:
        oldest = timezone.now() - timedelta(hours=int(resolve_age))
        groups = groups.filter(last_seen__gt=oldest)

    num_errors = groups.filter(level=group.level).count()

    metric = Metric(hostname, label, num_errors, now)

    log.info('will send %s=%s to zabbix', label, num_errors)

    send_to_zabbix([metric], host, port)
def as_dict(self):
    """
    ping statistics.

    Returns:
        |dict|:

    Examples:
        >>> import pingparsing
        >>> parser = pingparsing.PingParsing()
        >>> parser.parse(ping_result)
        >>> parser.as_dict()
        {
            "destination": "google.com",
            "packet_transmit": 60,
            "packet_receive": 60,
            "packet_loss_rate": 0.0,
            "packet_loss_count": 0,
            "rtt_min": 61.425,
            "rtt_avg": 99.731,
            "rtt_max": 212.597,
            "rtt_mdev": 27.566,
            "packet_duplicate_rate": 0.0,
            "packet_duplicate_count": 0
        }
    """
    return {
        "destination": self.destination,
        "packet_transmit": self.packet_transmit,
        "packet_receive": self.packet_receive,
        "packet_loss_count": self.packet_loss_count,
        "packet_loss_rate": self.packet_loss_rate,
        "rtt_min": self.rtt_min,
        "rtt_avg": self.rtt_avg,
        "rtt_max": self.rtt_max,
        "rtt_mdev": self.rtt_mdev,
        "packet_duplicate_count": self.packet_duplicate_count,
        "packet_duplicate_rate": self.packet_duplicate_rate,
    }
def as_tuple(self):
    """
    ping statistics.

    Returns:
        |namedtuple|:

    Examples:
        >>> import pingparsing
        >>> parser = pingparsing.PingParsing()
        >>> parser.parse(ping_result)
        >>> parser.as_tuple()
        PingStatsTuple(destination='google.com', packet_transmit=60,
        packet_receive=60, packet_loss_rate=0.0, packet_loss_count=0,
        rtt_min=61.425, rtt_avg=99.731, rtt_max=212.597, rtt_mdev=27.566,
        packet_duplicate_rate=0.0, packet_duplicate_count=0)
    """
    from collections import namedtuple

    ping_result = self.as_dict()

    # The repr in the example above matches the type name created here.
    return namedtuple("PingStatsTuple", ping_result.keys())(**ping_result)
def ping(self):
    """
    Sending ICMP packets.

    :return: ``ping`` command execution result.
    :rtype: :py:class:`.PingResult`
    :raises ValueError: If parameters not valid.
    """
    self.__validate_ping_param()

    ping_proc = subprocrunner.SubprocessRunner(self.__get_ping_command())
    ping_proc.run()

    return PingResult(ping_proc.stdout, ping_proc.stderr, ping_proc.returncode)
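# A short usage sketch, assuming the upstream pingparsing package where
# this method lives on PingTransmitter (the attribute names below follow
# its documentation and may differ between versions):
import pingparsing

transmitter = pingparsing.PingTransmitter()
transmitter.destination = "google.com"
transmitter.count = 10
result = transmitter.ping()  # PingResult(stdout, stderr, returncode)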
def parse(self, ping_message):
    """
    Parse ping command output.

    Args:
        ping_message (str or :py:class:`~pingparsing.PingResult`):
            ``ping`` command output.

    Returns:
        :py:class:`~pingparsing.PingStats`: Parsed result.
    """
    try:
        # accept PingResult instance as an input
        if typepy.is_not_null_string(ping_message.stdout):
            ping_message = ping_message.stdout
    except AttributeError:
        pass

    logger.debug("parsing ping result: {}".format(ping_message))

    self.__parser = NullPingParser()

    if typepy.is_null_string(ping_message):
        logger.debug("ping_message is empty")
        self.__stats = PingStats()
        return self.__stats

    ping_lines = _to_unicode(ping_message).splitlines()
    parser_class_list = (
        LinuxPingParser,
        WindowsPingParser,
        MacOsPingParser,
        AlpineLinuxPingParser,
    )

    for parser_class in parser_class_list:
        self.__parser = parser_class()
        try:
            self.__stats = self.__parser.parse(ping_lines)
            return self.__stats
        except ParseError as e:
            if e.reason != ParseErrorReason.HEADER_NOT_FOUND:
                raise e
        except pp.ParseException:
            pass

    self.__parser = NullPingParser()

    return self.__stats
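# Continuing the sketch above: the parser accepts either the PingResult
# returned by ping() or the raw command output as a string. (In some
# versions the statistics accessors live on the parser rather than on the
# returned object; treat this as an assumption.)
parser = pingparsing.PingParsing()
stats = parser.parse(result)
print(stats.as_dict()["rtt_avg"])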
def send_confirmation(self):
    """
    Send a verification email for the email address.
    """
    confirmation = EmailConfirmation.objects.create(email=self)
    confirmation.send()
def send_duplicate_notification(self):
    """
    Send a notification about a duplicate signup.
    """
    email_utils.send_email(
        from_email=settings.DEFAULT_FROM_EMAIL,
        recipient_list=[self.email],
        subject=_("Registration Attempt"),
        template_name="rest_email_auth/emails/duplicate-email",
    )

    logger.info("Sent duplicate email notification to: %s", self.email)
def set_primary(self):
    """
    Set this email address as the user's primary email.
    """
    query = EmailAddress.objects.filter(is_primary=True, user=self.user)
    query = query.exclude(pk=self.pk)

    # The transaction is atomic so there is never a gap where a user
    # has no primary email address.
    with transaction.atomic():
        query.update(is_primary=False)

        self.is_primary = True
        self.save()

    logger.info(
        "Set %s as the primary email address for %s.",
        self.email,
        self.user,
    )
def confirm(self):
    """
    Mark the instance's email as verified.
    """
    self.email.is_verified = True
    self.email.save()

    signals.email_verified.send(email=self.email, sender=self.__class__)

    logger.info("Verified email address: %s", self.email.email)
def is_expired(self):
    """
    Determine if the confirmation has expired.

    Returns:
        bool:
            ``True`` if the confirmation has expired and ``False``
            otherwise.
    """
    expiration_time = self.created_at + datetime.timedelta(days=1)

    return timezone.now() > expiration_time
def send(self):
    """
    Send a verification email to the user.
    """
    context = {
        "verification_url": app_settings.EMAIL_VERIFICATION_URL.format(
            key=self.key
        )
    }

    email_utils.send_email(
        context=context,
        from_email=settings.DEFAULT_FROM_EMAIL,
        recipient_list=[self.email.email],
        subject=_("Please Verify Your Email Address"),
        template_name="rest_email_auth/emails/verify-email",
    )

    logger.info(
        "Sent confirmation email to %s for user #%d",
        self.email.email,
        self.email.user.id,
    )
def _create(cls, model_class, *args, **kwargs):
    """
    Create a new user instance.

    Args:
        model_class:
            The type of model to create an instance of.
        args:
            Positional arguments to create the instance with.
        kwargs:
            Keyword arguments to create the instance with.

    Returns:
        A new user instance of the type specified by ``model_class``.
    """
    manager = cls._get_manager(model_class)

    return manager.create_user(*args, **kwargs)
def create(self, validated_data):
    """
    Create a new email and send a confirmation to it.

    Returns:
        The newly created ``EmailAddress`` instance.
    """
    email_query = models.EmailAddress.objects.filter(
        email=self.validated_data["email"]
    )

    if email_query.exists():
        email = email_query.get()
        email.send_duplicate_notification()
    else:
        email = super(EmailSerializer, self).create(validated_data)
        email.send_confirmation()

        user = validated_data.get("user")
        query = models.EmailAddress.objects.filter(
            is_primary=True, user=user
        )

        if not query.exists():
            email.set_primary()

    return email
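# Hypothetical view-level usage (the request wiring below is an assumption
# based on common DRF patterns, not taken from this codebase):
serializer = EmailSerializer(data={"email": "new@example.com"})
serializer.is_valid(raise_exception=True)
serializer.save(user=request.user)  # creates the address, sends confirmation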
def update(self, instance, validated_data):
    """
    Update the instance the serializer is bound to.

    Args:
        instance:
            The instance the serializer is bound to.
        validated_data:
            The data to update the serializer with.

    Returns:
        The updated instance.
    """
    is_primary = validated_data.pop("is_primary", False)

    instance = super(EmailSerializer, self).update(
        instance, validated_data
    )

    if is_primary:
        instance.set_primary()

    return instance
def validate_email(self, email):
    """
    Validate the provided email address.

    The email address is first modified to match the RFC spec.
    Namely, the domain portion of the email is lowercased.

    Returns:
        The validated email address.

    Raises:
        serializers.ValidationError:
            If the serializer is bound and the provided email
            doesn't match the existing address.
    """
    user, domain = email.rsplit("@", 1)
    email = "@".join([user, domain.lower()])

    if self.instance and email and self.instance.email != email:
        raise serializers.ValidationError(
            _(
                "Existing emails may not be edited. Create a new one "
                "instead."
            )
        )

    return email
def validate_is_primary(self, is_primary):
    """
    Validate the provided 'is_primary' parameter.

    Returns:
        The validated 'is_primary' value.

    Raises:
        serializers.ValidationError:
            If the user attempted to mark an unverified email as
            their primary email address.
    """
    # TODO: Setting 'is_primary' to 'False' should probably not be
    #       allowed.
    if is_primary and not (self.instance and self.instance.is_verified):
        raise serializers.ValidationError(
            _(
                "Unverified email addresses may not be used as the "
                "primary address."
            )
        )

    return is_primary
def validate(self, data):
    """
    Validate the provided data.

    Returns:
        dict:
            The validated data.

    Raises:
        serializers.ValidationError:
            If the provided password is invalid.
    """
    user = self._confirmation.email.user

    if (
        app_settings.EMAIL_VERIFICATION_PASSWORD_REQUIRED
        and not user.check_password(data["password"])
    ):
        raise serializers.ValidationError(
            _("The provided password is invalid.")
        )

    # Add email to returned data
    data["email"] = self._confirmation.email.email

    return data
def validate_key(self, key):
    """
    Validate the provided confirmation key.

    Returns:
        str:
            The validated confirmation key.

    Raises:
        serializers.ValidationError:
            If there is no email confirmation with the given key or
            the confirmation has expired.
    """
    try:
        confirmation = models.EmailConfirmation.objects.select_related(
            "email__user"
        ).get(key=key)
    except models.EmailConfirmation.DoesNotExist:
        raise serializers.ValidationError(
            _("The provided verification key is invalid.")
        )

    if confirmation.is_expired:
        raise serializers.ValidationError(
            _("That verification code has expired.")
        )

    # Cache confirmation instance
    self._confirmation = confirmation

    return key
def save(self):
    """
    Send out a password reset if the provided data is valid.

    If the provided email address exists and is verified, a reset
    email is sent to the address.

    Returns:
        The password reset token if one was created, and ``None``
        otherwise.
    """
    try:
        email = models.EmailAddress.objects.get(
            email=self.validated_data["email"], is_verified=True
        )
    except models.EmailAddress.DoesNotExist:
        return None

    token = models.PasswordResetToken.objects.create(email=email)
    token.send()

    return token
def save(self):
    """
    Reset the user's password if the provided information is valid.
    """
    token = models.PasswordResetToken.objects.get(
        key=self.validated_data["key"]
    )

    token.email.user.set_password(self.validated_data["password"])
    token.email.user.save()

    logger.info("Reset password for %s", token.email.user)

    token.delete()
def validate_key(self, key):
    """
    Validate the provided reset key.

    Returns:
        The validated key.

    Raises:
        serializers.ValidationError:
            If the provided key does not exist.
    """
    if not models.PasswordResetToken.valid_tokens.filter(key=key).exists():
        raise serializers.ValidationError(
            _("The provided reset token does not exist, or is expired.")
        )

    return key
def create(self, validated_data):
    """
    Create a new user from the data passed to the serializer.

    If the provided email has not been verified yet, the user is
    created and a verification email is sent to the address.
    Otherwise we send a notification to the email address that
    someone attempted to register with an email that's already been
    verified.

    Args:
        validated_data (dict):
            The data passed to the serializer after it has been
            validated.

    Returns:
        A new user created from the provided data.
    """
    email = validated_data.pop("email")
    password = validated_data.pop("password")

    # We don't save the user instance yet in case the provided email
    # address already exists.
    user = get_user_model()(**validated_data)
    user.set_password(password)

    # We set an ephemeral email property so that it is included in
    # the data returned by the serializer.
    user.email = email

    email_query = models.EmailAddress.objects.filter(email=email)

    if email_query.exists():
        existing_email = email_query.get()
        existing_email.send_duplicate_notification()
    else:
        user.save()

        email_instance = models.EmailAddress.objects.create(
            email=email, user=user
        )
        email_instance.send_confirmation()

        signals.user_registered.send(sender=self.__class__, user=user)

    return user
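# Hypothetical usage sketch; fields other than 'email' and 'password'
# depend on the configured user model (Django's default is assumed here):
serializer = RegistrationSerializer(
    data={
        "email": "user@example.com",
        "password": "s3cret-passphrase",
        "username": "user",
    }
)
serializer.is_valid(raise_exception=True)
user = serializer.save()  # only persisted if the email was not already taken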
def validate_email(self, email):
    """
    Validate the provided email address.

    Args:
        email:
            The email address to validate.

    Returns:
        The provided email address, transformed to match the RFC
        spec. Namely, the domain portion of the email must be
        lowercase.
    """
    user, domain = email.rsplit("@", 1)

    return "@".join([user, domain.lower()])
def save(self):
    """
    Resend a verification email to the provided address.

    If the provided email is already verified no action is taken.
    """
    try:
        email = models.EmailAddress.objects.get(
            email=self.validated_data["email"], is_verified=False
        )

        logger.debug(
            "Resending verification email to %s",
            self.validated_data["email"],
        )

        email.send_confirmation()
    except models.EmailAddress.DoesNotExist:
        logger.debug(
            "Not resending verification email to %s because the address "
            "doesn't exist in the database.",
            self.validated_data["email"],
        )
def create(self, *args, **kwargs):
    """
    Create a new email address.
    """
    is_primary = kwargs.pop("is_primary", False)

    with transaction.atomic():
        email = super(EmailAddressManager, self).create(*args, **kwargs)

        if is_primary:
            email.set_primary()

    return email
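# Usage sketch, assuming this manager is installed as ``objects`` on the
# EmailAddress model: the extra keyword is peeled off before delegating to
# the base manager's create(), so callers can write:
email = EmailAddress.objects.create(
    user=user, email="a@example.com", is_primary=True
)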
def get_queryset(self):
    """
    Return all unexpired password reset tokens.
    """
    oldest = timezone.now() - app_settings.PASSWORD_RESET_EXPIRATION

    queryset = super(ValidPasswordResetTokenManager, self).get_queryset()

    return queryset.filter(created_at__gt=oldest)
def handle(self, *args, **kwargs):
    """
    Handle execution of the command.
    """
    cutoff = timezone.now()
    cutoff -= app_settings.CONFIRMATION_EXPIRATION
    cutoff -= app_settings.CONFIRMATION_SAVE_PERIOD

    queryset = models.EmailConfirmation.objects.filter(
        created_at__lte=cutoff
    )
    count = queryset.count()
    queryset.delete()

    if count:
        self.stdout.write(
            self.style.SUCCESS(
                "Removed {count} old email confirmation(s)".format(
                    count=count
                )
            )
        )
    else:
        self.stdout.write("No email confirmations to remove.")
def get_user(self, user_id):
    """
    Get a user by their ID.

    Args:
        user_id:
            The ID of the user to fetch.

    Returns:
        The user with the specified ID if they exist and ``None``
        otherwise.
    """
    try:
        return get_user_model().objects.get(id=user_id)
    except get_user_model().DoesNotExist:
        return None
def authenticate(self, request, email=None, password=None, username=None):
    """
    Attempt to authenticate a set of credentials.

    Args:
        request:
            The request associated with the authentication attempt.
        email:
            The user's email address.
        password:
            The user's password.
        username:
            An alias for the ``email`` field. This is provided for
            compatibility with Django's built in authentication
            views.

    Returns:
        The user associated with the provided credentials if they
        are valid. Returns ``None`` otherwise.
    """
    email = email or username
    try:
        email_instance = models.EmailAddress.objects.get(
            is_verified=True, email=email
        )
    except models.EmailAddress.DoesNotExist:
        return None

    user = email_instance.user

    if user.check_password(password):
        return user

    return None
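# To use a backend like this one, Django must be told about it in
# settings.py. The dotted path below is an assumption about where the
# class lives, not a value taken from this codebase:
AUTHENTICATION_BACKENDS = [
    "rest_email_auth.authentication.VerifiedEmailBackend",
    "django.contrib.auth.backends.ModelBackend",
]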