'Returns next available data record from the storage as a dict, with the keys being the field names. This also adds in some meta fields:

- ``_category``: The value from the category field (if any)
- ``_reset``: True if the reset field was True (if any)
- ``_sequenceId``: the value from the sequenceId field (if any)'
def getNextRecordDict(self):
    values = self.getNextRecord()
    if values is None:
        return None
    if not values:
        return dict()
    if self._modelRecordEncoder is None:
        self._modelRecordEncoder = ModelRecordEncoder(
            fields=self.getFields(),
            aggregationPeriod=self.getAggregationMonthsAndSeconds())
    return self._modelRecordEncoder.encode(values)
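A minimal usage sketch (the CSV path is hypothetical; assumes a FileRecordStream-style implementation of this interface):

# Hypothetical usage: iterate dict-records until the stream is exhausted.
from nupic.data.file_record_stream import FileRecordStream

stream = FileRecordStream('data.csv')  # hypothetical path
record = stream.getNextRecordDict()
while record is not None:
    # Meta fields are present only if the corresponding special field exists.
    print(record.get('_sequenceId'))
    record = stream.getNextRecordDict()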
'Returns the aggregation period of the record stream as a dict containing \'months\' and \'seconds\'. The months is always an integer and seconds is a floating point. Only one is allowed to be non-zero. If there is no aggregation associated with the stream, returns None. Typically, a raw file or hbase stream will NOT have any aggregation info, but subclasses of :class:`~nupic.data.record_stream.RecordStreamIface`, like :class:`~nupic.data.stream_reader.StreamReader`, will and will return the aggregation period from this call. This call is used by :meth:`getNextRecordDict` to assign a record number to a record given its timestamp and the aggregation interval. :returns: ``None``'
def getAggregationMonthsAndSeconds(self):
return None
'If underlying implementation does not support min/max stats collection, or if a field type does not support min/max (non scalars), the return value will be None. :param fieldName: (string) name of field to get min :returns: current minimum value for the field ``fieldName``.'
def getFieldMin(self, fieldName):
    stats = self.getStats()
    if stats is None:
        return None
    minValues = stats.get('min', None)
    if minValues is None:
        return None
    index = self.getFieldNames().index(fieldName)
    return minValues[index]
'If underlying implementation does not support min/max stats collection, or if a field type does not support min/max (non scalars), the return value will be None. :param fieldName: (string) name of field to get max :returns: current maximum value for the field ``fieldName``.'
def getFieldMax(self, fieldName):
    stats = self.getStats()
    if stats is None:
        return None
    maxValues = stats.get('max', None)
    if maxValues is None:
        return None
    index = self.getFieldNames().index(fieldName)
    return maxValues[index]
':returns: (int) index of the ``reset`` field; ``None`` if no such field.'
def getResetFieldIdx(self):
return _getFieldIndexBySpecial(self.getFields(), FieldMetaSpecial.reset)
':returns: (int) index of the ``timestamp`` field.'
def getTimestampFieldIdx(self):
return _getFieldIndexBySpecial(self.getFields(), FieldMetaSpecial.timestamp)
':returns: (int) index of the ``sequenceId`` field.'
def getSequenceIdFieldIdx(self):
return _getFieldIndexBySpecial(self.getFields(), FieldMetaSpecial.sequence)
':returns: (int) index of ``category`` field'
def getCategoryFieldIdx(self):
return _getFieldIndexBySpecial(self.getFields(), FieldMetaSpecial.category)
':returns: (int) index of the ``learning`` field.'
def getLearningFieldIdx(self):
return _getFieldIndexBySpecial(self.getFields(), FieldMetaSpecial.learning)
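Taken together, these index getters map the special fields back onto a raw record row. A minimal sketch, assuming `stream` is any implementation of this interface:

# Sketch: pull special values out of a raw row from getNextRecord().
resetIdx = stream.getResetFieldIdx()
timestampIdx = stream.getTimestampFieldIdx()
row = stream.getNextRecord()
if row is not None:
    reset = row[resetIdx] if resetIdx is not None else None
    timestamp = row[timestampIdx] if timestampIdx is not None else None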
'Closes the stream.'
def close(self):
    if self._file is not None:
        self._file.close()
        self._file = None
'Put us back at the beginning of the file again.'
def rewind(self):
    super(FileRecordStream, self).rewind()
    self.close()
    self._file = open(self._filename, self._mode)
    self._reader = csv.reader(self._file, dialect='excel')
    # Skip the three header rows (names, types, specials).
    self._reader.next()
    self._reader.next()
    self._reader.next()
    self._recordCount = 0
'Returns next available data record from the file. :returns: a data row (a list or tuple) if available; None, if no more records in the table (End of Stream - EOS); empty sequence (list or tuple) when timing out while waiting for the next record.'
def getNextRecord(self, useCache=True):
    assert self._file is not None
    assert self._mode == self._FILE_READ_MODE
    try:
        line = self._reader.next()
    except StopIteration:
        if self.rewindAtEOF:
            if self._recordCount == 0:
                raise Exception("The source configured to reset at EOF but "
                                "'%s' appears to be empty" % self._filename)
            self.rewind()
            line = self._reader.next()
        else:
            return None
    self._recordCount += 1
    record = []
    for i, f in enumerate(line):
        if f in self._missingValues:
            record.append(SENTINEL_VALUE_FOR_MISSING_DATA)
        else:
            # Convert the field string to its native type via the
            # per-field adapter.
            record.append(self._adapters[i](f))
    return record
'Saves the record in the underlying csv file. :param record: a list of Python objects that will be string-ified'
def appendRecord(self, record):
    assert self._file is not None
    assert self._mode == self._FILE_WRITE_MODE
    assert isinstance(record, (list, tuple)), \
        'unexpected record type: ' + repr(type(record))
    assert len(record) == self._fieldCount, \
        'len(record): %s, fieldCount: %s' % (len(record), self._fieldCount)
    # Write the three-row header (names, types, specials) before the first
    # data row.
    if self._recordCount == 0:
        names, types, specials = zip(*self.getFields())
        for line in (names, types, specials):
            self._writer.writerow(line)
    self._updateSequenceInfo(record)
    line = [self._adapters[i](f) for i, f in enumerate(record)]
    self._writer.writerow(line)
    self._recordCount += 1
'Saves multiple records in the underlying storage. :param records: array of records as in :meth:`~.FileRecordStream.appendRecord` :param progressCB: (function) callback to report progress'
def appendRecords(self, records, progressCB=None):
    for record in records:
        self.appendRecord(record)
        if progressCB is not None:
            progressCB()
'Gets a bookmark or anchor to the current position. :returns: an anchor to the current position in the data. Passing this anchor to a constructor makes the current position to be the first returned record.'
def getBookmark(self):
    if self._write and self._recordCount == 0:
        return None
    rowDict = dict(filepath=os.path.realpath(self._filename),
                   currentRow=self._recordCount)
    return json.dumps(rowDict)
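Since the bookmark is a JSON blob holding the file's realpath and row offset, it can be persisted and handed back to a new reader later; a sketch:

# Sketch: bookmark round trip. The anchor records the absolute file path
# and the row the next read would return.
import json

bookmark = stream.getBookmark()
if bookmark is not None:
    print(json.loads(bookmark)['currentRow'])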
'Returns whether there are more records from current position. ``bookmark`` is not used in this implementation. :return: True if there are records left after current position.'
def recordsExistAfter(self, bookmark):
return ((self.getDataRowCount() - self.getNextRecordIdx()) > 0)
'Seeks to ``numRecords`` from the end and returns a bookmark to the new position. :param numRecords: how far to seek from end of file. :return: bookmark to desired location.'
def seekFromEnd(self, numRecords):
    self._file.seek(self._getTotalLineCount() - numRecords)
    return self.getBookmark()
'Controls whether :meth:`~.FileRecordStream.getNextRecord` should automatically rewind the source when EOF is reached.

:param autoRewind: (bool)
    - if True, :meth:`~.FileRecordStream.getNextRecord` will automatically rewind the source on EOF.
    - if False, :meth:`~.FileRecordStream.getNextRecord` will not automatically rewind the source on EOF.'
def setAutoRewind(self, autoRewind):
self.rewindAtEOF = autoRewind
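With auto-rewind enabled, a non-empty file behaves like an endless stream, which is convenient for fixed-iteration training loops; a sketch:

# Sketch: cycle a finite dataset for a fixed number of iterations.
stream.setAutoRewind(True)
for _ in xrange(10000):
    record = stream.getNextRecord()  # rewinds to the first data row at EOF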
'Parse the file using dedicated reader and collect fields stats. Never called if user of :class:`~.FileRecordStream` does not invoke :meth:`~.FileRecordStream.getStats` method.

:returns: a dictionary of stats. In the current implementation, min and max fields are supported. Example of the return dictionary:

.. code-block:: python

    {
      \'min\' : [f1_min, f2_min, None, None, fn_min],
      \'max\' : [f1_max, f2_max, None, None, fn_max]
    }

(where fx_min/fx_max are set for scalar fields, or None if not)'
def getStats(self):
    if self._stats is None:
        assert self._mode == self._FILE_READ_MODE
        inFile = open(self._filename, self._FILE_READ_MODE)
        reader = csv.reader(inFile, dialect='excel')
        # Read the names/types header rows; skip the specials row.
        names = [n.strip() for n in reader.next()]
        types = [t.strip() for t in reader.next()]
        reader.next()
        self._stats = dict()
        self._stats['min'] = []
        self._stats['max'] = []
        for i in xrange(len(names)):
            self._stats['min'].append(None)
            self._stats['max'].append(None)
        while True:
            try:
                line = reader.next()
                for i, f in enumerate(line):
                    if (len(types) > i and
                        types[i] in [FieldMetaType.integer,
                                     FieldMetaType.float] and
                        f not in self._missingValues):
                        value = self._adapters[i](f)
                        if (self._stats['max'][i] is None or
                            self._stats['max'][i] < value):
                            self._stats['max'][i] = value
                        if (self._stats['min'][i] is None or
                            self._stats['min'][i] > value):
                            self._stats['min'][i] = value
            except StopIteration:
                break
    return self._stats
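Stats are computed lazily on the first call and cached until :meth:`clearStats`; a sketch (the field name is hypothetical):

# Sketch: read per-field min/max; non-scalar fields stay None in both lists.
stats = stream.getStats()
idx = stream.getFieldNames().index('consumption')  # hypothetical field
print('min=%s max=%s' % (stats['min'][idx], stats['max'][idx]))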
'Resets stats collected so far.'
def clearStats(self):
self._stats = None
'Not implemented. CSV file version does not provide storage for the error information'
def getError(self):
return None
'Not implemented. CSV file version does not provide storage for the error information'
def setError(self, error):
return
'Not implemented. CSV file is always considered completed.'
def isCompleted(self):
return True
'Not implemented: CSV file is always considered completed, nothing to do.'
def setCompleted(self, completed=True):
return
':returns: (list) field names associated with the data.'
def getFieldNames(self):
return [f.name for f in self._fields]
':returns: a sequence of :class:`~.FieldMetaInfo` ``name``/``type``/``special`` tuples for each field in the stream.'
def getFields(self):
    if self._fields is None:
        return None
    else:
        return copy.copy(self._fields)
'Keep track of sequences and make sure time goes forward.

Checks whether the current record is the beginning of a new sequence. A new sequence starts in two cases:

1. The sequence id changed (if there is a sequence id field)
2. The reset field is 1 (if there is a reset field)

Note that if there is no sequenceId field or reset field then the entire dataset is technically one big sequence. The function will not return True for the first record in this case. This is OK because it is important to detect new sequences only when there are multiple sequences in the file.'
def _updateSequenceInfo(self, r):
    newSequence = False
    sequenceId = (r[self._sequenceIdIdx]
                  if self._sequenceIdIdx is not None else None)
    if sequenceId != self._currSequence:
        # Starting a new sequence: verify it hasn't been seen before.
        if sequenceId in self._sequences:
            raise Exception('Broken sequence: %s, record: %s' %
                            (sequenceId, r))
        self._sequences.add(self._currSequence)
        self._currSequence = sequenceId
        # If there is a reset field it must agree.
        if self._resetIdx:
            assert r[self._resetIdx] == 1
        newSequence = True
    else:
        # Same sequence id; a reset flag can still start a new sequence.
        reset = False
        if self._resetIdx:
            reset = r[self._resetIdx]
            if reset == 1:
                newSequence = True
    # Within a sequence, time must flow forward.
    if not newSequence:
        if self._timeStampIdx and self._currTime is not None:
            t = r[self._timeStampIdx]
            if t < self._currTime:
                raise Exception('No time travel. Early timestamp for '
                                'record: %s' % r)
    if self._timeStampIdx:
        self._currTime = r[self._timeStampIdx]
'Extracts start row from the bookmark information'
def _getStartRow(self, bookmark):
    bookMarkDict = json.loads(bookmark)
    realpath = os.path.realpath(self._filename)
    bookMarkFile = bookMarkDict.get('filepath', None)
    if bookMarkFile != realpath:
        print ("Ignoring bookmark due to mismatch between File's "
               "filename realpath vs. bookmark; realpath: %r; bookmark: %r" %
               (realpath, bookMarkDict))
        return 0
    else:
        return bookMarkDict['currentRow']
'Returns: count of ALL lines in dataset, including header lines'
def _getTotalLineCount(self):
    if self._mode == self._FILE_WRITE_MODE:
        self._file.flush()
    return sum(1 for line in open(self._filename, self._FILE_READ_MODE))
':returns: (int) the index of the record that will be read next from :meth:`~.FileRecordStream.getNextRecord`.'
def getNextRecordIdx(self):
return self._recordCount
':returns: (int) count of data rows in dataset (excluding header lines)'
def getDataRowCount(self):
    numLines = self._getTotalLineCount()
    if numLines == 0:
        # This may happen for a write-mode file before the first flush.
        assert self._mode == self._FILE_WRITE_MODE and self._recordCount == 0
        numDataRows = 0
    else:
        numDataRows = numLines - self._NUM_HEADER_ROWS
    assert numDataRows >= 0
    return numDataRows
'Flushes the file.'
def flush(self):
    if self._file is not None:
        self._file.flush()
'Context guard - enter. Just returns the object.'
def __enter__(self):
return self
'Context guard - exit. Ensures that the file is always closed at the end of the \'with\' block. Lets exceptions propagate.'
def __exit__(self, exc_type, value, traceback):
self.close()
'Support for the iterator protocol. Return itself'
def __iter__(self):
return self
'Implement the iterator protocol'
def next(self):
    record = self.getNextRecord()
    if record is None:
        raise StopIteration
    return record
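Because the class implements both the context-manager and iterator protocols, the idiomatic read loop is simply (path and callback hypothetical):

# Sketch: __enter__/__exit__ handle closing; __iter__/next drive the loop.
with FileRecordStream('data.csv') as stream:
    for record in stream:
        handle(record)  # hypothetical per-record callback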
'Construct a Dimensions object. The constructor can be called with no arguments or with a list of integers.'
def __init__(self, *args):
engine_internal.Dimensions.__init__(self, *args)
'Store the wrapped region and hosting network. The network is the high-level Network and not the internal Network. This is important in case the user requests the network from the region (never leak an engine object, remember).'
def __init__(self, region, network):
    self._network = network
    self._region = region
    # Inherit the docstring from the wrapped region class (the original
    # used '==' here, which was a no-op comparison).
    self.__class__.__doc__ = region.__class__.__doc__
    self._paramTypeCache = {}
'@doc:place_holder(Region.getSpecFromType)'
@staticmethod
def getSpecFromType(nodeType):
return Spec(engine_internal.Region.getSpecFromType(nodeType))
'@doc:place_holder(Region.compute) ** This line comes from the original docstring (not generated by Documentor)'
def compute(self):
return self._region.compute()
'@doc:place_holder(Region.getInputData)'
def getInputData(self, inputName):
return self._region.getInputArray(inputName)
'@doc:place_holder(Region.getOutputData)'
def getOutputData(self, outputName):
return self._region.getOutputArray(outputName)
'Returns list of input names in spec.'
def getInputNames(self):
    inputs = self.getSpec().inputs
    return [inputs.getByIndex(i)[0] for i in xrange(inputs.getCount())]
'Returns list of output names in spec.'
def getOutputNames(self):
    outputs = self.getSpec().outputs
    return [outputs.getByIndex(i)[0] for i in xrange(outputs.getCount())]
'@doc:place_holder(Region.executeCommand)'
def executeCommand(self, args):
return self._region.executeCommand(args)
'Spec of the region'
def _getSpec(self):
return Spec(self._region.getSpec())
'Dimensions of the region'
def _getDimensions(self):
return Dimensions(tuple(self._region.getDimensions()))
'Network for the region'
def _getNetwork(self):
return self._network
'Hash a region'
def __hash__(self):
return self._region.__hash__()
'Compare regions'
def __cmp__(self, other):
return (self._region == other._region)
'Returns functions to set/get the parameter. These are the strongly typed functions get/setParameterUInt32, etc. The return value is a pair: setfunc, getfunc If the parameter is not available on this region, setfunc/getfunc are None.'
def _getParameterMethods(self, paramName):
    if paramName in self._paramTypeCache:
        return self._paramTypeCache[paramName]
    try:
        paramSpec = self.getSpec().parameters.getByName(paramName)
    except:
        return (None, None)
    dataType = paramSpec.dataType
    dataTypeName = basicTypes[dataType]
    count = paramSpec.count
    if count == 1:
        # Dynamically resolve the strongly typed accessor pair, e.g.
        # getParameterUInt32/setParameterUInt32.
        x = 'etParameter' + dataTypeName
        try:
            g = getattr(self, 'g' + x)
            s = getattr(self, 's' + x)
        except AttributeError:
            raise Exception('Internal error: unknown parameter type %s' %
                            dataTypeName)
        info = (s, g)
    elif dataTypeName == 'Byte':
        # Byte arrays are passed around as strings.
        info = (self.setParameterString, self.getParameterString)
    else:
        helper = _ArrayParameterHelper(self, dataType)
        info = (self.setParameterArray, helper.getParameterArray)
    self._paramTypeCache[paramName] = info
    return info
'Get parameter value'
def getParameter(self, paramName):
    (setter, getter) = self._getParameterMethods(paramName)
    if getter is None:
        import exceptions
        raise exceptions.Exception(
            "getParameter -- parameter name '%s' does not exist in region "
            "%s of type %s" % (paramName, self.name, self.type))
    return getter(paramName)
'Set parameter value'
def setParameter(self, paramName, value):
    (setter, getter) = self._getParameterMethods(paramName)
    if setter is None:
        import exceptions
        raise exceptions.Exception(
            "setParameter -- parameter name '%s' does not exist in region "
            "%s of type %s" % (paramName, self.name, self.type))
    setter(paramName, value)
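The typed dispatch makes getParameter/setParameter uniform across scalar, string, and array parameters; a sketch with hypothetical region and parameter names:

# Sketch: generic parameter access resolved through the region Spec.
region = network.regions['level1']         # hypothetical region name
value = region.getParameter('someParam')   # hypothetical parameter
region.setParameter('someParam', value)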
'Auto forwarding of properties to get methods of internal region'
def _get(self, method):
return getattr(self._region, method)()
'Constructor.

- Initialize the internal engine_internal.Network class generated by SWIG
- Attach docstrings to selected methods'
def __init__(self, *args):
    engine_internal.Network.__init__(self, *args)
    # Table of (method/property name, docstring) pairs to attach.
    docTable = (
        (engine_internal.Network.getRegions,
         'Get the collection of regions in a network'),
    )
    for obj, docString in docTable:
        if isinstance(obj, str):
            prop = getattr(Network, obj)
            assert isinstance(prop, property)
            setattr(Network, obj,
                    property(prop.fget, prop.fset, prop.fdel, docString))
        else:
            obj.im_func.__doc__ = docString
'Get the collection of regions in a network. This is a tricky one. The collection of regions returned from the internal network is a collection of internal regions. The desired collection is a collection of net.Region objects that also point to this network (net.network) and not to the internal network. To achieve that, a CollectionWrapper class is used with a custom makeRegion() function (see below) as a value wrapper. The CollectionWrapper class wraps each value in the original collection with the result of the value wrapper.'
def _getRegions(self):
    def makeRegion(name, r):
        """Wrap an engine region with a nupic.engine_internal.Region.

        Also passes the containing nupic.engine_internal.Network network in
        _network. This function is passed as a value wrapper to the
        CollectionWrapper.
        """
        r = Region(r, self)
        return r

    regions = CollectionWrapper(engine_internal.Network.getRegions(self),
                                makeRegion)
    return regions
'@doc:place_holder(Network.addRegion)'
def addRegion(self, name, nodeType, nodeParams):
    engine_internal.Network.addRegion(self, name, nodeType, nodeParams)
    return self._getRegions()[name]
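A sketch of assembling a small network (the region type, params, and link arguments are illustrative, not a canonical configuration); once regions are configured and dimensioned, net.initialize() and net.run(n) drive the compute loop:

# Sketch: build a two-region network.
net = Network()
net.addRegion('level1', 'py.TestNode', '')   # illustrative region type
net.addRegion('level2', 'py.TestNode', '')
net.link('level1', 'level2', 'UniformLink', '')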
'@doc:place_holder(Network.addRegionFromBundle)'
def addRegionFromBundle(self, name, nodeType, dimensions, bundlePath, label):
    engine_internal.Network.addRegionFromBundle(self, name, nodeType,
                                                dimensions, bundlePath, label)
    return self._getRegions()[name]
'@doc:place_holder(Network.setPhases)'
def setPhases(self, name, phases):
    phases = engine_internal.UInt32Set(phases)
    engine_internal.Network.setPhases(self, name, phases)
'@doc:place_holder(Network.run)'
def run(self, n):
engine_internal.Network.run(self, n)
'@doc:place_holder(Network.disableProfiling)'
def disableProfiling(self, *args, **kwargs):
engine_internal.Network.disableProfiling(self, *args, **kwargs)
'@doc:place_holder(Network.enableProfiling)'
def enableProfiling(self, *args, **kwargs):
engine_internal.Network.enableProfiling(self, *args, **kwargs)
'@doc:place_holder(Network.getCallbacks)'
def getCallbacks(self, *args, **kwargs):
engine_internal.Network.getCallbacks(self, *args, **kwargs)
'@doc:place_holder(Network.initialize)'
def initialize(self, *args, **kwargs):
engine_internal.Network.initialize(self, *args, **kwargs)
'@doc:place_holder(Network.link)'
def link(self, *args, **kwargs):
engine_internal.Network.link(self, *args, **kwargs)
'@doc:place_holder(Network.removeLink)'
def removeLink(self, *args, **kwargs):
engine_internal.Network.removeLink(self, *args, **kwargs)
'@doc:place_holder(Network.removeRegion)'
def removeRegion(self, *args, **kwargs):
engine_internal.Network.removeRegion(self, *args, **kwargs)
'@doc:place_holder(Network.resetProfiling)'
def resetProfiling(self, *args, **kwargs):
engine_internal.Network.resetProfiling(self, *args, **kwargs)
'@doc:place_holder(Network.save)'
def save(self, *args, **kwargs):
    if len(args) > 0 and not isinstance(args[0], str):
        raise TypeError('Save path must be of type {}.'.format(str))
    engine_internal.Network.save(self, *args, **kwargs)
'Gets all region instances of a given class (for example, nupic.regions.sp_region.SPRegion).'
def getRegionsByType(self, regionClass):
    regions = []
    for region in self.regions.values():
        if type(region.getSelf()) is regionClass:
            regions.append(region)
    return regions
'Adds the module and class name for the region to the list of classes the network can use. :param regionClass: a pointer to a subclass of PyRegion'
@staticmethod
def registerRegion(regionClass):
engine_internal.Network.registerPyRegion(regionClass.__module__, regionClass.__name__)
'Unregisters a region from the internal list of regions :param str regionName: The name of the region to unregister (ex: regionName=regionClass.__name__)'
@staticmethod
def unregisterRegion(regionName):
engine_internal.Network.unregisterPyRegion(regionName)
'Example resolver - respond to all requests with NXDOMAIN'
def resolve(self, request, handler):
    reply = request.reply()
    reply.header.rcode = getattr(RCODE, 'NXDOMAIN')
    return reply
'Selectively enable log hooks depending on log argument (comma separated list of hooks to enable/disable):

- If empty, enable default log hooks
- If entry starts with \'+\' (eg. +send,+recv) enable hook
- If entry starts with \'-\' (eg. -data) disable hook
- If entry doesn\'t start with +/-, replace defaults

Prefix argument enables/disables log prefix'
def __init__(self, log='', prefix=True):
    default = ['request', 'reply', 'truncated', 'error']
    log = log.split(',') if log else []
    # Entries without a +/- prefix replace the defaults outright.
    enabled = set([s for s in log if s[0] not in '+-'] or default)
    # Apply '+'/'-' entries on top.
    for l in log:
        if l.startswith('+'):
            enabled.add(l[1:])
        elif l.startswith('-'):
            enabled.discard(l[1:])
    # Replace disabled hooks with the no-op log_pass.
    for l in ['log_recv', 'log_send', 'log_request', 'log_reply',
              'log_truncated', 'log_error', 'log_data']:
        if l[4:] not in enabled:
            setattr(self, l, self.log_pass)
    self.prefix = prefix
'resolver: resolver instance
address: listen address (default: "")
port: listen port (default: 53)
tcp: UDP (false) / TCP (true) (default: False)
logger: logger instance (default: DNSLogger)
handler: handler class (default: DNSHandler)
server: socketserver class (default: UDPServer/TCPServer)'
def __init__(self, resolver, address='', port=53, tcp=False, logger=None, handler=DNSHandler, server=None):
    if not server:
        server = TCPServer if tcp else UDPServer
    self.server = server((address, port), handler)
    self.server.resolver = resolver
    self.server.logger = logger or DNSLogger()
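A sketch wiring the pieces together (the port is illustrative; the resolver is the NXDOMAIN example above):

# Sketch: serve the example resolver over UDP on an unprivileged port.
resolver = BaseResolver()               # the NXDOMAIN resolve() above
logger = DNSLogger(log='-recv,-send')   # disable per-packet hooks
server = DNSServer(resolver, port=8053, logger=logger)
server.start_thread()                   # non-blocking; stop() to shut down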
'Create DNS label instance. Label can be specified as:

- a list/tuple of byte strings
- a byte string (split into components separated by b\'.\')
- a unicode string which will be encoded according to RFC3490/IDNA'
def __init__(self, label):
    if type(label) == DNSLabel:
        self.label = label.label
    elif type(label) in (list, tuple):
        self.label = tuple(label)
    elif not label or label in (b'.', '.'):
        self.label = ()
    elif type(label) is not bytes:
        # Unicode label: encode via IDNA and split into byte components.
        self.label = tuple(label.encode('idna').rstrip(b'.').split(b'.'))
    else:
        self.label = tuple(label.rstrip(b'.').split(b'.'))
'Prepend name to label'
def add(self, name):
    new = DNSLabel(name)
    if self.label:
        new.label += self.label
    return new
'Return True if label suffix matches'
def matchSuffix(self, suffix):
    suffix = DNSLabel(suffix)
    return self.label[-len(suffix.label):] == suffix.label
'Strip suffix from label'
def stripSuffix(self, suffix):
    suffix = DNSLabel(suffix)
    if self.label[-len(suffix.label):] == suffix.label:
        return DNSLabel(self.label[:-len(suffix.label)])
    else:
        return self
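A short sketch of the label helpers:

# Sketch: suffix matching/stripping and prepending on labels.
label = DNSLabel('www.example.com')
label.matchSuffix('example.com')    # True
label.stripSuffix('example.com')    # label for 'www'
label.add('ftp')                    # prepends: 'ftp.www.example.com.'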
'Add \'names\' dict to cache stored labels'
def __init__(self, data=b''):
    super(DNSBuffer, self).__init__(data)
    self.names = {}
'Decode label at current offset in buffer (following pointers to cached elements where necessary)'
def decode_name(self, last=(-1)):
    label = []
    done = False
    while not done:
        (length,) = self.unpack('!B')
        if get_bits(length, 6, 2) == 3:
            # Pointer (top two bits set): follow it to the cached name.
            self.offset -= 1
            pointer = get_bits(self.unpack('!H')[0], 0, 14)
            save = self.offset
            if last == save:
                raise BufferError(
                    'Recursive pointer in DNSLabel '
                    '[offset=%d,pointer=%d,length=%d]' %
                    (self.offset, pointer, len(self.data)))
            if pointer < self.offset:
                self.offset = pointer
            else:
                # Pointers may only point backwards.
                raise BufferError(
                    'Invalid pointer in DNSLabel '
                    '[offset=%d,pointer=%d,length=%d]' %
                    (self.offset, pointer, len(self.data)))
            label.extend(self.decode_name(save).label)
            self.offset = save
            done = True
        elif length > 0:
            l = self.get(length)
            try:
                l.decode()
            except UnicodeDecodeError:
                raise BufferError('Invalid label <%s>' % l)
            label.append(l)
        else:
            done = True
    return DNSLabel(label)
'Encode label and store at end of buffer (compressing cached elements where needed) and store elements in \'names\' dict'
def encode_name(self, name):
    if not isinstance(name, DNSLabel):
        name = DNSLabel(name)
    if len(name) > 253:
        raise DNSLabelError('Domain label too long: %r' % name)
    name = list(name.label)
    while name:
        if tuple(name) in self.names:
            # Cached suffix: emit a 2-byte compression pointer and stop.
            pointer = self.names[tuple(name)]
            pointer = set_bits(pointer, 3, 14, 2)
            self.pack('!H', pointer)
            return
        else:
            self.names[tuple(name)] = self.offset
            element = name.pop(0)
            if len(element) > 63:
                raise DNSLabelError('Label component too long: %r' % element)
            self.pack('!B', len(element))
            self.append(element)
    self.append(b'\x00')
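Compression can be observed directly: encoding the same name twice emits the full wire form once, then a 2-byte pointer; a sketch:

# Sketch: the second encoding hits the 'names' cache and emits only a pointer.
buf = DNSBuffer()
buf.encode_name('abc.com')   # 3 'abc' 3 'com' 0  -> 9 bytes
start = buf.offset
buf.encode_name('abc.com')
assert buf.offset - start == 2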
'Encode and store label with no compression (needed for RRSIG)'
def encode_name_nocompress(self, name):
    if not isinstance(name, DNSLabel):
        name = DNSLabel(name)
    if len(name) > 253:
        raise DNSLabelError('Domain label too long: %r' % name)
    name = list(name.label)
    while name:
        element = name.pop(0)
        if len(element) > 63:
            raise DNSLabelError('Label component too long: %r' % element)
        self.pack('!B', len(element))
        self.append(element)
    self.append(b'\x00')
'Initialise resolver from zone file. Stores RRs as a list of (label, type, rr) tuples. If \'glob\' is True use glob match against zone file.'
def __init__(self, zone, glob=False):
    self.zone = [(rr.rname, QTYPE[rr.rtype], rr) for rr in RR.fromZone(zone)]
    self.glob = glob
    self.eq = 'matchGlob' if glob else '__eq__'
'Respond to DNS request - parameters are request packet & handler. Method is expected to return DNS response'
def resolve(self, request, handler):
    reply = request.reply()
    qname = request.q.qname
    qtype = QTYPE[request.q.qtype]
    for name, rtype, rr in self.zone:
        # Check whether label & type match.
        if (getattr(qname, self.eq)(name) and
                (qtype == rtype or qtype == 'ANY' or rtype == 'CNAME')):
            # For a glob match, fix up the reply label.
            if self.glob:
                a = copy.copy(rr)
                a.rname = qname
                reply.add_answer(a)
            else:
                reply.add_answer(rr)
            # Add any A/AAAA records associated with the reply target to
            # the additional section.
            if rtype in ['CNAME', 'NS', 'MX', 'PTR']:
                for a_name, a_rtype, a_rr in self.zone:
                    if a_name == rr.rdata.label and a_rtype in ['A', 'AAAA']:
                        reply.add_ar(a_rr)
    if not reply.rr:
        reply.header.rcode = RCODE.NXDOMAIN
    return reply
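A sketch resolving against an in-memory zone (the zone text is illustrative):

# Sketch: one-record zone answered directly, no server needed.
zone = 'abc.com. 60 IN A 1.2.3.4'
resolver = ZoneResolver(zone)
request = DNSRecord.question('abc.com')
reply = resolver.resolve(request, None)   # handler is unused here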
'Initialise Buffer from data'
def __init__(self, data=b''):
    self.data = bytearray(data)
    self.offset = 0
'Return bytes remaining'
def remaining(self):
return (len(self.data) - self.offset)
'Gen len bytes at current offset (& increment offset)'
def get(self, length):
    if length > self.remaining():
        raise BufferError(
            'Not enough bytes [offset=%d,remaining=%d,requested=%d]' %
            (self.offset, self.remaining(), length))
    start = self.offset
    end = self.offset + length
    self.offset += length
    return bytes(self.data[start:end])
'Return data as hex string'
def hex(self):
return binascii.hexlify(self.data)
'Pack data at end of data according to fmt (from struct) & increment offset'
def pack(self, fmt, *args):
    self.offset += struct.calcsize(fmt)
    self.data += struct.pack(fmt, *args)
'Append s to end of data & increment offset'
def append(self, s):
    self.offset += len(s)
    self.data += s
'Modify data at offset `ptr`'
def update(self, ptr, fmt, *args):
    s = struct.pack(fmt, *args)
    self.data[ptr:ptr + len(s)] = s
'Unpack data at current offset according to fmt (from struct)'
def unpack(self, fmt):
    try:
        data = self.get(struct.calcsize(fmt))
        return struct.unpack(fmt, data)
    except struct.error:
        raise BufferError("Error unpacking struct '%s' <%s>" %
                          (fmt, binascii.hexlify(data).decode()))
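A sketch of the pack/update/unpack round trip on a bare Buffer:

# Sketch: write two 16-bit fields, patch the first in place, read both back.
buf = Buffer()
buf.pack('!HH', 1, 2)
buf.update(0, '!H', 99)
buf.offset = 0
print(buf.unpack('!HH'))   # (99, 2)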
'Parse DNS packet data and return DNSRecord instance. Recursively parses sections (calling appropriate parse method).'
@classmethod
def parse(cls, packet):
    buffer = DNSBuffer(packet)
    try:
        header = DNSHeader.parse(buffer)
        questions = []
        rr = []
        auth = []
        ar = []
        for i in range(header.q):
            questions.append(DNSQuestion.parse(buffer))
        for i in range(header.a):
            rr.append(RR.parse(buffer))
        for i in range(header.auth):
            auth.append(RR.parse(buffer))
        for i in range(header.ar):
            ar.append(RR.parse(buffer))
        return cls(header, questions, rr, auth=auth, ar=ar)
    except DNSError:
        raise
    except (BufferError, BimapError) as e:
        raise DNSError('Error unpacking DNSRecord [offset=%d]: %s' %
                       (buffer.offset, e))
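A sketch of the pack/parse round trip:

# Sketch: a packed question parses back to an equivalent record.
q = DNSRecord.question('example.com')
packet = q.pack()
parsed = DNSRecord.parse(packet)
assert str(parsed.q.qname) == 'example.com.'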
'Shortcut to create question

>>> q = DNSRecord.question("www.google.com")
>>> print(q)
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: ...
;; flags: rd; QUERY: 1, ANSWER: 0, AUTHORITY: 0, ADDITIONAL: 0
;; QUESTION SECTION:
;www.google.com.             IN      A

>>> q = DNSRecord.question("www.google.com","NS")
>>> print(q)
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: ...
;; flags: rd; QUERY: 1, ANSWER: 0, AUTHORITY: 0, ADDITIONAL: 0
;; QUESTION SECTION:
;www.google.com.             IN      NS'
@classmethod
def question(cls, qname, qtype='A', qclass='IN'):
return DNSRecord(q=DNSQuestion(qname, getattr(QTYPE, qtype), getattr(CLASS, qclass)))
'Create new DNSRecord'
def __init__(self, header=None, questions=None, rr=None, q=None, a=None, auth=None, ar=None):
    self.header = header or DNSHeader()
    self.questions = questions or []
    self.rr = rr or []
    self.auth = auth or []
    self.ar = ar or []
    # Shortcuts to add a single question (q) / answer (a).
    if q:
        self.questions.append(q)
    if a:
        self.rr.append(a)
    self.set_header_qa()
'Create skeleton reply packet

>>> q = DNSRecord.question("abc.com")
>>> a = q.reply()
>>> a.add_answer(RR("abc.com",QTYPE.A,rdata=A("1.2.3.4"),ttl=60))
>>> print(a)
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: ...
;; flags: qr aa rd ra; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 0
;; QUESTION SECTION:
;abc.com.                    IN      A
;; ANSWER SECTION:
abc.com.                60   IN      A       1.2.3.4'
def reply(self, ra=1, aa=1):
    return DNSRecord(DNSHeader(id=self.header.id,
                               bitmap=self.header.bitmap,
                               qr=1, ra=ra, aa=aa),
                     q=self.q)
'Create reply with response data in zone-file format

>>> q = DNSRecord.question("abc.com")
>>> a = q.replyZone("abc.com 60 A 1.2.3.4")
>>> print(a)
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: ...
;; flags: qr aa rd ra; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 0
;; QUESTION SECTION:
;abc.com.                    IN      A
;; ANSWER SECTION:
abc.com.                60   IN      A       1.2.3.4'
def replyZone(self, zone, ra=1, aa=1):
    return DNSRecord(DNSHeader(id=self.header.id,
                               bitmap=self.header.bitmap,
                               qr=1, ra=ra, aa=aa),
                     q=self.q,
                     rr=RR.fromZone(zone))
'Add question(s)

>>> q = DNSRecord()
>>> q.add_question(DNSQuestion("abc.com"),
...                DNSQuestion("abc.com",QTYPE.MX))
>>> print(q)
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: ...
;; flags: rd; QUERY: 2, ANSWER: 0, AUTHORITY: 0, ADDITIONAL: 0
;; QUESTION SECTION:
;abc.com.                    IN      A
;abc.com.                    IN      MX'
def add_question(self, *q):
    self.questions.extend(q)
    self.set_header_qa()
'Add answer(s)

>>> q = DNSRecord.question("abc.com")
>>> a = q.reply()
>>> a.add_answer(*RR.fromZone("abc.com A 1.2.3.4"))
>>> print(a)
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: ...
;; flags: qr aa rd ra; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 0
;; QUESTION SECTION:
;abc.com.                    IN      A
;; ANSWER SECTION:
abc.com.                0    IN      A       1.2.3.4'
def add_answer(self, *rr):
    self.rr.extend(rr)
    self.set_header_qa()