# ===== dpkt-master/dpkt/tcp.py =====
# $Id: tcp.py 42 2007-08-02 22:38:47Z jon.oberheide $
# -*- coding: utf-8 -*-
"""Transmission Control Protocol."""
from __future__ import print_function
from __future__ import absolute_import
from . import dpkt
from .compat import compat_ord
# TCP control flags
TH_FIN = 0x01 # end of data
TH_SYN = 0x02 # synchronize sequence numbers
TH_RST = 0x04 # reset connection
TH_PUSH = 0x08 # push
TH_ACK = 0x10 # acknowledgment number set
TH_URG = 0x20 # urgent pointer set
TH_ECE = 0x40 # ECN echo, RFC 3168
TH_CWR = 0x80 # congestion window reduced
TH_NS = 0x100 # nonce sum, RFC 3540
TCP_PORT_MAX = 65535 # maximum port
TCP_WIN_MAX = 65535 # maximum (unscaled) window
def tcp_flags_to_str(val):
ff = []
if val & TH_FIN:
ff.append('FIN')
if val & TH_SYN:
ff.append('SYN')
if val & TH_RST:
ff.append('RST')
if val & TH_PUSH:
ff.append('PUSH')
if val & TH_ACK:
ff.append('ACK')
if val & TH_URG:
ff.append('URG')
if val & TH_ECE:
ff.append('ECE')
if val & TH_CWR:
ff.append('CWR')
if val & TH_NS:
ff.append('NS')
return ','.join(ff)
class TCP(dpkt.Packet):
"""Transmission Control Protocol.
The Transmission Control Protocol (TCP) is one of the main protocols of the Internet protocol suite.
It originated in the initial network implementation in which it complemented the Internet Protocol (IP).
Attributes:
sport - source port
dport - destination port
seq - sequence number
ack - acknowledgement number
off - data offset in 32-bit words
flags - TCP flags
win - TCP window size
sum - checksum
urp - urgent pointer
opts - TCP options buffer; call parse_opts() to parse
"""
__hdr__ = (
('sport', 'H', 0xdead),
('dport', 'H', 0),
('seq', 'I', 0xdeadbeef),
('ack', 'I', 0),
('_off_flags', 'H', ((5 << 12) | TH_SYN)),
('win', 'H', TCP_WIN_MAX),
('sum', 'H', 0),
('urp', 'H', 0)
)
__bit_fields__ = {
'_off_flags': (
('off', 4), # 4 hi bits
('_rsv', 3), # 3 bits reserved
('flags', 9), # 9 lo bits
)
}
__pprint_funcs__ = {
'flags': tcp_flags_to_str,
'sum': hex, # display checksum in hex
}
opts = b''
def __len__(self):
return self.__hdr_len__ + len(self.opts) + len(self.data)
def __bytes__(self):
return self.pack_hdr() + bytes(self.opts) + bytes(self.data)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
ol = ((self._off_flags >> 12) << 2) - self.__hdr_len__
if ol < 0:
raise dpkt.UnpackError('invalid header length')
self.opts = buf[self.__hdr_len__:self.__hdr_len__ + ol]
self.data = buf[self.__hdr_len__ + ol:]
# Options (opt_type) - http://www.iana.org/assignments/tcp-parameters
TCP_OPT_EOL = 0 # end of option list
TCP_OPT_NOP = 1 # no operation
TCP_OPT_MSS = 2 # maximum segment size
TCP_OPT_WSCALE = 3 # window scale factor, RFC 1072
TCP_OPT_SACKOK = 4 # SACK permitted, RFC 2018
TCP_OPT_SACK = 5 # SACK, RFC 2018
TCP_OPT_ECHO = 6 # echo (obsolete), RFC 1072
TCP_OPT_ECHOREPLY = 7 # echo reply (obsolete), RFC 1072
TCP_OPT_TIMESTAMP = 8 # timestamp, RFC 1323
TCP_OPT_POCONN = 9 # partial order conn, RFC 1693
TCP_OPT_POSVC = 10 # partial order service, RFC 1693
TCP_OPT_CC = 11 # connection count, RFC 1644
TCP_OPT_CCNEW = 12 # CC.NEW, RFC 1644
TCP_OPT_CCECHO = 13 # CC.ECHO, RFC 1644
TCP_OPT_ALTSUM = 14 # alt checksum request, RFC 1146
TCP_OPT_ALTSUMDATA = 15 # alt checksum data, RFC 1146
TCP_OPT_SKEETER = 16 # Skeeter
TCP_OPT_BUBBA = 17 # Bubba
TCP_OPT_TRAILSUM = 18 # trailer checksum
TCP_OPT_MD5 = 19 # MD5 signature, RFC 2385
TCP_OPT_SCPS = 20 # SCPS capabilities
TCP_OPT_SNACK = 21 # selective negative acks
TCP_OPT_REC = 22 # record boundaries
TCP_OPT_CORRUPT = 23 # corruption experienced
TCP_OPT_SNAP = 24 # SNAP
TCP_OPT_TCPCOMP = 26 # TCP compression filter
TCP_OPT_MAX = 27
def parse_opts(buf):
"""Parse TCP option buffer into a list of (option, data) tuples."""
opts = []
while buf:
o = compat_ord(buf[0])
if o > TCP_OPT_NOP:
try:
# advance buffer at least 2 bytes = 1 type + 1 length
l_ = max(2, compat_ord(buf[1]))
d, buf = buf[2:l_], buf[l_:]
except (IndexError, ValueError):
# print 'bad option', repr(str(buf))
opts.append(None) # XXX
break
else:
# options 0 and 1 are not followed by length byte
d, buf = b'', buf[1:]
opts.append((o, d))
return opts
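# Illustrative helper (not part of dpkt): walk the (option, data) tuples
# returned by parse_opts() and pull out one option's payload, e.g. the MSS.
def find_opt(opts_buf, opt_type):
    for opt in parse_opts(opts_buf):
        if opt is not None and opt[0] == opt_type:
            return opt[1]
    return None
# e.g. find_opt(b'\x02\x04\x05\xb4', TCP_OPT_MSS) == b'\x05\xb4'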
def test_parse_opts():
# normal scenarios
buf = b'\x02\x04\x23\x00\x01\x01\x04\x02'
opts = parse_opts(buf)
assert opts == [
(TCP_OPT_MSS, b'\x23\x00'),
(TCP_OPT_NOP, b''),
(TCP_OPT_NOP, b''),
(TCP_OPT_SACKOK, b'')
]
buf = b'\x01\x01\x05\x0a\x37\xf8\x19\x70\x37\xf8\x29\x78'
opts = parse_opts(buf)
assert opts == [
(TCP_OPT_NOP, b''),
(TCP_OPT_NOP, b''),
(TCP_OPT_SACK, b'\x37\xf8\x19\x70\x37\xf8\x29\x78')
]
# test a zero-length option
buf = b'\x02\x00\x01'
opts = parse_opts(buf)
assert opts == [
(TCP_OPT_MSS, b''),
(TCP_OPT_NOP, b'')
]
# test a one-byte malformed option
buf = b'\xff'
opts = parse_opts(buf)
assert opts == [None]
def test_offset():
tcpheader = TCP(b'\x01\xbb\xc0\xd7\xb6\x56\xa8\xb9\xd1\xac\xaa\xb1\x50\x18\x40\x00\x56\xf8\x00\x00')
assert tcpheader.off == 5
# test setting header offset
tcpheader.off = 8
assert bytes(tcpheader) == b'\x01\xbb\xc0\xd7\xb6\x56\xa8\xb9\xd1\xac\xaa\xb1\x80\x18\x40\x00\x56\xf8\x00\x00'
def test_tcp_flags_to_str():
assert tcp_flags_to_str(0x18) == 'PUSH,ACK'
assert tcp_flags_to_str(0x12) == 'SYN,ACK'
# for code coverage
assert tcp_flags_to_str(0x1ff) == 'FIN,SYN,RST,PUSH,ACK,URG,ECE,CWR,NS'
def test_tcp_unpack():
data = (b'\x00\x50\x0d\x2c\x11\x4c\x61\x8b\x38\xaf\xfe\x14\x70\x12\x16\xd0'
b'\x5b\xdc\x00\x00\x02\x04\x05\x64\x01\x01\x04\x02')
tcp = TCP(data)
assert tcp.flags == (TH_SYN | TH_ACK)
assert tcp.off == 7
assert tcp.win == 5840
assert tcp.dport == 3372
assert tcp.seq == 290218379
assert tcp.ack == 951057940
def test_tcp_pack():
tcp = TCP(
sport=3372,
dport=80,
seq=951057939,
ack=0,
off=7,
flags=TH_SYN,
win=8760,
sum=0xc30c,
urp=0,
opts=b'\x02\x04\x05\xb4\x01\x01\x04\x02'
)
assert bytes(tcp) == (
b'\x0d\x2c\x00\x50\x38\xaf\xfe\x13\x00\x00\x00\x00\x70\x02\x22\x38'
b'\xc3\x0c\x00\x00\x02\x04\x05\xb4\x01\x01\x04\x02')
# TODO: add checksum calculation
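# Sketch for the TODO above: verifying a TCP checksum over the IPv4
# pseudo-header with dpkt.in_cksum (used elsewhere in dpkt, see icmp.py
# below). The function itself is hypothetical and for illustration only;
# it returns 0 when the segment's embedded checksum is valid.
def tcp_cksum_ipv4(src, dst, segment):
    import struct
    # pseudo-header: src(4) + dst(4) + zero(1) + protocol(1) + TCP length(2)
    pseudo = src + dst + struct.pack('>xBH', 6, len(segment))
    return dpkt.in_cksum(pseudo + segment)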
# ===== dpkt-master/dpkt/rip.py =====
# $Id: rip.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Routing Information Protocol."""
from __future__ import print_function
from __future__ import absolute_import
from . import dpkt
# RIP v2 - RFC 2453
# http://tools.ietf.org/html/rfc2453
REQUEST = 1
RESPONSE = 2
class RIP(dpkt.Packet):
"""Routing Information Protocol.
TODO: Longer class information....
Attributes:
__hdr__: Header fields of RIP.
TODO.
"""
__hdr__ = (
('cmd', 'B', REQUEST),
('v', 'B', 2),
('rsvd', 'H', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
l_ = []
self.auth = None
while self.data:
rte = RTE(self.data[:20])
if rte.family == 0xFFFF:
self.auth = Auth(self.data[:20])
else:
l_.append(rte)
self.data = self.data[20:]
self.data = self.rtes = l_
def __len__(self):
n = self.__hdr_len__
if self.auth:
n += len(self.auth)
n += sum(map(len, self.rtes))
return n
def __bytes__(self):
auth = b''
if self.auth:
auth = bytes(self.auth)
return self.pack_hdr() + auth + b''.join(map(bytes, self.rtes))
class RTE(dpkt.Packet):
__hdr__ = (
('family', 'H', 2),
('route_tag', 'H', 0),
('addr', 'I', 0),
('subnet', 'I', 0),
('next_hop', 'I', 0),
('metric', 'I', 1)
)
class Auth(dpkt.Packet):
__hdr__ = (
('rsvd', 'H', 0xFFFF),
('type', 'H', 2),
('auth', '16s', 0)
)
def test_creation_with_auth():
from binascii import unhexlify
buf_auth = unhexlify(
'ffff' # rsvd
'0002' # type
'0123456789abcdef' # auth
'0123456789abcdef' # auth
)
auth_direct = Auth(buf_auth)
assert bytes(auth_direct) == buf_auth
buf_rte = unhexlify(
'0002' # family
'0000' # route_tag
'01020300' # addr
'ffffff00' # subnet
'00000000' # next_hop
'00000001' # metric
)
rte = RTE(buf_rte)
assert bytes(rte) == buf_rte
buf_rip = unhexlify(
'02' # cmd
'02' # v
'0000' # rsvd
)
rip = RIP(buf_rip + buf_auth + buf_rte)
assert rip.auth
assert rip.auth.rsvd == 0xffff
assert rip.auth.type == 2
assert rip.auth.auth == unhexlify('0123456789abcdef') * 2
assert len(rip.rtes) == 1
rte = rip.rtes[0]
assert rte.family == 2
assert rte.route_tag == 0
assert rte.metric == 1
assert bytes(rip) == buf_rip + buf_auth + buf_rte
assert len(rip) == len(buf_rip + buf_auth + buf_rte)
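# Hedged round-trip sketch: RIP.__bytes__ reads self.auth and self.rtes,
# which unpack() normally sets, so building a packet by hand means assigning
# them explicitly (illustrative, mirrors the parsing test above).
def test_creation_no_auth():
    rip = RIP()
    rip.auth = None
    rip.rtes = [RTE()]
    assert bytes(rip) == b'\x01\x02\x00\x00' + bytes(RTE())
    assert len(rip) == 24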
# ===== dpkt-master/dpkt/diameter.py =====
# $Id: diameter.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Diameter."""
from __future__ import print_function
from __future__ import absolute_import
import struct
from . import dpkt
from .compat import compat_ord
# Diameter Base Protocol - RFC 3588
# http://tools.ietf.org/html/rfc3588
# Request/Answer Command Codes
ABORT_SESSION = 274
ACCOUNTING = 271
CAPABILITIES_EXCHANGE = 257
DEVICE_WATCHDOG = 280
DISCONNECT_PEER = 282
RE_AUTH = 258
SESSION_TERMINATION = 275
class Diameter(dpkt.Packet):
"""Diameter.
Diameter is an authentication, authorization, and accounting protocol for computer networks. It evolved from the
earlier RADIUS protocol. It belongs to the application layer protocols in the internet protocol suite.
Attributes:
__hdr__: Header fields of Diameter.
        v: (int): Version. The version of the Diameter Base Protocol.
As of 2014, the only value supported is 1. (1 byte)
len: (bytes): Message Length. The Message Length field indicates the length of the Diameter message in
bytes, including the header fields and the padded AVPs. (3 bytes)
flags: (int): Command flags. (Request, Proxiable, Error, Potentially re-transmitted message) (1 byte)
cmd: (bytes): Commands. Determine the action that is to be taken for a particular message. (3 bytes)
app_id: (int): Application-ID. Application-ID is used to identify for which Diameter application the
message is applicable. (4 bytes)
hop_id: (int): Hop-by-Hop Identifier. Used to match the requests with their answers as the same value in
the request is used in the response. (4 bytes)
        end_id: (int): End-to-End Identifier. Used to detect duplicate messages along with the combination of the
Origin-Host AVP. (4 bytes)
"""
__hdr__ = (
('v', 'B', 1),
('len', '3s', 0),
('flags', 'B', 0),
('cmd', '3s', 0),
('app_id', 'I', 0),
('hop_id', 'I', 0),
('end_id', 'I', 0)
)
@property
def request_flag(self):
return (self.flags >> 7) & 0x1
@request_flag.setter
def request_flag(self, r):
self.flags = (self.flags & ~0x80) | ((r & 0x1) << 7)
@property
def proxiable_flag(self):
return (self.flags >> 6) & 0x1
@proxiable_flag.setter
def proxiable_flag(self, p):
self.flags = (self.flags & ~0x40) | ((p & 0x1) << 6)
@property
def error_flag(self):
return (self.flags >> 5) & 0x1
@error_flag.setter
def error_flag(self, e):
self.flags = (self.flags & ~0x20) | ((e & 0x1) << 5)
@property
def retransmit_flag(self):
return (self.flags >> 4) & 0x1
@retransmit_flag.setter
def retransmit_flag(self, t):
self.flags = (self.flags & ~0x10) | ((t & 0x1) << 4)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.cmd = (compat_ord(self.cmd[0]) << 16) | \
(compat_ord(self.cmd[1]) << 8) | \
(compat_ord(self.cmd[2]))
self.len = (compat_ord(self.len[0]) << 16) | \
(compat_ord(self.len[1]) << 8) | \
(compat_ord(self.len[2]))
self.data = self.data[:self.len - self.__hdr_len__]
l_ = []
while self.data:
avp = AVP(self.data)
l_.append(avp)
self.data = self.data[len(avp):]
self.data = self.avps = l_
def pack_hdr(self):
self.len = struct.pack("BBB", (self.len >> 16) & 0xff, (self.len >> 8) & 0xff, self.len & 0xff)
self.cmd = struct.pack("BBB", (self.cmd >> 16) & 0xff, (self.cmd >> 8) & 0xff, self.cmd & 0xff)
return dpkt.Packet.pack_hdr(self)
def __len__(self):
return self.__hdr_len__ + sum(map(len, self.data))
def __bytes__(self):
return self.pack_hdr() + b''.join(map(bytes, self.data))
class AVP(dpkt.Packet):
__hdr__ = (
('code', 'I', 0),
('flags', 'B', 0),
('len', '3s', 0),
)
@property
def vendor_flag(self):
return (self.flags >> 7) & 0x1
@vendor_flag.setter
def vendor_flag(self, v):
self.flags = (self.flags & ~0x80) | ((v & 0x1) << 7)
@property
def mandatory_flag(self):
return (self.flags >> 6) & 0x1
@mandatory_flag.setter
def mandatory_flag(self, m):
self.flags = (self.flags & ~0x40) | ((m & 0x1) << 6)
@property
def protected_flag(self):
return (self.flags >> 5) & 0x1
@protected_flag.setter
def protected_flag(self, p):
self.flags = (self.flags & ~0x20) | ((p & 0x1) << 5)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.len = (compat_ord(self.len[0]) << 16) | \
(compat_ord(self.len[1]) << 8) | \
(compat_ord(self.len[2]))
if self.vendor_flag:
self.vendor = struct.unpack('>I', self.data[:4])[0]
self.data = self.data[4:self.len - self.__hdr_len__]
else:
self.data = self.data[:self.len - self.__hdr_len__]
def pack_hdr(self):
self.len = struct.pack("BBB", (self.len >> 16) & 0xff, (self.len >> 8) & 0xff, self.len & 0xff)
data = dpkt.Packet.pack_hdr(self)
if self.vendor_flag:
data += struct.pack('>I', self.vendor)
return data
def __len__(self):
length = self.__hdr_len__ + len(self.data)
if self.vendor_flag:
length += 4
return length
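# The 3-byte 'len' and 'cmd' fields above are decoded by hand because struct
# has no 24-bit format code. A quick equivalence check (illustrative only):
def _u24(b):
    return (compat_ord(b[0]) << 16) | (compat_ord(b[1]) << 8) | compat_ord(b[2])
assert _u24(b'\x01\x02\x03') == struct.unpack('>I', b'\x00' + b'\x01\x02\x03')[0] == 0x010203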
__s = (b'\x01\x00\x00\x28\x80\x00\x01\x18\x00\x00\x00\x00\x00\x00\x41\xc8\x00\x00\x00\x0c\x00\x00'
b'\x01\x08\x40\x00\x00\x0c\x68\x30\x30\x32\x00\x00\x01\x28\x40\x00\x00\x08')
__t = (b'\x01\x00\x00\x2c\x80\x00\x01\x18\x00\x00\x00\x00\x00\x00\x41\xc8\x00\x00\x00\x0c\x00\x00'
b'\x01\x08\xc0\x00\x00\x10\xde\xad\xbe\xef\x68\x30\x30\x32\x00\x00\x01\x28\x40\x00\x00\x08')
def test_pack():
d = Diameter(__s)
assert (__s == bytes(d))
assert len(d) == len(__s)
d = Diameter(__t)
assert (__t == bytes(d))
assert len(d) == len(__t)
def test_unpack():
d = Diameter(__s)
assert (d.len == 40)
# assert (d.cmd == DEVICE_WATCHDOG_REQUEST)
assert (d.request_flag == 1)
assert (d.error_flag == 0)
assert (len(d.avps) == 2)
avp = d.avps[0]
# assert (avp.code == ORIGIN_HOST)
assert (avp.mandatory_flag == 1)
assert (avp.vendor_flag == 0)
assert (avp.len == 12)
assert (len(avp) == 12)
assert (avp.data == b'\x68\x30\x30\x32')
# also test the optional vendor id support
d = Diameter(__t)
assert (d.len == 44)
avp = d.avps[0]
assert (avp.vendor_flag == 1)
assert (avp.len == 16)
assert (len(avp) == 16)
assert (avp.vendor == 3735928559)
assert (avp.data == b'\x68\x30\x30\x32')
def test_diameter_properties():
diameter = Diameter()
for prop in ['request_flag', 'proxiable_flag', 'error_flag', 'retransmit_flag']:
assert hasattr(diameter, prop)
assert getattr(diameter, prop) == 0
setattr(diameter, prop, 1)
assert getattr(diameter, prop) == 1
def test_avp_properties():
avp = AVP()
for prop in ['vendor_flag', 'mandatory_flag', 'protected_flag']:
assert hasattr(avp, prop)
assert getattr(avp, prop) == 0
setattr(avp, prop, 1)
assert getattr(avp, prop) == 1
# ===== dpkt-master/dpkt/loopback.py =====
# $Id: loopback.py 38 2007-03-17 03:33:16Z dugsong $
# -*- coding: utf-8 -*-
"""Platform-dependent loopback header."""
# https://wiki.wireshark.org/NullLoopback
from __future__ import absolute_import
from . import dpkt
from . import ethernet
from . import ip
from . import ip6
class Loopback(dpkt.Packet):
"""Platform-dependent loopback header.
TODO: Longer class information....
Attributes:
__hdr__: Header fields of Loopback.
TODO.
"""
__hdr__ = (('family', 'I', 0), )
__byte_order__ = '@'
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.family in (0x02, 0x02000000):
self.family = 2
self.data = ip.IP(self.data)
elif self.family in (0x18, 0x18000000):
self.family = 24
self.data = ip6.IP6(self.data)
elif self.family in (0x1c, 0x1c000000):
self.family = 28
self.data = ip6.IP6(self.data)
elif self.family in (0x1e, 0x1e000000):
self.family = 30
self.data = ip6.IP6(self.data)
else:
self.data = ethernet.Ethernet(self.data)
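# Why unpack() accepts two encodings per family (illustrative): the loopback
# family field is written in the capturing host's byte order, so AF_INET (2)
# recorded on a little-endian machine reads back as 0x02000000 when decoded
# big-endian. A quick demonstration:
def _demo_family_byte_order():
    import struct
    le = struct.pack('<I', 2)  # how a little-endian host stores AF_INET
    assert struct.unpack('>I', le)[0] == 0x02000000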
def test_ethernet_unpack():
buf = b'\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x08\x00'
hdr = b'\x00\x02\x00\x02'
lo = Loopback(hdr + buf)
assert lo.family in (0x02000200, 0x00020002) # little endian, big endian
assert isinstance(lo.data, ethernet.Ethernet)
assert lo.data.src == b'\x07\x08\t\n\x0b\x0c'
assert lo.data.dst == b'\x01\x02\x03\x04\x05\x06'
def test_ip_unpack():
buf = b'E\x00\x004\xbd\x04@\x00@\x06\x7f\xbd\x7f\x00\x00\x02\x7f\x00\x00\x01'
for hdr in (b'\x00\x00\x00\x02', b'\x02\x00\x00\x00'):
lo = Loopback(hdr + buf)
assert lo.family == 2
assert isinstance(lo.data, ip.IP)
assert lo.data.src == b'\x7f\x00\x00\x02'
assert lo.data.dst == b'\x7f\x00\x00\x01'
def test_ip6_unpack():
import struct
buf = (b'\x60\x00\x00\x00\x00\x14\x06\x38\x26\x07\xf8\xb0\x40\x0c\x0c\x03\x00\x00\x00\x00\x00\x00'
b'\x00\x1a\x20\x01\x04\x70\xe5\xbf\xde\xad\x49\x57\x21\x74\xe8\x2c\x48\x87')
hdr_suffix = b'\x00' * 3
for family in (24, 28, 30):
hdr = struct.pack('B', family) + hdr_suffix
lo = Loopback(hdr + buf)
assert lo.family == family
assert isinstance(lo.data, ip6.IP6)
assert lo.data.src == b'&\x07\xf8\xb0@\x0c\x0c\x03\x00\x00\x00\x00\x00\x00\x00\x1a'
assert lo.data.dst == b' \x01\x04p\xe5\xbf\xde\xadIW!t\xe8,H\x87'
# ===== dpkt-master/dpkt/icmp.py =====
# $Id: icmp.py 45 2007-08-03 00:05:22Z jon.oberheide $
# -*- coding: utf-8 -*-
"""Internet Control Message Protocol."""
from __future__ import print_function
from __future__ import absolute_import
from . import dpkt
# Types (icmp_type) and codes (icmp_code) -
# http://www.iana.org/assignments/icmp-parameters
ICMP_CODE_NONE = 0 # for types without codes
ICMP_ECHOREPLY = 0 # echo reply
ICMP_UNREACH = 3 # dest unreachable, codes:
ICMP_UNREACH_NET = 0 # bad net
ICMP_UNREACH_HOST = 1 # bad host
ICMP_UNREACH_PROTO = 2 # bad protocol
ICMP_UNREACH_PORT = 3 # bad port
ICMP_UNREACH_NEEDFRAG = 4 # IP_DF caused drop
ICMP_UNREACH_SRCFAIL = 5 # src route failed
ICMP_UNREACH_NET_UNKNOWN = 6 # unknown net
ICMP_UNREACH_HOST_UNKNOWN = 7 # unknown host
ICMP_UNREACH_ISOLATED = 8 # src host isolated
ICMP_UNREACH_NET_PROHIB = 9 # for crypto devs
ICMP_UNREACH_HOST_PROHIB = 10 # ditto
ICMP_UNREACH_TOSNET = 11 # bad tos for net
ICMP_UNREACH_TOSHOST = 12 # bad tos for host
ICMP_UNREACH_FILTER_PROHIB = 13 # prohibited access
ICMP_UNREACH_HOST_PRECEDENCE = 14 # precedence error
ICMP_UNREACH_PRECEDENCE_CUTOFF = 15 # precedence cutoff
ICMP_SRCQUENCH = 4 # packet lost, slow down
ICMP_REDIRECT = 5 # shorter route, codes:
ICMP_REDIRECT_NET = 0 # for network
ICMP_REDIRECT_HOST = 1 # for host
ICMP_REDIRECT_TOSNET = 2 # for tos and net
ICMP_REDIRECT_TOSHOST = 3 # for tos and host
ICMP_ALTHOSTADDR = 6 # alternate host address
ICMP_ECHO = 8 # echo service
ICMP_RTRADVERT = 9 # router advertise, codes:
ICMP_RTRADVERT_NORMAL = 0 # normal
ICMP_RTRADVERT_NOROUTE_COMMON = 16 # selective routing
ICMP_RTRSOLICIT = 10 # router solicitation
ICMP_TIMEXCEED = 11 # time exceeded, code:
ICMP_TIMEXCEED_INTRANS = 0 # ttl==0 in transit
ICMP_TIMEXCEED_REASS = 1 # ttl==0 in reass
ICMP_PARAMPROB = 12 # ip header bad
ICMP_PARAMPROB_ERRATPTR = 0 # error at param ptr
ICMP_PARAMPROB_OPTABSENT = 1 # req. opt. absent
ICMP_PARAMPROB_LENGTH = 2 # bad length
ICMP_TSTAMP = 13 # timestamp request
ICMP_TSTAMPREPLY = 14 # timestamp reply
ICMP_INFO = 15 # information request
ICMP_INFOREPLY = 16 # information reply
ICMP_MASK = 17 # address mask request
ICMP_MASKREPLY = 18 # address mask reply
ICMP_TRACEROUTE = 30 # traceroute
ICMP_DATACONVERR = 31 # data conversion error
ICMP_MOBILE_REDIRECT = 32 # mobile host redirect
ICMP_IP6_WHEREAREYOU = 33 # IPv6 where-are-you
ICMP_IP6_IAMHERE = 34 # IPv6 i-am-here
ICMP_MOBILE_REG = 35 # mobile registration req
ICMP_MOBILE_REGREPLY = 36 # mobile registration reply
ICMP_DNS = 37 # domain name request
ICMP_DNSREPLY = 38 # domain name reply
ICMP_SKIP = 39 # SKIP
ICMP_PHOTURIS = 40 # Photuris
ICMP_PHOTURIS_UNKNOWN_INDEX = 0 # unknown sec index
ICMP_PHOTURIS_AUTH_FAILED = 1 # auth failed
ICMP_PHOTURIS_DECOMPRESS_FAILED = 2 # decompress failed
ICMP_PHOTURIS_DECRYPT_FAILED = 3 # decrypt failed
ICMP_PHOTURIS_NEED_AUTHN = 4 # no authentication
ICMP_PHOTURIS_NEED_AUTHZ = 5 # no authorization
ICMP_TYPE_MAX = 40
class ICMP(dpkt.Packet):
"""Internet Control Message Protocol.
The Internet Control Message Protocol (ICMP) is a supporting protocol in the Internet protocol suite.
It is used by network devices, including routers, to send error messages and operational information
indicating success or failure when communicating with another IP address.
Attributes:
__hdr__: Header fields of ICMP.
type: (int): ICMP type (1 byte)
code: (int): ICMP subtype (1 byte)
sum: (int): Internet checksum (RFC 1071) for error checking,
calculated from the ICMP header and data with value 0 substituted for this field. (2 bytes)
"""
__hdr__ = (
('type', 'B', 8),
('code', 'B', 0),
('sum', 'H', 0)
)
class Echo(dpkt.Packet):
__hdr__ = (('id', 'H', 0), ('seq', 'H', 0))
class Quote(dpkt.Packet):
__hdr__ = (('pad', 'I', 0),)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
from . import ip
self.data = self.ip = ip.IP(self.data)
class Unreach(Quote):
__hdr__ = (('pad', 'H', 0), ('mtu', 'H', 0))
class Quench(Quote):
pass
class Redirect(Quote):
__hdr__ = (('gw', 'I', 0),)
class ParamProbe(Quote):
__hdr__ = (('ptr', 'B', 0), ('pad1', 'B', 0), ('pad2', 'H', 0))
class TimeExceed(Quote):
pass
_typesw = {0: Echo, 3: Unreach, 4: Quench, 5: Redirect, 8: Echo, 11: TimeExceed}
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
try:
self.data = self._typesw[self.type](self.data)
setattr(self, self.data.__class__.__name__.lower(), self.data)
except (KeyError, dpkt.UnpackError):
pass
def __bytes__(self):
if not self.sum:
self.sum = dpkt.in_cksum(dpkt.Packet.__bytes__(self))
return dpkt.Packet.__bytes__(self)
def test_icmp():
s = (
b'\x03\x0a\x6b\x19\x00\x00\x00\x00\x45\x00\x00\x28\x94\x1f\x00\x00\xe3\x06\x99\xb4\x23\x2b'
b'\x24\x00\xde\x8e\x84\x42\xab\xd1\x00\x50\x00\x35\xe1\x29\x20\xd9\x00\x00\x00\x22\x9b\xf0'
b'\xe2\x04\x65\x6b'
)
r = ICMP(s)
assert bytes(r) == s
# construction
s = (
b'\x00\x00\x53\x87\x00\x01\x03\xd6\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c\x4d\x4e'
b'\x4f\x50\x51\x52\x53\x54\x55\x56\x57\x41\x42\x43\x44\x45\x46\x47\x48\x49'
)
p = ICMP(
type=0,
sum=0x5387,
data=ICMP.Echo(
id=1,
seq=0x03d6,
data=b'ABCDEFGHIJKLMNOPQRSTUVWABCDEFGHI'
)
)
assert bytes(p) == s
# test checksum
p = ICMP(
type=0,
data=ICMP.Echo(
id=1,
seq=0x03d6,
data=b'ABCDEFGHIJKLMNOPQRSTUVWABCDEFGHI'
)
)
assert bytes(p) == s
assert p.sum == 0x5387
def test_invalid_data():
from binascii import unhexlify
buf = unhexlify(
'01' # type (invalid entry)
'00' # code
'0000' # sum
'abcd' # data
)
icmp = ICMP(buf)
# no additional attributes have been added due to the type being invalid
assert dir(icmp) == dir(ICMP())
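# Hedged construction sketch (hypothetical helper, not dpkt API): build an
# echo request; ICMP.__bytes__ fills in the checksum lazily while sum == 0.
def build_echo_request(ident, seq, payload=b''):
    return bytes(ICMP(type=ICMP_ECHO, code=0,
                      data=ICMP.Echo(id=ident, seq=seq, data=payload)))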
# ===== dpkt-master/dpkt/ospf.py =====
# $Id: ospf.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Open Shortest Path First."""
from __future__ import absolute_import
from . import dpkt
AUTH_NONE = 0
AUTH_PASSWORD = 1
AUTH_CRYPTO = 2
class OSPF(dpkt.Packet):
"""Open Shortest Path First.
TODO: Longer class information....
Attributes:
__hdr__: Header fields of OSPF.
TODO.
"""
__hdr__ = (
('v', 'B', 0),
('type', 'B', 0),
('len', 'H', 0),
('router', 'I', 0),
('area', 'I', 0),
('sum', 'H', 0),
('atype', 'H', 0),
('auth', '8s', b'')
)
def __bytes__(self):
if not self.sum:
self.sum = dpkt.in_cksum(dpkt.Packet.__bytes__(self))
return dpkt.Packet.__bytes__(self)
def test_creation():
ospf = OSPF()
assert ospf.v == 0
assert ospf.type == 0
assert ospf.len == 0
assert ospf.router == 0
assert ospf.area == 0
assert ospf.sum == 0
assert ospf.atype == 0
assert ospf.auth == b''
# sum is 0, so it will be recalculated
assert bytes(ospf) == b''.join([
b'\x00' * 12,
b'\xff\xff',
b'\x00' * 10
])
ospf.sum = 0x1234
# sum is not 0, so it will be used
assert bytes(ospf) == b''.join([
b'\x00' * 12,
b'\x12\x34',
b'\x00' * 10
])
# ===== dpkt-master/dpkt/tftp.py =====
# $Id: tftp.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Trivial File Transfer Protocol."""
from __future__ import print_function
from __future__ import absolute_import
import struct
from . import dpkt
# Opcodes
OP_RRQ = 1 # read request
OP_WRQ = 2 # write request
OP_DATA = 3 # data packet
OP_ACK = 4 # acknowledgment
OP_ERR = 5 # error code
# Error codes
EUNDEF = 0 # not defined
ENOTFOUND = 1 # file not found
EACCESS = 2 # access violation
ENOSPACE = 3 # disk full or allocation exceeded
EBADOP = 4 # illegal TFTP operation
EBADID = 5 # unknown transfer ID
EEXISTS = 6 # file already exists
ENOUSER = 7 # no such user
class TFTP(dpkt.Packet):
"""Trivial File Transfer Protocol.
Trivial File Transfer Protocol (TFTP) is a simple lockstep File Transfer Protocol which allows a client to get
a file from or put a file onto a remote host. One of its primary uses is in the early stages of nodes booting
from a local area network. TFTP has been used for this application because it is very simple to implement.
Attributes:
__hdr__: Header fields of TFTP.
opcode: Operation Code (2 bytes)
"""
__hdr__ = (('opcode', 'H', 1), )
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.opcode in (OP_RRQ, OP_WRQ):
l_ = self.data.split(b'\x00')
self.filename = l_[0]
self.mode = l_[1]
self.data = b''
elif self.opcode in (OP_DATA, OP_ACK):
self.block = struct.unpack('>H', self.data[:2])[0]
self.data = self.data[2:]
elif self.opcode == OP_ERR:
self.errcode = struct.unpack('>H', self.data[:2])[0]
self.errmsg = self.data[2:].split(b'\x00')[0]
self.data = b''
def __len__(self):
return len(bytes(self))
def __bytes__(self):
if self.opcode in (OP_RRQ, OP_WRQ):
s = self.filename + b'\x00' + self.mode + b'\x00'
elif self.opcode in (OP_DATA, OP_ACK):
s = struct.pack('>H', self.block)
elif self.opcode == OP_ERR:
s = struct.pack('>H', self.errcode) + (b'%s\x00' % self.errmsg)
else:
s = b''
return self.pack_hdr() + s + self.data
def test_op_rrq():
from binascii import unhexlify
buf = unhexlify(
'0001' # opcode (OP_RRQ)
'726663313335302e747874' # filename (rfc1350.txt)
'00' # null terminator
'6f63746574' # mode (octet)
'00' # null terminator
)
tftp = TFTP(buf)
assert tftp.filename == b'rfc1350.txt'
assert tftp.mode == b'octet'
assert bytes(tftp) == buf
assert len(tftp) == len(buf)
def test_op_data():
from binascii import unhexlify
buf = unhexlify(
'0003' # opcode (OP_DATA)
'0001' # block
'0a0a4e6574776f726b20576f726b696e672047726f7570'
)
tftp = TFTP(buf)
assert tftp.block == 1
assert tftp.data == b'\x0a\x0aNetwork Working Group'
assert bytes(tftp) == buf
assert len(tftp) == len(buf)
def test_op_err():
from binascii import unhexlify
buf = unhexlify(
'0005' # opcode (OP_ERR)
'0007' # errcode (ENOUSER)
'0a0a4e6574776f726b20576f726b696e672047726f757000'
)
tftp = TFTP(buf)
assert tftp.errcode == ENOUSER
assert tftp.errmsg == b'\x0a\x0aNetwork Working Group'
assert tftp.data == b''
assert bytes(tftp) == buf
def test_op_other():
from binascii import unhexlify
buf = unhexlify(
'0006' # opcode (doesn't exist)
'abcdef' # trailing data
)
tftp = TFTP(buf)
assert tftp.opcode == 6
assert bytes(tftp) == buf
assert tftp.data == unhexlify('abcdef')
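# Hedged construction sketch: __bytes__ for OP_RRQ/OP_WRQ consumes the
# 'filename' and 'mode' attributes that unpack() normally sets, so they must
# be assigned by hand when building a request (illustrative helper).
def build_rrq(filename, mode=b'octet'):
    t = TFTP(opcode=OP_RRQ)
    t.filename, t.mode = filename, mode
    return bytes(t)
# build_rrq(b'rfc1350.txt') reproduces the buffer from test_op_rrq() above.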
# ===== dpkt-master/dpkt/http2.py =====
# -*- coding: utf-8 -*-
"""Hypertext Transfer Protocol Version 2."""
import struct
import codecs
from . import dpkt
HTTP2_PREFACE = b'\x50\x52\x49\x20\x2a\x20\x48\x54\x54\x50\x2f\x32\x2e\x30\x0d\x0a\x0d\x0a\x53\x4d\x0d\x0a\x0d\x0a'
# Frame types
HTTP2_FRAME_DATA = 0
HTTP2_FRAME_HEADERS = 1
HTTP2_FRAME_PRIORITY = 2
HTTP2_FRAME_RST_STREAM = 3
HTTP2_FRAME_SETTINGS = 4
HTTP2_FRAME_PUSH_PROMISE = 5
HTTP2_FRAME_PING = 6
HTTP2_FRAME_GOAWAY = 7
HTTP2_FRAME_WINDOW_UPDATE = 8
HTTP2_FRAME_CONTINUATION = 9
# Flags
HTTP2_FLAG_END_STREAM = 0x01 # for DATA and HEADERS frames
HTTP2_FLAG_ACK = 0x01 # for SETTINGS and PING frames
HTTP2_FLAG_END_HEADERS = 0x04
HTTP2_FLAG_PADDED = 0x08
HTTP2_FLAG_PRIORITY = 0x20
# Settings
HTTP2_SETTINGS_HEADER_TABLE_SIZE = 0x1
HTTP2_SETTINGS_ENABLE_PUSH = 0x2
HTTP2_SETTINGS_MAX_CONCURRENT_STREAMS = 0x3
HTTP2_SETTINGS_INITIAL_WINDOW_SIZE = 0x4
HTTP2_SETTINGS_MAX_FRAME_SIZE = 0x5
HTTP2_SETTINGS_MAX_HEADER_LIST_SIZE = 0x6
# Error codes
HTTP2_NO_ERROR = 0x0
HTTP2_PROTOCOL_ERROR = 0x1
HTTP2_INTERNAL_ERROR = 0x2
HTTP2_FLOW_CONTROL_ERROR = 0x3
HTTP2_SETTINGS_TIMEOUT = 0x4
HTTP2_STREAM_CLOSED = 0x5
HTTP2_FRAME_SIZE_ERROR = 0x6
HTTP2_REFUSED_STREAM = 0x7
HTTP2_CANCEL = 0x8
HTTP2_COMPRESSION_ERROR = 0x9
HTTP2_CONNECT_ERROR = 0xa
HTTP2_ENHANCE_YOUR_CALM = 0xb
HTTP2_INADEQUATE_SECURITY = 0xc
HTTP2_HTTP_1_1_REQUIRED = 0xd
error_code_str = {
HTTP2_NO_ERROR: 'NO_ERROR',
HTTP2_PROTOCOL_ERROR: 'PROTOCOL_ERROR',
HTTP2_INTERNAL_ERROR: 'INTERNAL_ERROR',
HTTP2_FLOW_CONTROL_ERROR: 'FLOW_CONTROL_ERROR',
HTTP2_SETTINGS_TIMEOUT: 'SETTINGS_TIMEOUT',
HTTP2_STREAM_CLOSED: 'STREAM_CLOSED',
HTTP2_FRAME_SIZE_ERROR: 'FRAME_SIZE_ERROR',
HTTP2_REFUSED_STREAM: 'REFUSED_STREAM',
HTTP2_CANCEL: 'CANCEL',
HTTP2_COMPRESSION_ERROR: 'COMPRESSION_ERROR',
HTTP2_CONNECT_ERROR: 'CONNECT_ERROR',
HTTP2_ENHANCE_YOUR_CALM: 'ENHANCE_YOUR_CALM',
HTTP2_INADEQUATE_SECURITY: 'INADEQUATE_SECURITY',
HTTP2_HTTP_1_1_REQUIRED: 'HTTP_1_1_REQUIRED',
}
class HTTP2Exception(Exception):
pass
class Preface(dpkt.Packet):
__hdr__ = (
('preface', '24s', HTTP2_PREFACE),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.preface != HTTP2_PREFACE:
raise HTTP2Exception('Invalid HTTP/2 preface')
        self.data = b''
class Frame(dpkt.Packet):
"""
An HTTP/2 frame as defined in RFC 7540
"""
# struct.unpack can't handle the 3-byte int, so we parse it as bytes
# (and store it as bytes so dpkt doesn't get confused), and turn it into
# an int in a user-facing property
__hdr__ = (
('length_bytes', '3s', 0),
('type', 'B', 0),
('flags', 'B', 0),
('stream_id', 'I', 0),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
# only take the right number of bytes
self.data = self.data[:self.length]
if len(self.data) != self.length:
raise dpkt.NeedData
@property
def length(self):
return struct.unpack('!I', b'\x00' + self.length_bytes)[0]
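# 'length_bytes' is stored raw ('3s'), so building a frame by hand means
# packing the 24-bit length yourself. A hypothetical helper (not dpkt API):
def _pack_u24(n):
    return struct.pack('!I', n)[1:]  # pack as 4 bytes, drop the high byte
assert _pack_u24(12) == b'\x00\x00\x0c'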
class Priority(dpkt.Packet):
"""
    Payload of a PRIORITY frame. Also used in the HEADERS frame if the
    PRIORITY flag is set.
"""
__hdr__ = (
('stream_dep', 'I', 0),
('weight', 'B', 0),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if len(self.data) != 0:
raise HTTP2Exception('Invalid number of bytes in PRIORITY frame')
self.exclusive = (self.stream_dep & 0x80000000) != 0
self.stream_dep &= 0x7fffffff
self.weight += 1
class Setting(dpkt.Packet):
"""
A key-value pair used in the SETTINGS frame.
"""
__hdr__ = (
('identifier', 'H', 0),
('value', 'I', 0),
)
class PaddedFrame(Frame):
"""
Abstract class for frame types that support the FLAG_PADDED flag: DATA,
HEADERS and PUSH_PROMISE.
"""
def unpack(self, buf):
Frame.unpack(self, buf)
if self.flags & HTTP2_FLAG_PADDED:
if self.length == 0:
raise HTTP2Exception('Missing padding length in PADDED frame')
self.pad_length = struct.unpack('B', self.data[0:1])[0]
if self.length <= self.pad_length:
raise HTTP2Exception('Missing padding bytes in PADDED frame')
self.unpadded_data = self.data[1:-self.pad_length]
else:
self.unpadded_data = self.data
class DataFrame(PaddedFrame):
"""
Frame of type DATA.
"""
@property
def payload(self):
return self.unpadded_data
class HeadersFrame(PaddedFrame):
"""
Frame of type HEADERS.
"""
def unpack(self, buf):
PaddedFrame.unpack(self, buf)
if self.flags & HTTP2_FLAG_PRIORITY:
if len(self.unpadded_data) < 5:
raise HTTP2Exception('Missing stream dependency in HEADERS frame with PRIORITY flag')
self.priority = Priority(self.unpadded_data[:5])
self.block_fragment = self.unpadded_data[5:]
else:
self.block_fragment = self.unpadded_data
class PriorityFrame(Frame):
"""
Frame of type PRIORITY.
"""
def unpack(self, buf):
Frame.unpack(self, buf)
self.priority = Priority(self.data)
class RSTStreamFrame(Frame):
"""
Frame of type RST_STREAM.
"""
def unpack(self, buf):
Frame.unpack(self, buf)
if self.length != 4:
raise HTTP2Exception('Invalid number of bytes in RST_STREAM frame (must be 4)')
self.error_code = struct.unpack('!I', self.data)[0]
class SettingsFrame(Frame):
"""
Frame of type SETTINGS.
"""
def unpack(self, buf):
Frame.unpack(self, buf)
if self.length % 6 != 0:
raise HTTP2Exception('Invalid number of bytes in SETTINGS frame (must be multiple of 6)')
self.settings = []
i = 0
while i < self.length:
self.settings.append(Setting(self.data[i:i + 6]))
i += 6
class PushPromiseFrame(PaddedFrame):
"""
Frame of type PUSH_PROMISE.
"""
def unpack(self, buf):
PaddedFrame.unpack(self, buf)
if len(self.unpadded_data) < 4:
raise HTTP2Exception('Missing promised stream ID in PUSH_PROMISE frame')
        self.promised_id = struct.unpack('!I', self.unpadded_data[:4])[0]
self.block_fragment = self.unpadded_data[4:]
class PingFrame(Frame):
"""
Frame of type PING.
"""
def unpack(self, buf):
Frame.unpack(self, buf)
if self.length != 8:
raise HTTP2Exception('Invalid number of bytes in PING frame (must be 8)')
class GoAwayFrame(Frame):
"""
Frame of type GO_AWAY.
"""
def unpack(self, buf):
Frame.unpack(self, buf)
if self.length < 8:
raise HTTP2Exception('Invalid number of bytes in GO_AWAY frame')
self.last_stream_id = struct.unpack('!I', self.data[:4])[0]
self.error_code = struct.unpack('!I', self.data[4:8])[0]
self.debug_data = self.data[8:]
class WindowUpdateFrame(Frame):
"""
Frame of type WINDOW_UPDATE.
"""
def unpack(self, buf):
Frame.unpack(self, buf)
if self.length != 4:
raise HTTP2Exception('Invalid number of bytes in WINDOW_UPDATE frame (must be 4)')
self.window_increment = struct.unpack('!I', self.data)[0]
class ContinuationFrame(Frame):
"""
Frame of type CONTINUATION.
"""
def unpack(self, buf):
Frame.unpack(self, buf)
self.block_fragment = self.data
FRAME_TYPES = {
HTTP2_FRAME_DATA: ('DATA', DataFrame),
HTTP2_FRAME_HEADERS: ('HEADERS', HeadersFrame),
HTTP2_FRAME_PRIORITY: ('PRIORITY', PriorityFrame),
HTTP2_FRAME_RST_STREAM: ('RST_STREAM', RSTStreamFrame),
HTTP2_FRAME_SETTINGS: ('SETTINGS', SettingsFrame),
HTTP2_FRAME_PUSH_PROMISE: ('PUSH_PROMISE', PushPromiseFrame),
HTTP2_FRAME_PING: ('PING', PingFrame),
HTTP2_FRAME_GOAWAY: ('GOAWAY', GoAwayFrame),
HTTP2_FRAME_WINDOW_UPDATE: ('WINDOW_UPDATE', WindowUpdateFrame),
HTTP2_FRAME_CONTINUATION: ('CONTINUATION', ContinuationFrame),
}
class FrameFactory(object):
def __new__(cls, buf):
if len(buf) < 4:
raise dpkt.NeedData
t = struct.unpack('B', buf[3:4])[0]
frame_type = FRAME_TYPES.get(t, None)
if frame_type is None:
raise HTTP2Exception('Invalid frame type: ' + hex(t))
return frame_type[1](buf)
def frame_multi_factory(buf, preface=False):
"""
Attempt to parse one or more Frame's out of buf
Args:
buf: string containing HTTP/2 frames. May have an incomplete frame at the
end.
preface: expect an HTTP/2 preface at the beginning of the buffer.
Returns:
[Frame]
int, total bytes consumed, != len(buf) if an incomplete frame was left at
the end.
"""
i = 0
n = len(buf)
frames = []
if preface:
try:
p = Preface(buf)
i += len(p)
except dpkt.NeedData:
return [], 0
while i < n:
try:
frame = FrameFactory(buf[i:])
frames.append(frame)
i += len(frame)
except dpkt.NeedData:
break
return frames, i
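# Minimal usage sketch for frame_multi_factory (illustrative): a lone
# SETTINGS ack (9-byte header, empty payload) parses as one complete frame.
def _demo_multi_factory():
    buf = codecs.decode(b'000000' b'0401' b'00000000', 'hex')
    frames, consumed = frame_multi_factory(buf)
    assert consumed == len(buf) and len(frames) == 1
    assert frames[0].type == HTTP2_FRAME_SETTINGS
    assert frames[0].flags == HTTP2_FLAG_ACK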
class TestFrame(object):
"""Some data found in real traffic"""
@classmethod
def setup_class(cls):
# First TLS AppData record sent by Firefox (decrypted)
record = codecs.decode(b'505249202a20485454502f322e300d0a'
b'0d0a534d0d0a0d0a00000c0400000000'
b'00000400020000000500004000000004'
b'08000000000000bf0001000005020000'
b'00000300000000c80000050200000000'
b'05000000006400000502000000000700'
b'00000000000005020000000009000000'
b'070000000502000000000b0000000300', 'hex')
cls.frames, cls.i = frame_multi_factory(record, preface=True)
def test_frame(self):
import pytest
# Too short
pytest.raises(dpkt.NeedData, Frame, codecs.decode(b'000001' # length
b'0000' # type, flags
b'deadbeef', # stream id
'hex'))
def test_data(self):
# Padded DATA frame
frame_data_padded = FrameFactory(codecs.decode(b'000008' # length
b'0008' # type, flags
b'12345678' # stream id
b'05' # pad length
b'abcd' # data
b'1122334455', # padding
'hex'))
assert (frame_data_padded.length == 8)
assert (frame_data_padded.type == HTTP2_FRAME_DATA)
assert (frame_data_padded.flags == HTTP2_FLAG_PADDED)
assert (frame_data_padded.stream_id == 0x12345678)
assert (frame_data_padded.data == b'\x05\xAB\xCD\x11\x22\x33\x44\x55')
assert (frame_data_padded.pad_length == 5)
assert (frame_data_padded.unpadded_data == b'\xAB\xCD')
assert (frame_data_padded.payload == b'\xAB\xCD')
# empty DATA frame
frame_data_empty_end = FrameFactory(codecs.decode(b'000000' # length
b'0001' # type, flags
b'deadbeef', # stream id
'hex'))
assert (frame_data_empty_end.length == 0)
assert (frame_data_empty_end.type == HTTP2_FRAME_DATA)
assert (frame_data_empty_end.flags == HTTP2_FLAG_END_STREAM)
assert (frame_data_empty_end.stream_id == 0xdeadbeef)
assert (frame_data_empty_end.data == b'')
assert (frame_data_empty_end.unpadded_data == b'')
assert (frame_data_empty_end.payload == b'')
import pytest
# Invalid padding
with pytest.raises(HTTP2Exception) as e:
DataFrame(codecs.decode(b'000000' # length
b'0008' # type, flags
b'12345678' # stream id
b'', # missing padding
'hex'))
assert (str(e.value) == 'Missing padding length in PADDED frame')
with pytest.raises(HTTP2Exception) as e:
DataFrame(codecs.decode(b'000001' # length
b'0008' # type, flags
b'12345678' # stream id
b'01'
b'', # missing padding bytes
'hex'))
assert (str(e.value) == 'Missing padding bytes in PADDED frame')
def test_headers(self):
frame_headers = FrameFactory(codecs.decode(b'000003' # length
b'0100' # type, flags
b'deadbeef' # stream id
b'f00baa', # block fragment
'hex'))
assert (frame_headers.length == 3)
assert (frame_headers.type == HTTP2_FRAME_HEADERS)
assert (frame_headers.flags == 0)
assert (frame_headers.stream_id == 0xdeadbeef)
assert (frame_headers.data == b'\xF0\x0B\xAA')
assert (frame_headers.unpadded_data == b'\xF0\x0B\xAA')
assert (frame_headers.block_fragment == b'\xF0\x0B\xAA')
frame_headers_prio = FrameFactory(codecs.decode(b'000008' # length
b'0120' # type, flags
b'deadbeef' # stream id
b'cafebabe10' # priority
b'f00baa', # block fragment
'hex'))
assert (frame_headers_prio.length == 8)
assert (frame_headers_prio.type == HTTP2_FRAME_HEADERS)
assert (frame_headers_prio.flags == HTTP2_FLAG_PRIORITY)
assert (frame_headers_prio.stream_id == 0xdeadbeef)
assert (frame_headers_prio.data == b'\xCA\xFE\xBA\xBE\x10\xF0\x0B\xAA')
assert (frame_headers_prio.unpadded_data == b'\xCA\xFE\xBA\xBE\x10\xF0\x0B\xAA')
assert (frame_headers_prio.priority.exclusive is True)
assert (frame_headers_prio.priority.stream_dep == 0x4afebabe)
assert (frame_headers_prio.priority.weight == 0x11)
assert (frame_headers_prio.block_fragment == b'\xF0\x0B\xAA')
import pytest
# Invalid priority
with pytest.raises(HTTP2Exception) as e:
HeadersFrame(codecs.decode(b'000002' # length
b'0120' # type, flags
b'deadbeef' # stream id
b'1234', # invalid priority
'hex'))
assert (str(e.value) == 'Missing stream dependency in HEADERS frame with PRIORITY flag')
def test_priority(self):
frame_priority = FrameFactory(codecs.decode(b'000005' # length
b'0200' # type, flags
b'deadbeef' # stream id
b'cafebabe' # stream dep
b'12', # weight
'hex'))
assert (frame_priority.length == 5)
assert (frame_priority.type == HTTP2_FRAME_PRIORITY)
assert (frame_priority.flags == 0)
assert (frame_priority.stream_id == 0xdeadbeef)
assert (frame_priority.data == b'\xCA\xFE\xBA\xBE\x12')
assert (frame_priority.priority.data == b'')
assert (frame_priority.priority.exclusive is True)
assert (frame_priority.priority.stream_dep == 0x4afebabe)
assert (frame_priority.priority.weight == 0x13)
import pytest
# Invalid length
with pytest.raises(HTTP2Exception) as e:
PriorityFrame(codecs.decode(b'000006' # length
b'0200' # type, flags
b'deadbeef' # stream id
b'cafebabe' # stream dep
b'12' # weight
b'00', # unexpected additional payload
'hex'))
assert (str(e.value) == 'Invalid number of bytes in PRIORITY frame')
def test_rst_stream(self):
frame_rst = FrameFactory(codecs.decode(b'000004' # length
b'0300' # type, flags
b'deadbeef' # stream id
b'0000000c', # error code
'hex'))
assert (frame_rst.length == 4)
assert (frame_rst.type == HTTP2_FRAME_RST_STREAM)
assert (frame_rst.flags == 0)
assert (frame_rst.stream_id == 0xdeadbeef)
assert (frame_rst.data == b'\x00\x00\x00\x0c')
assert (frame_rst.error_code == HTTP2_INADEQUATE_SECURITY)
import pytest
# Invalid length
with pytest.raises(HTTP2Exception) as e:
RSTStreamFrame(codecs.decode(b'000005' # length
b'0300' # type, flags
b'deadbeef' # stream id
b'0000000c' # error code
b'00', # unexpected additional payload
'hex'))
assert (str(e.value) == 'Invalid number of bytes in RST_STREAM frame (must be 4)')
def test_settings(self):
frame_settings = FrameFactory(codecs.decode(b'00000c' # length
b'0400' # type, flags
b'00000000' # stream id
# settings
b'0004' # setting id
b'00020000' # setting value
b'0005' # setting id
b'00004000', # setting value
'hex'))
assert (frame_settings.length == 12)
assert (frame_settings.type == HTTP2_FRAME_SETTINGS)
assert (frame_settings.flags == 0)
assert (frame_settings.stream_id == 0)
assert (len(frame_settings.settings) == 2)
assert (frame_settings.settings[0].identifier == HTTP2_SETTINGS_INITIAL_WINDOW_SIZE)
assert (frame_settings.settings[0].value == 0x20000)
assert (frame_settings.settings[1].identifier == HTTP2_SETTINGS_MAX_FRAME_SIZE)
assert (frame_settings.settings[1].value == 0x4000)
# Settings ack, with empty payload
frame_settings_ack = FrameFactory(codecs.decode(b'000000' # length
b'0401' # type, flags
b'00000000', # stream id
'hex'))
assert (frame_settings_ack.length == 0)
assert (frame_settings_ack.type == HTTP2_FRAME_SETTINGS)
assert (frame_settings_ack.flags == HTTP2_FLAG_ACK)
assert (frame_settings_ack.stream_id == 0)
assert (len(frame_settings_ack.settings) == 0)
import pytest
# Invalid length
with pytest.raises(HTTP2Exception) as e:
SettingsFrame(codecs.decode(b'000005' # length
b'0400' # type, flags
b'deadbeef' # stream id
b'1234567890', # invalid length
'hex'))
assert (str(e.value) == 'Invalid number of bytes in SETTINGS frame (must be multiple of 6)')
def test_push_promise(self):
frame_pp = FrameFactory(codecs.decode(b'000007' # length
b'0500' # type, flags
b'deadbeef' # stream id
b'cafebabe' # promised id
b'123456', # some block fragment
'hex'))
assert (frame_pp.length == 7)
assert (frame_pp.type == HTTP2_FRAME_PUSH_PROMISE)
assert (frame_pp.flags == 0)
assert (frame_pp.stream_id == 0xdeadbeef)
assert (frame_pp.promised_id == 0xcafebabe)
assert (frame_pp.block_fragment == b'\x12\x34\x56')
import pytest
# Invalid length
with pytest.raises(HTTP2Exception) as e:
PushPromiseFrame(codecs.decode(b'000003' # length
b'0500' # type, flags
b'deadbeef' # stream id
b'cafeba', # missing promised id
'hex'))
assert (str(e.value) == 'Missing promised stream ID in PUSH_PROMISE frame')
def test_ping(self):
frame_ping = FrameFactory(codecs.decode(b'000008' # length
b'0600' # type, flags
b'deadbeef' # stream id
b'cafebabe12345678', # user data
'hex'))
assert (frame_ping.length == 8)
assert (frame_ping.type == HTTP2_FRAME_PING)
assert (frame_ping.flags == 0)
assert (frame_ping.stream_id == 0xdeadbeef)
assert (frame_ping.data == b'\xCA\xFE\xBA\xBE\x12\x34\x56\x78')
import pytest
# Invalid length
with pytest.raises(HTTP2Exception) as e:
PingFrame(codecs.decode(b'000005' # length
b'0600' # type, flags
b'deadbeef' # stream id
b'1234567890', # invalid length
'hex'))
assert (str(e.value) == 'Invalid number of bytes in PING frame (must be 8)')
def test_goaway(self):
frame_goaway = FrameFactory(codecs.decode(b'00000a' # length
b'0700' # type, flags
b'deadbeef' # stream id
b'00000000' # last stream id
b'00000000' # error code
b'cafe', # debug data
'hex'))
assert (frame_goaway.length == 10)
assert (frame_goaway.type == HTTP2_FRAME_GOAWAY)
assert (frame_goaway.flags == 0)
assert (frame_goaway.stream_id == 0xdeadbeef)
assert (frame_goaway.last_stream_id == 0)
assert (frame_goaway.error_code == HTTP2_NO_ERROR)
assert (frame_goaway.debug_data == b'\xCA\xFE')
import pytest
# Invalid length
with pytest.raises(HTTP2Exception) as e:
GoAwayFrame(codecs.decode(b'000005' # length
b'0700' # type, flags
b'deadbeef' # stream id
b'1234567890', # invalid length
'hex'))
assert (str(e.value) == 'Invalid number of bytes in GO_AWAY frame')
def test_window_update(self):
frame_wu = FrameFactory(codecs.decode(b'000004' # length
b'0800' # type, flags
b'deadbeef' # stream id
b'12345678', # window increment
'hex'))
assert (frame_wu.length == 4)
assert (frame_wu.type == HTTP2_FRAME_WINDOW_UPDATE)
assert (frame_wu.flags == 0)
assert (frame_wu.stream_id == 0xdeadbeef)
assert (frame_wu.window_increment == 0x12345678)
import pytest
# Invalid length
with pytest.raises(HTTP2Exception) as e:
WindowUpdateFrame(codecs.decode(b'000005' # length
b'0800' # type, flags
b'deadbeef' # stream id
b'1234567890', # invalid length
'hex'))
assert (str(e.value) == 'Invalid number of bytes in WINDOW_UPDATE frame (must be 4)')
def test_continuation(self):
frame_cont = FrameFactory(codecs.decode(b'000003' # length
b'0900' # type, flags
b'deadbeef' # stream id
b'f00baa', # block fragment
'hex'))
assert (frame_cont.length == 3)
assert (frame_cont.type == HTTP2_FRAME_CONTINUATION)
assert (frame_cont.flags == 0)
assert (frame_cont.stream_id == 0xdeadbeef)
assert (frame_cont.block_fragment == b'\xF0\x0B\xAA')
def test_factory(self):
import pytest
# Too short
pytest.raises(dpkt.NeedData, FrameFactory, codecs.decode(b'000000', 'hex'))
# Invalid type
with pytest.raises(HTTP2Exception) as e:
FrameFactory(codecs.decode(b'000000' # length
b'abcd' # type, flags
b'deadbeef', # stream id
'hex'))
assert (str(e.value) == 'Invalid frame type: 0xab')
def test_preface(self):
import pytest
# Preface
pytest.raises(dpkt.NeedData, Preface,
codecs.decode(b'505249202a20485454502f322e300d0a', 'hex'))
pytest.raises(dpkt.NeedData, Preface, b'\x00' * 23)
with pytest.raises(HTTP2Exception) as e:
Preface(b'\x00' * 24)
assert (str(e.value) == 'Invalid HTTP/2 preface')
def test_multi(self):
assert (self.i == 128)
assert (len(self.frames) == 7)
assert (self.frames[0].length == 12)
assert (self.frames[1].length == 4)
assert (self.frames[2].length == 5)
assert (self.frames[3].length == 5)
assert (self.frames[4].length == 5)
assert (self.frames[5].length == 5)
assert (self.frames[6].length == 5)
assert (self.frames[0].type == HTTP2_FRAME_SETTINGS)
assert (self.frames[1].type == HTTP2_FRAME_WINDOW_UPDATE)
assert (self.frames[2].type == HTTP2_FRAME_PRIORITY)
assert (self.frames[3].type == HTTP2_FRAME_PRIORITY)
assert (self.frames[4].type == HTTP2_FRAME_PRIORITY)
assert (self.frames[5].type == HTTP2_FRAME_PRIORITY)
assert (self.frames[6].type == HTTP2_FRAME_PRIORITY)
assert (self.frames[0].flags == 0)
assert (self.frames[1].flags == 0)
assert (self.frames[2].flags == 0)
assert (self.frames[3].flags == 0)
assert (self.frames[4].flags == 0)
assert (self.frames[5].flags == 0)
assert (self.frames[6].flags == 0)
assert (self.frames[0].stream_id == 0)
assert (self.frames[1].stream_id == 0)
assert (self.frames[2].stream_id == 3)
assert (self.frames[3].stream_id == 5)
assert (self.frames[4].stream_id == 7)
assert (self.frames[5].stream_id == 9)
assert (self.frames[6].stream_id == 11)
frames, i = frame_multi_factory(
codecs.decode(b'505249202a20485454502f322e300d0a', 'hex'),
preface=True)
assert (len(frames) == 0)
assert (i == 0)
# Only preface was parsed
frames, i = frame_multi_factory(
codecs.decode(b'505249202a20485454502f322e300d0a'
b'0d0a534d0d0a0d0a00000c0400000000', 'hex'),
preface=True)
assert (len(frames) == 0)
assert (i == 24)
# ===== dpkt-master/dpkt/dhcp.py =====
# $Id: dhcp.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Dynamic Host Configuration Protocol."""
from __future__ import print_function
from __future__ import absolute_import
import struct
from . import arp
from . import dpkt
from .compat import compat_ord
DHCP_OP_REQUEST = 1
DHCP_OP_REPLY = 2
DHCP_MAGIC = 0x63825363
# DHCP option codes
DHCP_OPT_NETMASK = 1 # I: subnet mask
DHCP_OPT_TIMEOFFSET = 2
DHCP_OPT_ROUTER = 3 # s: list of router ips
DHCP_OPT_TIMESERVER = 4
DHCP_OPT_NAMESERVER = 5
DHCP_OPT_DNS_SVRS = 6 # s: list of DNS servers
DHCP_OPT_LOGSERV = 7
DHCP_OPT_COOKIESERV = 8
DHCP_OPT_LPRSERV = 9
DHCP_OPT_IMPSERV = 10
DHCP_OPT_RESSERV = 11
DHCP_OPT_HOSTNAME = 12 # s: client hostname
DHCP_OPT_BOOTFILESIZE = 13
DHCP_OPT_DUMPFILE = 14
DHCP_OPT_DOMAIN = 15 # s: domain name
DHCP_OPT_SWAPSERV = 16
DHCP_OPT_ROOTPATH = 17
DHCP_OPT_EXTENPATH = 18
DHCP_OPT_IPFORWARD = 19
DHCP_OPT_SRCROUTE = 20
DHCP_OPT_POLICYFILTER = 21
DHCP_OPT_MAXASMSIZE = 22
DHCP_OPT_IPTTL = 23
DHCP_OPT_MTUTIMEOUT = 24
DHCP_OPT_MTUTABLE = 25
DHCP_OPT_MTUSIZE = 26
DHCP_OPT_LOCALSUBNETS = 27
DHCP_OPT_BROADCASTADDR = 28
DHCP_OPT_DOMASKDISCOV = 29
DHCP_OPT_MASKSUPPLY = 30
DHCP_OPT_DOROUTEDISC = 31
DHCP_OPT_ROUTERSOLICIT = 32
DHCP_OPT_STATICROUTE = 33
DHCP_OPT_TRAILERENCAP = 34
DHCP_OPT_ARPTIMEOUT = 35
DHCP_OPT_ETHERENCAP = 36
DHCP_OPT_TCPTTL = 37
DHCP_OPT_TCPKEEPALIVE = 38
DHCP_OPT_TCPALIVEGARBAGE = 39
DHCP_OPT_NISDOMAIN = 40
DHCP_OPT_NISSERVERS = 41
DHCP_OPT_NISTIMESERV = 42
DHCP_OPT_VENDSPECIFIC = 43
DHCP_OPT_NBNS = 44
DHCP_OPT_NBDD = 45
DHCP_OPT_NBTCPIP = 46
DHCP_OPT_NBTCPSCOPE = 47
DHCP_OPT_XFONT = 48
DHCP_OPT_XDISPLAYMGR = 49
DHCP_OPT_REQ_IP = 50 # I: IP address
DHCP_OPT_LEASE_SEC = 51 # I: lease seconds
DHCP_OPT_OPTIONOVERLOAD = 52
DHCP_OPT_MSGTYPE = 53 # B: message type
DHCP_OPT_SERVER_ID = 54 # I: server IP address
DHCP_OPT_PARAM_REQ = 55 # s: list of option codes
DHCP_OPT_MESSAGE = 56
DHCP_OPT_MAXMSGSIZE = 57
DHCP_OPT_RENEWTIME = 58
DHCP_OPT_REBINDTIME = 59
DHCP_OPT_VENDOR_ID = 60 # s: vendor class id
DHCP_OPT_CLIENT_ID = 61 # Bs: idtype, id (idtype 0: FQDN, idtype 1: MAC)
DHCP_OPT_NISPLUSDOMAIN = 64
DHCP_OPT_NISPLUSSERVERS = 65
DHCP_OPT_MOBILEIPAGENT = 68
DHCP_OPT_SMTPSERVER = 69
DHCP_OPT_POP3SERVER = 70
DHCP_OPT_NNTPSERVER = 71
DHCP_OPT_WWWSERVER = 72
DHCP_OPT_FINGERSERVER = 73
DHCP_OPT_IRCSERVER = 74
DHCP_OPT_STSERVER = 75
DHCP_OPT_STDASERVER = 76
# DHCP message type values
DHCPDISCOVER = 1
DHCPOFFER = 2
DHCPREQUEST = 3
DHCPDECLINE = 4
DHCPACK = 5
DHCPNAK = 6
DHCPRELEASE = 7
DHCPINFORM = 8
class DHCP(dpkt.Packet):
"""Dynamic Host Configuration Protocol.
The Dynamic Host Configuration Protocol (DHCP) is a network management protocol used on Internet Protocol (IP)
networks for automatically assigning IP addresses and other communication parameters to devices connected
to the network using a client–server architecture.
Attributes:
__hdr__: Header fields of DHCP.
op: (int): Operation. Message op code / message type. 1 = BOOTREQUEST, 2 = BOOTREPLY. (1 byte)
hrd: (int): Hardware type. Hardware address type, see ARP section in "Assigned
Numbers" RFC; e.g., '1' = 10mb ethernet. (1 byte)
hln: (int): Hardware Length. Hardware address length (e.g. '6' for 10mb
ethernet). (1 byte)
hops: (int): Hops. Client sets to zero, optionally used by relay agents
when booting via a relay agent. (1 byte)
xid: (int): Transaction ID. A random number chosen by the
client, used by the client and server to associate
messages and responses between a client and a
server. (4 bytes)
secs: (int): Seconds. Filled in by client, seconds elapsed since client
began address acquisition or renewal process. (2 bytes)
flags: (int): DHCP Flags. (2 bytes)
ciaddr: (int): Client IP address. Only filled in if client is in
BOUND, RENEW or REBINDING state and can respond
to ARP requests. (4 bytes)
        yiaddr: (int): 'Your' (client) IP address, assigned by the server. (4 bytes)
siaddr: (int): Server IP address. IP address of next server to use in bootstrap;
returned in DHCPOFFER, DHCPACK by server. (4 bytes)
giaddr: (int): Gateway IP address. Relay agent IP address, used in booting via a
relay agent. (4 bytes)
        chaddr: (bytes): Client hardware address. (16 bytes)
        sname: (bytes): Server host name. Optional, null-terminated string. (64 bytes)
        file: (bytes): Boot file name. Null-terminated string; "generic"
name or null in DHCPDISCOVER, fully qualified
directory-path name in DHCPOFFER. (128 bytes)
magic: (int): Magic cookie. Optional parameters field. See the options
documents for a list of defined options. (4 bytes)
"""
__hdr__ = (
('op', 'B', DHCP_OP_REQUEST),
('hrd', 'B', arp.ARP_HRD_ETH), # just like ARP.hrd
('hln', 'B', 6), # and ARP.hln
('hops', 'B', 0),
('xid', 'I', 0xdeadbeef),
('secs', 'H', 0),
('flags', 'H', 0),
('ciaddr', 'I', 0),
('yiaddr', 'I', 0),
('siaddr', 'I', 0),
('giaddr', 'I', 0),
('chaddr', '16s', 16 * b'\x00'),
('sname', '64s', 64 * b'\x00'),
('file', '128s', 128 * b'\x00'),
('magic', 'I', DHCP_MAGIC),
)
    opts = (
        (DHCP_OPT_MSGTYPE, bytes(bytearray([DHCPDISCOVER]))),
        (DHCP_OPT_PARAM_REQ, bytes(bytearray([DHCP_OPT_REQ_IP,
                                              DHCP_OPT_ROUTER,
                                              DHCP_OPT_NETMASK,
                                              DHCP_OPT_DNS_SVRS])))
    )  # list of (type, data) tuples; option data must be bytes for pack_opts()
def __len__(self):
return self.__hdr_len__ + \
sum([2 + len(o[1]) for o in self.opts]) + 1 + len(self.data)
def __bytes__(self):
return self.pack_hdr() + self.pack_opts() + bytes(self.data)
def pack_opts(self):
"""Return packed options string."""
if not self.opts:
return b''
l_ = []
for t, data in self.opts:
l_.append(struct.pack("BB%is" % len(data), t, len(data), data))
l_.append(b'\xff')
return b''.join(l_)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.chaddr = self.chaddr[:self.hln]
buf = self.data
l_ = []
while buf:
t = compat_ord(buf[0])
if t == 0xff:
buf = buf[1:]
break
elif t == 0:
buf = buf[1:]
else:
n = compat_ord(buf[1])
l_.append((t, buf[2:2 + n]))
buf = buf[2 + n:]
self.opts = l_
self.data = buf
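# Illustrative helper (hypothetical, not dpkt API): after unpack(), opts is a
# list of (code, bytes) pairs; the one-byte message type hides in option 53.
def get_msg_type(dhcp):
    for t, data in dhcp.opts:
        if t == DHCP_OPT_MSGTYPE and data:
            return compat_ord(data[0])
    return None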
def test_dhcp():
s = (
b'\x01\x01\x06\x00\xad\x53\xc8\x63\xb8\x87\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x02\x55\x82\xf3\xa6\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x63\x82\x53\x63\x35\x01\x01\xfb\x01\x01\x3d\x07\x01\x00'
b'\x02\x55\x82\xf3\xa6\x32\x04\x0a\x00\x01\x65\x0c\x09\x47\x75\x69\x6e\x65\x76\x65\x72\x65\x3c\x08\x4d'
b'\x53\x46\x54\x20\x35\x2e\x30\x37\x0a\x01\x0f\x03\x06\x2c\x2e\x2f\x1f\x21\x2b\xff\x00\x00\x00\x00\x00'
)
dhcp = DHCP(s)
assert (s == bytes(dhcp))
assert len(dhcp) == 300
assert isinstance(dhcp.chaddr, bytes)
assert isinstance(dhcp.sname, bytes)
assert isinstance(dhcp.file, bytes)
# Test default construction
dhcp = DHCP()
assert isinstance(dhcp.chaddr, bytes)
assert isinstance(dhcp.sname, bytes)
assert isinstance(dhcp.file, bytes)
def test_no_opts():
from binascii import unhexlify
buf_small_hdr = unhexlify(
'00' # op
'00' # hrd
'06' # hln
'12' # hops
'deadbeef' # xid
'1234' # secs
'9866' # flags
'00000000' # ciaddr
'00000000' # yiaddr
'00000000' # siaddr
'00000000' # giaddr
)
buf = b''.join([
buf_small_hdr,
b'\x00' * 16, # chaddr
b'\x11' * 64, # sname
b'\x22' * 128, # file
b'\x44' * 4, # magic
b'\x00' # data
])
dhcp = DHCP(buf)
assert dhcp.opts == []
assert dhcp.data == b''
assert dhcp.pack_opts() == b''
# ===== dpkt-master/dpkt/pcap.py =====
# $Id: pcap.py 77 2011-01-06 15:59:38Z dugsong $
# -*- coding: utf-8 -*-
"""Libpcap file format."""
from __future__ import print_function
from __future__ import absolute_import
import sys
import time
from decimal import Decimal
from . import dpkt
from .compat import intround
# big endian magics
TCPDUMP_MAGIC = 0xa1b2c3d4
TCPDUMP_MAGIC_NANO = 0xa1b23c4d
MODPCAP_MAGIC = 0xa1b2cd34
# little endian magics
PMUDPCT_MAGIC = 0xd4c3b2a1
PMUDPCT_MAGIC_NANO = 0x4d3cb2a1
PACPDOM_MAGIC = 0x34cdb2a1
PCAP_VERSION_MAJOR = 2
PCAP_VERSION_MINOR = 4
# see http://www.tcpdump.org/linktypes.html for explanations
DLT_NULL = 0
DLT_EN10MB = 1
DLT_EN3MB = 2
DLT_AX25 = 3
DLT_PRONET = 4
DLT_CHAOS = 5
DLT_IEEE802 = 6
DLT_ARCNET = 7
DLT_SLIP = 8
DLT_PPP = 9
DLT_FDDI = 10
DLT_PFSYNC = 18
DLT_PPP_SERIAL = 50
DLT_PPP_ETHER = 51
DLT_ATM_RFC1483 = 100
DLT_RAW = 101
DLT_C_HDLC = 104
DLT_IEEE802_11 = 105
DLT_FRELAY = 107
DLT_LOOP = 108
DLT_LINUX_SLL = 113
DLT_LTALK = 114
DLT_PFLOG = 117
DLT_PRISM_HEADER = 119
DLT_IP_OVER_FC = 122
DLT_SUNATM = 123
DLT_IEEE802_11_RADIO = 127
DLT_ARCNET_LINUX = 129
DLT_APPLE_IP_OVER_IEEE1394 = 138
DLT_MTP2_WITH_PHDR = 139
DLT_MTP2 = 140
DLT_MTP3 = 141
DLT_SCCP = 142
DLT_DOCSIS = 143
DLT_LINUX_IRDA = 144
DLT_USER0 = 147
DLT_USER1 = 148
DLT_USER2 = 149
DLT_USER3 = 150
DLT_USER4 = 151
DLT_USER5 = 152
DLT_USER6 = 153
DLT_USER7 = 154
DLT_USER8 = 155
DLT_USER9 = 156
DLT_USER10 = 157
DLT_USER11 = 158
DLT_USER12 = 159
DLT_USER13 = 160
DLT_USER14 = 161
DLT_USER15 = 162
DLT_IEEE802_11_RADIO_AVS = 163
DLT_BACNET_MS_TP = 165
DLT_PPP_PPPD = 166
DLT_GPRS_LLC = 169
DLT_GPF_T = 170
DLT_GPF_F = 171
DLT_LINUX_LAPD = 177
DLT_BLUETOOTH_HCI_H4 = 187
DLT_USB_LINUX = 189
DLT_PPI = 192
DLT_IEEE802_15_4 = 195
DLT_SITA = 196
DLT_ERF = 197
DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 201
DLT_AX25_KISS = 202
DLT_LAPD = 203
DLT_PPP_WITH_DIR = 204
DLT_C_HDLC_WITH_DIR = 205
DLT_FRELAY_WITH_DIR = 206
DLT_IPMB_LINUX = 209
DLT_IEEE802_15_4_NONASK_PHY = 215
DLT_USB_LINUX_MMAPPED = 220
DLT_FC_2 = 224
DLT_FC_2_WITH_FRAME_DELIMS = 225
DLT_IPNET = 226
DLT_CAN_SOCKETCAN = 227
DLT_IPV4 = 228
DLT_IPV6 = 229
DLT_IEEE802_15_4_NOFCS = 230
DLT_DBUS = 231
DLT_DVB_CI = 235
DLT_MUX27010 = 236
DLT_STANAG_5066_D_PDU = 237
DLT_NFLOG = 239
DLT_NETANALYZER = 240
DLT_NETANALYZER_TRANSPARENT = 241
DLT_IPOIB = 242
DLT_MPEG_2_TS = 243
DLT_NG40 = 244
DLT_NFC_LLCP = 245
DLT_INFINIBAND = 247
DLT_SCTP = 248
DLT_USBPCAP = 249
DLT_RTAC_SERIAL = 250
DLT_BLUETOOTH_LE_LL = 251
DLT_NETLINK = 253
DLT_BLUETOOTH_LINUX_MONITOR = 254
DLT_BLUETOOTH_BREDR_BB = 255
DLT_BLUETOOTH_LE_LL_WITH_PHDR = 256
DLT_PROFIBUS_DL = 257
DLT_PKTAP = 258
DLT_EPON = 259
DLT_IPMI_HPM_2 = 260
DLT_ZWAVE_R1_R2 = 261
DLT_ZWAVE_R3 = 262
DLT_WATTSTOPPER_DLM = 263
DLT_ISO_14443 = 264
DLT_LINUX_SLL2 = 276
if sys.platform.find('openbsd') != -1:
DLT_LOOP = 12
DLT_RAW = 14
else:
DLT_LOOP = 108
DLT_RAW = 12
dltoff = {DLT_NULL: 4, DLT_EN10MB: 14, DLT_IEEE802: 22, DLT_ARCNET: 6,
DLT_SLIP: 16, DLT_PPP: 4, DLT_FDDI: 21, DLT_PFLOG: 48, DLT_PFSYNC: 4,
DLT_LOOP: 4, DLT_LINUX_SLL: 16, DLT_LINUX_SLL2: 20}
class PktHdr(dpkt.Packet):
"""pcap packet header.
Per-packet record header that precedes each captured frame in a savefile.
Attributes:
__hdr__: Header fields of the pcap record header: capture timestamp
(tv_sec, tv_usec), captured length (caplen) and original wire length (len).
"""
__hdr__ = (
('tv_sec', 'I', 0),
('tv_usec', 'I', 0),
('caplen', 'I', 0),
('len', 'I', 0),
)
class PktModHdr(dpkt.Packet):
"""modified pcap packet header.
https://wiki.wireshark.org/Development/LibpcapFileFormat#modified-pcap
Record header of the "modified" pcap variant.
Attributes:
__hdr__: Standard record header fields plus the Linux interface index,
protocol, packet type and padding fields.
"""
__hdr__ = (
('tv_sec', 'I', 0),
('tv_usec', 'I', 0),
('caplen', 'I', 0),
('len', 'I', 0),
('ifindex', 'I', 0),
('protocol', 'H', 0),
('pkt_type', 'B', 0),
('pad', 'B', 0),
)
class LEPktHdr(PktHdr):
__byte_order__ = '<'
class LEPktModHdr(PktModHdr):
__byte_order__ = '<'
MAGIC_TO_PKT_HDR = {
TCPDUMP_MAGIC: PktHdr,
TCPDUMP_MAGIC_NANO: PktHdr,
MODPCAP_MAGIC: PktModHdr,
PMUDPCT_MAGIC: LEPktHdr,
PMUDPCT_MAGIC_NANO: LEPktHdr,
PACPDOM_MAGIC: LEPktModHdr
}
class FileHdr(dpkt.Packet):
"""pcap file header.
Global header that opens every libpcap savefile.
Attributes:
__hdr__: Header fields of the pcap file header: magic, format version,
timezone offset, timestamp accuracy, snapshot length and link-layer type.
"""
__hdr__ = (
('magic', 'I', TCPDUMP_MAGIC),
('v_major', 'H', PCAP_VERSION_MAJOR),
('v_minor', 'H', PCAP_VERSION_MINOR),
('thiszone', 'I', 0),
('sigfigs', 'I', 0),
('snaplen', 'I', 1500),
('linktype', 'I', 1),
)
class LEFileHdr(FileHdr):
__byte_order__ = '<'
class Writer(object):
"""Simple pcap dumpfile writer.
Writes a libpcap file header on construction, then one record per packet.
Arguments:
fileobj -- file-like object to write to
snaplen -- snapshot length recorded in the file header
linktype -- link-layer type (one of the DLT_* constants)
nano -- if True, write nanosecond-resolution timestamps
"""
__le = sys.byteorder == 'little'
def __init__(self, fileobj, snaplen=1500, linktype=DLT_EN10MB, nano=False):
self.__f = fileobj
self._precision = 9 if nano else 6
self._precision_multiplier = 10**self._precision
magic = TCPDUMP_MAGIC_NANO if nano else TCPDUMP_MAGIC
if self.__le:
fh = LEFileHdr(snaplen=snaplen, linktype=linktype, magic=magic)
self._PktHdr = LEPktHdr()
else:
fh = FileHdr(snaplen=snaplen, linktype=linktype, magic=magic)
self._PktHdr = PktHdr()
self._pack_hdr = self._PktHdr._pack_hdr
self.__f.write(bytes(fh))
def writepkt(self, pkt, ts=None):
"""Write single packet and optional timestamp to file.
Args:
pkt: `bytes` will be called on this and written to file.
ts (float): Timestamp in seconds. Defaults to current time.
"""
if ts is None:
ts = time.time()
self.writepkt_time(bytes(pkt), ts)
def writepkt_time(self, pkt, ts):
"""Write single packet and its timestamp to file.
Args:
pkt (bytes): Some `bytes` to write to the file
ts (float): Timestamp in seconds
"""
n = len(pkt)
sec = int(ts)
usec = intround(ts % 1 * self._precision_multiplier)
ph = self._pack_hdr(sec, usec, n, n)
self.__f.write(ph + pkt)
def writepkts(self, pkts):
"""Write an iterable of packets to file.
Timestamps should be in seconds.
Packets must be of type `bytes` as they will not be cast.
Args:
pkts: iterable containing (ts, pkt)
"""
fd = self.__f
pack_hdr = self._pack_hdr
precision_multiplier = self._precision_multiplier
for ts, pkt in pkts:
n = len(pkt)
sec = int(ts)
usec = intround(ts % 1 * precision_multiplier)
ph = pack_hdr(sec, usec, n, n)
fd.write(ph + pkt)
def close(self):
self.__f.close()
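# A minimal usage sketch, assuming only the Writer API defined above; the
# helper name, timestamps and payload bytes are illustrative inventions.
def _example_writer_usage():
    from .compat import BytesIO
    fobj = BytesIO()
    writer = Writer(fobj, snaplen=65535, linktype=DLT_EN10MB)
    writer.writepkt(b'\x00' * 60, ts=1454725786.0)    # single packet
    writer.writepkts([(1454725787.0, b'\x01' * 60)])  # batch of (ts, pkt)
    return fobj.getvalue()  # file header followed by two records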
class Reader(object):
"""Simple pypcap-compatible pcap file reader.
Iterating the reader yields (timestamp, packet_bytes) tuples.
Attributes:
snaplen: Snapshot length from the file header.
dloff: Datalink offset for the file's link-layer type (0 if unknown).
"""
def __init__(self, fileobj):
self.name = getattr(fileobj, 'name', '<%s>' % fileobj.__class__.__name__)
self.__f = fileobj
buf = self.__f.read(FileHdr.__hdr_len__)
self.__fh = FileHdr(buf)
# save magic
magic = self.__fh.magic
if magic in (PMUDPCT_MAGIC, PMUDPCT_MAGIC_NANO, PACPDOM_MAGIC):
self.__fh = LEFileHdr(buf)
if magic not in MAGIC_TO_PKT_HDR:
raise ValueError('invalid tcpdump header')
self.__ph = MAGIC_TO_PKT_HDR[magic]
if self.__fh.linktype in dltoff:
self.dloff = dltoff[self.__fh.linktype]
else:
self.dloff = 0
self._divisor = Decimal('1E9') if magic in (TCPDUMP_MAGIC_NANO, PMUDPCT_MAGIC_NANO) else 1E6
self.snaplen = self.__fh.snaplen
self.filter = ''
self.__iter = iter(self)
@property
def fd(self):
return self.__f.fileno()
def fileno(self):
return self.fd
def datalink(self):
return self.__fh.linktype
def setfilter(self, value, optimize=1):
raise NotImplementedError
def readpkts(self):
return list(self)
def __next__(self):
return next(self.__iter)
next = __next__ # Python 2 compat
def dispatch(self, cnt, callback, *args):
"""Collect and process packets with a user callback.
Return the number of packets processed.
Arguments:
cnt -- number of packets to process;
or 0 to process all packets until EOF
callback -- function with (timestamp, pkt, *args) prototype
*args -- optional arguments passed to callback on execution
"""
processed = 0
if cnt > 0:
for _ in range(cnt):
try:
ts, pkt = next(iter(self))
except StopIteration:
break
callback(ts, pkt, *args)
processed += 1
else:
for ts, pkt in self:
callback(ts, pkt, *args)
processed += 1
return processed
def loop(self, callback, *args):
self.dispatch(0, callback, *args)
def __iter__(self):
while 1:
buf = self.__f.read(self.__ph.__hdr_len__)
if not buf:
break
hdr = self.__ph(buf)
buf = self.__f.read(hdr.caplen)
yield (hdr.tv_sec + (hdr.tv_usec / self._divisor), buf)
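# A minimal usage sketch, assuming only the Reader API above; the helper
# name is a hypothetical illustration, not part of the original module.
def _example_reader_usage(fileobj):
    reader = Reader(fileobj)
    for ts, buf in reader:
        # ts is seconds since the epoch (a Decimal-based value for
        # nanosecond captures); buf is the raw captured bytes
        print(ts, len(buf))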
class UniversalReader(object):
"""
Universal pcap reader for the libpcap and pcapng file formats
"""
def __new__(cls, fileobj):
try:
pcap = Reader(fileobj)
except ValueError as e1:
fileobj.seek(0)
try:
from . import pcapng
pcap = pcapng.Reader(fileobj)
except ValueError as e2:
raise ValueError('unknown pcap format; libpcap error: %s, pcapng error: %s' % (e1, e2))
return pcap
################################################################################
# TESTS #
################################################################################
class TryExceptException:
def __init__(self, exception_type, msg=''):
self.exception_type = exception_type
self.msg = msg
def __call__(self, f, *args, **kwargs):
def wrapper(*args, **kwargs):
try:
f()
except self.exception_type as e:
if self.msg:
assert str(e) == self.msg
else:
raise Exception("There should have been an Exception raised")
return wrapper
@TryExceptException(Exception, msg='There should have been an Exception raised')
def test_TryExceptException():
"""Check that we can catch a function which does not throw an exception when it is supposed to"""
@TryExceptException(NotImplementedError)
def fun():
pass
try:
fun()
except Exception as e:
raise e
def test_pcap_endian():
be = b'\xa1\xb2\xc3\xd4\x00\x02\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x60\x00\x00\x00\x01'
le = b'\xd4\xc3\xb2\xa1\x02\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x60\x00\x00\x00\x01\x00\x00\x00'
befh = FileHdr(be)
lefh = LEFileHdr(le)
assert (befh.linktype == lefh.linktype)
class TestData():
pcap = ( # full libpcap file with one packet
b'\xd4\xc3\xb2\xa1\x02\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\x00\x00\x01\x00\x00\x00'
b'\xb2\x67\x4a\x42\xae\x91\x07\x00\x46\x00\x00\x00\x46\x00\x00\x00\x00\xc0\x9f\x32\x41\x8c\x00\xe0'
b'\x18\xb1\x0c\xad\x08\x00\x45\x00\x00\x38\x00\x00\x40\x00\x40\x11\x65\x47\xc0\xa8\xaa\x08\xc0\xa8'
b'\xaa\x14\x80\x1b\x00\x35\x00\x24\x85\xed'
)
modified_pcap = (
b'\x34\xcd\xb2\xa1\x02\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x01\x00\x00\x00'
b'\x3c\xfb\x80\x61\x6d\x32\x08\x00\x03\x00\x00\x00\x72\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\xff\xff\xff'
)
def test_reader():
import pytest
data = TestData().pcap
# --- BytesIO tests ---
from .compat import BytesIO
# BytesIO
fobj = BytesIO(data)
reader = Reader(fobj)
assert reader.name == '<BytesIO>'
_, buf1 = next(iter(reader))
assert buf1 == data[FileHdr.__hdr_len__ + PktHdr.__hdr_len__:]
assert reader.datalink() == 1
with pytest.raises(NotImplementedError):
reader.setfilter(1, 2)
# --- dispatch() tests ---
# test count = 0
fobj.seek(0)
reader = Reader(fobj)
assert reader.dispatch(0, lambda ts, pkt: None) == 1
# test count > 0
fobj.seek(0)
reader = Reader(fobj)
assert reader.dispatch(4, lambda ts, pkt: None) == 1
# test iterative dispatch
fobj.seek(0)
reader = Reader(fobj)
assert reader.dispatch(1, lambda ts, pkt: None) == 1
assert reader.dispatch(1, lambda ts, pkt: None) == 0
# test loop() over all packets
fobj.seek(0)
reader = Reader(fobj)
class Count:
counter = 0
@classmethod
def inc(cls):
cls.counter += 1
reader.loop(lambda ts, pkt: Count.inc())
assert Count.counter == 1
def test_reader_dloff():
from binascii import unhexlify
buf_filehdr = unhexlify(
'a1b2c3d4' # TCPDUMP_MAGIC
'0001' # v_major
'0002' # v_minor
'00000000' # thiszone
'00000000' # sigfigs
'00000100' # snaplen
'00000023' # linktype (not known)
)
buf_pkthdr = unhexlify(
'00000003' # tv_sec
'00000005' # tv_usec
'00000004' # caplen
'00000004' # len
)
from .compat import BytesIO
fobj = BytesIO(buf_filehdr + buf_pkthdr + b'\x11' * 4)
reader = Reader(fobj)
# confirm that if the linktype is unknown, it defaults to 0
assert reader.dloff == 0
assert next(reader) == (3.000005, b'\x11' * 4)
@TryExceptException(ValueError, msg="invalid tcpdump header")
def test_reader_badheader():
from .compat import BytesIO
fobj = BytesIO(b'\x00' * 24)
_ = Reader(fobj) # noqa
def test_reader_fd():
data = TestData().pcap
import tempfile
with tempfile.TemporaryFile() as fd:
fd.write(data)
fd.seek(0)
reader = Reader(fd)
assert reader.fd == fd.fileno()
assert reader.fileno() == fd.fileno()
def test_reader_modified_pcap_type():
data = TestData().modified_pcap
import tempfile
with tempfile.TemporaryFile() as fd:
fd.write(data)
fd.seek(0)
reader = Reader(fd)
assert reader.fd == fd.fileno()
assert reader.fileno() == fd.fileno()
timestamp, pkts = next(reader)
assert pkts == 3 * b'\xff'
assert timestamp == 1635842876.537197000
class WriterTestWrap:
"""
Decorate a writer test function with an instance of this class.
The test will be provided with a writer object, which it should write some pkts to.
After the test has run, the BytesIO object will be passed to a Reader,
which will compare each pkt to the return value of the test.
"""
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def __call__(self, f, *args, **kwargs):
def wrapper(*args, **kwargs):
from .compat import BytesIO
for little_endian in [True, False]:
fobj = BytesIO()
_sysle = Writer._Writer__le
Writer._Writer__le = little_endian
f.__globals__['writer'] = Writer(fobj, **self.kwargs.get('writer', {}))
f.__globals__['fobj'] = fobj
pkts = f(*args, **kwargs)
fobj.flush()
fobj.seek(0)
assert pkts
for (ts_out, pkt_out), (ts_in, pkt_in) in zip(pkts, Reader(fobj).readpkts()):
assert ts_out == ts_in
assert pkt_out == pkt_in
# 'noqa' for flake8 to ignore these since writer was injected into globals
writer.close() # noqa
Writer._Writer__le = _sysle
return wrapper
@WriterTestWrap()
def test_writer_precision_normal():
ts, pkt = 1454725786.526401, b'foo'
writer.writepkt(pkt, ts=ts) # noqa
return [(ts, pkt)]
@WriterTestWrap(writer={'nano': True})
def test_writer_precision_nano():
ts, pkt = Decimal('1454725786.010203045'), b'foo'
writer.writepkt(pkt, ts=ts) # noqa
return [(ts, pkt)]
@WriterTestWrap(writer={'nano': False})
def test_writer_precision_nano_fail():
"""if writer is not set to nano, supplying this timestamp should be truncated"""
ts, pkt = (Decimal('1454725786.010203045'), b'foo')
writer.writepkt(pkt, ts=ts) # noqa
return [(1454725786.010203, pkt)]
@WriterTestWrap()
def test_writepkt_no_time():
ts, pkt = 1454725786.526401, b'foooo'
_tmp = time.time
time.time = lambda: ts
writer.writepkt(pkt) # noqa
time.time = _tmp
return [(ts, pkt)]
@WriterTestWrap(writer={'snaplen': 10})
def test_writepkt_snaplen():
ts, pkt = 1454725786.526401, b'foooo'
writer.writepkt(pkt, ts) # noqa
return [(ts, pkt)]
@WriterTestWrap()
def test_writepkt_with_time():
ts, pkt = 1454725786.526401, b'foooo'
writer.writepkt(pkt, ts) # noqa
return [(ts, pkt)]
@WriterTestWrap()
def test_writepkt_time():
ts, pkt = 1454725786.526401, b'foooo'
writer.writepkt_time(pkt, ts) # noqa
return [(ts, pkt)]
@WriterTestWrap()
def test_writepkts():
"""writing multiple packets from a list"""
pkts = [
(1454725786.526401, b"fooo"),
(1454725787.526401, b"barr"),
(3243204320.093211, b"grill"),
(1454725789.526401, b"lol"),
]
writer.writepkts(pkts) # noqa
return pkts
def test_universal_reader():
import pytest
from .compat import BytesIO
from . import pcapng
# libpcap
data = TestData().pcap
fobj = BytesIO(data)
reader = UniversalReader(fobj)
assert isinstance(reader, Reader)
# pcapng
data = pcapng.define_testdata().valid_pcapng
fobj = BytesIO(data)
reader = UniversalReader(fobj)
assert isinstance(reader, pcapng.Reader)
# unknown
fobj = BytesIO(b'\x42' * 1000)
with pytest.raises(ValueError):
reader = UniversalReader(fobj)
| 18,792 | 25.2106 | 108 |
py
|
dpkt
|
dpkt-master/dpkt/smb.py
|
# $Id: smb.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Server Message Block."""
from __future__ import print_function
from __future__ import absolute_import
from . import dpkt
# https://msdn.microsoft.com/en-us/library/ee441774.aspx
SMB_FLAGS_LOCK_AND_READ_OK = 0x01
SMB_FLAGS_BUF_AVAIL = 0x02
SMB_FLAGS_CASE_INSENSITIVE = 0x08
SMB_FLAGS_CANONICALIZED_PATHS = 0x10
SMB_FLAGS_OPLOCK = 0x20
SMB_FLAGS_OPBATCH = 0x40
SMB_FLAGS_REPLY = 0x80
SMB_FLAGS2_LONG_NAMES = 0x0001
SMB_FLAGS2_EXTENDED_ATTRIBUTES = 0x0002
SMB_FLAGS2_SECURITY_SIGNATURES = 0x0004
SMB_FLAGS2_COMPRESSED = 0x0008
SMB_FLAGS2_SECURITY_SIGNATURES_REQUIRED = 0x0010
SMB_FLAGS2_IS_LONG_NAME = 0x0040
SMB_FLAGS2_REVERSE_PATH = 0x0400
SMB_FLAGS2_EXTENDED_SECURITY = 0x0800
SMB_FLAGS2_DFS = 0x1000
SMB_FLAGS2_PAGING_IO = 0x2000
SMB_FLAGS2_NT_STATUS = 0x4000
SMB_FLAGS2_UNICODE = 0x8000
SMB_STATUS_SUCCESS = 0x00000000
class SMB(dpkt.Packet):
r"""Server Message Block.
Server Message Block (SMB) is a communication protocol[1] that Microsoft created for providing
shared access to files and printers across nodes on a network. It also provides an authenticated
inter-process communication (IPC) mechanism.
Attributes:
__hdr__: SMB Headers
proto: (bytes): Protocol. This field MUST contain the 4-byte literal string '\xFF', 'S', 'M', 'B' (4 bytes)
cmd: (int): Command. Defines SMB command. (1 byte)
status: (int): Status. Communicates error messages from the server to the client. (4 bytes)
flags: (int): Flags. Describes various features in effect for the message.(1 byte)
flags2: (int): Flags2. Represent various features in effect for the message.
Unspecified bits are reserved and MUST be zero. (2 bytes)
_pidhi: (int): PIDHigh. Represents the high-order bytes of a process identifier (PID) (2 bytes)
security: (bytes): SecurityFeatures. Has three possible interpretations. (8 bytes)
rsvd: (int): Reserved. This field is reserved and SHOULD be set to 0x0000. (2 bytes)
tid: (int): TID. A tree identifier (TID). (2 bytes)
_pidlo: (int): PIDLow. The lower 16-bits of the PID. (2 bytes)
uid: (int): UID. A user identifier (UID). (2 bytes)
mid: (int): MID. A multiplex identifier (MID).(2 bytes)
"""
__byte_order__ = '<'
__hdr__ = [
('proto', '4s', b'\xffSMB'),
('cmd', 'B', 0),
('status', 'I', SMB_STATUS_SUCCESS),
('flags', 'B', 0),
('flags2', 'H', 0),
('_pidhi', 'H', 0),
('security', '8s', b''),
('rsvd', 'H', 0),
('tid', 'H', 0),
('_pidlo', 'H', 0),
('uid', 'H', 0),
('mid', 'H', 0)
]
@property
def pid(self):
return (self._pidhi << 16) | self._pidlo
@pid.setter
def pid(self, v):
self._pidhi = v >> 16
self._pidlo = v & 0xffff
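# An illustrative sketch (not part of the original module) showing how the
# pid property above splits a 32-bit PID across the PIDHigh/PIDLow fields.
def _example_pid_split():
    smb = SMB()
    smb.pid = 0x00081020
    assert (smb._pidhi, smb._pidlo) == (0x0008, 0x1020)
    assert smb.pid == 0x00081020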
def test_smb():
buf = (b'\xffSMB\xa0\x00\x00\x00\x00\x08\x03\xc8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x08\xfa\x7a\x00\x08\x53\x02')
smb = SMB(buf)
assert smb.flags == SMB_FLAGS_CASE_INSENSITIVE
assert smb.flags2 == (SMB_FLAGS2_UNICODE | SMB_FLAGS2_NT_STATUS |
SMB_FLAGS2_EXTENDED_SECURITY | SMB_FLAGS2_EXTENDED_ATTRIBUTES | SMB_FLAGS2_LONG_NAMES)
assert smb.pid == 31482
assert smb.uid == 2048
assert smb.mid == 595
print(repr(smb))
smb = SMB()
smb.pid = 0x00081020
smb.uid = 0x800
assert str(smb) == str(b'\xffSMB\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x20\x10\x00\x08\x00\x00')
| 3,730 | 34.875 | 119 |
py
|
dpkt
|
dpkt-master/dpkt/rx.py
|
# $Id: rx.py 23 2006-11-08 15:45:33Z jonojono $
# -*- coding: utf-8 -*-
"""Rx Protocol."""
from __future__ import absolute_import
from . import dpkt
# Types
DATA = 0x01
ACK = 0x02
BUSY = 0x03
ABORT = 0x04
ACKALL = 0x05
CHALLENGE = 0x06
RESPONSE = 0x07
DEBUG = 0x08
# Flags
CLIENT_INITIATED = 0x01
REQUEST_ACK = 0x02
LAST_PACKET = 0x04
MORE_PACKETS = 0x08
SLOW_START_OK = 0x20
JUMBO_PACKET = 0x20
# Security
SEC_NONE = 0x00
SEC_BCRYPT = 0x01
SEC_RXKAD = 0x02
SEC_RXKAD_ENC = 0x03
class Rx(dpkt.Packet):
"""Rx Protocol.
Rx is the remote procedure call protocol used by AFS (the Andrew File System).
Attributes:
__hdr__: Header fields of Rx: connection epoch and id, call, sequence and
serial numbers, packet type, flags, status, security index, checksum and
service id.
"""
__hdr__ = (
('epoch', 'I', 0),
('cid', 'I', 0),
('call', 'I', 1),
('seq', 'I', 0),
('serial', 'I', 1),
('type', 'B', 0),
('flags', 'B', CLIENT_INITIATED),
('status', 'B', 0),
('security', 'B', 0),
('sum', 'H', 0),
('service', 'H', 0)
)
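# An illustrative construction sketch (not part of the original module);
# the field values below are invented for the example.
def _example_rx_data():
    rx = Rx(type=DATA, flags=CLIENT_INITIATED | LAST_PACKET, seq=1)
    assert len(bytes(rx)) == 28  # five 4-byte words + 4 one-byte fields + 2 + 2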
| 978 | 16.482143 | 47 |
py
|
dpkt
|
dpkt-master/dpkt/mrt.py
|
# $Id: mrt.py 29 2007-01-26 02:29:07Z jon.oberheide $
# -*- coding: utf-8 -*-
"""Multi-threaded Routing Toolkit."""
from __future__ import absolute_import
from . import dpkt
from . import bgp
# Multi-threaded Routing Toolkit
# http://www.ietf.org/internet-drafts/draft-ietf-grow-mrt-03.txt
# MRT Types
NULL = 0
START = 1
DIE = 2
I_AM_DEAD = 3
PEER_DOWN = 4
BGP = 5 # Deprecated by BGP4MP
RIP = 6
IDRP = 7
RIPNG = 8
BGP4PLUS = 9 # Deprecated by BGP4MP
BGP4PLUS_01 = 10 # Deprecated by BGP4MP
OSPF = 11
TABLE_DUMP = 12
BGP4MP = 16
BGP4MP_ET = 17
ISIS = 32
ISIS_ET = 33
OSPF_ET = 64
# BGP4MP Subtypes
BGP4MP_STATE_CHANGE = 0
BGP4MP_MESSAGE = 1
BGP4MP_ENTRY = 2
BGP4MP_SNAPSHOT = 3
BGP4MP_MESSAGE_32BIT_AS = 4
# Address Family Types
AFI_IPv4 = 1
AFI_IPv6 = 2
class MRTHeader(dpkt.Packet):
__hdr__ = (
('ts', 'I', 0),
('type', 'H', 0),
('subtype', 'H', 0),
('len', 'I', 0)
)
class TableDump(dpkt.Packet):
__hdr__ = (
('view', 'H', 0),
('seq', 'H', 0),
('prefix', 'I', 0),
('prefix_len', 'B', 0),
('status', 'B', 1),
('originated_ts', 'I', 0),
('peer_ip', 'I', 0),
('peer_as', 'H', 0),
('attr_len', 'H', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
plen = self.attr_len
l_ = []
while plen > 0:
attr = bgp.BGP.Update.Attribute(self.data)
self.data = self.data[len(attr):]
plen -= len(attr)
l_.append(attr)
self.attributes = l_
class BGP4MPMessage(dpkt.Packet):
__hdr__ = (
('src_as', 'H', 0),
('dst_as', 'H', 0),
('intf', 'H', 0),
('family', 'H', AFI_IPv4),
('src_ip', 'I', 0),
('dst_ip', 'I', 0)
)
class BGP4MPMessage_32(dpkt.Packet):
__hdr__ = (
('src_as', 'I', 0),
('dst_as', 'I', 0),
('intf', 'H', 0),
('family', 'H', AFI_IPv4),
('src_ip', 'I', 0),
('dst_ip', 'I', 0)
)
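# An illustrative parsing sketch (not part of the original module); the AS
# numbers and addresses below are invented for the example.
def _example_bgp4mp_message():
    from binascii import unhexlify
    buf = unhexlify(
        '0001'      # src_as
        '0002'      # dst_as
        '0003'      # intf
        '0001'      # family (AFI_IPv4)
        '0a000001'  # src_ip (10.0.0.1)
        '0a000002'  # dst_ip (10.0.0.2)
    )
    msg = BGP4MPMessage(buf)
    assert (msg.src_as, msg.dst_as, msg.family) == (1, 2, AFI_IPv4)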
def test_tabledump():
from binascii import unhexlify
buf_tabledump = unhexlify(
'0001' # view
'0002' # seq
'00000003' # prefix
'04' # prefix_len
'05' # status
'00000006' # originated_ts
'00000007' # peer_ip
'0008' # peer_as
'0002' # attr_len
)
buf_attrs = unhexlify(
'01' # flags
'01' # type (ORIGIN)
'01' # length
'02' # Origin.type (INCOMPLETE)
)
buf = buf_tabledump + buf_attrs
table_dump = TableDump(buf)
assert table_dump.view == 1
assert table_dump.seq == 2
assert table_dump.prefix == 3
assert table_dump.prefix_len == 4
assert table_dump.status == 5
assert table_dump.originated_ts == 6
assert table_dump.peer_ip == 7
assert table_dump.peer_as == 8
assert table_dump.attr_len == 2
assert len(table_dump.attributes) == 1
attr = table_dump.attributes[0]
assert isinstance(attr, bgp.BGP.Update.Attribute)
assert isinstance(attr.data, bgp.BGP.Update.Attribute.Origin)
| 3,168 | 22.131387 | 65 |
py
|
dpkt
|
dpkt-master/dpkt/http.py
|
# $Id: http.py 86 2013-03-05 19:25:19Z [email protected] $
# -*- coding: utf-8 -*-
"""Hypertext Transfer Protocol."""
from __future__ import print_function
from __future__ import absolute_import
from collections import OrderedDict
from . import dpkt
from .compat import BytesIO, iteritems
def parse_headers(f):
"""Return dict of HTTP headers parsed from a file object."""
d = OrderedDict()
while 1:
# The following logic covers two kinds of loop exit criteria.
# 1) If the header is valid, then at the end of the header block
#    f.readline() returns '\r\n', which strip() reduces to an empty
#    string, and we break the loop.
# 2) If this is a malformed header that does not end with '\r\n',
#    f.readline() returns '', which is already an empty string after
#    strip(), so we also break the loop.
line = f.readline().strip().decode("ascii", "ignore")
if not line:
break
l_ = line.split(':', 1)
if len(l_[0].split()) != 1:
raise dpkt.UnpackError('invalid header: %r' % line)
k = l_[0].lower()
v = len(l_) != 1 and l_[1].lstrip() or ''
if k in d:
if not type(d[k]) is list:
d[k] = [d[k]]
d[k].append(v)
else:
d[k] = v
return d
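# An illustrative sketch (not part of the original module): parse_headers()
# expects a file object positioned just past the start-line, lowercases the
# header names, and leaves the file positioned at the body.
def _example_parse_headers():
    f = BytesIO(b'Host: example.com\r\nAccept: */*\r\n\r\nbody')
    d = parse_headers(f)
    assert d['host'] == 'example.com'
    assert f.read() == b'body'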
def parse_body(f, headers):
"""Return HTTP body parsed from a file object, given HTTP header dict."""
if headers.get('transfer-encoding', '').lower() == 'chunked':
l_ = []
found_end = False
while 1:
try:
sz = f.readline().split(None, 1)[0]
except IndexError:
raise dpkt.UnpackError('missing chunk size')
try:
n = int(sz, 16)
except ValueError:
raise dpkt.UnpackError('invalid chunk size')
if n == 0:
found_end = True
buf = f.read(n)
if f.readline().strip():
break
if n and len(buf) == n:
l_.append(buf)
else:
# only possible when len(buf) < n, which will happen if the
# file object ends before reading a complete file chunk
break
if not found_end:
raise dpkt.NeedData('premature end of chunked body')
body = b''.join(l_)
elif 'content-length' in headers:
n = int(headers['content-length'])
body = f.read(n)
if len(body) != n:
raise dpkt.NeedData('short body (missing %d bytes)' % (n - len(body)))
elif 'content-type' in headers:
body = f.read()
else:
# XXX - need to handle HTTP/0.9
body = b''
return body
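# An illustrative sketch (not part of the original module) exercising the
# chunked branch above with an invented two-chunk payload.
def _example_parse_chunked_body():
    f = BytesIO(b'5\r\nhello\r\n0\r\n\r\n')
    assert parse_body(f, {'transfer-encoding': 'chunked'}) == b'hello'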
class Message(dpkt.Packet):
"""Hypertext Transfer Protocol headers + body.
HTTP messages are how data is exchanged between a server and a client. There are two types of messages: requests
sent by the client to trigger an action on the server, and responses, the answer from the server. HTTP messages are
composed of textual information encoded in ASCII, and span over multiple lines.
Attributes:
__hdr__: Header fields of HTTP.
The start-line and HTTP headers of the HTTP message are collectively known as the head of the requests,
whereas its payload is known as the body.
"""
__metaclass__ = type
__hdr_defaults__ = {}
headers = None
body = None
def __init__(self, *args, **kwargs):
if args:
self.unpack(args[0])
else:
self.headers = OrderedDict()
self.body = b''
self.data = b''
# NOTE: changing this to iteritems breaks py3 compatibility
for k, v in self.__hdr_defaults__.items():
setattr(self, k, v)
for k, v in iteritems(kwargs):
setattr(self, k, v)
def unpack(self, buf, is_body_allowed=True):
f = BytesIO(buf)
# Parse headers
self.headers = parse_headers(f)
# Parse body
if is_body_allowed:
self.body = parse_body(f, self.headers)
else:
self.body = b''
# Save the rest
self.data = f.read()
def pack_hdr(self):
return ''.join(['%s: %s\r\n' % t for t in iteritems(self.headers)])
def __len__(self):
return len(str(self))
def __str__(self):
return '%s\r\n%s' % (self.pack_hdr(), self.body.decode("utf8", "ignore"))
def __bytes__(self):
return self.pack_hdr().encode("ascii", "ignore") + b'\r\n' + (self.body or b'')
class Request(Message):
"""Hypertext Transfer Protocol Request.
HTTP requests are messages sent by the client to initiate an action on the server. Their start-line contain three
elements. An HTTP method, a verb (like GET, PUT or POST) or a noun (like HEAD or OPTIONS), The request target,
usually a URL, or the absolute path of the protocol, port, and domain are usually characterized by the request
context and The HTTP version, which defines the structure of the remaining message, acting as an indicator of the
expected version to use for the response.
Attributes:
__hdr__: Header fields of HTTP request.
Many headers can appear in requests. They can be divided in several groups:
General headers, like Via, apply to the message as a whole.
Request headers, like User-Agent or Accept, modify the request by specifying it further (like Accept-
Language), by giving context (like Referer), or by conditionally restricting it (like If-None).
Representation headers like Content-Type that describe the original format of the message data and
any encoding applied (only present if the message has a body).
"""
__hdr_defaults__ = {
'method': 'GET',
'uri': '/',
'version': '1.0',
}
__methods = dict.fromkeys((
'GET', 'PUT', 'ICY',
'COPY', 'HEAD', 'LOCK', 'MOVE', 'POLL', 'POST',
'BCOPY', 'BMOVE', 'MKCOL', 'TRACE', 'LABEL', 'MERGE',
'DELETE', 'SEARCH', 'UNLOCK', 'REPORT', 'UPDATE', 'NOTIFY',
'BDELETE', 'CONNECT', 'OPTIONS', 'CHECKIN',
'PROPFIND', 'CHECKOUT', 'CCM_POST',
'SUBSCRIBE', 'PROPPATCH', 'BPROPFIND',
'BPROPPATCH', 'UNCHECKOUT', 'MKACTIVITY',
'MKWORKSPACE', 'UNSUBSCRIBE', 'RPC_CONNECT',
'VERSION-CONTROL',
'BASELINE-CONTROL'
))
__proto = 'HTTP'
def unpack(self, buf):
f = BytesIO(buf)
line = f.readline().decode("ascii", "ignore")
l_ = line.strip().split()
if len(l_) < 2:
raise dpkt.UnpackError('invalid request: %r' % line)
if l_[0] not in self.__methods:
raise dpkt.UnpackError('invalid http method: %r' % l_[0])
if len(l_) == 2:
# HTTP/0.9 does not specify a version in the request line
self.version = '0.9'
else:
if not l_[2].startswith(self.__proto):
raise dpkt.UnpackError('invalid http version: %r' % l_[2])
self.version = l_[2][len(self.__proto) + 1:]
self.method = l_[0]
self.uri = l_[1]
Message.unpack(self, f.read())
def __str__(self):
return '%s %s %s/%s\r\n' % (self.method, self.uri, self.__proto,
self.version) + Message.__str__(self)
def __bytes__(self):
str_out = '%s %s %s/%s\r\n' % (self.method, self.uri, self.__proto,
self.version)
return str_out.encode("ascii", "ignore") + Message.__bytes__(self)
class Response(Message):
"""Hypertext Transfer Protocol Response.
The start line of an HTTP response, called the status line, contains the following information. The protocol
version, usually HTTP/1.1, a status code, indicating success or failure of the request. Common status codes are 200,
404, or 302, a status text. A brief, purely informational, textual description of the status code to help a human
understand the HTTP message. A typical status line looks like: HTTP/1.1 404 Not Found.
Attributes:
__hdr__: Header fields of HTTP Response.
Many headers can appear in responses. These can be divided into several groups:
General headers, like Via, apply to the whole message.
Response headers, like Vary and Accept-Ranges, give additional information about the server which
doesn't fit in the status line.
Representation headers like Content-Type that describe the original format of the message data and any
encoding applied (only present if the message has a body).
"""
__hdr_defaults__ = {
'version': '1.0',
'status': '200',
'reason': 'OK'
}
__proto = 'HTTP'
def unpack(self, buf):
f = BytesIO(buf)
line = f.readline()
l_ = line.strip().decode("ascii", "ignore").split(None, 2)
if len(l_) < 2 or not l_[0].startswith(self.__proto) or not l_[1].isdigit():
raise dpkt.UnpackError('invalid response: %r' % line)
self.version = l_[0][len(self.__proto) + 1:]
self.status = l_[1]
self.reason = l_[2] if len(l_) > 2 else ''
# RFC Sec 4.3.
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.3.
# For response messages, whether or not a message-body is included with
# a message is dependent on both the request method and the response
# status code (section 6.1.1). All responses to the HEAD request method
# MUST NOT include a message-body, even though the presence of entity-
# header fields might lead one to believe they do. All 1xx
# (informational), 204 (no content), and 304 (not modified) responses
# MUST NOT include a message-body. All other responses do include a
# message-body, although it MAY be of zero length.
is_body_allowed = int(self.status) >= 200 and 204 != int(self.status) != 304
Message.unpack(self, f.read(), is_body_allowed)
def __str__(self):
return '%s/%s %s %s\r\n' % (self.__proto, self.version, self.status,
self.reason) + Message.__str__(self)
def __bytes__(self):
str_out = '%s/%s %s %s\r\n' % (self.__proto, self.version, self.status,
self.reason)
return str_out.encode("ascii", "ignore") + Message.__bytes__(self)
def test_parse_request():
s = (b"""POST /main/redirect/ab/1,295,,00.html HTTP/1.0\r\nReferer: http://www.email.com/login/snap/login.jhtml\r\n"""
b"""Connection: Keep-Alive\r\nUser-Agent: Mozilla/4.75 [en] (X11; U; OpenBSD 2.8 i386; Nav)\r\n"""
b"""Host: ltd.snap.com\r\nAccept: image/gif, image/x-xbitmap, image/jpeg, image/pjpeg, image/png, */*\r\n"""
b"""Accept-Encoding: gzip\r\nAccept-Language: en\r\nAccept-Charset: iso-8859-1,*,utf-8\r\n"""
b"""Content-type: application/x-www-form-urlencoded\r\nContent-length: 61\r\n\r\n"""
b"""sn=em&mn=dtest4&pw=this+is+atest&fr=true&login=Sign+in&od=www""")
r = Request(s)
assert r.method == 'POST'
assert r.uri == '/main/redirect/ab/1,295,,00.html'
assert r.body == b'sn=em&mn=dtest4&pw=this+is+atest&fr=true&login=Sign+in&od=www'
assert r.headers['content-type'] == 'application/x-www-form-urlencoded'
Request(s[:60])
def test_format_request():
r = Request()
assert str(r) == 'GET / HTTP/1.0\r\n\r\n'
r.method = 'POST'
r.uri = '/foo/bar/baz.html'
r.headers['content-type'] = 'text/plain'
r.headers['content-length'] = '5'
r.body = b'hello'
s = str(r)
assert s.startswith('POST /foo/bar/baz.html HTTP/1.0\r\n')
assert s.endswith('\r\n\r\nhello')
assert '\r\ncontent-length: 5\r\n' in s
assert '\r\ncontent-type: text/plain\r\n' in s
s = bytes(r)
assert s.startswith(b'POST /foo/bar/baz.html HTTP/1.0\r\n')
assert s.endswith(b'\r\n\r\nhello')
assert b'\r\ncontent-length: 5\r\n' in s
assert b'\r\ncontent-type: text/plain\r\n' in s
r = Request(bytes(r))
assert bytes(r) == s
def test_chunked_response():
from binascii import unhexlify
header = (
b"HTTP/1.1 200 OK\r\n"
b"Cache-control: no-cache\r\n"
b"Pragma: no-cache\r\n"
b"Content-Type: text/javascript; charset=utf-8\r\n"
b"Content-Encoding: gzip\r\n"
b"Transfer-Encoding: chunked\r\n"
b"Set-Cookie: S=gmail=agg:gmail_yj=v2s:gmproxy=JkU; Domain=.google.com; Path=/\r\n"
b"Server: GFE/1.3\r\n"
b"Date: Mon, 12 Dec 2005 22:33:23 GMT\r\n"
b"\r\n"
)
body = unhexlify(
'610d0a1f8b08000000000000000d0a3135320d0a6d914d4fc4201086effe0a82c99e58'
'4a4be9b6eec1e81e369e34f1e061358652da12596880bafaef85ee1a2ff231990cef30'
'3cc381a0c301e610c13ca765595435a1a4ace1db153aa49d0cfa354b00f62eaaeb86d5'
'79cd485995348ebc2a688c8e214c3759e627eb82575acf3e381e6487853158d863e6bc'
'175a898fac208465de0a215d961769b5027b7bc27a301e0f23379c77337699329dfcc2'
'6338ea5b2f4550d6bcce84d0ceabf760271fac53d2c7d2fb94024edc040feeba195803'
'547457d7b4d9920abc58a73bb09b2710243f46fdf3437a50748a55efb8c88b2d18edec'
'3ce083850821f8225bb0d36a826893b8cfd89bbadad09214a4610d630d654dfd873d58'
'3b68d96a3be0646217c202bdb046c2696e23fb3ab6c47815d69f8aafcf290b5ebce769'
'11808b004401d82f8278f6d8f74a28ae2f11701f2bc470093afefddfa359faae347f00'
'c5a595a1e20100000d0a300d0a0d0a'
)
buf = header + body
r = Response(buf)
assert r.version == '1.1'
assert r.status == '200'
assert r.reason == 'OK'
def test_multicookie_response():
s = (b"""HTTP/1.x 200 OK\r\nSet-Cookie: first_cookie=cookie1; path=/; domain=.example.com\r\n"""
b"""Set-Cookie: second_cookie=cookie2; path=/; domain=.example.com\r\nContent-Length: 0\r\n\r\n""")
r = Response(s)
assert type(r.headers['set-cookie']) is list
assert len(r.headers['set-cookie']) == 2
def test_noreason_response():
s = b"""HTTP/1.1 200 \r\n\r\n"""
r = Response(s)
assert r.reason == ''
assert bytes(r) == s
def test_response_with_body():
r = Response()
r.body = b'foo'
assert str(r) == 'HTTP/1.0 200 OK\r\n\r\nfoo'
assert bytes(r) == b'HTTP/1.0 200 OK\r\n\r\nfoo'
repr(r)
def test_body_forbidden_response():
s = b'HTTP/1.1 304 Not Modified\r\n'\
b'Content-Type: text/css\r\n'\
b'Last-Modified: Wed, 14 Jan 2009 16:42:11 GMT\r\n'\
b'ETag: "3a7-496e15e3"\r\n'\
b'Cache-Control: private, max-age=414295\r\n'\
b'Date: Wed, 22 Sep 2010 17:55:54 GMT\r\n'\
b'Connection: keep-alive\r\n'\
b'Vary: Accept-Encoding\r\n\r\n'\
b'HTTP/1.1 200 OK\r\n'\
b'Server: Sun-ONE-Web-Server/6.1\r\n'\
b'ntCoent-length: 257\r\n'\
b'Content-Type: application/x-javascript\r\n'\
b'Last-Modified: Wed, 06 Jan 2010 19:34:06 GMT\r\n'\
b'ETag: "101-4b44e5ae"\r\n'\
b'Accept-Ranges: bytes\r\n'\
b'Content-Encoding: gzip\r\n'\
b'Cache-Control: private, max-age=439726\r\n'\
b'Date: Wed, 22 Sep 2010 17:55:54 GMT\r\n'\
b'Connection: keep-alive\r\n'\
b'Vary: Accept-Encoding\r\n'
result = []
while s:
msg = Response(s)
s = msg.data
result.append(msg)
# the second HTTP response should be a standalone message
assert len(result) == 2
def test_request_version():
s = b"""GET / HTTP/1.0\r\n\r\n"""
r = Request(s)
assert r.method == 'GET'
assert r.uri == '/'
assert r.version == '1.0'
s = b"""GET /\r\n\r\n"""
r = Request(s)
assert r.method == 'GET'
assert r.uri == '/'
assert r.version == '0.9'
import pytest
s = b"""GET / CHEESE/1.0\r\n\r\n"""
with pytest.raises(dpkt.UnpackError, match="invalid http version: u?'CHEESE/1.0'"):
Request(s)
def test_valid_header():
# valid header.
s = b'POST /main/redirect/ab/1,295,,00.html HTTP/1.0\r\n' \
b'Referer: http://www.email.com/login/snap/login.jhtml\r\n' \
b'Connection: Keep-Alive\r\n' \
b'User-Agent: Mozilla/4.75 [en] (X11; U; OpenBSD 2.8 i386; Nav)\r\n' \
b'Host: ltd.snap.com\r\n' \
b'Accept: image/gif, image/x-xbitmap, image/jpeg, image/pjpeg, image/png, */*\r\n' \
b'Accept-Encoding: gzip\r\n' \
b'Accept-Language: en\r\n' \
b'Accept-Charset: iso-8859-1,*,utf-8\r\n' \
b'Content-type: application/x-www-form-urlencoded\r\n' \
b'Content-length: 61\r\n\r\n' \
b'sn=em&mn=dtest4&pw=this+is+atest&fr=true&login=Sign+in&od=www'
r = Request(s)
assert r.method == 'POST'
assert r.uri == '/main/redirect/ab/1,295,,00.html'
assert r.body == b'sn=em&mn=dtest4&pw=this+is+atest&fr=true&login=Sign+in&od=www'
assert r.headers['content-type'] == 'application/x-www-form-urlencoded'
def test_weird_end_header():
s_weird_end = b'POST /main/redirect/ab/1,295,,00.html HTTP/1.0\r\n' \
b'Referer: http://www.email.com/login/snap/login.jhtml\r\n' \
b'Connection: Keep-Alive\r\n' \
b'User-Agent: Mozilla/4.75 [en] (X11; U; OpenBSD 2.8 i386; Nav)\r\n' \
b'Host: ltd.snap.com\r\n' \
b'Accept: image/gif, image/x-xbitmap, image/jpeg, image/pjpeg, image/png, */*\r\n' \
b'Accept-Encoding: gzip\r\n' \
b'Accept-Language: en\r\n' \
b'Accept-Charset: iso-8859-1,*,utf-8\r\n' \
b'Content-type: application/x-www-form-urlencoded\r\n' \
b'Cookie: TrackID=1PWdcr3MO_C611BGW'
r = Request(s_weird_end)
assert r.method == 'POST'
assert r.uri == '/main/redirect/ab/1,295,,00.html'
assert r.headers['content-type'] == 'application/x-www-form-urlencoded'
def test_gzip_response():
import zlib
# valid response, compressed using gzip
s = b'HTTP/1.0 200 OK\r\n' \
b'Server: SimpleHTTP/0.6 Python/2.7.12\r\n' \
b'Date: Fri, 10 Mar 2017 20:43:08 GMT\r\n' \
b'Content-type: text/plain\r\n' \
b'Content-Encoding: gzip\r\n' \
b'Content-Length: 68\r\n' \
b'Last-Modified: Fri, 10 Mar 2017 20:40:43 GMT\r\n\r\n' \
b'\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\x03\x0b\xc9\xc8,V\x00\xa2D' \
b'\x85\xb2\xd4\xa2J\x85\xe2\xdc\xc4\x9c\x1c\x85\xb4\xcc\x9cT\x85\x92' \
b'|\x85\x92\xd4\xe2\x12\x85\xf4\xaa\xcc\x02\x85\xa2\xd4\xe2\x82\xfc' \
b'\xbc\xe2\xd4b=.\x00\x01(m\xad2\x00\x00\x00'
r = Response(s)
assert r.version == '1.0'
assert r.status == '200'
assert r.reason == 'OK'
# Make a zlib compressor with the appropriate gzip options
decompressor = zlib.decompressobj(16 + zlib.MAX_WBITS)
body = decompressor.decompress(r.body)
assert body.startswith(b'This is a very small file')
def test_message():
# s = b'Date: Fri, 10 Mar 2017 20:43:08 GMT\r\n' # FIXME - unused
r = Message(content_length=68)
assert r.content_length == 68
assert len(r) == 2
def test_invalid():
import pytest
s = b'INVALID / HTTP/1.0\r\n'
with pytest.raises(dpkt.UnpackError, match="invalid http method: u?'INVALID'"):
Request(s)
s = b'A'
with pytest.raises(dpkt.UnpackError, match="invalid response: b?'A'"):
Response(s)
s = b'HTTT 200 OK'
with pytest.raises(dpkt.UnpackError, match="invalid response: b?'HTTT 200 OK'"):
Response(s)
s = b'HTTP TWO OK'
with pytest.raises(dpkt.UnpackError, match="invalid response: b?'HTTP TWO OK'"):
Response(s)
s = (
b'HTTP/1.0 200 OK\r\n'
b'Invalid Header: invalid\r\n'
)
with pytest.raises(dpkt.UnpackError, match="invalid header: "):
Response(s)
s = (
b"HTTP/1.1 200 OK\r\n"
b"Transfer-Encoding: chunked\r\n"
b"\r\n"
b"\r\n"
)
with pytest.raises(dpkt.UnpackError, match="missing chunk size"):
Response(s)
s = (
b"HTTP/1.1 200 OK\r\n"
b"Transfer-Encoding: chunked\r\n"
b"\r\n"
b"\x01\r\na"
)
with pytest.raises(dpkt.UnpackError, match="invalid chunk size"):
Response(s)
s = (
b"HTTP/1.1 200 OK\r\n"
b"Transfer-Encoding: chunked\r\n"
b"\r\n"
b"2\r\n"
b"abcd"
)
with pytest.raises(dpkt.NeedData, match="premature end of chunked body"):
Response(s)
s = (
b"HTTP/1.1 200 OK\r\n"
b"Content-Length: 68\r\n"
b"\r\n"
b"a\r\n"
)
with pytest.raises(dpkt.NeedData, match=r"short body \(missing 65 bytes\)"):
Response(s)
# messy header.
s_messy_header = b'aaaaaaaaa\r\nbbbbbbbbb'
with pytest.raises(dpkt.UnpackError, match="invalid request: u?'aaaaaaaa"):
Request(s_messy_header)
def test_response_str():
s = (
b'HTTP/1.0 200 OK\r\n'
b'Server: SimpleHTTP/0.6 Python/2.7.12\r\n'
b'Date: Fri, 10 Mar 2017 20:43:08 GMT\r\n'
b'Content-type: text/plain\r\n'
)
# the headers are processed to lowercase keys
resp = [
'HTTP/1.0 200 OK',
'server: SimpleHTTP/0.6 Python/2.7.12',
'date: Fri, 10 Mar 2017 20:43:08 GMT',
'content-type: text/plain',
'',
'',
]
r_str = str(Response(s))
s_arr = sorted(resp)
resp_arr = sorted(r_str.split('\r\n'))
for line1, line2 in zip(s_arr, resp_arr):
assert line1 == line2
def test_request_str():
s = b'GET / HTTP/1.0\r\n'
r = Request(s)
req = 'GET / HTTP/1.0\r\n\r\n'
assert req == str(r)
def test_parse_body():
import pytest
from .compat import BytesIO
buf = BytesIO(
b'05\r\n' # size
b'ERR' # longer than size
)
buf.seek(0)
headers = {
'transfer-encoding': 'chunked',
}
with pytest.raises(dpkt.NeedData, match="premature end of chunked body"):
parse_body(buf, headers)
| 22,114 | 36.168067 | 122 |
py
|
dpkt
|
dpkt-master/dpkt/hsrp.py
|
# $Id: hsrp.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Cisco Hot Standby Router Protocol."""
from __future__ import absolute_import
from . import dpkt
# Opcodes
HELLO = 0
COUP = 1
RESIGN = 2
# States
INITIAL = 0x00
LEARN = 0x01
LISTEN = 0x02
SPEAK = 0x04
STANDBY = 0x08
ACTIVE = 0x10
class HSRP(dpkt.Packet):
"""Cisco Hot Standby Router Protocol.
It is a Cisco proprietary redundancy protocol for establishing a fault-tolerant default gateway. Version 1 of the
protocol was described in RFC 2281 in 1998. Version 2 of the protocol includes improvements and supports IPv6 but
there is no corresponding RFC published for this version.
Attributes:
__hdr__: Header fields of HSRP.
version: (int): Version. HSRP version number. (1 byte)
opcode: (int): Operation code. (Hello - 0, Coup - 1, Resign - 2) (1 byte)
state: (int): State. This field describes the current state of the router sending the message. (1 byte)
hello: (int): Hellotime. This field is only meaningful in Hello messages. It contains the approximate period
between the Hello messages that the router sends. The time is given in seconds.(1 byte)
hold: (int): Holdtime. This field is only meaningful in Hello messages. It contains the amount of time that
the current Hello message should be considered valid. The time is given in seconds. (1 byte)
priority: (int): Priority. This field is used to elect the active and standby routers. (1 byte)
group: (int): Group. This field identifies the standby group. (1 byte)
rsvd: (int): Reserved. (1 byte)
auth: (bytes): Authentication Data. This field contains a clear text 8 character reused password. (8 bytes)
vip: (bytes): Virtual IP Address. The virtual IP address used by this group. (4 bytes)
"""
__hdr__ = (
('version', 'B', 0),
('opcode', 'B', 0),
('state', 'B', 0),
('hello', 'B', 0),
('hold', 'B', 0),
('priority', 'B', 0),
('group', 'B', 0),
('rsvd', 'B', 0),
('auth', '8s', b'cisco'),
('vip', '4s', b'')
)
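# An illustrative construction sketch (not part of the original module); the
# timer, priority and address values below are invented for the example.
def _example_hsrp_hello():
    h = HSRP(opcode=HELLO, state=ACTIVE, hello=3, hold=10,
             priority=100, group=1, vip=b'\xc0\xa8\x01\x01')
    assert len(bytes(h)) == 20  # eight 1-byte fields + 8-byte auth + 4-byte vip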
| 2,223 | 38.017544 | 120 |
py
|
dpkt
|
dpkt-master/dpkt/radius.py
|
# $Id: radius.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Remote Authentication Dial-In User Service."""
from __future__ import absolute_import
from . import dpkt
from .compat import compat_ord
# http://www.untruth.org/~josh/security/radius/radius-auth.html
# RFC 2865
class RADIUS(dpkt.Packet):
"""Remote Authentication Dial-In User Service.
Remote Authentication Dial-In User Service (RADIUS) is a networking protocol that provides centralized
authentication, authorization, and accounting (AAA) management for users who connect and use a network service.
RADIUS was developed by Livingston Enterprises in 1991 as an access server authentication and accounting protocol.
It was later brought into IEEE 802 and IETF standards.
Attributes:
__hdr__: Header fields of RADIUS.
code: (int): Code. (1 byte)
id: (int): ID (1 byte)
len: (int): Length (2 bytes)
auth: (int): Authentication (16 bytes)
"""
__hdr__ = (
('code', 'B', 0),
('id', 'B', 0),
('len', 'H', 4),
('auth', '16s', b'')
)
attrs = b''
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.attrs = parse_attrs(self.data)
self.data = b''
def parse_attrs(buf):
"""Parse attributes buffer into a list of (type, data) tuples."""
attrs = []
while buf:
t = compat_ord(buf[0])
l_ = compat_ord(buf[1])
if l_ < 2:
break
d, buf = buf[2:l_], buf[l_:]
attrs.append((t, d))
return attrs
# Codes
RADIUS_ACCESS_REQUEST = 1
RADIUS_ACCESS_ACCEPT = 2
RADIUS_ACCESS_REJECT = 3
RADIUS_ACCT_REQUEST = 4
RADIUS_ACCT_RESPONSE = 5
RADIUS_ACCT_STATUS = 6
RADIUS_ACCESS_CHALLENGE = 11
# Attributes
RADIUS_USER_NAME = 1
RADIUS_USER_PASSWORD = 2
RADIUS_CHAP_PASSWORD = 3
RADIUS_NAS_IP_ADDR = 4
RADIUS_NAS_PORT = 5
RADIUS_SERVICE_TYPE = 6
RADIUS_FRAMED_PROTOCOL = 7
RADIUS_FRAMED_IP_ADDR = 8
RADIUS_FRAMED_IP_NETMASK = 9
RADIUS_FRAMED_ROUTING = 10
RADIUS_FILTER_ID = 11
RADIUS_FRAMED_MTU = 12
RADIUS_FRAMED_COMPRESSION = 13
RADIUS_LOGIN_IP_HOST = 14
RADIUS_LOGIN_SERVICE = 15
RADIUS_LOGIN_TCP_PORT = 16
# unassigned
RADIUS_REPLY_MESSAGE = 18
RADIUS_CALLBACK_NUMBER = 19
RADIUS_CALLBACK_ID = 20
# unassigned
RADIUS_FRAMED_ROUTE = 22
RADIUS_FRAMED_IPX_NETWORK = 23
RADIUS_STATE = 24
RADIUS_CLASS = 25
RADIUS_VENDOR_SPECIFIC = 26
RADIUS_SESSION_TIMEOUT = 27
RADIUS_IDLE_TIMEOUT = 28
RADIUS_TERMINATION_ACTION = 29
RADIUS_CALLED_STATION_ID = 30
RADIUS_CALLING_STATION_ID = 31
RADIUS_NAS_ID = 32
RADIUS_PROXY_STATE = 33
RADIUS_LOGIN_LAT_SERVICE = 34
RADIUS_LOGIN_LAT_NODE = 35
RADIUS_LOGIN_LAT_GROUP = 36
RADIUS_FRAMED_ATALK_LINK = 37
RADIUS_FRAMED_ATALK_NETWORK = 38
RADIUS_FRAMED_ATALK_ZONE = 39
# 40-59 reserved for accounting
RADIUS_CHAP_CHALLENGE = 60
RADIUS_NAS_PORT_TYPE = 61
RADIUS_PORT_LIMIT = 62
RADIUS_LOGIN_LAT_PORT = 63
def test_parse_attrs():
from binascii import unhexlify
buf = unhexlify(
'01' # type (RADIUS_USER_NAME)
'06' # end of attribute value
'75736572' # value ('user')
'00'
'00'
)
attrs = parse_attrs(buf)
assert len(attrs) == 1
type0, value0 = attrs[0]
assert type0 == RADIUS_USER_NAME
assert value0 == b'user'
def test_parse_multiple_attrs():
from binascii import unhexlify
buf = unhexlify(
'01' # type (RADIUS_USER_NAME)
'06' # end of attribute value
'75736572' # value ('user')
'02' # type (RADIUS_USER_PASSWORD)
'0a' # end of attribute value
'70617373776f7264' # value ('password')
)
attrs = parse_attrs(buf)
assert len(attrs) == 2
type0, value0 = attrs[0]
assert type0 == RADIUS_USER_NAME
assert value0 == b'user'
type1, value1 = attrs[1]
assert type1 == RADIUS_USER_PASSWORD
assert value1 == b'password'
def test_radius_unpacking():
from binascii import unhexlify
buf_attrs = unhexlify(
'01' # type (RADIUS_USER_NAME)
'06' # end of attribute value
'75736572' # value ('user')
)
buf_radius_header = unhexlify(
'01' # code
'34' # id
'1234' # len
'0123456789abcdef' # auth
'0123456789abcdef' # auth
)
buf = buf_radius_header + buf_attrs
radius = RADIUS(buf)
assert len(radius.attrs) == 1
name0, value0 = radius.attrs[0]
assert name0 == 1
assert value0 == b'user'
| 4,632 | 25.474286 | 118 |
py
|
dpkt
|
dpkt-master/dpkt/dtp.py
|
# $Id: dtp.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Dynamic Trunking Protocol."""
from __future__ import absolute_import
import struct
from . import dpkt
TRUNK_NAME = 0x01
MAC_ADDR = 0x04
class DTP(dpkt.Packet):
"""Dynamic Trunking Protocol.
The Dynamic Trunking Protocol (DTP) is a proprietary networking protocol developed by Cisco Systems for the purpose
of negotiating trunking on a link between two VLAN-aware switches, and for negotiating the type of trunking
encapsulation to be used. It works on Layer 2 of the OSI model. VLAN trunks formed using DTP may utilize either
IEEE 802.1Q or Cisco ISL trunking protocols.
Attributes:
__hdr__: Header fields of DTP.
v: (int) Version. (1 byte)
"""
__hdr__ = (
('v', 'B', 0),
) # rest is TLVs
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
buf = self.data
tvs = []
while buf:
t, l_ = struct.unpack('>HH', buf[:4])
v, buf = buf[4:4 + l_], buf[4 + l_:]
tvs.append((t, v))
self.data = tvs
def __bytes__(self):
return b''.join([struct.pack('>HH', t, len(v)) + v for t, v in self.data])
def test_creation():
dtp1 = DTP()
assert dtp1.v == 0
from binascii import unhexlify
buf = unhexlify(
'04' # version
'0001' # type
'0002' # length
'1234' # value
)
dtp2 = DTP(buf)
assert dtp2.v == 4
assert len(dtp2.data) == 1
tlvs = dtp2.data
tlv = tlvs[0]
key, value = tlv
assert key == 1
assert value == unhexlify('1234')
assert bytes(dtp2) == buf[1:]
| 1,664 | 24.227273 | 119 |
py
|
dpkt
|
dpkt-master/dpkt/aoecfg.py
|
# -*- coding: utf-8 -*-
"""ATA over Ethernet ATA command"""
from __future__ import print_function
from __future__ import absolute_import
from . import dpkt
class AOECFG(dpkt.Packet):
"""ATA over Ethernet ATA command.
See more about the AOE on \
https://en.wikipedia.org/wiki/ATA_over_Ethernet
Attributes:
__hdr__: Header fields of AOECFG.
data: Message data.
"""
__hdr__ = (
('bufcnt', 'H', 0),
('fwver', 'H', 0),
('scnt', 'B', 0),
('aoeccmd', 'B', 0),
('cslen', 'H', 0),
)
def test_aoecfg():
s = (b'\x01\x02\x03\x04\x05\x06\x11\x12\x13\x14\x15\x16\x88\xa2\x10\x00\x00\x01\x02\x01\x80'
b'\x00\x00\x00\x12\x34\x00\x00\x00\x00\x04\x00' + b'\xed' * 1024)
aoecfg = AOECFG(s[14 + 10:])
assert (aoecfg.bufcnt == 0x1234)
| 828 | 23.382353 | 96 |
py
|
dpkt
|
dpkt-master/dpkt/tns.py
|
# $Id: tns.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Transparent Network Substrate."""
from __future__ import print_function
from __future__ import absolute_import
from . import dpkt
class TNS(dpkt.Packet):
"""Transparent Network Substrate.
The session-layer network protocol used by Oracle database clients and servers.
Attributes:
__hdr__: Header fields of TNS: packet length, packet checksum, packet
type, a reserved byte, header checksum and the message payload.
"""
__hdr__ = (
('length', 'H', 0),
('pktsum', 'H', 0),
('type', 'B', 0),
('rsvd', 'B', 0),
('hdrsum', 'H', 0),
('msg', '0s', b''),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
n = self.length - self.__hdr_len__
if n > len(self.data):
raise dpkt.NeedData('short message (missing %d bytes)' %
(n - len(self.data)))
self.msg = self.data[:n]
self.data = self.data[n:]
def test_tns():
s = (b'\x00\x23\x00\x00\x01\x00\x00\x00\x01\x34\x01\x2c\x00\x00\x08\x00\x7f'
b'\xff\x4f\x98\x00\x00\x00\x01\x00\x01\x00\x22\x00\x00\x00\x00\x01\x01X')
t = TNS(s)
assert t.msg.startswith(b'\x01\x34')
# test a truncated packet
try:
t = TNS(s[:-10])
except dpkt.NeedData:
pass
| 1,247 | 23.96 | 82 |
py
|
dpkt
|
dpkt-master/dpkt/aoe.py
|
# -*- coding: utf-8 -*-
"""ATA over Ethernet Protocol."""
from __future__ import absolute_import
import struct
from . import dpkt
from .compat import iteritems
class AOE(dpkt.Packet):
"""ATA over Ethernet Protocol.
See more about the AOE on
https://en.wikipedia.org/wiki/ATA_over_Ethernet
Attributes:
__hdr__: Header fields of AOE.
data: Message data.
"""
__hdr__ = (
('_ver_fl', 'B', 0x10),
('err', 'B', 0),
('maj', 'H', 0),
('min', 'B', 0),
('cmd', 'B', 0),
('tag', 'I', 0),
)
__bit_fields__ = {
'_ver_fl': (
('ver', 4),
('fl', 4),
)
}
_cmdsw = {}
@classmethod
def set_cmd(cls, cmd, pktclass):
cls._cmdsw[cmd] = pktclass
@classmethod
def get_cmd(cls, cmd):
return cls._cmdsw[cmd]
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
try:
self.data = self._cmdsw[self.cmd](self.data)
setattr(self, self.data.__class__.__name__.lower(), self.data)
except (KeyError, struct.error, dpkt.UnpackError):
pass
AOE_CMD_ATA = 0
AOE_CMD_CFG = 1
AOE_FLAG_RSP = 1 << 3
def _load_cmds():
prefix = 'AOE_CMD_'
g = globals()
for k, v in iteritems(g):
if k.startswith(prefix):
name = 'aoe' + k[len(prefix):].lower()
try:
mod = __import__(name, g, level=1)
AOE.set_cmd(v, getattr(mod, name.upper()))
except (ImportError, AttributeError):
continue
def _mod_init():
"""Post-initialization called when all dpkt modules are fully loaded"""
if not AOE._cmdsw:
_load_cmds()
def test_creation():
aoe = AOE()
# hdr fields
assert aoe._ver_fl == 0x10
assert aoe.err == 0
assert aoe.maj == 0
assert aoe.min == 0
assert aoe.cmd == 0
assert aoe.tag == 0
assert bytes(aoe) == b'\x10' + b'\x00' * 9
def test_properties():
aoe = AOE()
# property getters
assert aoe.ver == 1
assert aoe.fl == 0
# property setters
aoe.ver = 2
assert aoe.ver == 2
assert aoe._ver_fl == 0x20
aoe.fl = 12
assert aoe.fl == 12
assert aoe._ver_fl == 0x2C
def test_unpack():
from binascii import unhexlify
buf = unhexlify(
'1000000000'
'00' # cmd: AOE_CMD_ATA
'00000000' # tag
)
aoe = AOE(buf)
# AOE_CMD_ATA specified, but no data supplied
assert aoe.data == b''
buf = unhexlify(
'1000000000'
'00' # cmd: AOE_CMD_ATA
'00000000' # tag
# AOEDATA specification
'030a6b190000000045000028941f0000e30699b4232b2400de8e8442abd100500035e1'
'2920d9000000229bf0e204656b'
)
aoe = AOE(buf)
assert aoe.aoeata == aoe.data
def test_cmds():
import dpkt
assert AOE.get_cmd(AOE_CMD_ATA) == dpkt.aoeata.AOEATA
assert AOE.get_cmd(AOE_CMD_CFG) == dpkt.aoecfg.AOECFG
def test_cmd_loading():
# this test checks that failing to load a module isn't catastrophic
standard_cmds = AOE._cmdsw
# delete the existing code->module mappings
AOE._cmdsw = {}
assert not AOE._cmdsw
# create a new global constant pointing to a module which doesn't exist
globals()['AOE_CMD_FAIL'] = "FAIL"
_mod_init()
# check that the same modules were loaded, ignoring the fail
assert AOE._cmdsw == standard_cmds
| 3,460 | 22.228188 | 80 |
py
|
dpkt
|
dpkt-master/dpkt/ip6.py
|
# $Id: ip6.py 87 2013-03-05 19:41:04Z [email protected] $
# -*- coding: utf-8 -*-
"""Internet Protocol, version 6."""
from __future__ import print_function
from __future__ import absolute_import
from . import dpkt
from . import ip
from . import tcp
from .compat import compat_ord
from .utils import inet_to_str
import struct
# The allowed extension headers and their classes (in order according to RFC).
EXT_HDRS = [ip.IP_PROTO_HOPOPTS, ip.IP_PROTO_ROUTING, ip.IP_PROTO_FRAGMENT, ip.IP_PROTO_AH, ip.IP_PROTO_ESP,
ip.IP_PROTO_DSTOPTS]
# EXT_HDRS_CLS - classes is below - after all the used classes are defined.
class IP6(dpkt.Packet):
"""Internet Protocol, version 6.
Internet Protocol version 6 (IPv6) is the most recent version of the Internet Protocol (IP),
the communications protocol that provides an identification and location system for computers
on networks and routes traffic across the Internet. IPv6 was developed by the Internet Engineering
Task Force (IETF) to deal with the long-anticipated problem of IPv4 address exhaustion,
and is intended to replace IPv4.
Attributes:
__hdr__: Header fields of IPv6.
_v_fc_flow:
v: (int): Version (4 bits),
fc (int): Traffic Class (6+2 bits)
flow (int): Flow Label (20 bits).
plen: (int): Payload Length (2 bytes). The size of the payload in octets, including any extension headers.
nxt: (bytes): Next Header (1 byte). Specifies the type of the next header.
hlim: (bytes): Hop Limit (1 byte). Replaces the time to live field in IPv4.
src: (int): Source Address (16 bytes). The unicast IPv6 address of the sending node.
dst: (int): Destination Address (16 bytes). Unicast or multicast address of the destination node(s).
"""
__hdr__ = (
('_v_fc_flow', 'I', 0x60000000),
('plen', 'H', 0), # payload length (not including header)
('nxt', 'B', 0), # next header protocol
('hlim', 'B', 0), # hop limit
('src', '16s', b''),
('dst', '16s', b'')
)
__bit_fields__ = {
'_v_fc_flow': (
('v', 4), # version, 4 hi bits
('fc', 8), # traffic class, 8 bits
('flow', 20), # flow label, 20 lo bits
)
}
__pprint_funcs__ = {
'src': inet_to_str,
'dst': inet_to_str
}
_protosw = ip.IP._protosw
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.extension_hdrs = {}
# NOTE: self.extension_hdrs is not accurate, as it doesn't support duplicate header types.
# According to RFC-1883 "Each extension header should occur at most once, except for the
# Destination Options header which should occur at most twice".
# Secondly, the .headers_str() method attempts to pack the extension headers in order as
# defined in the RFC, however it doesn't adjust the next header (nxt) pointer accordingly.
# Here we introduce the new field .all_extension_headers; it allows duplicate types and
# keeps the original order.
self.all_extension_headers = []
if self.plen:
buf = self.data[:self.plen]
else: # due to jumbo payload or TSO
buf = self.data
next_ext_hdr = self.nxt
while next_ext_hdr in EXT_HDRS:
ext = EXT_HDRS_CLS[next_ext_hdr](buf)
self.extension_hdrs[next_ext_hdr] = ext
self.all_extension_headers.append(ext)
buf = buf[ext.length:]
next_ext_hdr = getattr(ext, 'nxt', None)
# set the payload protocol id
if next_ext_hdr is not None:
self.p = next_ext_hdr
# do not decode fragments after the first fragment
# https://github.com/kbandla/dpkt/issues/575
if self.nxt == 44 and ext.frag_off > 0: # 44 = IP_PROTO_FRAGMENT
self.data = buf
return
try:
self.data = self._protosw[next_ext_hdr](buf)
setattr(self, self.data.__class__.__name__.lower(), self.data)
except (KeyError, dpkt.UnpackError):
self.data = buf
def headers_str(self):
nxt = self.nxt
# If all_extension_headers is available, return the headers as they originally appeared
if hasattr(self, 'all_extension_headers') and self.all_extension_headers:
# get the nxt header from the last one
nxt = self.all_extension_headers[-1].nxt
return nxt, b''.join(bytes(ext) for ext in self.all_extension_headers)
# Output extension headers in order defined in RFC1883 (except dest opts)
header_str = b""
if hasattr(self, 'extension_hdrs'):
for hdr in EXT_HDRS:
if hdr in self.extension_hdrs:
nxt = self.extension_hdrs[hdr].nxt
header_str += bytes(self.extension_hdrs[hdr])
return nxt, header_str
def __bytes__(self):
self.p, hdr_str = self.headers_str()
# set TCP, UDP, and ICMPv6 checksums
if ((self.p == 6 or self.p == 17 or self.p == 58) and
hasattr(self.data, 'sum') and not self.data.sum):
p = bytes(self.data)
s = struct.pack('>16s16sxBH', self.src, self.dst, self.p, len(p))
s = dpkt.in_cksum_add(0, s)
s = dpkt.in_cksum_add(s, p)
self.data.sum = dpkt.in_cksum_done(s)
return self.pack_hdr() + hdr_str + bytes(self.data)
def __len__(self):
baselen = self.__hdr_len__ + len(self.data)
if hasattr(self, 'all_extension_headers') and self.all_extension_headers:
return baselen + sum(len(hh) for hh in self.all_extension_headers)
elif hasattr(self, 'extension_hdrs') and self.extension_hdrs:
return baselen + sum(len(hh) for hh in self.extension_hdrs.values())
return baselen
@classmethod
def set_proto(cls, p, pktclass):
cls._protosw[p] = pktclass
@classmethod
def get_proto(cls, p):
return cls._protosw[p]
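# An illustrative sketch (not part of the original module) showing the
# version/class/flow bit fields declared in __bit_fields__ above.
def _example_ip6_bit_fields():
    ip6 = IP6()
    assert ip6.v == 6         # default _v_fc_flow 0x60000000 carries version 6
    ip6.flow = 0xabcde        # flow label occupies the low 20 bits
    assert ip6._v_fc_flow == 0x600abcde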
class IP6ExtensionHeader(dpkt.Packet):
"""
    An extension header is essentially a sub-packet: subclassing dpkt.Packet
    lets each header type reuse the standard header packing/unpacking machinery.
"""
pass
class IP6OptsHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
        ('len', 'B', 0)  # option data length in 8-octet units, not counting the first 8 octets; len 0 means an 8-byte header
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.length = (self.len + 1) * 8
options = []
index = 0
while index < self.length - 2:
try:
opt_type = compat_ord(self.data[index])
# PAD1 option
if opt_type == 0:
index += 1
continue
opt_length = compat_ord(self.data[index + 1])
if opt_type == 1: # PADN option
# PADN uses opt_length bytes in total
index += opt_length + 2
continue
options.append({
'type': opt_type,
'opt_length': opt_length,
'data': self.data[index + 2:index + 2 + opt_length]
})
                # skip the two option header bytes (type and length) plus the option data to reach the next option
index += opt_length + 2
except IndexError:
raise dpkt.NeedData
self.options = options
self.data = buf[2:self.length] # keep raw data with all pad options, but not the following data
class IP6HopOptsHeader(IP6OptsHeader):
pass
class IP6DstOptsHeader(IP6OptsHeader):
pass
class IP6RoutingHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
('len', 'B', 0), # extension data length in 8 octet units (ignoring first 8 octets) (<= 46 for type 0)
('type', 'B', 0), # routing type (currently, only 0 is used)
('segs_left', 'B', 0), # remaining segments in route, until destination (<= 23)
('_rsvd_sl_bits', 'I', 0)
)
__bit_fields__ = {
'_rsvd_sl_bits': (
('_rsvd', 8), # reserved (1 byte)
('sl_bits', 24), # strict/loose bitmap for addresses
)
}
def unpack(self, buf):
hdr_size = 8
addr_size = 16
dpkt.Packet.unpack(self, buf)
addresses = []
num_addresses = self.len // 2
buf = buf[hdr_size:hdr_size + num_addresses * addr_size]
for i in range(num_addresses):
addresses.append(buf[i * addr_size: i * addr_size + addr_size])
self.data = buf
self.addresses = addresses
self.length = self.len * 8 + 8
class IP6FragmentHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
('_resv', 'B', 0), # reserved, set to 0
('_frag_off_resv_m', 'H', 0),
('id', 'I', 0) # fragments id
)
__bit_fields__ = {
'_frag_off_resv_m': (
('frag_off', 13), # frag offset, 13 bits
('_resv', 2), # reserved zero (2 bits)
('m_flag', 1), # more frags flag
)
}
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.length = self.__hdr_len__
self.data = b''
class IP6AHHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
('len', 'B', 0), # length of header in 4 octet units (ignoring first 2 units)
('_resv', 'H', 0), # reserved, 2 bytes of 0
('spi', 'I', 0), # SPI security parameter index
('seq', 'I', 0) # sequence no.
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.length = (self.len + 2) * 4
self.auth_data = self.data[:(self.len - 1) * 4]
class IP6ESPHeader(IP6ExtensionHeader):
__hdr__ = (
('spi', 'I', 0),
('seq', 'I', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.length = self.__hdr_len__ + len(self.data)
EXT_HDRS_CLS = {ip.IP_PROTO_HOPOPTS: IP6HopOptsHeader,
ip.IP_PROTO_ROUTING: IP6RoutingHeader,
ip.IP_PROTO_FRAGMENT: IP6FragmentHeader,
ip.IP_PROTO_ESP: IP6ESPHeader,
ip.IP_PROTO_AH: IP6AHHeader,
ip.IP_PROTO_DSTOPTS: IP6DstOptsHeader}
# Unit tests
def test_ipg():
s = (b'\x60\x00\x00\x00\x00\x28\x06\x40\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x11\x24\xff\xfe\x8c'
b'\x11\xde\xfe\x80\x00\x00\x00\x00\x00\x00\x02\xb0\xd0\xff\xfe\xe1\x80\x72\xcd\xca\x00\x16'
b'\x04\x84\x46\xd5\x00\x00\x00\x00\xa0\x02\xff\xff\xf8\x09\x00\x00\x02\x04\x05\xa0\x01\x03'
b'\x03\x00\x01\x01\x08\x0a\x7d\x18\x35\x3f\x00\x00\x00\x00')
_ip = IP6(s)
# basic properties
assert _ip.v == 6
assert _ip.fc == 0
assert _ip.flow == 0
_ip.data.sum = 0
s2 = bytes(_ip)
assert s == s2
def test_dict():
s = (b'\x60\x00\x00\x00\x00\x28\x06\x40\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x11\x24\xff\xfe\x8c'
b'\x11\xde\xfe\x80\x00\x00\x00\x00\x00\x00\x02\xb0\xd0\xff\xfe\xe1\x80\x72\xcd\xca\x00\x16'
b'\x04\x84\x46\xd5\x00\x00\x00\x00\xa0\x02\xff\xff\xf8\x09\x00\x00\x02\x04\x05\xa0\x01\x03'
b'\x03\x00\x01\x01\x08\x0a\x7d\x18\x35\x3f\x00\x00\x00\x00')
_ip = IP6(s)
d = dict(_ip)
# basic properties
assert d['src'] == b'\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x11\x24\xff\xfe\x8c\x11\xde'
assert d['dst'] == b'\xfe\x80\x00\x00\x00\x00\x00\x00\x02\xb0\xd0\xff\xfe\xe1\x80\x72'
def test_ip6_routing_header():
s = (b'\x60\x00\x00\x00\x00\x3c\x2b\x40\x20\x48\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\xde\xca\x20\x47\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xca\xfe\x06\x04\x00\x02'
b'\x00\x00\x00\x00\x20\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca\x20\x22'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca\x00\x14\x00\x50\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x50\x02\x20\x00\x91\x7f\x00\x00')
_ip = IP6(s)
s2 = bytes(_ip)
# 43 is Routing header id
assert len(_ip.extension_hdrs[43].addresses) == 2
assert _ip.tcp
assert s == s2
def test_ip6_fragment_header():
s = b'\x06\xee\xff\xf9\x00\x00\xff\xff'
fh = IP6FragmentHeader(s)
assert fh.nxt == 6
assert fh.id == 65535
assert fh.frag_off == 8191
assert fh.m_flag == 1
# test packing
fh._frag_off_resv_m = 0
fh.frag_off = 8191
fh.m_flag = 1
assert bytes(fh) == s
# IP6 with fragment header
s = (b'\x60\x00\x00\x00\x00\x10\x2c\x00\x02\x22\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x02\x03\x33\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x29\x00\x00\x01'
b'\x00\x00\x00\x00\x60\x00\x00\x00\x00\x10\x2c\x00')
_ip = IP6(s)
assert bytes(_ip) == s
def test_ip6_options_header():
s = (b'\x3b\x04\x01\x02\x00\x00\xc9\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x01\x00\xc2\x04\x00\x00\x00\x00\x05\x02\x00\x00\x01\x02\x00\x00')
options = IP6OptsHeader(s).options
assert len(options) == 3
assert bytes(IP6OptsHeader(s)) == s
def test_ip6_ah_header():
s = b'\x3b\x04\x00\x00\x02\x02\x02\x02\x01\x01\x01\x01\x78\x78\x78\x78\x78\x78\x78\x78'
ah = IP6AHHeader(s)
assert ah.length == 24
assert ah.auth_data == b'xxxxxxxx'
assert ah.spi == 0x2020202
assert ah.seq == 0x1010101
assert bytes(ah) == s
def test_ip6_esp_header():
s = (b'\x00\x00\x01\x00\x00\x00\x00\x44\xe2\x4f\x9e\x68\xf3\xcd\xb1\x5f\x61\x65\x42\x8b\x78\x0b'
b'\x4a\xfd\x13\xf0\x15\x98\xf5\x55\x16\xa8\x12\xb3\xb8\x4d\xbc\x16\xb2\x14\xbe\x3d\xf9\x96'
b'\xd4\xa0\x39\x1f\x85\x74\x25\x81\x83\xa6\x0d\x99\xb6\xba\xa3\xcc\xb6\xe0\x9a\x78\xee\xf2'
b'\xaf\x9a')
esp = IP6ESPHeader(s)
assert esp.length == 68
assert esp.spi == 256
assert bytes(esp) == s
def test_ip6_extension_headers():
p = (b'\x60\x00\x00\x00\x00\x3c\x2b\x40\x20\x48\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\xde\xca\x20\x47\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xca\xfe\x06\x04\x00\x02'
b'\x00\x00\x00\x00\x20\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca\x20\x22'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca\x00\x14\x00\x50\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x50\x02\x20\x00\x91\x7f\x00\x00')
_ip = IP6(p)
o = (b'\x3b\x04\x01\x02\x00\x00\xc9\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x01\x00\xc2\x04\x00\x00\x00\x00\x05\x02\x00\x00\x01\x02\x00\x00')
_ip.extension_hdrs[0] = IP6HopOptsHeader(o)
fh = b'\x06\xee\xff\xfb\x00\x00\xff\xff'
_ip.extension_hdrs[44] = IP6FragmentHeader(fh)
ah = b'\x3b\x04\x00\x00\x02\x02\x02\x02\x01\x01\x01\x01\x78\x78\x78\x78\x78\x78\x78\x78'
_ip.extension_hdrs[51] = IP6AHHeader(ah)
do = b'\x3b\x02\x01\x02\x00\x00\xc9\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
_ip.extension_hdrs[60] = IP6DstOptsHeader(do)
assert len(_ip.extension_hdrs) == 5
    # this is a legacy unit test predating the addition of .all_extension_headers;
    # adding extension headers this way does not update .all_extension_headers,
    # so we delete it to force __len__() to fall back to the legacy
    # .extension_hdrs attribute and compute the length correctly
del _ip.all_extension_headers
assert len(_ip) == len(p) + len(o) + len(fh) + len(ah) + len(do)
def test_ip6_all_extension_headers(): # https://github.com/kbandla/dpkt/pull/403
s = (b'\x60\x00\x00\x00\x00\x47\x3c\x40\xfe\xd0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x02\xfe\xd0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01\x3c\x00\x01\x04'
b'\x00\x00\x00\x00\x3c\x00\x01\x04\x00\x00\x00\x00\x2c\x00\x01\x04\x00\x00\x00\x00\x2c\x00'
b'\x00\x00\x00\x00\x00\x00\x3c\x00\x00\x00\x00\x00\x00\x00\x2c\x00\x01\x04\x00\x00\x00\x00'
b'\x3a\x00\x00\x00\x00\x00\x00\x00\x80\x00\xd8\xe5\x0c\x1a\x00\x00\x50\x61\x79\x4c\x6f\x61'
b'\x64')
_ip = IP6(s)
assert _ip.p == 58 # ICMPv6
hdrs = _ip.all_extension_headers
assert len(hdrs) == 7
assert isinstance(hdrs[0], IP6DstOptsHeader)
assert isinstance(hdrs[3], IP6FragmentHeader)
assert isinstance(hdrs[5], IP6DstOptsHeader)
assert bytes(_ip) == s
assert len(_ip) == len(s)
def test_ip6_gen_tcp_ack():
t = tcp.TCP()
t.win = 8192
t.dport = 80
t.sport = 4711
t.flags = tcp.TH_ACK
t.seq = 22
t.ack = 33
ipp = IP6()
ipp.src = b'\xfd\x00\x00\x00\x00\x00\x00\x00\xc8\xba\x88\x88\x00\xaa\xbb\x01'
ipp.dst = b'\x00d\xff\x9b\x00\x00\x00\x00\x00\x00\x00\x00\xc1\n@*'
ipp.hlim = 64
ipp.nxt = ip.IP_PROTO_TCP
ipp.data = t
    ipp.plen = len(ipp.data)  # TCP has no ulen field; only plen needs setting
assert len(bytes(ipp)) == 60
assert ipp.p == ip.IP_PROTO_TCP
# Second part of testing - with ext headers.
ipp.p = 0
o = (b'\x3b\x04\x01\x02\x00\x00\xc9\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x01\x00\xc2\x04\x00\x00\x00\x00\x05\x02\x00\x00\x01\x02\x00\x00')
ipp.extension_hdrs = {}
ipp.extension_hdrs[0] = IP6HopOptsHeader(o)
ipp.extension_hdrs[0].nxt = ip.IP_PROTO_TCP
    ipp.nxt = ip.IP_PROTO_HOPOPTS  # chain: base header -> hop-by-hop options -> TCP
_p, exthdrs = ipp.headers_str()
ipp.plen = len(exthdrs) + len(ipp.data)
assert bytes(ipp)
assert ipp.p == ip.IP_PROTO_TCP
assert ipp.nxt == ip.IP_PROTO_HOPOPTS
def test_ip6_opts():
import pytest
# https://github.com/kbandla/dpkt/issues/477
s = (b'\x52\x54\x00\xf3\x83\x6f\x52\x54\x00\x86\x33\xd9\x86\xdd\x60\x00\x00\x00\x05\x08\x3a\xff'
b'\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xfd\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x00\xd2\xf3\x00\x00\x05\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x01\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02'
b'\x00\x50\xd4\x34\x1a\x48\x24\x50\x6d\x8d\xb3\xc2\x80\x10\x01\xf6\x46\xe8\x00\x00\x01\x01'
b'\x08\x0a\xd7\x9d\x6b\x8a\x3a\xd1\xf4\x58\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61'
b'\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61'
b'\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61'
b'\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61\x61\x0a')
from dpkt.ethernet import Ethernet
assert Ethernet(s)
assert Ethernet(s).ip6
assert Ethernet(s).ip6.icmp6
assert Ethernet(s).ip6.icmp6.data
with pytest.raises(dpkt.NeedData):
IP6(Ethernet(s).ip6.icmp6.data) # should raise NeedData
from binascii import unhexlify
buf_ip6_opts = unhexlify(
'00' # nxt
'00' # len
'000000000000' # only padding
)
ip6opt = IP6OptsHeader(buf_ip6_opts)
assert ip6opt.options == []
assert ip6opt.data == b'\x00' * 6
def test_ip6_routing_properties():
ip6rh = IP6RoutingHeader()
assert ip6rh.sl_bits == 0
ip6rh.sl_bits = 1024
assert ip6rh.sl_bits == 1024
def test_ip6_fragment_properties():
ip6fh = IP6FragmentHeader()
assert ip6fh.frag_off == 0
ip6fh.frag_off = 1234
assert ip6fh.frag_off == 1234
assert ip6fh.m_flag == 0
ip6fh.m_flag = 1
assert ip6fh.m_flag == 1
def test_ip6_properties():
ip6 = IP6()
assert ip6.v == 6
ip6.v = 10
assert ip6.v == 10
assert ip6.fc == 0
ip6.fc = 5
assert ip6.fc == 5
assert ip6.flow == 0
ip6.flow = 4
assert ip6.flow == 4
# property delete
del ip6.v
del ip6.fc
del ip6.flow
assert ip6.v == 6
assert ip6.fc == 0
assert ip6.flow == 0
def test_proto_accessors():
class Proto:
pass
assert 'PROTO' not in IP6._protosw
IP6.set_proto('PROTO', Proto)
assert IP6.get_proto('PROTO') == Proto
def test_ip6_fragment_no_decode(): # https://github.com/kbandla/dpkt/issues/575
from . import udp
# fragment 0
s = (b'\x60\x00'
b'\x00\x00\x00\x2c\x11\x3f\x20\x01\x06\x38\x05\x01\x8e\xfe\xcc\x4a'
b'\x48\x39\xfa\x79\x04\xdc\x20\x01\x05\x00\x00\x60\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x30\xde\xf2\x00\x35\x00\x2c\x61\x50\x4d\x8b'
b'\x01\x20\x00\x01\x00\x00\x00\x00\x00\x01\x03\x69\x73\x63\x03\x6f'
b'\x72\x67\x00\x00\xff\x00\x01\x00\x00\x29\x10\x00\x00\x00\x80\x00'
b'\x00\x00')
frag0 = IP6(s)
assert type(frag0.data) == udp.UDP
s = (b'\x60\x00\x00\x00\x01\x34\x2c\x35\x20\x01\x05\x00\x00\x60\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x30\x20\x01\x06\x38\x05\x01\x8e\xfe'
b'\xcc\x4a\x48\x39\xfa\x79\x04\xdc'
b'\x11\x72\x31\xb9\xc1\x0f\xcf\x7c\x61\x62\x63\x64\x65\x66\x67\x68') # partial data
frag2 = IP6(s)
assert type(frag2.data) == bytes
# test packing
assert bytes(frag2) == s
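# Illustrative sketch (not a test of upstream behavior guarantees): building an
# IPv6 packet with an explicit extension header chain. Assigning
# .all_extension_headers (normally populated by unpack()) makes
# headers_str()/__bytes__() emit the headers in the given order and take the
# payload protocol from the last header's nxt field.
def example_ip6_build_with_ext_header():
    # nxt=58 (ICMPv6), len=0, one 4-byte PadN option filling the 8-byte header
    hop = IP6HopOptsHeader(b'\x3a\x00\x01\x04\x00\x00\x00\x00')
    pkt = IP6(src=b'\x00' * 16, dst=b'\x00' * 16, nxt=ip.IP_PROTO_HOPOPTS, hlim=64)
    pkt.all_extension_headers = [hop]
    pkt.plen = len(hop) + len(pkt.data)
    wire = bytes(pkt)
    assert pkt.p == 58                  # payload protocol taken from hop.nxt
    assert wire[40:48] == bytes(hop)    # extension header follows the 40-byte base header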
| 21,362 | 35.208475 | 118 |
py
|
dpkt
|
dpkt-master/dpkt/icmp6.py
|
# $Id: icmp6.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Internet Control Message Protocol for IPv6."""
from __future__ import absolute_import
from . import dpkt
ICMP6_DST_UNREACH = 1 # dest unreachable, codes:
ICMP6_PACKET_TOO_BIG = 2 # packet too big
ICMP6_TIME_EXCEEDED = 3 # time exceeded, code:
ICMP6_PARAM_PROB = 4 # ip6 header bad
ICMP6_ECHO_REQUEST = 128 # echo service
ICMP6_ECHO_REPLY = 129 # echo reply
MLD_LISTENER_QUERY = 130 # multicast listener query
MLD_LISTENER_REPORT = 131 # multicast listener report
MLD_LISTENER_DONE = 132 # multicast listener done
# RFC2292 decls
ICMP6_MEMBERSHIP_QUERY = 130 # group membership query
ICMP6_MEMBERSHIP_REPORT = 131 # group membership report
ICMP6_MEMBERSHIP_REDUCTION = 132 # group membership termination
ND_ROUTER_SOLICIT = 133 # router solicitation
ND_ROUTER_ADVERT = 134 # router advertisement
ND_NEIGHBOR_SOLICIT = 135 # neighbor solicitation
ND_NEIGHBOR_ADVERT = 136 # neighbor advertisement
ND_REDIRECT = 137 # redirect
ICMP6_ROUTER_RENUMBERING = 138 # router renumbering
ICMP6_WRUREQUEST = 139 # who are you request
ICMP6_WRUREPLY = 140 # who are you reply
ICMP6_FQDN_QUERY = 139 # FQDN query
ICMP6_FQDN_REPLY = 140 # FQDN reply
ICMP6_NI_QUERY = 139 # node information request
ICMP6_NI_REPLY = 140 # node information reply
ICMP6_MAXTYPE = 201
class ICMP6(dpkt.Packet):
"""Internet Control Message Protocol for IPv6.
Internet Control Message Protocol version 6 (ICMPv6) is the implementation of the Internet Control Message Protocol
(ICMP) for Internet Protocol version 6 (IPv6). ICMPv6 is an integral part of IPv6 and performs error reporting
and diagnostic functions.
Attributes:
__hdr__: Header fields of ICMPv6.
type: (int): Type. Control messages are identified by the value in the type field. (1 byte)
code: (int): Code. The code field gives additional context information for the message. (1 byte)
sum: (int): Checksum. ICMPv6 provides a minimal level of message integrity verification. (2 bytes)
"""
__hdr__ = (
('type', 'B', 0),
('code', 'B', 0),
('sum', 'H', 0)
)
class Error(dpkt.Packet):
__hdr__ = (('pad', 'I', 0), )
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
from . import ip6
self.data = self.ip6 = ip6.IP6(self.data)
class Unreach(Error):
pass
class TooBig(Error):
__hdr__ = (('mtu', 'I', 1232), )
class TimeExceed(Error):
pass
class ParamProb(Error):
__hdr__ = (('ptr', 'I', 0), )
class Echo(dpkt.Packet):
__hdr__ = (('id', 'H', 0), ('seq', 'H', 0))
_typesw = {1: Unreach, 2: TooBig, 3: TimeExceed, 4: ParamProb, 128: Echo, 129: Echo}
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
try:
self.data = self._typesw[self.type](self.data)
setattr(self, self.data.__class__.__name__.lower(), self.data)
except (KeyError, dpkt.UnpackError):
pass
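# Illustrative sketch (not part of the upstream test suite): building and
# re-parsing an ICMPv6 echo request. The checksum is left at 0 here; in
# practice it is filled in by the enclosing ip6.IP6.__bytes__() using the
# IPv6 pseudo-header.
def example_icmp6_echo():
    echo = ICMP6.Echo(id=1, seq=1)
    echo.data = b'ping'
    req = ICMP6(type=ICMP6_ECHO_REQUEST, data=echo)
    parsed = ICMP6(bytes(req))
    assert parsed.type == ICMP6_ECHO_REQUEST
    assert isinstance(parsed.data, ICMP6.Echo)
    assert parsed.echo.seq == 1 and parsed.echo.data == b'ping'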
| 3,084 | 31.819149 | 119 |
py
|
dpkt
|
dpkt-master/dpkt/ipip.py
|
# Defines a copy of the IP protocol as IPIP so the protocol parsing in ip.py
# can decode IPIP packets.
from __future__ import absolute_import
from .ip import IP as IPIP
| 171 | 27.666667 | 76 |
py
|
dpkt
|
dpkt-master/dpkt/ah.py
|
# $Id: ah.py 34 2007-01-28 07:54:20Z dugsong $
# -*- coding: utf-8 -*-
"""Authentication Header."""
from __future__ import absolute_import
from . import dpkt
from . import ip
class AH(dpkt.Packet):
"""Authentication Header.
The Authentication Header (AH) protocol provides data origin authentication, data integrity, and replay protection.
Attributes:
__hdr__: Header fields of AH.
auth: Authentication body.
data: Message data.
"""
__hdr__ = (
('nxt', 'B', 0),
('len', 'B', 0), # payload length
('rsvd', 'H', 0),
('spi', 'I', 0),
('seq', 'I', 0)
)
auth = b''
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
auth_len = max(4*self.len - 4, 0) # see RFC 4302, section 2.2
self.auth = self.data[:auth_len]
buf = self.data[auth_len:]
try:
self.data = ip.IP.get_proto(self.nxt)(buf)
setattr(self, self.data.__class__.__name__.lower(), self.data)
except (KeyError, dpkt.UnpackError):
self.data = buf
def __len__(self):
return self.__hdr_len__ + len(self.auth) + len(self.data)
def __bytes__(self):
return self.pack_hdr() + bytes(self.auth) + bytes(self.data)
def test_default_creation():
ah = AH()
assert ah.nxt == 0
assert ah.len == 0
assert ah.rsvd == 0
assert ah.spi == 0
assert ah.seq == 0
assert len(ah) == ah.__hdr_len__
assert bytes(ah) == b'\x00' * 12
def test_creation_from_buf():
from binascii import unhexlify
buf_ip = unhexlify(
'04' # IP
'0000000000000000000000'
'4500002200000000401172c001020304'
'01020304006f00de000ebf35666f6f626172'
)
ah = AH(buf_ip)
assert ah.nxt == 4 # IP
assert isinstance(ah.data, ip.IP)
assert len(ah) == 46
assert bytes(ah) == buf_ip
buf_not_ip = unhexlify(
'37' # Not registered
'0000000000000000000000'
'4500002200000000401172c001020304'
'01020304006f00de000ebf35666f6f626172'
)
ah_not_ip = AH(buf_not_ip)
assert ah_not_ip.nxt == 0x37
assert isinstance(ah_not_ip.data, bytes)
assert len(ah_not_ip) == 46
assert bytes(ah_not_ip) == buf_not_ip
| 2,265 | 25.045977 | 119 |
py
|
dpkt
|
dpkt-master/dpkt/dns.py
|
# $Id: dns.py 27 2006-11-21 01:22:52Z dahelder $
# -*- coding: utf-8 -*-
"""Domain Name System."""
from __future__ import print_function
from __future__ import absolute_import
import struct
import codecs
from . import dpkt
from .compat import compat_ord
DNS_Q = 0
DNS_R = 1
# Opcodes
DNS_QUERY = 0
DNS_IQUERY = 1
DNS_STATUS = 2
DNS_NOTIFY = 4
DNS_UPDATE = 5
# Flags
DNS_CD = 0x0010 # checking disabled
DNS_AD = 0x0020 # authenticated data
DNS_Z = 0x0040 # unused
DNS_RA = 0x0080 # recursion available
DNS_RD = 0x0100 # recursion desired
DNS_TC = 0x0200 # truncated
DNS_AA = 0x0400 # authoritative answer
DNS_QR = 0x8000 # response ( query / response )
# Response codes
DNS_RCODE_NOERR = 0
DNS_RCODE_FORMERR = 1
DNS_RCODE_SERVFAIL = 2
DNS_RCODE_NXDOMAIN = 3
DNS_RCODE_NOTIMP = 4
DNS_RCODE_REFUSED = 5
DNS_RCODE_YXDOMAIN = 6
DNS_RCODE_YXRRSET = 7
DNS_RCODE_NXRRSET = 8
DNS_RCODE_NOTAUTH = 9
DNS_RCODE_NOTZONE = 10
# RR types
DNS_A = 1
DNS_NS = 2
DNS_CNAME = 5
DNS_SOA = 6
DNS_NULL = 10
DNS_PTR = 12
DNS_HINFO = 13
DNS_MX = 15
DNS_TXT = 16
DNS_AAAA = 28
DNS_SRV = 33
DNS_OPT = 41
# RR classes
DNS_IN = 1
DNS_CHAOS = 3
DNS_HESIOD = 4
DNS_ANY = 255
def pack_name(name, off, label_ptrs):
name = codecs.encode(name, 'utf-8')
if name:
labels = name.split(b'.')
else:
labels = []
labels.append(b'')
buf = b''
for i, label in enumerate(labels):
key = b'.'.join(labels[i:]).upper()
ptr = label_ptrs.get(key)
if ptr is None:
if len(key) > 1:
ptr = off + len(buf)
if ptr < 0xc000:
label_ptrs[key] = ptr
i = len(label)
buf += struct.pack("B", i) + label
else:
buf += struct.pack('>H', (0xc000 | ptr))
break
return buf
def unpack_name(buf, off):
name = []
saved_off = 0
start_off = off
name_length = 0
while True:
if off >= len(buf):
raise dpkt.NeedData()
n = compat_ord(buf[off])
if n == 0:
off += 1
break
elif (n & 0xc0) == 0xc0:
ptr = struct.unpack('>H', buf[off:off + 2])[0] & 0x3fff
if ptr >= start_off:
raise dpkt.UnpackError('Invalid label compression pointer')
off += 2
if not saved_off:
saved_off = off
start_off = off = ptr
elif (n & 0xc0) == 0x00:
off += 1
name.append(buf[off:off + n])
name_length += n + 1
if name_length > 255:
raise dpkt.UnpackError('name longer than 255 bytes')
off += n
else:
raise dpkt.UnpackError('Invalid label length %02x' % n)
if not saved_off:
saved_off = off
return codecs.decode(b'.'.join(name), 'utf-8'), saved_off
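# Illustrative sketch of the compression scheme implemented above (not part of
# the upstream test suite): pack_name() records label offsets in label_ptrs so
# a later name sharing a suffix is emitted as a 2-byte compression pointer, and
# unpack_name() follows that pointer back.
def example_name_compression():
    label_ptrs = {}
    buf = pack_name('mail.example.com', 0, label_ptrs)
    off = len(buf)
    buf += pack_name('www.example.com', off, label_ptrs)  # 'example.com' becomes a pointer
    name, _ = unpack_name(buf, off)
    assert name == 'www.example.com'
    assert len(buf) == 24   # vs. 35 bytes with both names written out in full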
class DNS(dpkt.Packet):
"""Domain Name System.
The Domain Name System (DNS) is the hierarchical and decentralized naming system used to identify computers,
services, and other resources reachable through the Internet or other Internet Protocol (IP) networks.
The resource records contained in the DNS associate domain names with other forms of information.
Attributes:
__hdr__ (tuple(header_name, c_type, offset)): Header fields of DNS.
id: (int): Identification. Used to match request/reply packets.
            op: (int): Operation word holding QR, opcode, flags and rcode.
            qd: (list): Question section entries (a count on the wire, a list of Q objects after unpack).
            an: (list): Answer resource records.
            ns: (list): Authority (name server) resource records.
            ar: (list): Additional resource records.
"""
__hdr__ = (
('id', 'H', 0),
('op', 'H', DNS_RD), # recursive query
# XXX - lists of query, RR objects
('qd', 'H', []),
('an', 'H', []),
('ns', 'H', []),
('ar', 'H', [])
)
@property
def qr(self):
"""DNS Query/Response. 1 bit"""
return int((self.op & DNS_QR) == DNS_QR)
@qr.setter
def qr(self, v):
if v:
self.op |= DNS_QR
else:
self.op &= ~DNS_QR
@property
def opcode(self):
"""Operation code. 4 bits."""
return (self.op >> 11) & 0xf
@opcode.setter
def opcode(self, v):
self.op = (self.op & ~0x7800) | ((v & 0xf) << 11)
@property
def aa(self):
"""Authoritative Answer. 1 bit.
Specifies that the responding name server is an authority for the domain name in question section."""
return int((self.op & DNS_AA) == DNS_AA)
@aa.setter
def aa(self, v):
if v:
self.op |= DNS_AA
else:
self.op &= ~DNS_AA
@property
def tc(self):
"""Truncated. 1 bit. Indicates that only the first 512 bytes of the reply was returned."""
return int((self.op & DNS_TC) == DNS_TC)
@tc.setter
def tc(self, v):
if v:
self.op |= DNS_TC
else:
self.op &= ~DNS_TC
@property
def rd(self):
"""Recursion Desired. 1 bit. May be set in a query and is copied into the response.
If set, the name server is directed to pursue the query recursively. Recursive query support is optional."""
return int((self.op & DNS_RD) == DNS_RD)
@rd.setter
def rd(self, v):
if v:
self.op |= DNS_RD
else:
self.op &= ~DNS_RD
@property
def ra(self):
"""Recursion Available. 1 bit. Indicates if recursive query support is available in the name server."""
return int((self.op & DNS_RA) == DNS_RA)
@ra.setter
def ra(self, v):
if v:
self.op |= DNS_RA
else:
self.op &= ~DNS_RA
@property
def zero(self):
"""Zero 1 bit"""
return int((self.op & DNS_Z) == DNS_Z)
@zero.setter
def zero(self, v):
if v:
self.op |= DNS_Z
else:
self.op &= ~DNS_Z
@property
def rcode(self):
"""Return code. 4 bits."""
return self.op & 0xf
@rcode.setter
def rcode(self, v):
self.op = (self.op & ~0xf) | (v & 0xf)
class Q(dpkt.Packet):
"""DNS question."""
__hdr__ = (
('name', '1025s', b''),
('type', 'H', DNS_A),
('cls', 'H', DNS_IN)
)
        # XXX - len() and serialization are deliberately unimplemented for bare questions
def __len__(self):
raise NotImplementedError
__str__ = __len__
def unpack(self, buf):
raise NotImplementedError
class RR(Q):
"""DNS resource record."""
__hdr__ = (
('name', '1025s', b''),
('type', 'H', DNS_A),
('cls', 'H', DNS_IN),
('ttl', 'I', 0),
('rlen', 'H', 4),
('rdata', 's', b'')
)
def pack_rdata(self, off, label_ptrs):
            # XXX - clumsy per-type dispatch, but it mirrors the RR wire formats
if self.rdata:
return self.rdata
if self.type == DNS_A:
return self.ip
elif self.type == DNS_NS:
return pack_name(self.nsname, off, label_ptrs)
elif self.type == DNS_CNAME:
return pack_name(self.cname, off, label_ptrs)
elif self.type == DNS_PTR:
return pack_name(self.ptrname, off, label_ptrs)
elif self.type == DNS_SOA:
l_ = []
l_.append(pack_name(self.mname, off, label_ptrs))
l_.append(pack_name(self.rname, off + len(l_[0]), label_ptrs))
l_.append(struct.pack('>IIIII', self.serial, self.refresh,
self.retry, self.expire, self.minimum))
return b''.join(l_)
elif self.type == DNS_MX:
return struct.pack('>H', self.preference) + \
pack_name(self.mxname, off + 2, label_ptrs)
elif self.type == DNS_TXT or self.type == DNS_HINFO:
return b''.join(struct.pack('B', len(x)) + x for x in self.text)
elif self.type == DNS_AAAA:
return self.ip6
elif self.type == DNS_SRV:
return struct.pack('>HHH', self.priority, self.weight, self.port) + \
pack_name(self.srvname, off + 6, label_ptrs)
elif self.type == DNS_OPT:
return b'' # self.rdata
else:
raise dpkt.PackError('RR type %s is not supported' % self.type)
def unpack_rdata(self, buf, off):
if self.type == DNS_A:
self.ip = self.rdata
elif self.type == DNS_NS:
self.nsname, off = unpack_name(buf, off)
elif self.type == DNS_CNAME:
self.cname, off = unpack_name(buf, off)
elif self.type == DNS_PTR:
self.ptrname, off = unpack_name(buf, off)
elif self.type == DNS_SOA:
self.mname, off = unpack_name(buf, off)
self.rname, off = unpack_name(buf, off)
self.serial, self.refresh, self.retry, self.expire, self.minimum = \
struct.unpack('>IIIII', buf[off:off + 20])
elif self.type == DNS_MX:
                self.preference = struct.unpack('>H', self.rdata[:2])[0]
self.mxname, off = unpack_name(buf, off + 2)
elif self.type == DNS_TXT or self.type == DNS_HINFO:
self.text = []
buf = self.rdata
while buf:
n = compat_ord(buf[0])
self.text.append(codecs.decode(buf[1:1 + n], 'utf-8'))
buf = buf[1 + n:]
elif self.type == DNS_AAAA:
self.ip6 = self.rdata
elif self.type == DNS_NULL:
self.null = codecs.encode(self.rdata, 'hex')
elif self.type == DNS_SRV:
self.priority, self.weight, self.port = struct.unpack('>HHH', self.rdata[:6])
self.srvname, off = unpack_name(buf, off + 6)
elif self.type == DNS_OPT:
pass # RFC-6891: OPT is a pseudo-RR not carrying any DNS data
else:
raise dpkt.UnpackError('RR type %s is not supported' % self.type)
def pack_q(self, buf, q):
"""Append packed DNS question and return buf."""
return buf + pack_name(q.name, len(buf), self.label_ptrs) + struct.pack('>HH', q.type, q.cls)
def unpack_q(self, buf, off):
"""Return DNS question and new offset."""
q = self.Q()
q.name, off = unpack_name(buf, off)
q.type, q.cls = struct.unpack('>HH', buf[off:off + 4])
off += 4
return q, off
def pack_rr(self, buf, rr):
"""Append packed DNS RR and return buf."""
name = pack_name(rr.name, len(buf), self.label_ptrs)
rdata = rr.pack_rdata(len(buf) + len(name) + 10, self.label_ptrs)
return buf + name + struct.pack('>HHIH', rr.type, rr.cls, rr.ttl, len(rdata)) + rdata
def unpack_rr(self, buf, off):
"""Return DNS RR and new offset."""
rr = self.RR()
rr.name, off = unpack_name(buf, off)
rr.type, rr.cls, rr.ttl, rdlen = struct.unpack('>HHIH', buf[off:off + 10])
off += 10
rr.rdata = buf[off:off + rdlen]
rr.rlen = rdlen
rr.unpack_rdata(buf, off)
off += rdlen
return rr, off
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
off = self.__hdr_len__
cnt = self.qd # FIXME: This relies on this being properly set somewhere else
self.qd = []
for _ in range(cnt):
q, off = self.unpack_q(buf, off)
self.qd.append(q)
for x in ('an', 'ns', 'ar'):
cnt = getattr(self, x, 0)
setattr(self, x, [])
for _ in range(cnt):
rr, off = self.unpack_rr(buf, off)
getattr(self, x).append(rr)
self.data = b''
def __len__(self):
# XXX - cop out
return len(bytes(self))
def __bytes__(self):
# XXX - compress names on the fly
self.label_ptrs = {}
buf = struct.pack(self.__hdr_fmt__, self.id, self.op, len(self.qd),
len(self.an), len(self.ns), len(self.ar))
for q in self.qd:
buf = self.pack_q(buf, q)
for x in ('an', 'ns', 'ar'):
for rr in getattr(self, x):
buf = self.pack_rr(buf, rr)
del self.label_ptrs
return buf
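# Illustrative sketch (not part of the upstream test suite): constructing an
# A-record query from scratch. The question/answer counts in the packed header
# come from the list lengths at pack time. The qd/an/ns/ar defaults are
# class-level lists, so assign fresh lists instead of appending to the defaults.
def example_build_query():
    dns = DNS(id=0x1234)
    dns.qd = [DNS.Q(name='example.com', type=DNS_A, cls=DNS_IN)]
    dns.an, dns.ns, dns.ar = [], [], []
    parsed = DNS(bytes(dns))
    assert parsed.id == 0x1234
    assert parsed.qd[0].name == 'example.com'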
# TESTS
def define_testdata():
"""
Reference test data is stored in the dynamically defined class.
It is created in this way so that we can import unhexlify only during
testing, and not during normal use.
"""
from binascii import unhexlify
class TestData(object):
a_resp = unhexlify(
"059c8180000100010000000106676f6f676c6503636f6d0000010001c00c00010"
"0010000012b0004d83ace2e0000290200000000000000"
)
aaaa_resp = unhexlify(
"7f228180000100010000000005676d61696c03636f6d00001c0001c00c001c000"
"10000012b00102a001450400908020000000000002005"
)
cname_resp = unhexlify(
"a154818000010001000000000377777705676d61696c03636f6d0000010001c00"
"c000500010000545f000e046d61696c06676f6f676c65c016"
)
invalid_rr = unhexlify(
"000001000000000100000000046e616d650000150001000000000000"
)
mx_resp = unhexlify(
"053b8180000100010000000006676f6f676c6503636f6d00000f0001c00c000f0"
"001000002570011001e04616c7432056173706d78016cc00c"
)
null_resp = unhexlify(
"12b0840000010001000000000b626c6168626c616836363606706972617465037"
"3656100000a0001c00c000a00010000000000095641434b4403c5e901"
)
opt_resp = unhexlify(
"8d6e0110000100000000000104783131310678787878313106616b616d6169036"
"e657400000100010000290fa0000080000000"
)
ptr_resp = unhexlify(
"67028180000100010003000001310131033231310331343107696e2d616464720"
"46172706100000c0001c00c000c000100000d3600240764656661756c740a762d"
"756d63652d69667305756d6e657405756d6963680365647500c00e00020001000"
"00d36000d0673686162627903696673c04fc00e0002000100000d36000f0c6669"
"73682d6c6963656e7365c06dc00e0002000100000d36000b04646e73320369746"
"4c04f"
)
soa_resp = unhexlify(
"851f8180000100010000000006676f6f676c6503636f6d0000060001c00c00060"
"001000000230026036e7332c00c09646e732d61646d696ec00c0a747447000003"
"8400000384000007080000003c"
)
srv_resp = unhexlify(
"7f2281800001000100000000075f6a6162626572045f746370066a61626265720"
"3636f6d0000210001c00c0021000100000e0f001a000a000014950764656e6a61"
"6232066a616262657203636f6d00"
)
txt_resp = unhexlify(
"10328180000100010000000006676f6f676c6503636f6d0000100001c00c00100"
"0010000010e00100f763d7370663120707472203f616c6c"
)
return TestData()
def test_basic():
buf = define_testdata().a_resp
my_dns = DNS(buf)
assert my_dns.qd[0].name == 'google.com'
assert my_dns.an[0].name == 'google.com'
assert bytes(my_dns) == buf
class TryExceptException:
def __init__(self, exception_type, msg=''):
self.exception_type = exception_type
self.msg = msg
def __call__(self, f, *args, **kwargs):
def wrapper(*args, **kwargs):
try:
f()
except self.exception_type as e:
if self.msg:
assert str(e) == self.msg
else:
raise Exception("There should have been an Exception raised")
return wrapper
@TryExceptException(Exception, msg='There should have been an Exception raised')
def test_TryExceptException():
"""Check that we can catch a function which does not throw an exception when it is supposed to"""
@TryExceptException(NotImplementedError)
def fun():
pass
try:
fun()
except Exception as e:
raise e
@TryExceptException(NotImplementedError)
def test_Q_len():
"""Test in place for when the method is written"""
q = DNS.Q()
len(q)
@TryExceptException(NotImplementedError)
def test_Q_unpack():
"""Test in place for when the method is written"""
q = DNS.Q()
q.unpack(None)
def property_runner(prop, ops, set_to=None):
if set_to is None:
set_to = [False, True, False]
buf = define_testdata().a_resp
dns = DNS(buf)
    for val, op in zip(set_to, ops):
        setattr(dns, prop, val)
        assert dns.op == op
        assert getattr(dns, prop) == val
def test_qr():
property_runner('qr', ops=[384, 33152, 384])
def test_opcode():
property_runner('opcode', ops=[33152, 35200, 33152])
def test_aa():
property_runner('aa', ops=[33152, 34176, 33152])
def test_tc():
property_runner('tc', ops=[33152, 33664, 33152])
def test_rd():
property_runner('rd', ops=[32896, 33152, 32896])
def test_ra():
property_runner('ra', ops=[33024, 33152, 33024])
def test_zero():
property_runner('zero', ops=[33152, 33216, 33152])
def test_rcode():
property_runner('rcode', ops=[33152, 33153, 33152])
def test_PTR():
buf = define_testdata().ptr_resp
my_dns = DNS(buf)
assert my_dns.qd[0].name == '1.1.211.141.in-addr.arpa' and \
my_dns.an[0].ptrname == 'default.v-umce-ifs.umnet.umich.edu' and \
my_dns.ns[0].nsname == 'shabby.ifs.umich.edu' and \
my_dns.ns[1].ttl == 3382 and \
my_dns.ns[2].nsname == 'dns2.itd.umich.edu'
assert buf == bytes(my_dns)
def test_OPT():
buf = define_testdata().opt_resp
my_dns = DNS(buf)
my_rr = my_dns.ar[0]
assert my_rr.type == DNS_OPT
assert my_rr.rlen == 0 and my_rr.rdata == b''
assert bytes(my_dns) == buf
my_rr.rdata = b'\x00\x00\x00\x02\x00\x00' # add 1 attribute tlv
my_dns2 = DNS(bytes(my_dns))
my_rr2 = my_dns2.ar[0]
assert my_rr2.rlen == 6 and my_rr2.rdata == b'\x00\x00\x00\x02\x00\x00'
def test_pack_name():
# Empty name is \0
x = pack_name('', 0, {})
assert x == b'\0'
@TryExceptException(dpkt.UnpackError)
def test_unpack_name():
"""If the offset is longer than the buffer, there will be an UnpackError"""
unpack_name(b' ', 0)
@TryExceptException(dpkt.UnpackError)
def test_random_data():
DNS(b'\x83z0\xd2\x9a\xec\x94_7\xf3\xb7+\x85"?\xf0\xfb')
@TryExceptException(dpkt.UnpackError)
def test_circular_pointers():
DNS(b'\xc0\x00\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07example\x03com\xc0\x00')
@TryExceptException(dpkt.UnpackError)
def test_very_long_name():
DNS(b'\x00\x00\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00' + (b'\x10abcdef0123456789' * 16) + b'\x00')
def test_null_response():
buf = define_testdata().null_resp
my_dns = DNS(buf)
assert my_dns.qd[0].name == 'blahblah666.pirate.sea' and \
my_dns.an[0].null == b'5641434b4403c5e901'
assert str(buf) == str(my_dns)
def test_txt_response():
buf = define_testdata().txt_resp
my_dns = DNS(buf)
my_rr = my_dns.an[0]
assert my_rr.type == DNS_TXT
assert my_rr.name == 'google.com'
assert my_rr.text == ['v=spf1 ptr ?all']
assert str(my_dns) == str(buf)
assert bytes(my_dns) == buf
def test_rdata_TXT():
rr = DNS.RR(
type=DNS_TXT,
text=[b'v=spf1 ptr ?all', b'a=something']
)
packdata = rr.pack_rdata(0, {})
correct = b'\x0fv=spf1 ptr ?all\x0ba=something'
assert packdata == correct
def test_rdata_HINFO():
rr = DNS.RR(
type=DNS_HINFO,
text=[b'v=spf1 ptr ?all', b'a=something']
)
packdata = rr.pack_rdata(0, {})
correct = b'\x0fv=spf1 ptr ?all\x0ba=something'
assert packdata == correct
def test_rdata_rdata():
rr = DNS.RR(
name='zc.akadns.org',
ttl=123446,
rdata=b'?\xf1\xc76',
)
packdata = rr.pack_rdata(0, {})
correct = b'?\xf1\xc76'
assert packdata == correct
def test_rdata_A():
rr = DNS.RR(
name='zc.akadns.org',
ttl=123446,
ip=b'?\xf1\xc76',
type=DNS_A,
)
packdata = rr.pack_rdata(0, {})
correct = b'?\xf1\xc76'
assert packdata == correct
def test_rdata_NS():
rr = DNS.RR(
nsname='zc.akadns.org',
ttl=123446,
ip=b'?\xf1\xc76',
type=DNS_NS,
)
packdata = rr.pack_rdata(0, {})
correct = b'\x02zc\x06akadns\x03org\x00'
assert packdata == correct
def test_rdata_CNAME():
rr = DNS.RR(
cname='zc.akadns.org',
ttl=123446,
ip=b'?\xf1\xc76',
type=DNS_CNAME,
)
packdata = rr.pack_rdata(0, {})
correct = b'\x02zc\x06akadns\x03org\x00'
assert packdata == correct
def test_rdata_PTR():
rr = DNS.RR(
ptrname='default.v-umce-ifs.umnet.umich.edu',
ttl=1236,
ip=b'?\xf1\xc76',
type=DNS_PTR,
)
packdata = rr.pack_rdata(0, {})
correct = b'\x07default\nv-umce-ifs\x05umnet\x05umich\x03edu\x00'
assert packdata == correct
def test_rdata_SOA():
rr = DNS.RR(
mname='blah.google.com',
rname='moo.blah.com',
serial=12345666,
refresh=123463,
retry=209834,
minimum=9000,
expire=28341,
type=DNS_SOA,
)
packdata = rr.pack_rdata(0, {})
correct = (
b'\x04blah\x06google\x03com\x00\x03moo\x04blah\xc0\x0c\x00\xbcaB'
b'\x00\x01\xe2G\x00\x033\xaa\x00\x00n\xb5\x00\x00#(')
assert packdata == correct
def test_rdata_MX():
rr = DNS.RR(
type=DNS_MX,
preference=2124,
mxname='mail.google.com',
)
packdata = rr.pack_rdata(0, {})
correct = b'\x08L\x04mail\x06google\x03com\x00'
assert packdata == correct
def test_rdata_AAAA():
ip6 = b'&\x07\xf8\xb0@\x0c\x0c\x03\x00\x00\x00\x00\x00\x00\x00\x1a'
rr = DNS.RR(
type=DNS_AAAA,
ip6=ip6,
)
packdata = rr.pack_rdata(0, {})
correct = ip6
assert packdata == correct
def test_rdata_SRV():
rr = DNS.RR(
type=DNS_SRV,
ttl=86400,
priority=0,
weight=5,
port=5060,
srvname='_sip._tcp.example.com',
)
packdata = rr.pack_rdata(0, {})
correct = b'\x00\x00\x00\x05\x13\xc4\x04_sip\x04_tcp\x07example\x03com\x00'
assert packdata == correct
def test_rdata_OPT():
rr = DNS.RR(
type=DNS_OPT,
)
# TODO: This is hardcoded to return b''. Is this intentional?
packdata = rr.pack_rdata(0, {})
correct = b''
assert packdata == correct
def test_dns_len():
my_dns = DNS()
assert len(my_dns) == 12
@TryExceptException(dpkt.PackError)
def test_rdata_FAIL():
DNS.RR(type=12345666).pack_rdata(0, {})
def test_soa():
buf = define_testdata().soa_resp
soa = DNS(buf)
assert soa.id == 34079
assert soa.op == 33152
assert len(soa.qd) == 1
q = soa.qd[0]
assert q.name == 'google.com'
assert q.type == DNS_SOA
assert q.cls == DNS_IN
assert len(soa.an) == 1
a = soa.an[0]
assert a.name == 'google.com'
assert a.type == DNS_SOA
assert a.cls == DNS_IN
assert a.ttl == 35
assert a.retry == 900
assert a.mname == 'ns2.google.com'
assert a.minimum == 60
assert a.refresh == 900
assert a.expire == 1800
assert a.serial == 175404103
assert a.rlen == 38
assert a.rname == 'dns-admin.google.com'
assert a.rdata == b'\x03ns2\xc0\x0c\tdns-admin\xc0\x0c\nttG\x00\x00\x03\x84\x00\x00\x03\x84\x00\x00\x07\x08\x00\x00\x00<'
assert soa.ar == []
def test_mx():
buf = define_testdata().mx_resp
mx = DNS(buf)
assert mx.id == 1339
assert mx.op == 33152
assert len(mx.qd) == 1
q = mx.qd[0]
assert q.name == 'google.com'
assert q.type == DNS_MX
assert q.cls == DNS_IN
assert len(mx.an) == 1
a = mx.an[0]
assert a.type == DNS_MX
assert a.cls == DNS_IN
assert a.name == 'google.com'
assert a.ttl == 599
assert a.mxname == 'alt2.aspmx.l.google.com'
    assert a.preference == 30
assert a.rlen == 17
assert a.rdata == b'\x00\x1e\x04alt2\x05aspmx\x01l\xc0\x0c'
assert mx.ar == []
def test_aaaa():
buf = define_testdata().aaaa_resp
aaaa = DNS(buf)
aaaa.id = 32546
aaaa.op = 33152
assert len(aaaa.qd) == 1
q = aaaa.qd[0]
assert q.type == DNS_AAAA
assert q.name == 'gmail.com'
assert len(aaaa.an) == 1
a = aaaa.an[0]
assert a.type == DNS_AAAA
assert a.cls == DNS_IN
assert a.name == 'gmail.com'
assert a.ttl == 299
assert a.ip6 == b'*\x00\x14P@\t\x08\x02\x00\x00\x00\x00\x00\x00 \x05'
assert a.rlen == 16
assert a.rdata == b'*\x00\x14P@\t\x08\x02\x00\x00\x00\x00\x00\x00 \x05'
assert aaaa.ar == []
def test_srv():
buf = define_testdata().srv_resp
srv = DNS(buf)
srv.id = 32546
srv.op = 33152
assert len(srv.qd) == 1
q = srv.qd[0]
assert q.type == DNS_SRV
assert q.name == '_jabber._tcp.jabber.com'
assert q.cls == DNS_IN
assert len(srv.an) == 1
a = srv.an[0]
assert a.type == DNS_SRV
assert a.cls == DNS_IN
assert a.name == '_jabber._tcp.jabber.com'
assert a.port == 5269
assert a.ttl == 3599
assert a.srvname == 'denjab2.jabber.com'
assert a.priority == 10
assert a.weight == 0
assert a.rlen == 26
assert a.rdata == b'\x00\n\x00\x00\x14\x95\x07denjab2\x06jabber\x03com\x00'
assert srv.ar == []
def test_cname():
buf = define_testdata().cname_resp
cname = DNS(buf)
cname.id = 41300
cname.op = 33152
assert len(cname.qd) == 1
q = cname.qd[0]
assert q.type == DNS_A
assert q.cls == DNS_IN
assert q.name == 'www.gmail.com'
assert len(cname.an) == 1
a = cname.an[0]
assert a.type == DNS_CNAME
assert a.cls == DNS_IN
assert a.name == 'www.gmail.com'
assert a.ttl == 21599
assert a.cname == 'mail.google.com'
assert a.rlen == 14
assert a.rdata == b'\x04mail\x06google\xc0\x16'
assert cname.ar == []
@TryExceptException(dpkt.UnpackError)
def test_invalid_rr():
buf = define_testdata().invalid_rr
DNS(buf)
| 26,308 | 27.077908 | 125 |
py
|
dpkt
|
dpkt-master/dpkt/sccp.py
|
# $Id: sccp.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Cisco Skinny Client Control Protocol."""
from __future__ import absolute_import
from . import dpkt
KEYPAD_BUTTON = 0x00000003
OFF_HOOK = 0x00000006
ON_HOOK = 0x00000007
OPEN_RECEIVE_CHANNEL_ACK = 0x00000022
START_TONE = 0x00000082
STOP_TONE = 0x00000083
SET_LAMP = 0x00000086
SET_SPEAKER_MODE = 0x00000088
START_MEDIA_TRANSMIT = 0x0000008A
STOP_MEDIA_TRANSMIT = 0x0000008B
CALL_INFO = 0x0000008F
DEFINE_TIME_DATE = 0x00000094
DISPLAY_TEXT = 0x00000099
OPEN_RECEIVE_CHANNEL = 0x00000105
CLOSE_RECEIVE_CHANNEL = 0x00000106
SELECT_SOFTKEYS = 0x00000110
CALL_STATE = 0x00000111
DISPLAY_PROMPT_STATUS = 0x00000112
CLEAR_PROMPT_STATUS = 0x00000113
ACTIVATE_CALL_PLANE = 0x00000116
class ActivateCallPlane(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('line_instance', 'I', 0),
)
class CallInfo(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('calling_party_name', '40s', b''),
('calling_party', '24s', b''),
('called_party_name', '40s', b''),
('called_party', '24s', b''),
('line_instance', 'I', 0),
('call_id', 'I', 0),
('call_type', 'I', 0),
('orig_called_party_name', '40s', b''),
('orig_called_party', '24s', b'')
)
class CallState(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('call_state', 'I', 12), # 12: Proceed, 15: Connected
('line_instance', 'I', 1),
('call_id', 'I', 0)
)
class ClearPromptStatus(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('line_instance', 'I', 1),
('call_id', 'I', 0)
)
class CloseReceiveChannel(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('conference_id', 'I', 0),
('passthruparty_id', 'I', 0),
)
class DisplayPromptStatus(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('msg_timeout', 'I', 0),
('display_msg', '32s', b''),
('line_instance', 'I', 1),
('call_id', 'I', 0)
)
class DisplayText(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('display_msg', '36s', b''),
)
class KeypadButton(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('button', 'I', 0),
)
class OpenReceiveChannel(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('conference_id', 'I', 0),
('passthruparty_id', 'I', 0),
('ms_packet', 'I', 0),
('payload_capability', 'I', 4), # 4: G.711 u-law 64k
('echo_cancel_type', 'I', 4),
('g723_bitrate', 'I', 0),
)
class OpenReceiveChannelAck(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('channel_status', 'I', 0),
('ip', '4s', b''),
('port', 'I', 0),
('passthruparty_id', 'I', 0),
)
class SelectStartKeys(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('line_id', 'I', 1),
('call_id', 'I', 0),
('softkey_set', 'I', 8),
('softkey_map', 'I', 0xffffffff)
)
class SetLamp(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('stimulus', 'I', 9), # 9: Line
('stimulus_instance', 'I', 1),
('lamp_mode', 'I', 1),
)
class SetSpeakerMode(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('speaker', 'I', 2), # 2: SpeakerOff
)
class StartMediaTransmission(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('conference_id', 'I', 0),
('passthruparty_id', 'I', 0),
('ipv4_or_ipv6', 'I', 0),
('remote_ip', '16s', b''),
('remote_port', 'I', 0),
('ms_packet', 'I', 0),
('payload_capability', 'I', 4), # 4: G.711 u-law 64k
('precedence', 'I', 0),
('silence_suppression', 'I', 0),
('max_frames_per_pkt', 'I', 1),
('g723_bitrate', 'I', 0),
('call_reference', 'I', 0)
)
class StartTone(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('tone', 'I', 0x24), # 0x24: AlertingTone
)
class StopMediaTransmission(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('conference_id', 'I', 0),
('passthruparty_id', 'I', 0),
)
class SCCP(dpkt.Packet):
"""Cisco Skinny Client Control Protocol.
    Skinny (SCCP) is Cisco's lightweight call-signaling protocol, spoken
    between Cisco IP phones and CallManager over TCP.
    Attributes:
        __hdr__: Header fields of SCCP.
            len: (int): Message length, not counting the 4-byte len field itself.
            rsvd: (int): Reserved word.
            msgid: (int): Message type identifier.
            msg: (bytes): Message body (declared '0s'; carried via .data when packing).
"""
__byte_order__ = '<'
__hdr__ = (
('len', 'I', 0),
('rsvd', 'I', 0),
('msgid', 'I', 0),
('msg', '0s', b''),
)
_msgsw = {
KEYPAD_BUTTON: KeypadButton,
OPEN_RECEIVE_CHANNEL_ACK: OpenReceiveChannelAck,
START_TONE: StartTone,
SET_LAMP: SetLamp,
START_MEDIA_TRANSMIT: StartMediaTransmission,
STOP_MEDIA_TRANSMIT: StopMediaTransmission,
CALL_INFO: CallInfo,
DISPLAY_TEXT: DisplayText,
OPEN_RECEIVE_CHANNEL: OpenReceiveChannel,
CLOSE_RECEIVE_CHANNEL: CloseReceiveChannel,
CALL_STATE: CallState,
DISPLAY_PROMPT_STATUS: DisplayPromptStatus,
CLEAR_PROMPT_STATUS: ClearPromptStatus,
ACTIVATE_CALL_PLANE: ActivateCallPlane,
}
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
n = self.len - 4
if n > len(self.data):
raise dpkt.NeedData('not enough data')
self.msg, self.data = self.data[:n], self.data[n:]
try:
p = self._msgsw[self.msgid](self.msg)
setattr(self, p.__class__.__name__.lower(), p)
except (KeyError, dpkt.UnpackError):
pass
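# Illustrative sketch (not part of the upstream test suite): composing an SCCP
# KeypadButton message. len counts the reserved word, msgid and body (i.e. the
# total minus the 4-byte len field itself). Since the 'msg' header field is
# declared '0s', pack_hdr() emits nothing for it and the body travels in .data.
def example_build_keypad_button():
    body = bytes(KeypadButton(button=5))    # 4 bytes, little-endian
    sccp = SCCP(msgid=KEYPAD_BUTTON, len=4 + len(body))
    sccp.data = body
    parsed = SCCP(bytes(sccp))
    assert parsed.msg == body
    assert parsed.keypadbutton.button == 5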
def test_sccp():
import pytest
from binascii import unhexlify
buf = unhexlify(
'08000000' # len
'00000000' # rsvd
'03000000' # msgid (KEYPAD_BUTTON)
'abcdef01' # msg
        '23456789' # data
)
sccp = SCCP(buf)
assert sccp.msg == b'\xab\xcd\xef\x01'
assert sccp.data == b'\x23\x45\x67\x89'
assert isinstance(sccp.keypadbutton, KeypadButton)
# len is too long for data, raises NeedData
buf = unhexlify(
'88880000' # len
'00000000' # rsvd
        '00000003' # msgid (irrelevant here; the length check raises first)
'abcdef01' # msg
)
with pytest.raises(dpkt.NeedData):
SCCP(buf)
# msgid is invalid, raises KeyError on _msgsw (silently caught)
buf = unhexlify(
'08000000' # len
'00000000' # rsvd
'00000003' # msgid (invalid)
'abcdef01' # msg
)
sccp = SCCP(buf)
assert sccp.msg == b'\xab\xcd\xef\x01'
assert sccp.data == b''
| 6,545 | 23.516854 | 67 |
py
|
dpkt
|
dpkt-master/dpkt/ntp.py
|
# $Id: ntp.py 48 2008-05-27 17:31:15Z yardley $
# -*- coding: utf-8 -*-
"""Network Time Protocol."""
from __future__ import print_function
from . import dpkt
# NTP v4
# Leap Indicator (LI) Codes
NO_WARNING = 0
LAST_MINUTE_61_SECONDS = 1
LAST_MINUTE_59_SECONDS = 2
ALARM_CONDITION = 3
# Mode Codes
RESERVED = 0
SYMMETRIC_ACTIVE = 1
SYMMETRIC_PASSIVE = 2
CLIENT = 3
SERVER = 4
BROADCAST = 5
CONTROL_MESSAGE = 6
PRIVATE = 7
class NTP(dpkt.Packet):
"""Network Time Protocol.
The Network Time Protocol (NTP) is a networking protocol for clock synchronization between computer systems over
packet-switched, variable-latency data networks. In operation since before 1985, NTP is one of the oldest Internet
protocols in current use. NTP was designed by David L. Mills of the University of Delaware.
Attributes:
__hdr__: Header fields of NTP.
            flags: (int): Leap indicator, version and mode, packed into one byte.
            stratum: (int): Stratum of the local clock.
            interval: (int): Poll interval, log2 seconds.
            precision: (int): Precision of the local clock, log2 seconds.
            delay: (int): Root delay.
            dispersion: (int): Root dispersion.
            id: (bytes): Reference clock identifier.
            update_time, originate_time, receive_time, transmit_time: (bytes): 64-bit NTP timestamps.
"""
__hdr__ = (
('flags', 'B', 0),
('stratum', 'B', 0),
('interval', 'B', 0),
('precision', 'B', 0),
('delay', 'I', 0),
('dispersion', 'I', 0),
('id', '4s', 0),
('update_time', '8s', 0),
('originate_time', '8s', 0),
('receive_time', '8s', 0),
('transmit_time', '8s', 0)
)
__bit_fields__ = {
'flags': (
('li', 2), # leap indicator, 2 hi bits
('v', 3), # version, 3 bits
('mode', 3), # mode, 3 lo bits
)
}
__s = (b'\x24\x02\x04\xef\x00\x00\x00\x84\x00\x00\x33\x27\xc1\x02\x04\x02\xc8\x90\xec\x11\x22\xae'
b'\x07\xe5\xc8\x90\xf9\xd9\xc0\x7e\x8c\xcd\xc8\x90\xf9\xd9\xda\xc5\xb0\x78\xc8\x90\xf9\xd9\xda\xc6\x8a\x93')
def test_ntp_pack():
n = NTP(__s)
assert (__s == bytes(n))
def test_ntp_unpack():
n = NTP(__s)
assert (n.li == NO_WARNING)
assert (n.v == 4)
assert (n.mode == SERVER)
assert (n.stratum == 2)
assert (n.id == b'\xc1\x02\x04\x02')
# test get/set functions
n.li = ALARM_CONDITION
n.v = 3
n.mode = CLIENT
assert (n.li == ALARM_CONDITION)
assert (n.v == 3)
assert (n.mode == CLIENT)
| 2,115 | 24.190476 | 118 |
py
|
dpkt
|
dpkt-master/dpkt/__init__.py
|
"""fast, simple packet creation and parsing."""
from __future__ import absolute_import
from __future__ import division
import sys
__author__ = 'Various'
__author_email__ = ''
__license__ = 'BSD-3-Clause'
__url__ = 'https://github.com/kbandla/dpkt'
__version__ = '1.9.8'
from .dpkt import *
from . import ah
from . import aoe
from . import aim
from . import arp
from . import asn1
from . import bgp
from . import cdp
from . import dhcp
from . import diameter
from . import dns
from . import dtp
from . import esp
from . import ethernet
from . import gre
from . import gzip
from . import h225
from . import hsrp
from . import http
from . import http2
from . import icmp
from . import icmp6
from . import ieee80211
from . import igmp
from . import ip
from . import ip6
from . import ipx
from . import llc
from . import loopback
from . import mrt
from . import netbios
from . import netflow
from . import ntp
from . import ospf
from . import pcap
from . import pcapng
from . import pim
from . import pmap
from . import ppp
from . import pppoe
from . import qq
from . import radiotap
from . import radius
from . import rfb
from . import rip
from . import rpc
from . import rtcp
from . import rtp
from . import rx
from . import sccp
from . import sctp
from . import sip
from . import sll
from . import sll2
from . import smb
from . import ssl
from . import stp
from . import stun
from . import tcp
from . import telnet
from . import tftp
from . import tns
from . import tpkt
from . import udp
from . import vrrp
from . import yahoo
# Note: list() is used to get a copy of the dict in order to avoid
# "RuntimeError: dictionary changed size during iteration"
# exception in Python 3 caused by _mod_init() funcs that load another modules
for name, mod in list(sys.modules.items()):
if name.startswith('dpkt.') and hasattr(mod, '_mod_init'):
mod._mod_init()
| 1,865 | 20.697674 | 77 |
py
|
dpkt
|
dpkt-master/dpkt/rtp.py
|
# $Id: rtp.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Real-Time Transport Protocol."""
from __future__ import absolute_import
from .dpkt import Packet
# version 1100 0000 0000 0000 ! 0xC000 14
# p 0010 0000 0000 0000 ! 0x2000 13
# x 0001 0000 0000 0000 ! 0x1000 12
# cc 0000 1111 0000 0000 ! 0x0F00 8
# m 0000 0000 1000 0000 ! 0x0080 7
# pt 0000 0000 0111 1111 ! 0x007F 0
#
_VERSION_MASK = 0xC000
_P_MASK = 0x2000
_X_MASK = 0x1000
_CC_MASK = 0x0F00
_M_MASK = 0x0080
_PT_MASK = 0x007F
_VERSION_SHIFT = 14
_P_SHIFT = 13
_X_SHIFT = 12
_CC_SHIFT = 8
_M_SHIFT = 7
_PT_SHIFT = 0
VERSION = 2
class RTP(Packet):
"""Real-Time Transport Protocol.
    RTP (RFC 3550) carries real-time media such as audio and video, typically
    over UDP. The first 16-bit header word packs version, padding, extension,
    CSRC count, marker and payload type, exposed as the properties below.
    Attributes:
        __hdr__: Header fields of RTP.
            _type: (int): Packed version/p/x/cc/m/pt word.
            seq: (int): Sequence number.
            ts: (int): Media timestamp.
            ssrc: (int): Synchronization source identifier.
"""
__hdr__ = (
('_type', 'H', 0x8000),
('seq', 'H', 0),
('ts', 'I', 0),
('ssrc', 'I', 0),
)
csrc = b''
@property
def version(self):
return (self._type & _VERSION_MASK) >> _VERSION_SHIFT
@version.setter
def version(self, ver):
self._type = (ver << _VERSION_SHIFT) | (self._type & ~_VERSION_MASK)
@property
def p(self):
return (self._type & _P_MASK) >> _P_SHIFT
@p.setter
def p(self, p):
self._type = (p << _P_SHIFT) | (self._type & ~_P_MASK)
@property
def x(self):
return (self._type & _X_MASK) >> _X_SHIFT
@x.setter
def x(self, x):
self._type = (x << _X_SHIFT) | (self._type & ~_X_MASK)
@property
def cc(self):
return (self._type & _CC_MASK) >> _CC_SHIFT
@cc.setter
def cc(self, cc):
self._type = (cc << _CC_SHIFT) | (self._type & ~_CC_MASK)
@property
def m(self):
return (self._type & _M_MASK) >> _M_SHIFT
@m.setter
def m(self, m):
self._type = (m << _M_SHIFT) | (self._type & ~_M_MASK)
@property
def pt(self):
return (self._type & _PT_MASK) >> _PT_SHIFT
@pt.setter
def pt(self, m):
self._type = (m << _PT_SHIFT) | (self._type & ~_PT_MASK)
def __len__(self):
return self.__hdr_len__ + len(self.csrc) + len(self.data)
def __bytes__(self):
return self.pack_hdr() + self.csrc + bytes(self.data)
def unpack(self, buf):
super(RTP, self).unpack(buf)
self.csrc = buf[self.__hdr_len__:self.__hdr_len__ + self.cc * 4]
self.data = buf[self.__hdr_len__ + self.cc * 4:]
def test_rtp():
rtp = RTP(
b'\x80\x08\x4d\x01\x00\x01\x00\xe0\x34\x3f\xfa\x34\x53\x53\x53\x56\x53\x5d\x56\x57\xd5\xd6'
b'\xd1\xde\xdf\xd3\xd9\xda\xdf\xdc\xdf\xd8\xdd\xd4\xdd\xd9\xd1\xd6\xdc\xda\xde\xdd\xc7\xc1'
b'\xdf\xdf\xda\xdb\xdd\xdd\xc4\xd9\x55\x57\xd4\x50\x44\x44\x5b\x44\x4f\x4c\x47\x40\x4c\x47'
b'\x59\x5b\x58\x5d\x56\x56\x53\x56\xd5\xd5\x54\x55\xd6\xd6\xd4\xd1\xd1\xd0\xd1\xd5\xdd\xd6'
b'\x55\xd4\xd6\xd1\xd4\xd6\xd7\xd7\xd5\xd4\xd0\xd7\xd1\xd4\xd2\xdc\xd6\xdc\xdf\xdc\xdd\xd2'
b'\xde\xdc\xd0\xdd\xdc\xd0\xd6\xd6\xd6\x55\x54\x55\x57\x57\x56\x50\x50\x5c\x5c\x52\x5d\x5d'
b'\x5f\x5e\x5d\x5e\x52\x50\x52\x56\x54\x57\x55\x55\xd4\xd7\x55\xd5\x55\x55\x55\x55\x55\x54'
b'\x57\x54\x55\x55\xd5\xd5\xd7\xd6\xd7\xd1\xd1\xd3\xd2\xd3\xd2\xd2\xd3\xd3'
)
assert (rtp.version == 2)
assert (rtp.p == 0)
assert (rtp.x == 0)
assert (rtp.cc == 0)
assert (rtp.m == 0)
assert (rtp.pt == 8)
assert (rtp.seq == 19713)
assert (rtp.ts == 65760)
assert (rtp.ssrc == 0x343ffa34)
assert (len(rtp) == 172)
assert (bytes(rtp) == (
b'\x80\x08\x4d\x01\x00\x01\x00\xe0\x34\x3f\xfa\x34\x53\x53\x53\x56\x53\x5d\x56\x57\xd5\xd6'
b'\xd1\xde\xdf\xd3\xd9\xda\xdf\xdc\xdf\xd8\xdd\xd4\xdd\xd9\xd1\xd6\xdc\xda\xde\xdd\xc7\xc1'
b'\xdf\xdf\xda\xdb\xdd\xdd\xc4\xd9\x55\x57\xd4\x50\x44\x44\x5b\x44\x4f\x4c\x47\x40\x4c\x47'
b'\x59\x5b\x58\x5d\x56\x56\x53\x56\xd5\xd5\x54\x55\xd6\xd6\xd4\xd1\xd1\xd0\xd1\xd5\xdd\xd6'
b'\x55\xd4\xd6\xd1\xd4\xd6\xd7\xd7\xd5\xd4\xd0\xd7\xd1\xd4\xd2\xdc\xd6\xdc\xdf\xdc\xdd\xd2'
b'\xde\xdc\xd0\xdd\xdc\xd0\xd6\xd6\xd6\x55\x54\x55\x57\x57\x56\x50\x50\x5c\x5c\x52\x5d\x5d'
b'\x5f\x5e\x5d\x5e\x52\x50\x52\x56\x54\x57\x55\x55\xd4\xd7\x55\xd5\x55\x55\x55\x55\x55\x54'
b'\x57\x54\x55\x55\xd5\xd5\xd7\xd6\xd7\xd1\xd1\xd3\xd2\xd3\xd2\xd2\xd3\xd3'
))
# the following tests RTP header setters
rtp = RTP()
rtp.m = 1
rtp.pt = 3
rtp.seq = 1234
rtp.ts = 5678
rtp.ssrc = 0xabcdef01
assert (rtp.m == 1)
assert (rtp.pt == 3)
assert (rtp.seq == 1234)
assert (rtp.ts == 5678)
assert (rtp.ssrc == 0xabcdef01)
def test_rtp_properties():
from .compat import compat_izip
rtp = RTP()
properties = ['version', 'p', 'x', 'cc', 'm', 'pt']
defaults = [2, 0, 0, 0, 0, 0]
for prop, default in compat_izip(properties, defaults):
assert hasattr(rtp, prop)
assert getattr(rtp, prop) == default
setattr(rtp, prop, 1)
assert getattr(rtp, prop) == 1
| 5,077 | 29.407186 | 99 |
py
|
dpkt
|
dpkt-master/dpkt/compat.py
|
from __future__ import absolute_import
from struct import pack, unpack
import sys
if sys.version_info < (3,):
compat_ord = ord
else:
def compat_ord(char):
return char
try:
from itertools import izip
compat_izip = izip
except ImportError:
compat_izip = zip
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
try:
from BytesIO import BytesIO
except ImportError:
from io import BytesIO
if sys.version_info < (3,):
def iteritems(d, **kw):
return d.iteritems(**kw)
def intround(num):
return int(round(num))
else:
def iteritems(d, **kw):
return iter(d.items(**kw))
# python3 will return an int if you round to 0 decimal places
intround = round
def ntole(v):
"""convert a 2-byte word from the network byte order (big endian) to little endian;
replaces socket.ntohs() to work on both little and big endian architectures
"""
return unpack('<H', pack('!H', v))[0]
def ntole64(v):
"""
Convert an 8-byte word from network byte order (big endian) to little endian.
"""
return unpack('<Q', pack('!Q', v))[0]
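# Quick worked example of the conversions above (illustrative only): the bytes
# of a big-endian wire value are mirrored into the little-endian reading.
def _example_byte_order():
    assert ntole(0x1234) == 0x3412
    assert ntole64(0x0102030405060708) == 0x0807060504030201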
def isstr(s):
"""True if 's' is an instance of basestring in py2, or of str in py3"""
bs = getattr(__builtins__, 'basestring', str)
return isinstance(s, bs)
| 1,327 | 20.770492 | 87 |
py
|
dpkt
|
dpkt-master/dpkt/rfb.py
|
# $Id: rfb.py 47 2008-05-27 02:10:00Z jon.oberheide $
# -*- coding: utf-8 -*-
"""Remote Framebuffer Protocol."""
from __future__ import absolute_import
from . import dpkt
# Remote Framebuffer Protocol
# http://www.realvnc.com/docs/rfbproto.pdf
# Client to Server Messages
CLIENT_SET_PIXEL_FORMAT = 0
CLIENT_SET_ENCODINGS = 2
CLIENT_FRAMEBUFFER_UPDATE_REQUEST = 3
CLIENT_KEY_EVENT = 4
CLIENT_POINTER_EVENT = 5
CLIENT_CUT_TEXT = 6
# Server to Client Messages
SERVER_FRAMEBUFFER_UPDATE = 0
SERVER_SET_COLOUR_MAP_ENTRIES = 1
SERVER_BELL = 2
SERVER_CUT_TEXT = 3
class RFB(dpkt.Packet):
"""Remote Framebuffer Protocol.
    The Remote Framebuffer (RFB) protocol is the wire protocol behind VNC.
    Every message starts with a 1-byte type field selecting one of the
    message structures defined below.
    Attributes:
        __hdr__: Header fields of RFB.
            type: (int): Message type.
"""
__hdr__ = (
('type', 'B', 0),
)
class SetPixelFormat(dpkt.Packet):
__hdr__ = (
('pad', '3s', b''),
('pixel_fmt', '16s', b'')
)
class SetEncodings(dpkt.Packet):
__hdr__ = (
('pad', '1s', b''),
('num_encodings', 'H', 0)
)
class FramebufferUpdateRequest(dpkt.Packet):
__hdr__ = (
('incremental', 'B', 0),
('x_position', 'H', 0),
('y_position', 'H', 0),
('width', 'H', 0),
('height', 'H', 0)
)
class KeyEvent(dpkt.Packet):
__hdr__ = (
('down_flag', 'B', 0),
('pad', '2s', b''),
('key', 'I', 0)
)
class PointerEvent(dpkt.Packet):
__hdr__ = (
('button_mask', 'B', 0),
('x_position', 'H', 0),
('y_position', 'H', 0)
)
class FramebufferUpdate(dpkt.Packet):
__hdr__ = (
('pad', '1s', b''),
('num_rects', 'H', 0)
)
class SetColourMapEntries(dpkt.Packet):
__hdr__ = (
('pad', '1s', b''),
('first_colour', 'H', 0),
('num_colours', 'H', 0)
)
class CutText(dpkt.Packet):
__hdr__ = (
('pad', '3s', b''),
('length', 'I', 0)
)
| 1,927 | 18.089109 | 53 |
py
|
dpkt
|
dpkt-master/dpkt/ieee80211.py
|
# $Id: 80211.py 53 2008-12-18 01:22:57Z jon.oberheide $
# -*- coding: utf-8 -*-
"""IEEE 802.11."""
from __future__ import print_function
from __future__ import absolute_import
import struct
from . import dpkt
from .compat import ntole, ntole64
# Frame Types
MGMT_TYPE = 0
CTL_TYPE = 1
DATA_TYPE = 2
# Frame Sub-Types
M_ASSOC_REQ = 0
M_ASSOC_RESP = 1
M_REASSOC_REQ = 2
M_REASSOC_RESP = 3
M_PROBE_REQ = 4
M_PROBE_RESP = 5
M_BEACON = 8
M_ATIM = 9
M_DISASSOC = 10
M_AUTH = 11
M_DEAUTH = 12
M_ACTION = 13
C_BLOCK_ACK_REQ = 8
C_BLOCK_ACK = 9
C_PS_POLL = 10
C_RTS = 11
C_CTS = 12
C_ACK = 13
C_CF_END = 14
C_CF_END_ACK = 15
D_DATA = 0
D_DATA_CF_ACK = 1
D_DATA_CF_POLL = 2
D_DATA_CF_ACK_POLL = 3
D_NULL = 4
D_CF_ACK = 5
D_CF_POLL = 6
D_CF_ACK_POLL = 7
D_QOS_DATA = 8
D_QOS_CF_ACK = 9
D_QOS_CF_POLL = 10
D_QOS_CF_ACK_POLL = 11
D_QOS_NULL = 12
D_QOS_CF_POLL_EMPTY = 14
TO_DS_FLAG = 10
FROM_DS_FLAG = 1
INTER_DS_FLAG = 11
# Bitshifts for Frame Control
_VERSION_MASK = 0x0300
_TYPE_MASK = 0x0c00
_SUBTYPE_MASK = 0xf000
_TO_DS_MASK = 0x0001
_FROM_DS_MASK = 0x0002
_MORE_FRAG_MASK = 0x0004
_RETRY_MASK = 0x0008
_PWR_MGT_MASK = 0x0010
_MORE_DATA_MASK = 0x0020
_WEP_MASK = 0x0040
_ORDER_MASK = 0x0080
_FRAGMENT_NUMBER_MASK = 0x000F
_SEQUENCE_NUMBER_MASK = 0xFFF0
_VERSION_SHIFT = 8
_TYPE_SHIFT = 10
_SUBTYPE_SHIFT = 12
_TO_DS_SHIFT = 0
_FROM_DS_SHIFT = 1
_MORE_FRAG_SHIFT = 2
_RETRY_SHIFT = 3
_PWR_MGT_SHIFT = 4
_MORE_DATA_SHIFT = 5
_WEP_SHIFT = 6
_ORDER_SHIFT = 7
_SEQUENCE_NUMBER_SHIFT = 4
# IEs
IE_SSID = 0
IE_RATES = 1
IE_FH = 2
IE_DS = 3
IE_CF = 4
IE_TIM = 5
IE_IBSS = 6
IE_HT_CAPA = 45
IE_ESR = 50
IE_HT_INFO = 61
FCS_LENGTH = 4
FRAMES_WITH_CAPABILITY = [M_BEACON, M_ASSOC_RESP, M_ASSOC_REQ, M_REASSOC_REQ, ]
# Block Ack control constants
_ACK_POLICY_SHIFT = 0
_MULTI_TID_SHIFT = 1
_COMPRESSED_SHIFT = 2
_TID_SHIFT = 12
_ACK_POLICY_MASK = 0x0001
_MULTI_TID_MASK = 0x0002
_COMPRESSED_MASK = 0x0004
_TID_MASK = 0xf000
_COMPRESSED_BMP_LENGTH = 8
_BMP_LENGTH = 128
# Action frame categories
BLOCK_ACK = 3
# Block ack category action codes
BLOCK_ACK_CODE_REQUEST = 0
BLOCK_ACK_CODE_RESPONSE = 1
BLOCK_ACK_CODE_DELBA = 2
class IEEE80211(dpkt.Packet):
"""IEEE 802.11.
IEEE 802.11 is part of the IEEE 802 set of local area network (LAN) technical standards,
and specifies the set of media access control (MAC) and physical layer (PHY) protocols
for implementing wireless local area network (WLAN) computer communication.
Attributes:
__hdr__: Header fields of IEEE802.11.
framectl: (int): Frame control (2 bytes)
duration: (int): Duration ID (2 bytes)
"""
__hdr__ = (
('framectl', 'H', 0),
('duration', 'H', 0)
)
# The standard really defines the entire MAC protocol as little-endian on the wire,
# however there is broken logic in the rest of the module preventing this from working right now
# __byte_order__ = '<'
@property
def version(self):
return (self.framectl & _VERSION_MASK) >> _VERSION_SHIFT
@version.setter
def version(self, val):
self.framectl = (val << _VERSION_SHIFT) | (self.framectl & ~_VERSION_MASK)
@property
def type(self):
return (self.framectl & _TYPE_MASK) >> _TYPE_SHIFT
@type.setter
def type(self, val):
self.framectl = (val << _TYPE_SHIFT) | (self.framectl & ~_TYPE_MASK)
@property
def subtype(self):
return (self.framectl & _SUBTYPE_MASK) >> _SUBTYPE_SHIFT
@subtype.setter
def subtype(self, val):
self.framectl = (val << _SUBTYPE_SHIFT) | (self.framectl & ~_SUBTYPE_MASK)
@property
def to_ds(self):
return (self.framectl & _TO_DS_MASK) >> _TO_DS_SHIFT
@to_ds.setter
def to_ds(self, val):
self.framectl = (val << _TO_DS_SHIFT) | (self.framectl & ~_TO_DS_MASK)
@property
def from_ds(self):
return (self.framectl & _FROM_DS_MASK) >> _FROM_DS_SHIFT
@from_ds.setter
def from_ds(self, val):
self.framectl = (val << _FROM_DS_SHIFT) | (self.framectl & ~_FROM_DS_MASK)
@property
def more_frag(self):
return (self.framectl & _MORE_FRAG_MASK) >> _MORE_FRAG_SHIFT
@more_frag.setter
def more_frag(self, val):
self.framectl = (val << _MORE_FRAG_SHIFT) | (self.framectl & ~_MORE_FRAG_MASK)
@property
def retry(self):
return (self.framectl & _RETRY_MASK) >> _RETRY_SHIFT
@retry.setter
def retry(self, val):
self.framectl = (val << _RETRY_SHIFT) | (self.framectl & ~_RETRY_MASK)
@property
def pwr_mgt(self):
return (self.framectl & _PWR_MGT_MASK) >> _PWR_MGT_SHIFT
@pwr_mgt.setter
def pwr_mgt(self, val):
self.framectl = (val << _PWR_MGT_SHIFT) | (self.framectl & ~_PWR_MGT_MASK)
@property
def more_data(self):
return (self.framectl & _MORE_DATA_MASK) >> _MORE_DATA_SHIFT
@more_data.setter
def more_data(self, val):
self.framectl = (val << _MORE_DATA_SHIFT) | (self.framectl & ~_MORE_DATA_MASK)
@property
def wep(self):
return (self.framectl & _WEP_MASK) >> _WEP_SHIFT
@wep.setter
def wep(self, val):
self.framectl = (val << _WEP_SHIFT) | (self.framectl & ~_WEP_MASK)
@property
def order(self):
return (self.framectl & _ORDER_MASK) >> _ORDER_SHIFT
@order.setter
def order(self, val):
self.framectl = (val << _ORDER_SHIFT) | (self.framectl & ~_ORDER_MASK)
def unpack_ies(self, buf):
self.ies = []
ie_decoder = {
IE_SSID: ('ssid', self.IE),
IE_RATES: ('rate', self.IE),
IE_FH: ('fh', self.FH),
IE_DS: ('ds', self.DS),
IE_CF: ('cf', self.CF),
IE_TIM: ('tim', self.TIM),
IE_IBSS: ('ibss', self.IBSS),
IE_HT_CAPA: ('ht_capa', self.IE),
IE_ESR: ('esr', self.IE),
IE_HT_INFO: ('ht_info', self.IE)
}
# each IE starts with an ID and a length
while len(buf) > FCS_LENGTH:
ie_id = struct.unpack('B', buf[:1])[0]
try:
parser = ie_decoder[ie_id][1]
name = ie_decoder[ie_id][0]
except KeyError:
parser = self.IE
name = 'ie_' + str(ie_id)
ie = parser(buf)
ie.data = buf[2:2 + ie.len]
setattr(self, name, ie)
self.ies.append(ie)
buf = buf[2 + ie.len:]
class Capability(object):
def __init__(self, field):
self.ess = field & 1
self.ibss = (field >> 1) & 1
self.cf_poll = (field >> 2) & 1
self.cf_poll_req = (field >> 3) & 1
self.privacy = (field >> 4) & 1
self.short_preamble = (field >> 5) & 1
self.pbcc = (field >> 6) & 1
self.hopping = (field >> 7) & 1
self.spec_mgmt = (field >> 8) & 1
self.qos = (field >> 9) & 1
self.short_slot = (field >> 10) & 1
self.apsd = (field >> 11) & 1
self.dsss = (field >> 13) & 1
self.delayed_blk_ack = (field >> 14) & 1
self.imm_blk_ack = (field >> 15) & 1
def __init__(self, *args, **kwargs):
if kwargs and 'fcs' in kwargs:
self.fcs_present = kwargs.pop('fcs')
else:
self.fcs_present = False
super(IEEE80211, self).__init__(*args, **kwargs)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = buf[self.__hdr_len__:]
m_decoder = {
M_BEACON: ('beacon', self.Beacon),
M_ASSOC_REQ: ('assoc_req', self.Assoc_Req),
M_ASSOC_RESP: ('assoc_resp', self.Assoc_Resp),
M_DISASSOC: ('diassoc', self.Disassoc),
M_REASSOC_REQ: ('reassoc_req', self.Reassoc_Req),
M_REASSOC_RESP: ('reassoc_resp', self.Assoc_Resp),
M_AUTH: ('auth', self.Auth),
M_PROBE_RESP: ('probe_resp', self.Beacon),
M_DEAUTH: ('deauth', self.Deauth),
M_ACTION: ('action', self.Action)
}
c_decoder = {
C_RTS: ('rts', self.RTS),
C_CTS: ('cts', self.CTS),
C_ACK: ('ack', self.ACK),
C_BLOCK_ACK_REQ: ('bar', self.BlockAckReq),
C_BLOCK_ACK: ('back', self.BlockAck),
C_CF_END: ('cf_end', self.CFEnd),
}
d_dsData = {
0: self.Data,
FROM_DS_FLAG: self.DataFromDS,
TO_DS_FLAG: self.DataToDS,
INTER_DS_FLAG: self.DataInterDS
}
# For now decode everything with DATA. Haven't checked about other QoS
# additions
d_decoder = {
# modified the decoder to consider the ToDS and FromDS flags
# Omitting the 11 case for now
D_DATA: ('data_frame', d_dsData),
D_NULL: ('data_frame', d_dsData),
D_QOS_DATA: ('data_frame', d_dsData),
D_QOS_NULL: ('data_frame', d_dsData)
}
decoder = {
MGMT_TYPE: m_decoder,
CTL_TYPE: c_decoder,
DATA_TYPE: d_decoder
}
# Strip off the FCS field
if self.fcs_present:
self.fcs = struct.unpack('<I', self.data[-1 * FCS_LENGTH:])[0]
self.data = self.data[0: -1 * FCS_LENGTH]
if self.type == MGMT_TYPE:
self.mgmt = self.MGMT_Frame(self.data)
self.data = self.mgmt.data
if self.subtype == M_PROBE_REQ:
self.unpack_ies(self.data)
return
if self.subtype == M_ATIM:
return
try:
parser = decoder[self.type][self.subtype][1]
name = decoder[self.type][self.subtype][0]
except KeyError:
raise dpkt.UnpackError("KeyError: type=%s subtype=%s" % (self.type, self.subtype))
if self.type == DATA_TYPE:
# need to grab the ToDS/FromDS info
parser = parser[self.to_ds * 10 + self.from_ds]
if self.type == MGMT_TYPE:
field = parser(self.mgmt.data)
else:
field = parser(self.data)
self.data = field
setattr(self, name, field)
if self.type == MGMT_TYPE:
self.unpack_ies(field.data)
if self.subtype in FRAMES_WITH_CAPABILITY:
self.capability = self.Capability(ntole(field.capability))
if self.type == DATA_TYPE and self.subtype == D_QOS_DATA:
self.qos_data = self.QoS_Data(field.data)
field.data = self.qos_data.data
self.data = field.data
class BlockAckReq(dpkt.Packet):
__hdr__ = (
('dst', '6s', '\x00' * 6),
('src', '6s', '\x00' * 6),
('ctl', 'H', 0),
('seq', 'H', 0),
)
class BlockAck(dpkt.Packet):
__hdr__ = (
('dst', '6s', '\x00' * 6),
('src', '6s', '\x00' * 6),
('ctl', 'H', 0),
('seq', 'H', 0),
)
@property
def compressed(self):
return (self.ctl & _COMPRESSED_MASK) >> _COMPRESSED_SHIFT
@compressed.setter
def compressed(self, val):
self.ctl = (val << _COMPRESSED_SHIFT) | (self.ctl & ~_COMPRESSED_MASK)
@property
def ack_policy(self):
return (self.ctl & _ACK_POLICY_MASK) >> _ACK_POLICY_SHIFT
@ack_policy.setter
def ack_policy(self, val):
self.ctl = (val << _ACK_POLICY_SHIFT) | (self.ctl & ~_ACK_POLICY_MASK)
@property
def multi_tid(self):
return (self.ctl & _MULTI_TID_MASK) >> _MULTI_TID_SHIFT
@multi_tid.setter
def multi_tid(self, val):
self.ctl = (val << _MULTI_TID_SHIFT) | (self.ctl & ~_MULTI_TID_MASK)
@property
def tid(self):
return (self.ctl & _TID_MASK) >> _TID_SHIFT
@tid.setter
def tid(self, val):
self.ctl = (val << _TID_SHIFT) | (self.ctl & ~_TID_MASK)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = buf[self.__hdr_len__:]
self.ctl = ntole(self.ctl)
if self.compressed:
self.bmp = struct.unpack('8s', self.data[0:_COMPRESSED_BMP_LENGTH])[0]
else:
self.bmp = struct.unpack('128s', self.data[0:_BMP_LENGTH])[0]
self.data = self.data[len(self.bmp):]
class _FragmentNumSeqNumMixin(object):
@property
def fragment_number(self):
return ntole(self.frag_seq) & _FRAGMENT_NUMBER_MASK
@property
def sequence_number(self):
return (ntole(self.frag_seq) & _SEQUENCE_NUMBER_MASK) >> _SEQUENCE_NUMBER_SHIFT
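# Note (editorial, not upstream): frag_seq is unpacked big-endian by dpkt, so
# ntole() restores the on-wire little-endian value before masking. E.g. raw
# bytes b'\x80\x7e' unpack to frag_seq 0x807e; ntole(0x807e) == 0x7e80, giving
# fragment_number 0 and sequence_number 0x7e80 >> 4 == 2024 (cf. test_80211_data).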
class RTS(dpkt.Packet):
__hdr__ = (
('dst', '6s', '\x00' * 6),
('src', '6s', '\x00' * 6)
)
class CTS(dpkt.Packet):
__hdr__ = (
('dst', '6s', '\x00' * 6),
)
class ACK(dpkt.Packet):
__hdr__ = (
('dst', '6s', '\x00' * 6),
)
class CFEnd(dpkt.Packet):
__hdr__ = (
('dst', '6s', '\x00' * 6),
('src', '6s', '\x00' * 6),
)
class MGMT_Frame(dpkt.Packet, _FragmentNumSeqNumMixin):
__hdr__ = (
('dst', '6s', '\x00' * 6),
('src', '6s', '\x00' * 6),
('bssid', '6s', '\x00' * 6),
('frag_seq', 'H', 0)
)
class Beacon(dpkt.Packet):
__hdr__ = (
('timestamp', 'Q', 0),
('interval', 'H', 0),
('capability', 'H', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.timestamp = ntole64(self.timestamp)
self.interval = ntole(self.interval)
class Disassoc(dpkt.Packet):
__hdr__ = (
('reason', 'H', 0),
)
class Assoc_Req(dpkt.Packet):
__hdr__ = (
('capability', 'H', 0),
('interval', 'H', 0)
)
class Assoc_Resp(dpkt.Packet):
__hdr__ = (
('capability', 'H', 0),
('status', 'H', 0),
('aid', 'H', 0)
)
class Reassoc_Req(dpkt.Packet):
__hdr__ = (
('capability', 'H', 0),
('interval', 'H', 0),
('current_ap', '6s', '\x00' * 6)
)
# This obviously doesn't support any AUTH frames that use encryption
class Auth(dpkt.Packet):
__hdr__ = (
('algorithm', 'H', 0),
('auth_seq', 'H', 0),
)
class Deauth(dpkt.Packet):
__hdr__ = (
('reason', 'H', 0),
)
class Action(dpkt.Packet):
__hdr__ = (
('category', 'B', 0),
('code', 'B', 0),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
action_parser = {
BLOCK_ACK: {
BLOCK_ACK_CODE_REQUEST: ('block_ack_request', IEEE80211.BlockAckActionRequest),
BLOCK_ACK_CODE_RESPONSE: ('block_ack_response', IEEE80211.BlockAckActionResponse),
BLOCK_ACK_CODE_DELBA: ('block_ack_delba', IEEE80211.BlockAckActionDelba),
},
}
try:
decoder = action_parser[self.category][self.code][1]
field_name = action_parser[self.category][self.code][0]
except KeyError:
raise dpkt.UnpackError("KeyError: category=%s code=%s" % (self.category, self.code))
field = decoder(self.data)
setattr(self, field_name, field)
self.data = field.data
class BlockAckActionRequest(dpkt.Packet):
__hdr__ = (
('dialog', 'B', 0),
('parameters', 'H', 0),
('timeout', 'H', 0),
('starting_seq', 'H', 0),
)
class BlockAckActionResponse(dpkt.Packet):
__hdr__ = (
('dialog', 'B', 0),
('status_code', 'H', 0),
('parameters', 'H', 0),
('timeout', 'H', 0),
)
class BlockAckActionDelba(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('delba_param_set', 'H', 0),
('reason_code', 'H', 0),
# ('gcr_group_addr', '8s', '\x00' * 8), # Standard says it must be there, but it isn't?
)
class Data(dpkt.Packet, _FragmentNumSeqNumMixin):
__hdr__ = (
('dst', '6s', '\x00' * 6),
('src', '6s', '\x00' * 6),
('bssid', '6s', '\x00' * 6),
('frag_seq', 'H', 0)
)
class DataFromDS(dpkt.Packet, _FragmentNumSeqNumMixin):
__hdr__ = (
('dst', '6s', '\x00' * 6),
('bssid', '6s', '\x00' * 6),
('src', '6s', '\x00' * 6),
('frag_seq', 'H', 0)
)
class DataToDS(dpkt.Packet, _FragmentNumSeqNumMixin):
__hdr__ = (
('bssid', '6s', '\x00' * 6),
('src', '6s', '\x00' * 6),
('dst', '6s', '\x00' * 6),
('frag_seq', 'H', 0)
)
class DataInterDS(dpkt.Packet, _FragmentNumSeqNumMixin):
__hdr__ = (
('dst', '6s', '\x00' * 6),
('src', '6s', '\x00' * 6),
('da', '6s', '\x00' * 6),
('frag_seq', 'H', 0),
('sa', '6s', '\x00' * 6)
)
class QoS_Data(dpkt.Packet):
__hdr__ = (
('control', 'H', 0),
)
class IE(dpkt.Packet):
__hdr__ = (
('id', 'B', 0),
('len', 'B', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.info = buf[2:self.len + 2]
class FH(dpkt.Packet):
__hdr__ = (
('id', 'B', 0),
('len', 'B', 0),
('tu', 'H', 0),
('hopset', 'B', 0),
('hoppattern', 'B', 0),
('hopindex', 'B', 0)
)
class DS(dpkt.Packet):
__hdr__ = (
('id', 'B', 0),
('len', 'B', 0),
('ch', 'B', 0)
)
class CF(dpkt.Packet):
__hdr__ = (
('id', 'B', 0),
('len', 'B', 0),
('count', 'B', 0),
('period', 'B', 0),
('max', 'H', 0),
('dur', 'H', 0)
)
class TIM(dpkt.Packet):
__hdr__ = (
('id', 'B', 0),
('len', 'B', 0),
('count', 'B', 0),
('period', 'B', 0),
('ctrl', 'H', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.bitmap = buf[5:self.len + 2]
class IBSS(dpkt.Packet):
__hdr__ = (
('id', 'B', 0),
('len', 'B', 0),
('atim', 'H', 0)
)
def test_80211_ack():
s = b'\xd4\x00\x00\x00\x00\x12\xf0\xb6\x1c\xa4\xff\xff\xff\xff'
ieee = IEEE80211(s, fcs=True)
assert ieee.version == 0
assert ieee.type == CTL_TYPE
assert ieee.subtype == C_ACK
assert ieee.to_ds == 0
assert ieee.from_ds == 0
assert ieee.pwr_mgt == 0
assert ieee.more_data == 0
assert ieee.wep == 0
assert ieee.order == 0
assert ieee.ack.dst == b'\x00\x12\xf0\xb6\x1c\xa4'
fcs = struct.unpack('<I', s[-4:])[0]
assert ieee.fcs == fcs
def test_80211_beacon():
s = (
b'\x80\x00\x00\x00\xff\xff\xff\xff\xff\xff\x00\x26\xcb\x18\x6a\x30\x00\x26\xcb\x18\x6a\x30'
b'\xa0\xd0\x77\x09\x32\x03\x8f\x00\x00\x00\x66\x00\x31\x04\x00\x04\x43\x41\x45\x4e\x01\x08'
b'\x82\x84\x8b\x0c\x12\x96\x18\x24\x03\x01\x01\x05\x04\x00\x01\x00\x00\x07\x06\x55\x53\x20'
b'\x01\x0b\x1a\x0b\x05\x00\x00\x6e\x00\x00\x2a\x01\x02\x2d\x1a\x6e\x18\x1b\xff\xff\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x30\x14\x01'
b'\x00\x00\x0f\xac\x04\x01\x00\x00\x0f\xac\x04\x01\x00\x00\x0f\xac\x01\x28\x00\x32\x04\x30'
b'\x48\x60\x6c\x36\x03\x51\x63\x03\x3d\x16\x01\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x85\x1e\x05\x00\x8f\x00\x0f\x00\xff\x03\x59\x00'
b'\x63\x73\x65\x2d\x33\x39\x31\x32\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x36\x96\x06'
b'\x00\x40\x96\x00\x14\x00\xdd\x18\x00\x50\xf2\x02\x01\x01\x80\x00\x03\xa4\x00\x00\x27\xa4'
b'\x00\x00\x42\x43\x5e\x00\x62\x32\x2f\x00\xdd\x06\x00\x40\x96\x01\x01\x04\xdd\x05\x00\x40'
b'\x96\x03\x05\xdd\x05\x00\x40\x96\x0b\x09\xdd\x08\x00\x40\x96\x13\x01\x00\x34\x01\xdd\x05'
b'\x00\x40\x96\x14\x05'
)
ieee = IEEE80211(s, fcs=True)
assert ieee.version == 0
assert ieee.type == MGMT_TYPE
assert ieee.subtype == M_BEACON
assert ieee.to_ds == 0
assert ieee.from_ds == 0
assert ieee.pwr_mgt == 0
assert ieee.more_data == 0
assert ieee.wep == 0
assert ieee.order == 0
assert ieee.mgmt.dst == b'\xff\xff\xff\xff\xff\xff'
assert ieee.mgmt.src == b'\x00\x26\xcb\x18\x6a\x30'
assert ieee.beacon.capability == 0x3104
assert ieee.capability.privacy == 1
assert ieee.ssid.data == b'CAEN'
assert ieee.rate.data == b'\x82\x84\x8b\x0c\x12\x96\x18\x24'
assert ieee.ds.data == b'\x01'
assert ieee.tim.data == b'\x00\x01\x00\x00'
fcs = struct.unpack('<I', s[-4:])[0]
assert ieee.fcs == fcs
def test_80211_data():
s = (
b'\x08\x09\x20\x00\x00\x26\xcb\x17\x3d\x91\x00\x16\x44\xb0\xae\xc6\x00\x02\xb3\xd6\x26\x3c'
b'\x80\x7e\xaa\xaa\x03\x00\x00\x00\x08\x00\x45\x00\x00\x28\x07\x27\x40\x00\x80\x06\x1d\x39'
b'\x8d\xd4\x37\x3d\x3f\xf5\xd1\x69\xc0\x5f\x01\xbb\xb2\xd6\xef\x23\x38\x2b\x4f\x08\x50\x10'
b'\x42\x04\xac\x17\x00\x00'
)
ieee = IEEE80211(s, fcs=True)
assert ieee.type == DATA_TYPE
assert ieee.subtype == D_DATA
assert ieee.data_frame.dst == b'\x00\x02\xb3\xd6\x26\x3c'
assert ieee.data_frame.src == b'\x00\x16\x44\xb0\xae\xc6'
assert ieee.data_frame.frag_seq == 0x807e
assert ieee.data_frame.fragment_number == 0
assert ieee.data_frame.sequence_number == 2024
assert ieee.data == (b'\xaa\xaa\x03\x00\x00\x00\x08\x00\x45\x00\x00\x28\x07\x27\x40\x00\x80\x06'
b'\x1d\x39\x8d\xd4\x37\x3d\x3f\xf5\xd1\x69\xc0\x5f\x01\xbb\xb2\xd6\xef\x23'
b'\x38\x2b\x4f\x08\x50\x10\x42\x04')
assert ieee.fcs == struct.unpack('<I', b'\xac\x17\x00\x00')[0]
from . import llc
llc_pkt = llc.LLC(ieee.data_frame.data)
ip_pkt = llc_pkt.data
assert ip_pkt.dst == b'\x3f\xf5\xd1\x69'
def test_80211_data_qos():
s = (
b'\x88\x01\x3a\x01\x00\x26\xcb\x17\x44\xf0\x00\x23\xdf\xc9\xc0\x93\x00\x26\xcb\x17\x44\xf0'
b'\x20\x7b\x00\x00\xaa\xaa\x03\x00\x00\x00\x88\x8e\x01\x00\x00\x74\x02\x02\x00\x74\x19\x80'
b'\x00\x00\x00\x6a\x16\x03\x01\x00\x65\x01\x00\x00\x61\x03\x01\x4b\x4c\xa7\x7e\x27\x61\x6f'
b'\x02\x7b\x3c\x72\x39\xe3\x7b\xd7\x43\x59\x91\x7f\xaa\x22\x47\x51\xb6\x88\x9f\x85\x90\x87'
b'\x5a\xd1\x13\x20\xe0\x07\x00\x00\x68\xbd\xa4\x13\xb0\xd5\x82\x7e\xc7\xfb\xe7\xcc\xab\x6e'
b'\x5d\x5a\x51\x50\xd4\x45\xc5\xa1\x65\x53\xad\xb5\x88\x5b\x00\x1a\x00\x2f\x00\x05\x00\x04'
b'\x00\x35\x00\x0a\x00\x09\x00\x03\x00\x08\x00\x33\x00\x39\x00\x16\x00\x15\x00\x14\x01\x00'
b'\xff\xff\xff\xff'
)
ieee = IEEE80211(s, fcs=True)
assert ieee.type == DATA_TYPE
assert ieee.subtype == D_QOS_DATA
assert ieee.data_frame.dst == b'\x00\x26\xcb\x17\x44\xf0'
assert ieee.data_frame.src == b'\x00\x23\xdf\xc9\xc0\x93'
assert ieee.data_frame.frag_seq == 0x207b
assert ieee.data_frame.fragment_number == 0
assert ieee.data_frame.sequence_number == 1970
assert ieee.data == (b'\xaa\xaa\x03\x00\x00\x00\x88\x8e\x01\x00\x00\x74\x02\x02\x00\x74\x19\x80'
b'\x00\x00\x00\x6a\x16\x03\x01\x00\x65\x01\x00\x00\x61\x03\x01\x4b\x4c\xa7'
b'\x7e\x27\x61\x6f\x02\x7b\x3c\x72\x39\xe3\x7b\xd7\x43\x59\x91\x7f\xaa\x22'
b'\x47\x51\xb6\x88\x9f\x85\x90\x87\x5a\xd1\x13\x20\xe0\x07\x00\x00\x68\xbd'
b'\xa4\x13\xb0\xd5\x82\x7e\xc7\xfb\xe7\xcc\xab\x6e\x5d\x5a\x51\x50\xd4\x45'
b'\xc5\xa1\x65\x53\xad\xb5\x88\x5b\x00\x1a\x00\x2f\x00\x05\x00\x04\x00\x35'
b'\x00\x0a\x00\x09\x00\x03\x00\x08\x00\x33\x00\x39\x00\x16\x00\x15\x00\x14\x01\x00')
assert ieee.qos_data.control == 0x0
assert ieee.fcs == struct.unpack('<I', b'\xff\xff\xff\xff')[0]
def test_bug():
s = (b'\x88\x41\x2c\x00\x00\x26\xcb\x17\x44\xf0\x00\x1e\x52\x97\x14\x11\x00\x1f\x6d\xe8\x18\x00'
b'\xd0\x07\x00\x00\x6f\x00\x00\x20\x00\x00\x00\x00')
ieee = IEEE80211(s)
assert ieee.wep == 1
def test_data_ds():
# verifying the ToDS and FromDS fields and that we're getting the
# correct values
s = (b'\x08\x03\x00\x00\x01\x0b\x85\x00\x00\x00\x00\x26\xcb\x18\x73\x50\x01\x0b\x85\x00\x00\x00'
b'\x00\x89\x00\x26\xcb\x18\x73\x50')
ieee = IEEE80211(s)
assert ieee.type == DATA_TYPE
assert ieee.to_ds == 1
assert ieee.from_ds == 1
assert ieee.data_frame.sa == b'\x00\x26\xcb\x18\x73\x50'
assert ieee.data_frame.src == b'\x00\x26\xcb\x18\x73\x50'
assert ieee.data_frame.dst == b'\x01\x0b\x85\x00\x00\x00'
assert ieee.data_frame.da == b'\x01\x0b\x85\x00\x00\x00'
s = (b'\x88\x41\x50\x01\x00\x26\xcb\x17\x48\xc1\x00\x24\x2c\xe7\xfe\x8a\xff\xff\xff\xff\xff\xff'
b'\x80\xa0\x00\x00\x09\x1a\x00\x20\x00\x00\x00\x00')
ieee = IEEE80211(s)
assert ieee.type == DATA_TYPE
assert ieee.to_ds == 1
assert ieee.from_ds == 0
assert ieee.data_frame.bssid == b'\x00\x26\xcb\x17\x48\xc1'
assert ieee.data_frame.src == b'\x00\x24\x2c\xe7\xfe\x8a'
assert ieee.data_frame.dst == b'\xff\xff\xff\xff\xff\xff'
s = b'\x08\x02\x02\x01\x00\x02\x44\xac\x27\x70\x00\x1f\x33\x39\x75\x44\x00\x1f\x33\x39\x75\x44\x90\xa4'
ieee = IEEE80211(s)
assert ieee.type == DATA_TYPE
assert ieee.to_ds == 0
assert ieee.from_ds == 1
assert ieee.data_frame.bssid == b'\x00\x1f\x33\x39\x75\x44'
assert ieee.data_frame.src == b'\x00\x1f\x33\x39\x75\x44'
assert ieee.data_frame.dst == b'\x00\x02\x44\xac\x27\x70'
def test_compressed_block_ack():
s = (b'\x94\x00\x00\x00\x34\xc0\x59\xd6\x3f\x62\xb4\x75\x0e\x46\x83\xc1\x05\x50\x80\xee\x03\x00'
b'\x00\x00\x00\x00\x00\x00\xa2\xe4\x98\x45')
ieee = IEEE80211(s, fcs=True)
assert ieee.type == CTL_TYPE
assert ieee.subtype == C_BLOCK_ACK
assert ieee.back.dst == b'\x34\xc0\x59\xd6\x3f\x62'
assert ieee.back.src == b'\xb4\x75\x0e\x46\x83\xc1'
assert ieee.back.compressed == 1
assert len(ieee.back.bmp) == 8
assert ieee.back.ack_policy == 1
assert ieee.back.tid == 5
def test_action_block_ack_request():
s = (b'\xd0\x00\x3a\x01\x00\x23\x14\x36\x52\x30\xb4\x75\x0e\x46\x83\xc1\xb4\x75\x0e\x46\x83\xc1'
b'\x70\x14\x03\x00\x0d\x02\x10\x00\x00\x40\x29\x06\x50\x33\x9e')
ieee = IEEE80211(s, fcs=True)
assert ieee.type == MGMT_TYPE
assert ieee.subtype == M_ACTION
assert ieee.action.category == BLOCK_ACK
assert ieee.action.code == BLOCK_ACK_CODE_REQUEST
assert ieee.action.block_ack_request.timeout == 0
parameters = struct.unpack('<H', b'\x10\x02')[0]
assert ieee.action.block_ack_request.parameters == parameters
def test_action_block_ack_response():
s = (b'\xd0\x00\x3c\x00\xb4\x75\x0e\x46\x83\xc1\x00\x23\x14\x36\x52\x30\xb4\x75\x0e\x46\x83\xc1'
b'\xd0\x68\x03\x01\x0d\x00\x00\x02\x10\x88\x13\x9f\xc0\x0b\x75')
ieee = IEEE80211(s, fcs=True)
assert ieee.type == MGMT_TYPE
assert ieee.subtype == M_ACTION
assert ieee.action.category == BLOCK_ACK
assert ieee.action.code == BLOCK_ACK_CODE_RESPONSE
timeout = struct.unpack('<H', b'\x13\x88')[0]
assert ieee.action.block_ack_response.timeout == timeout
parameters = struct.unpack('<H', b'\x10\x02')[0]
assert ieee.action.block_ack_response.parameters == parameters
def test_action_block_ack_delete():
s = (b'\xd0\x00\x2c\x00\x00\xc1\x41\x06\x13\x0d\x6c\xb2\xae\xae\xde\x80\x6c\xb2\xae\xae\xde\x80'
b'\xa0\x52\x03\x02\x00\x08\x01\x00\x74\x5d\x0a\xc6')
ieee = IEEE80211(s, fcs=True)
assert ieee.type == MGMT_TYPE
assert ieee.subtype == M_ACTION
assert ieee.action.category == BLOCK_ACK
assert ieee.action.code == BLOCK_ACK_CODE_DELBA
assert ieee.action.block_ack_delba.delba_param_set == 0x0800
assert ieee.action.block_ack_delba.reason_code == 1
def test_ieee80211_properties():
ieee80211 = IEEE80211()
assert ieee80211.version == 0
ieee80211.version = 1
assert ieee80211.version == 1
assert ieee80211.type == 0
ieee80211.type = 1
assert ieee80211.type == 1
assert ieee80211.subtype == 0
ieee80211.subtype = 1
assert ieee80211.subtype == 1
assert ieee80211.to_ds == 0
ieee80211.to_ds = 1
assert ieee80211.to_ds == 1
assert ieee80211.from_ds == 0
ieee80211.from_ds = 1
assert ieee80211.from_ds == 1
assert ieee80211.more_frag == 0
ieee80211.more_frag = 1
assert ieee80211.more_frag == 1
assert ieee80211.retry == 0
ieee80211.retry = 0
assert ieee80211.retry == 0
assert ieee80211.pwr_mgt == 0
ieee80211.pwr_mgt = 0
assert ieee80211.pwr_mgt == 0
assert ieee80211.more_data == 0
ieee80211.more_data = 0
assert ieee80211.more_data == 0
assert ieee80211.wep == 0
ieee80211.wep = 1
assert ieee80211.wep == 1
assert ieee80211.order == 0
ieee80211.order = 1
assert ieee80211.order == 1
def test_blockack_properties():
blockack = IEEE80211.BlockAck()
assert blockack.compressed == 0
blockack.compressed = 1
assert blockack.compressed == 1
assert blockack.ack_policy == 0
blockack.ack_policy = 1
assert blockack.ack_policy == 1
assert blockack.multi_tid == 0
blockack.multi_tid = 1
assert blockack.multi_tid == 1
assert blockack.tid == 0
blockack.tid = 1
assert blockack.tid == 1
def test_ieee80211_unpack():
import pytest
from binascii import unhexlify
buf = unhexlify(
'4000' # subtype set to M_PROBE_REQ
'0000'
# MGMT_Frame
'000000000000' # dst
'000000000000' # src
'000000000000' # bssid
'0000' # frag_seq
)
ieee80211 = IEEE80211(buf)
assert ieee80211.ies == []
buf = unhexlify(
'9000' # subtype set to M_ATIM
'0000'
# MGMT_Frame
'000000000000' # dst
'000000000000' # src
'000000000000' # bssid
'0000' # frag_seq
)
ieee80211 = IEEE80211(buf)
assert not hasattr(ieee80211, 'ies')
buf = unhexlify(
'0c00' # type set to invalid value
'0000'
)
with pytest.raises(dpkt.UnpackError, match="KeyError: type=3 subtype=0"):
IEEE80211(buf)
def test_blockack_unpack():
from binascii import unhexlify
# unpack a non-compressed BlockAck
buf = unhexlify(
'000000000000'
'000000000000'
'0000' # compressed flag not set
'0000'
) + b'\xff' * 128
blockack = IEEE80211.BlockAck(buf)
assert blockack.bmp == b'\xff' * 128
assert blockack.data == b''
def test_action_unpack():
import pytest
from binascii import unhexlify
buf = unhexlify(
'01' # category
'00' # code (non-existent)
)
with pytest.raises(dpkt.UnpackError, match="KeyError: category=1 code=0"):
IEEE80211.Action(buf)
def test_beacon_unpack():
beacon_payload = b"\xb9\x71\xfa\x45\x52\x02\x00\x00\x64\x00\x11\x04"
beacon = IEEE80211.Beacon(beacon_payload)
assert beacon.timestamp == 0x0000025245fa71b9
assert beacon.interval == 100
assert beacon.capability == 0x1104
def test_fragment_and_sequence_values():
from binascii import unhexlify
for raw_frag_seq, (expected_frag_num, expected_seq_num) in [
("0000", (0, 0)),
("0F00", (15, 0)),
("0111", (1, 272)),
("B3FF", (3, 4091))
]:
buf = unhexlify(
'000000000000' # dst
'000000000000' # src
'000000000000' # bssid
+ raw_frag_seq
)
data = IEEE80211.Data(buf)
assert data.fragment_number == expected_frag_num
assert data.sequence_number == expected_seq_num
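def test_build_ack_frame():
    # Illustrative sketch (editorial, not part of upstream dpkt): compose the
    # frame control word via the bit-field properties, then serialize the header.
    ieee = IEEE80211()
    ieee.type = CTL_TYPE
    ieee.subtype = C_ACK
    assert ieee.framectl == (CTL_TYPE << _TYPE_SHIFT) | (C_ACK << _SUBTYPE_SHIFT)
    assert bytes(ieee) == b'\xd4\x00\x00\x00'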
| 32,495 | 30.276227 | 109 |
py
|
dpkt
|
dpkt-master/dpkt/snoop.py
|
# $Id$
# -*- coding: utf-8 -*-
"""Snoop file format."""
from __future__ import absolute_import
import time
from abc import abstractmethod
from . import dpkt
from .compat import intround
# RFC 1761
SNOOP_MAGIC = 0x736E6F6F70000000
SNOOP_VERSION = 2
SDL_8023 = 0
SDL_8024 = 1
SDL_8025 = 2
SDL_8026 = 3
SDL_ETHER = 4
SDL_HDLC = 5
SDL_CHSYNC = 6
SDL_IBMCC = 7
SDL_FDDI = 8
SDL_OTHER = 9
dltoff = {SDL_ETHER: 14}
class PktHdr(dpkt.Packet):
"""snoop packet header.
Per-packet record header, as defined in RFC 1761.
Attributes:
__hdr__: Header fields of the snoop packet record header.
"""
__byte_order__ = '!'
__hdr__ = (
# 32-bit unsigned integer representing the length in octets of the
# captured packet as received via a network.
('orig_len', 'I', 0),
# 32-bit unsigned integer representing the length of the Packet Data
# field. This is the number of octets of the captured packet that are
# included in this packet record. If the received packet was
# truncated, the Included Length field will be less than the Original
# Length field.
('incl_len', 'I', 0),
# 32-bit unsigned integer representing the total length of this packet
# record in octets. This includes the 24 octets of descriptive
# information, the length of the Packet Data field, and the length of
# the Pad field.
('rec_len', 'I', 0),
# 32-bit unsigned integer representing the number of packets that were
# lost by the system that created the packet file between the first
# packet record in the file and this one. Packets may be lost because
# of insufficient resources in the capturing system, or for other
# reasons. Note: some implementations lack the ability to count
# dropped packets. Those implementations may set the cumulative drops
# value to zero.
('cum_drops', 'I', 0),
# 32-bit unsigned integer representing the time, in seconds since
# January 1, 1970, when the packet arrived.
('ts_sec', 'I', 0),
# 32-bit unsigned integer representing microsecond resolution of packet
# arrival time.
('ts_usec', 'I', 0),
)
class FileHdr(dpkt.Packet):
"""snoop file header.
Capture file header, as defined in RFC 1761.
Attributes:
__hdr__: Header fields of the snoop file header.
"""
__byte_order__ = '!'
__hdr__ = (
('magic', 'Q', SNOOP_MAGIC),
('v', 'I', SNOOP_VERSION),
('linktype', 'I', SDL_ETHER),
)
class FileWriter(object):
def __init__(self, fileobj):
self._f = fileobj
self.write = self._f.write
def close(self):
self._f.close()
def writepkt(self, pkt, ts=None):
"""Write single packet and optional timestamp to file.
Args:
pkt: `bytes` will be called on this and written to file.
ts (float): Timestamp in seconds. Defaults to current time.
"""
if ts is None:
ts = time.time()
self.writepkt_time(bytes(pkt), ts)
@abstractmethod
def writepkt_time(self, pkt, ts):
"""Write single packet and its timestamp to file.
Args:
pkt (bytes): Some `bytes` to write to the file
ts (float): Timestamp in seconds
"""
pass
class Writer(FileWriter):
"""Simple snoop dumpfile writer.
Writes the snoop file header on construction, then appends one
4-octet-aligned record per packet.
"""
precision_multiplier = 1000000
def __init__(self, fileobj, linktype=SDL_ETHER):
super(Writer, self).__init__(fileobj)
fh = FileHdr(linktype=linktype)
self._PktHdr = PktHdr()
self._pack_hdr = self._PktHdr._pack_hdr
self.write(bytes(fh))
def writepkt_time(self, pkt, ts):
"""Write single packet and its timestamp to file.
Args:
pkt (bytes): Some `bytes` to write to the file
ts (float): Timestamp in seconds
"""
pkt_len = len(pkt)
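# RFC 1761 requires each record to be padded to a 4-octet boundary;
# (4 - pkt_len) & 3 yields the 0-3 pad bytes needed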
pad_len = (4 - pkt_len) & 3
pkt_header = self._pack_hdr(
pkt_len,
pkt_len,
PktHdr.__hdr_len__ + pkt_len + pad_len,
0,
int(ts),
intround(ts % 1 * self.precision_multiplier),
)
self.write(pkt_header + pkt + b'\x00' * pad_len)
def writepkts(self, pkts):
"""Write an iterable of packets to file.
Timestamps should be in seconds.
Packets must be of type `bytes` as they will not be cast.
Args:
pkts: iterable containing (ts, pkt)
"""
# take local references to these variables so we don't need to
# dereference every time in the loop
write = self.write
pack_hdr = self._pack_hdr
for ts, pkt in pkts:
pkt_len = len(pkt)
pad_len = (4 - pkt_len) & 3
pkt_header = pack_hdr(
pkt_len,
pkt_len,
PktHdr.__hdr_len__ + pkt_len + pad_len,
0,
int(ts),
intround(ts % 1 * self.precision_multiplier),
)
write(pkt_header + pkt + b'\x00' * pad_len)
class FileReader(object):
def __init__(self, fileobj):
self.name = getattr(fileobj, 'name', '<%s>' % fileobj.__class__.__name__)
self._f = fileobj
self.filter = ''
@property
def fd(self):
return self._f.fileno()
def fileno(self):
return self.fd
def setfilter(self, value, optimize=1):
raise NotImplementedError
def readpkts(self):
return list(self)
def dispatch(self, cnt, callback, *args):
"""Collect and process packets with a user callback.
Return the number of packets processed, or 0 for a savefile.
Arguments:
cnt -- number of packets to process;
or 0 to process all packets until EOF
callback -- function with (timestamp, pkt, *args) prototype
*args -- optional arguments passed to callback on execution
"""
processed = 0
if cnt > 0:
for _ in range(cnt):
try:
ts, pkt = next(self)
except StopIteration:
break
callback(ts, pkt, *args)
processed += 1
else:
for ts, pkt in self:
callback(ts, pkt, *args)
processed += 1
return processed
def loop(self, callback, *args):
"""
Convenience method which will apply the callback to all packets.
Returns the number of packets processed.
Arguments:
callback -- function with (timestamp, pkt, *args) prototype
*args -- optional arguments passed to callback on execution
"""
return self.dispatch(0, callback, *args)
def __iter__(self):
return self
class Reader(FileReader):
"""Simple pypcap-compatible snoop file reader.
Validates the snoop magic on construction and yields (timestamp, buf)
tuples, mirroring the pypcap Reader interface.
"""
def __init__(self, fileobj):
super(Reader, self).__init__(fileobj)
buf = self._f.read(FileHdr.__hdr_len__)
self._fh = FileHdr(buf)
self._ph = PktHdr
if self._fh.magic != SNOOP_MAGIC:
raise ValueError('invalid snoop header')
self.dloff = dltoff[self._fh.linktype]
def datalink(self):
return self._fh.linktype
def __next__(self):
buf = self._f.read(self._ph.__hdr_len__)
if not buf:
raise StopIteration
hdr = self._ph(buf)
buf = self._f.read(hdr.rec_len - self._ph.__hdr_len__)
return (hdr.ts_sec + (hdr.ts_usec / 1000000.0), buf[:hdr.incl_len])
next = __next__
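# Usage sketch (illustrative, not part of upstream dpkt; the filename and
# handle() callback are hypothetical):
#
#     with open('capture.snoop', 'rb') as f:
#         for ts, buf in Reader(f):
#             handle(ts, buf)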
def test_snoop_pkt_header():
from binascii import unhexlify
buf = unhexlify(
'000000010000000200000003000000040000000500000006'
)
pkt = PktHdr(buf)
assert pkt.orig_len == 1
assert pkt.incl_len == 2
assert pkt.rec_len == 3
assert pkt.cum_drops == 4
assert pkt.ts_sec == 5
assert pkt.ts_usec == 6
assert bytes(pkt) == buf
def test_snoop_file_header():
from binascii import unhexlify
buf = unhexlify(
'000000000000000b000000160000014d'
)
hdr = FileHdr(buf)
assert hdr.magic == 11
assert hdr.v == 22
assert hdr.linktype == 333
class TestSnoopWriter(object):
@classmethod
def setup_class(cls):
from .compat import BytesIO
from binascii import unhexlify
cls.fobj = BytesIO()
# write the file header only
cls.writer = Writer(cls.fobj)
cls.file_header = unhexlify(
'736e6f6f700000000000000200000004'
)
cls.pkt = unhexlify(
'000000010000000200000003000000040000000500000006'
)
cls.pkt_and_header = unhexlify(
'00000018' # orig_len
'00000018' # incl_len
'00000030' # rec_len
'00000000' # cum_drops
'00000000' # ts_sec
'00000000' # ts_usec
# data
'000000010000000200000003000000040000000500000006'
)
def test_snoop_file_writer_filehdr(self):
# jump to the start and read the file header
self.fobj.seek(0)
buf = self.fobj.read()
assert buf == self.file_header
def test_writepkt(self):
loc = self.fobj.tell()
self.writer.writepkt(self.pkt)
# jump back to just before the writing of the packet
self.fobj.seek(loc)
# read the packet back in
buf = self.fobj.read()
# compare everything except the timestamp
assert buf[:16] == self.pkt_and_header[:16]
assert buf[24:] == self.pkt_and_header[24:]
def test_writepkt_time(self):
loc = self.fobj.tell()
self.writer.writepkt_time(self.pkt, 0)
self.fobj.seek(loc)
# read the packet we just wrote
buf = self.fobj.read()
assert buf == self.pkt_and_header
def test_writepkts(self):
loc = self.fobj.tell()
self.writer.writepkts([
(0, self.pkt),
(1, self.pkt),
(2, self.pkt),
])
self.fobj.seek(loc)
buf = self.fobj.read()
pkt_len = len(self.pkt_and_header)
# chunk up the file and check each packet
for idx in range(0, 3):
pkt = buf[idx * pkt_len:(idx + 1) * pkt_len]
assert pkt[:16] == self.pkt_and_header[:16]
assert pkt[16:20] == dpkt.struct.pack('>I', idx)
assert pkt[20:] == self.pkt_and_header[20:]
def test_snoop_writer_close(self):
assert not self.fobj.closed
# check that the underlying file object is closed
self.writer.close()
assert self.fobj.closed
class TestSnoopReader(object):
@classmethod
def setup_class(cls):
from binascii import unhexlify
cls.header = unhexlify(
'736e6f6f700000000000000200000004'
)
cls.pkt_header = unhexlify(
'00000018' # orig_len
'00000018' # incl_len
'00000030' # rec_len
'00000000' # cum_drops
'00000000' # ts_sec
'00000000' # ts_usec
)
cls.pkt_bytes = unhexlify(
# data
'000000010000000200000003000000040000000500000006'
)
def setup_method(self):
from .compat import BytesIO
self.fobj = BytesIO(
self.header + self.pkt_header + self.pkt_bytes
)
self.reader = Reader(self.fobj)
def test_open(self):
assert self.reader.dloff == 14
assert self.reader.datalink() == SDL_ETHER
def test_invalid_magic(self):
import pytest
self.fobj.seek(0)
self.fobj.write(b'\x00' * 4)
self.fobj.seek(0)
with pytest.raises(ValueError, match='invalid snoop header'):
Reader(self.fobj)
def test_read_pkt(self):
ts, pkt = next(self.reader)
assert ts == 0
assert pkt == self.pkt_bytes
def test_readpkts(self):
pkts = self.reader.readpkts()
assert len(pkts) == 1
ts, buf = pkts[0]
assert ts == 0
assert buf == self.pkt_bytes
class TestFileWriter(object):
def setup_method(self):
from .compat import BytesIO
self.fobj = BytesIO()
self.writer = FileWriter(self.fobj)
def test_write(self):
buf = b'\x01' * 10
self.writer.write(buf)
self.fobj.seek(0)
assert self.fobj.read() == buf
def test_close(self):
assert not self.fobj.closed
self.writer.close()
assert self.fobj.closed
class TestFileReader(object):
"""
Testing for the FileReader superclass which Reader inherits from.
"""
pkts = [
(0, b'000001'),
(1, b'000002'),
(2, b'000003'),
]
class SampleReader(FileReader):
"""
Very simple class which returns index as timestamp, and
unparsed buffer as packet
"""
def __init__(self, fobj):
super(TestFileReader.SampleReader, self).__init__(fobj)
self._iter = iter(TestFileReader.pkts)
def __next__(self):
return next(self._iter)
next = __next__
def setup_method(self):
import tempfile
self.fd = tempfile.TemporaryFile()
self.reader = self.SampleReader(self.fd)
def test_attributes(self):
import pytest
assert self.reader.name == self.fd.name
assert self.reader.fd == self.fd.fileno()
assert self.reader.fileno() == self.fd.fileno()
assert self.reader.filter == ''
with pytest.raises(NotImplementedError):
self.reader.setfilter(1, 2)
def test_readpkts_list(self):
pkts = self.reader.readpkts()
print(len(pkts))
for idx, (ts, buf) in enumerate(pkts):
assert ts == idx
assert buf == self.pkts[idx][1]
def test_readpkts_iter(self):
for idx, (ts, buf) in enumerate(self.reader):
assert ts == idx
assert buf == self.pkts[idx][1]
def test_dispatch_all(self):
assert self.reader.dispatch(0, lambda ts, pkt: None) == 3
def test_dispatch_some(self):
assert self.reader.dispatch(2, lambda ts, pkt: None) == 2
def test_dispatch_termination(self):
assert self.reader.dispatch(20, lambda ts, pkt: None) == 3
def test_loop(self):
class Count:
counter = 0
@classmethod
def inc(cls):
cls.counter += 1
assert self.reader.loop(lambda ts, pkt: Count.inc()) == 3
assert Count.counter == 3
def test_next(self):
ts, buf = next(self.reader)
assert ts == 0
assert buf == self.pkts[0][1]
| 15,023 | 26.021583 | 81 |
py
|
dpkt
|
dpkt-master/dpkt/ssl.py
|
# $Id: ssl.py 90 2014-04-02 22:06:23Z [email protected] $
# Portion Copyright 2012 Google Inc. All rights reserved.
# -*- coding: utf-8 -*-
"""Secure Sockets Layer / Transport Layer Security."""
from __future__ import absolute_import
import struct
import binascii
from . import dpkt
from . import ssl_ciphersuites
from .compat import compat_ord
from .utils import deprecation_warning
#
# Note from April 2011: [email protected] added code that parses SSL3/TLS messages more in depth.
#
# Jul 2012: [email protected] modified and extended SSL support further.
#
# SSL 2.0 is deprecated in RFC 6176
class SSL2(dpkt.Packet):
__hdr__ = (
('len', 'H', 0),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
# In SSL, all data sent is encapsulated in a record, an object which is
# composed of a header and some non-zero amount of data. Each record header
# contains a two or three byte length code. If the most significant bit is
# set in the first byte of the record length code then the record has
# no padding and the total header length will be 2 bytes, otherwise the
# record has padding and the total header length will be 3 bytes. The
# record header is transmitted before the data portion of the record.
if self.len & 0x8000:
n = self.len = self.len & 0x7FFF
self.msg, self.data = self.data[:n], self.data[n:]
else:
# Note that in the long header case (3 bytes total), the second most
# significant bit in the first byte has special meaning. When zero,
# the record being sent is a data record. When one, the record
# being sent is a security escape (there are currently no examples
# of security escapes; this is reserved for future versions of the
# protocol). In either case, the length code describes how much
# data is in the record.
n = self.len = self.len & 0x3FFF
padlen = compat_ord(self.data[0])
self.msg = self.data[1:1 + n]
self.pad = self.data[1 + n:1 + n + padlen]
self.data = self.data[1 + n + padlen:]
# SSL 3.0 is deprecated in RFC 7568
# Use class TLS for >= SSL 3.0
class TLS(dpkt.Packet):
def __init__(self, *args, **kwargs):
self.records = []
dpkt.Packet.__init__(self, *args, **kwargs)
def unpack(self, buf):
# this either unpacks the entire buffer into 1 or multiple TLSRecord's
# or throws NeedData if the buffer is incomplete (truncated).
# tls_multi_factory() will do the same, but gracefully; it will unpack
# multiple TLS records and catch NeedData if the buffer was incomplete
while buf:
tlsrec = TLSRecord(buf)
self.records.append(tlsrec)
buf = buf[5 + tlsrec.length:] # 5 = TLSRecord.__hdr_len__
self.data = b''
# SSLv3/TLS versions
SSL3_V = 0x0300
TLS1_V = 0x0301
TLS11_V = 0x0302
TLS12_V = 0x0303
ssl3_versions_str = {
SSL3_V: 'SSL3',
TLS1_V: 'TLS 1.0',
TLS11_V: 'TLS 1.1',
TLS12_V: 'TLS 1.2'
}
SSL3_VERSION_BYTES = set((b'\x03\x00', b'\x03\x01', b'\x03\x02', b'\x03\x03'))
# Alert levels
SSL3_AD_WARNING = 1
SSL3_AD_FATAL = 2
alert_level_str = {
SSL3_AD_WARNING: 'SSL3_AD_WARNING',
SSL3_AD_FATAL: 'SSL3_AD_FATAL'
}
# SSL3 alert descriptions
SSL3_AD_CLOSE_NOTIFY = 0
SSL3_AD_UNEXPECTED_MESSAGE = 10 # fatal
SSL3_AD_BAD_RECORD_MAC = 20 # fatal
SSL3_AD_DECOMPRESSION_FAILURE = 30 # fatal
SSL3_AD_HANDSHAKE_FAILURE = 40 # fatal
SSL3_AD_NO_CERTIFICATE = 41
SSL3_AD_BAD_CERTIFICATE = 42
SSL3_AD_UNSUPPORTED_CERTIFICATE = 43
SSL3_AD_CERTIFICATE_REVOKED = 44
SSL3_AD_CERTIFICATE_EXPIRED = 45
SSL3_AD_CERTIFICATE_UNKNOWN = 46
SSL3_AD_ILLEGAL_PARAMETER = 47 # fatal
# TLS1 alert descriptions
TLS1_AD_DECRYPTION_FAILED = 21
TLS1_AD_RECORD_OVERFLOW = 22
TLS1_AD_UNKNOWN_CA = 48 # fatal
TLS1_AD_ACCESS_DENIED = 49 # fatal
TLS1_AD_DECODE_ERROR = 50 # fatal
TLS1_AD_DECRYPT_ERROR = 51
TLS1_AD_EXPORT_RESTRICTION = 60 # fatal
TLS1_AD_PROTOCOL_VERSION = 70 # fatal
TLS1_AD_INSUFFICIENT_SECURITY = 71 # fatal
TLS1_AD_INTERNAL_ERROR = 80 # fatal
TLS1_AD_USER_CANCELLED = 90
TLS1_AD_NO_RENEGOTIATION = 100
# /* codes 110-114 are from RFC3546 */
TLS1_AD_UNSUPPORTED_EXTENSION = 110
TLS1_AD_CERTIFICATE_UNOBTAINABLE = 111
TLS1_AD_UNRECOGNIZED_NAME = 112
TLS1_AD_BAD_CERTIFICATE_STATUS_RESPONSE = 113
TLS1_AD_BAD_CERTIFICATE_HASH_VALUE = 114
TLS1_AD_UNKNOWN_PSK_IDENTITY = 115 # fatal
# Mapping alert types to strings
alert_description_str = {
SSL3_AD_CLOSE_NOTIFY: 'SSL3_AD_CLOSE_NOTIFY',
SSL3_AD_UNEXPECTED_MESSAGE: 'SSL3_AD_UNEXPECTED_MESSAGE',
SSL3_AD_BAD_RECORD_MAC: 'SSL3_AD_BAD_RECORD_MAC',
SSL3_AD_DECOMPRESSION_FAILURE: 'SSL3_AD_DECOMPRESSION_FAILURE',
SSL3_AD_HANDSHAKE_FAILURE: 'SSL3_AD_HANDSHAKE_FAILURE',
SSL3_AD_NO_CERTIFICATE: 'SSL3_AD_NO_CERTIFICATE',
SSL3_AD_BAD_CERTIFICATE: 'SSL3_AD_BAD_CERTIFICATE',
SSL3_AD_UNSUPPORTED_CERTIFICATE: 'SSL3_AD_UNSUPPORTED_CERTIFICATE',
SSL3_AD_CERTIFICATE_REVOKED: 'SSL3_AD_CERTIFICATE_REVOKED',
SSL3_AD_CERTIFICATE_EXPIRED: 'SSL3_AD_CERTIFICATE_EXPIRED',
SSL3_AD_CERTIFICATE_UNKNOWN: 'SSL3_AD_CERTIFICATE_UNKNOWN',
SSL3_AD_ILLEGAL_PARAMETER: 'SSL3_AD_ILLEGAL_PARAMETER',
TLS1_AD_DECRYPTION_FAILED: 'TLS1_AD_DECRYPTION_FAILED',
TLS1_AD_RECORD_OVERFLOW: 'TLS1_AD_RECORD_OVERFLOW',
TLS1_AD_UNKNOWN_CA: 'TLS1_AD_UNKNOWN_CA',
TLS1_AD_ACCESS_DENIED: 'TLS1_AD_ACCESS_DENIED',
TLS1_AD_DECODE_ERROR: 'TLS1_AD_DECODE_ERROR',
TLS1_AD_DECRYPT_ERROR: 'TLS1_AD_DECRYPT_ERROR',
TLS1_AD_EXPORT_RESTRICTION: 'TLS1_AD_EXPORT_RESTRICTION',
TLS1_AD_PROTOCOL_VERSION: 'TLS1_AD_PROTOCOL_VERSION',
TLS1_AD_INSUFFICIENT_SECURITY: 'TLS1_AD_INSUFFICIENT_SECURITY',
TLS1_AD_INTERNAL_ERROR: 'TLS1_AD_INTERNAL_ERROR',
TLS1_AD_USER_CANCELLED: 'TLS1_AD_USER_CANCELLED',
TLS1_AD_NO_RENEGOTIATION: 'TLS1_AD_NO_RENEGOTIATION',
TLS1_AD_UNSUPPORTED_EXTENSION: 'TLS1_AD_UNSUPPORTED_EXTENSION',
TLS1_AD_CERTIFICATE_UNOBTAINABLE: 'TLS1_AD_CERTIFICATE_UNOBTAINABLE',
TLS1_AD_UNRECOGNIZED_NAME: 'TLS1_AD_UNRECOGNIZED_NAME',
TLS1_AD_BAD_CERTIFICATE_STATUS_RESPONSE: 'TLS1_AD_BAD_CERTIFICATE_STATUS_RESPONSE',
TLS1_AD_BAD_CERTIFICATE_HASH_VALUE: 'TLS1_AD_BAD_CERTIFICATE_HASH_VALUE',
TLS1_AD_UNKNOWN_PSK_IDENTITY: 'TLS1_AD_UNKNOWN_PSK_IDENTITY'
}
# struct format strings for parsing buffer lengths
# don't forget, you have to pad a 3-byte value with \x00
_SIZE_FORMATS = ['!B', '!H', '!I', '!I']
def parse_variable_array(buf, lenbytes):
"""
Parse an array described using the 'Type name<x..y>' syntax from the spec.
Reads a length at the start of buf and returns that many bytes after it,
in a tuple with the TOTAL bytes consumed (including the length prefix). This
does not check that the array is the right length for any given datatype.
"""
# first have to figure out how to parse length
assert lenbytes <= 4 # pretty sure 4 is impossible, too
size_format = _SIZE_FORMATS[lenbytes - 1]
padding = b'\x00' if lenbytes == 3 else b''
# read off the length
size = struct.unpack(size_format, padding + buf[:lenbytes])[0]
# read the actual data
data = buf[lenbytes:lenbytes + size]
# if len(data) != size: insufficient data
return data, size + lenbytes
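# Example (illustrative, not from upstream dpkt): a 1-byte length prefix
# followed by its payload consumes 1 + 3 = 4 bytes in total:
# parse_variable_array(b'\x03abcXYZ', 1) == (b'abc', 4)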
def parse_extensions(buf):
"""
Parse TLS extensions in passed buf. Returns an ordered list of extension tuples with
ordinal extension type as first value and extension data as second value.
Passed buf must start with the 2-byte extensions length TLV.
http://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml
"""
extensions_length = struct.unpack('!H', buf[:2])[0]
extensions = []
pointer = 2
while pointer < extensions_length:
ext_type = struct.unpack('!H', buf[pointer:pointer + 2])[0]
pointer += 2
ext_data, parsed = parse_variable_array(buf[pointer:], 2)
extensions.append((ext_type, ext_data))
pointer += parsed
return extensions
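# Example (illustrative, not from upstream dpkt): a 2-byte extensions length,
# then a single extension of type 0 carrying 4 bytes of data:
# parse_extensions(b'\x00\x08\x00\x00\x00\x04abcd') == [(0, b'abcd')]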
class SSL3Exception(Exception):
pass
class TLSRecord(dpkt.Packet):
"""
SSLv3 or TLSv1+ packet.
In addition to the fields specified in the header, there are
compressed and encrypted flags, indicating whether, in the language
of the spec, this is a TLSPlaintext, TLSCompressed, or
TLSCiphertext. The application will have to figure out when it's
appropriate to change these values.
"""
__hdr__ = (
('type', 'B', 0),
('version', 'H', 0),
('length', 'H', 0),
)
def __init__(self, *args, **kwargs):
# assume plaintext unless specified otherwise in arguments
self.compressed = kwargs.pop('compressed', False)
self.encrypted = kwargs.pop('encrypted', False)
# parent constructor
dpkt.Packet.__init__(self, *args, **kwargs)
# make sure length and data are consistent
self.length = len(self.data)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
header_length = self.__hdr_len__
self.data = buf[header_length:header_length + self.length]
# make sure buffer was long enough
if len(self.data) < self.length:
raise dpkt.NeedData('TLSRecord data was too short.')
# assume compressed and encrypted when it's been parsed from
# raw data
self.compressed = True
self.encrypted = True
class TLSChangeCipherSpec(dpkt.Packet):
"""
ChangeCipherSpec message is just a single byte with value 1
"""
__hdr__ = (('type', 'B', 1),)
class TLSAppData(str):
"""
As far as TLSRecord is concerned, AppData is just an opaque blob.
"""
pass
class TLSAlert(dpkt.Packet):
__hdr__ = (
('level', 'B', 1),
('description', 'B', 0),
)
class TLSHelloRequest(dpkt.Packet):
__hdr__ = tuple()
class TLSClientHello(dpkt.Packet):
__hdr__ = (
('version', 'H', 0x0301),
('random', '32s', '\x00' * 32),
) # the rest is variable-length and has to be done manually
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
# now session, cipher suites, extensions are in self.data
self.session_id, pointer = parse_variable_array(self.data, 1)
# handle ciphersuites
ciphersuites, parsed = parse_variable_array(self.data[pointer:], 2)
pointer += parsed
num_ciphersuites = len(ciphersuites) // 2
self.ciphersuites = [
ssl_ciphersuites.BY_CODE.get(code, ssl_ciphersuites.get_unknown_ciphersuite(code))
for code in struct.unpack('!' + num_ciphersuites * 'H', ciphersuites)]
# check len(ciphersuites) % 2 == 0 ?
# compression methods
compression_methods, parsed = parse_variable_array(self.data[pointer:], 1)
pointer += parsed
self.compression_methods = struct.unpack('{0}B'.format(len(compression_methods)), compression_methods)
# Parse extensions if present
if len(self.data[pointer:]) >= 6:
self.extensions = parse_extensions(self.data[pointer:])
class TLSServerHello(dpkt.Packet):
__hdr__ = (
('version', 'H', 0x0301),
('random', '32s', '\x00' * 32),
) # session is variable, forcing rest to be manual
def unpack(self, buf):
try:
dpkt.Packet.unpack(self, buf)
self.session_id, pointer = parse_variable_array(self.data, 1)
# single cipher suite
code = struct.unpack('!H', self.data[pointer:pointer + 2])[0]
self.ciphersuite = \
ssl_ciphersuites.BY_CODE.get(code, ssl_ciphersuites.get_unknown_ciphersuite(code))
pointer += 2
# single compression method
self.compression_method = struct.unpack('!B', self.data[pointer:pointer + 1])[0]
pointer += 1
# Parse extensions if present
if len(self.data[pointer:]) >= 6:
self.extensions = parse_extensions(self.data[pointer:])
except struct.error:
# probably data too short
raise dpkt.NeedData
# XXX - legacy, deprecated
# for whatever reason these attributes were named differently than their sister attributes in TLSClientHello
@property
def cipher_suite(self):
deprecation_warning("TLSServerHello.cipher_suite is deprecated and renamed to .ciphersuite")
return self.ciphersuite
@property
def compression(self):
deprecation_warning("TLSServerHello.compression is deprecated and renamed to .compression_method")
return self.compression_method
class TLSCertificate(dpkt.Packet):
__hdr__ = tuple()
def unpack(self, buf):
try:
dpkt.Packet.unpack(self, buf)
all_certs, all_certs_len = parse_variable_array(self.data, 3)
self.certificates = []
pointer = 3
while pointer < all_certs_len:
cert, parsed = parse_variable_array(self.data[pointer:], 3)
self.certificates.append(cert)
pointer += parsed
except struct.error:
raise dpkt.NeedData
class TLSUnknownHandshake(dpkt.Packet):
__hdr__ = tuple()
TLSNewSessionTicket = TLSUnknownHandshake
TLSServerKeyExchange = TLSUnknownHandshake
TLSCertificateRequest = TLSUnknownHandshake
TLSServerHelloDone = TLSUnknownHandshake
TLSCertificateVerify = TLSUnknownHandshake
TLSClientKeyExchange = TLSUnknownHandshake
TLSFinished = TLSUnknownHandshake
# mapping of handshake type ids to their names
# and the classes that implement them
HANDSHAKE_TYPES = {
0: ('HelloRequest', TLSHelloRequest),
1: ('ClientHello', TLSClientHello),
2: ('ServerHello', TLSServerHello),
4: ('NewSessionTicket', TLSNewSessionTicket),
11: ('Certificate', TLSCertificate),
12: ('ServerKeyExchange', TLSServerKeyExchange),
13: ('CertificateRequest', TLSCertificateRequest),
14: ('ServerHelloDone', TLSServerHelloDone),
15: ('CertificateVerify', TLSCertificateVerify),
16: ('ClientKeyExchange', TLSClientKeyExchange),
20: ('Finished', TLSFinished),
}
class TLSHandshake(dpkt.Packet):
"""
A TLS Handshake message
This goes for all messages encapsulated in the Record layer, but especially
important for handshakes and app data: A message may be spread across a
number of TLSRecords, in addition to the possibility of there being more
than one in a given Record. You have to put together the contents of
TLSRecord's yourself.
"""
# struct.unpack can't handle the 3-byte int, so we parse it as bytes
# (and store it as bytes so dpkt doesn't get confused), and turn it into
# an int in a user-facing property
__hdr__ = (
('type', 'B', 0),
('length_bytes', '3s', 0),
)
__pprint_funcs__ = {
'length_bytes': lambda x: struct.unpack('!I', b'\x00' + x)[0]
}
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
# Wait, might there be more than one message of self.type?
embedded_type = HANDSHAKE_TYPES.get(self.type, None)
if embedded_type is None:
raise SSL3Exception('Unknown or invalid handshake type %d' %
self.type)
# only take the right number of bytes
self.data = self.data[:self.length]
if len(self.data) != self.length:
raise dpkt.NeedData
# get class out of embedded_type tuple
self.data = embedded_type[1](self.data)
@property
def length(self):
return struct.unpack('!I', b'\x00' + self.length_bytes)[0]
RECORD_TYPES = {
20: TLSChangeCipherSpec,
21: TLSAlert,
22: TLSHandshake,
23: TLSAppData,
}
class SSLFactory(object):
def __new__(cls, buf):
v = buf[1:3]
if v in SSL3_VERSION_BYTES:
return TLSRecord(buf)
# SSL2 has no characteristic header or magic bytes, so we just assume
# that the msg is an SSL2 msg if it is not detected as SSL3+
return SSL2(buf)
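# Example (illustrative, not from upstream dpkt):
# SSLFactory(b'\x16\x03\x01\x00\x00') -> TLSRecord (SSL3+ version bytes)
# SSLFactory(b'\x80\x1f' + b'\x00' * 31) -> SSL2 (no SSL3+ version bytes)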
def tls_multi_factory(buf):
"""
Attempt to parse one or more TLSRecord's out of buf
Args:
buf: string containing SSL/TLS messages. May have an incomplete record
on the end
Returns:
[TLSRecord]
int, total bytes consumed, != len(buf) if an incomplete record was left at
the end.
Raises SSL3Exception.
"""
i, n = 0, len(buf)
msgs = []
while i + 5 <= n:
v = buf[i + 1:i + 3]
if v in SSL3_VERSION_BYTES:
try:
msg = TLSRecord(buf[i:])
msgs.append(msg)
except dpkt.NeedData:
break
else:
raise SSL3Exception('Bad TLS version in buf: %r' % buf[i:i + 5])
i += len(msg)
return msgs, i
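# Example (illustrative, not from upstream dpkt): one complete
# ChangeCipherSpec record consumes all 6 bytes:
# tls_multi_factory(b'\x14\x03\x01\x00\x01\x01') -> ([<TLSRecord>], 6)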
_hexdecode = binascii.a2b_hex
class TestTLS(object):
"""
Test basic TLS functionality.
Test that each TLSRecord is correctly discovered and added to TLS.records
"""
@classmethod
def setup_class(cls):
cls.p = TLS(
b'\x16\x03\x00\x02\x06\x01\x00\x02\x02\x03\x03\x58\x5c\x2f\xf7\x2a\x65\x99\x49\x87\x71\xf5'
b'\x95\x14\xf1\x0a\xf6\x8c\x68\xf9\xef\x30\xd0\xda\xdc\x9e\x1a\xf6\x4d\x10\x91\x47\x6a\x00'
b'\x00\x84\xc0\x2b\xc0\x2c\xc0\x86\xc0\x87\xc0\x09\xc0\x23\xc0\x0a\xc0\x24\xc0\x72\xc0\x73'
b'\xc0\x08\xc0\x07\xc0\x2f\xc0\x30\xc0\x8a\xc0\x8b\xc0\x13\xc0\x27\xc0\x14\xc0\x28\xc0\x76'
b'\xc0\x77\xc0\x12\xc0\x11\x00\x9c\x00\x9d\xc0\x7a\xc0\x7b\x00\x2f\x00\x3c\x00\x35\x00\x3d'
b'\x00\x41\x00\xba\x00\x84\x00\xc0\x00\x0a\x00\x05\x00\x04\x00\x9e\x00\x9f\xc0\x7c\xc0\x7d'
b'\x00\x33\x00\x67\x00\x39\x00\x6b\x00\x45\x00\xbe\x00\x88\x00\xc4\x00\x16\x00\xa2\x00\xa3'
b'\xc0\x80\xc0\x81\x00\x32\x00\x40\x00\x38\x00\x6a\x00\x44\x00\xbd\x00\x87\x00\xc3\x00\x13'
b'\x00\x66\x01\x00\x01\x55\x00\x05\x00\x05\x01\x00\x00\x00\x00\x00\x00\x00\x11\x00\x0f\x00'
b'\x00\x0c\x77\x77\x77\x2e\x69\x61\x6e\x61\x2e\x6f\x72\x67\xff\x01\x00\x01\x00\x00\x23\x00'
b'\x00\x00\x0a\x00\x0c\x00\x0a\x00\x13\x00\x15\x00\x17\x00\x18\x00\x19\x00\x0b\x00\x02\x01'
b'\x00\x00\x0d\x00\x1c\x00\x1a\x04\x01\x04\x02\x04\x03\x05\x01\x05\x03\x06\x01\x06\x03\x03'
b'\x01\x03\x02\x03\x03\x02\x01\x02\x02\x02\x03\x00\x15\x00\xf4\x00\xf2\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
)
# multiple records in first handshake taken from TLSv1.2 capture with 73 cipher suites
# https://bugs.wireshark.org/bugzilla/attachment.cgi?id=11612
# This data is extracted from and verified by Wireshark
cls.p2 = TLS(
b'\x16\x03\x03\x00\x42\x02\x00\x00\x3e\x03\x03\x52\x36\x2c\x10\xa2'
b'\x66\x5e\x32\x3a\x2a\xdb\x4b\x9d\xa0\xc1\x0d\x4a\x88\x23\x71\x92'
b'\x72\xf8\xb4\xc9\x7a\xf2\x4f\x92\x78\x48\x12\x00\xc0\x30\x01\x00'
b'\x16\xff\x01\x00\x01\x00\x00\x0b\x00\x04\x03\x00\x01\x02\x00\x23'
b'\x00\x00\x00\x0f\x00\x01\x01\x16\x03\x03\x01\xc3\x0b\x00\x01\xbf'
b'\x00\x01\xbc\x00\x01\xb9\x30\x82\x01\xb5\x30\x82\x01\x1e\x02\x09'
b'\x00\xf4\xa7\x2f\xd3\xe8\xfc\x37\xc4\x30\x0d\x06\x09\x2a\x86\x48'
b'\x86\xf7\x0d\x01\x01\x05\x05\x00\x30\x1f\x31\x1d\x30\x1b\x06\x03'
b'\x55\x04\x03\x0c\x14\x54\x65\x73\x74\x20\x43\x65\x72\x74\x69\x66'
b'\x69\x63\x61\x74\x65\x20\x52\x53\x41\x30\x1e\x17\x0d\x31\x33\x30'
b'\x39\x31\x35\x32\x31\x35\x31\x31\x30\x5a\x17\x0d\x32\x33\x30\x39'
b'\x31\x33\x32\x31\x35\x31\x31\x30\x5a\x30\x1f\x31\x1d\x30\x1b\x06'
b'\x03\x55\x04\x03\x0c\x14\x54\x65\x73\x74\x20\x43\x65\x72\x74\x69'
b'\x66\x69\x63\x61\x74\x65\x20\x52\x53\x41\x30\x81\x9f\x30\x0d\x06'
b'\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01\x01\x05\x00\x03\x81\x8d\x00'
b'\x30\x81\x89\x02\x81\x81\x00\xac\x35\x2a\x93\x7f\xc5\x4f\x18\x98'
b'\xb2\x9f\xa0\xfb\x34\xe6\xe2\x8b\x9e\xd7\x46\x91\x07\xd8\x48\x8a'
b'\xa8\x43\x8b\xfa\xc0\xff\xb7\xca\xd5\x5f\x58\xbe\xe4\x2f\x20\x1c'
b'\x3e\xf9\x42\xf4\xb0\x27\x9a\xb6\xb0\x01\xbf\x97\x40\xaa\xc4\x2a'
b'\x1c\xac\x93\x70\xb4\x8e\x94\xda\x38\xcb\xb4\x5e\x14\xb6\xcc\x19'
b'\x66\xe8\x06\xf2\x99\xec\x49\x0c\x91\x09\x96\xe6\x9a\xe1\x66\xe5'
b'\x84\x64\x2f\xa2\x4c\xe3\x21\xac\x42\x75\xec\x8c\xe9\xf6\xd9\x9e'
b'\x40\xcb\x1d\x02\xc3\x8c\x68\xf0\x2b\x46\x1c\xb3\x27\x39\x75\x0e'
b'\x2a\xc4\xd9\x9c\xb6\xb4\x4d\x02\x03\x01\x00\x01\x30\x0d\x06\x09'
b'\x2a\x86\x48\x86\xf7\x0d\x01\x01\x05\x05\x00\x03\x81\x81\x00\x67'
b'\x43\x4c\xa8\xa4\x3e\xeb\x1b\x32\x28\x70\x8b\xdb\xeb\xfe\xf1\xb3'
b'\x70\x39\x95\x34\x33\x26\xef\x54\xb6\x22\xf9\xe1\xd5\xe6\xc3\x76'
b'\x96\xe5\xc1\x14\x61\x5b\xa5\xc2\x6c\xe7\xe6\xef\x00\x26\xec\xbc'
b'\x48\x27\xf5\x3d\x73\x66\x15\x37\x9c\xaa\x87\x97\xef\x22\xda\x58'
b'\x51\xbb\x33\xe9\xc8\x46\x44\xd1\xc9\x9d\x35\xcc\x66\x05\x29\xb4'
b'\x64\x5f\x6d\xe1\x21\x0d\x45\x68\xac\x06\x43\x15\xe1\xc6\xc4\xc8'
b'\xb4\xfa\xc3\x34\xfd\x49\x39\xcb\x22\x01\x8a\x30\x34\x50\xb0\x24'
b'\x55\x7b\x6c\x6d\x5c\xf6\x33\x1a\x6c\xf6\x77\xa6\x2c\x9a\x32\x16'
b'\x03\x03\x00\xcd\x0c\x00\x00\xc9\x03\x00\x17\x41\x04\x97\xe0\xa1'
b'\x4e\xd7\x18\xa0\xe8\x17\xbf\xe1\xa0\xc1\xad\x25\x65\xfd\x35\x94'
b'\x1b\xe1\xc2\xdf\x8a\x23\xdf\xef\xfb\xd3\xed\xe5\x4f\x61\x04\xf0'
b'\x0b\x73\x26\x22\xf5\x59\x05\xc3\x31\x30\xf0\xba\xe0\x51\x9d\x33'
b'\xa4\x58\xc9\x7c\x9e\x94\xad\xf7\x47\x78\x1d\xf4\x3b\x06\x01\x00'
b'\x80\x4a\x39\x59\xd3\xdb\xbe\x40\x32\x7a\x44\x06\xe6\x2a\x2b\xfc'
b'\x5d\xc6\x45\x32\x19\xf0\x56\xb4\xbf\x60\x77\xa1\xbe\xde\xaf\xfb'
b'\x36\xb1\x03\x2a\xc2\xa2\xed\x12\xb0\x9b\xad\x4b\x68\x9b\xd1\xe0'
b'\xac\x4a\xa1\x28\x11\x5e\xa6\xd1\x4d\x7a\xc3\xd8\xcc\x49\x33\x43'
b'\xeb\x32\x8a\xd8\x5e\x4f\xb1\xd9\xcc\x2e\xfa\x82\x7b\x28\x50\xfb'
b'\x7e\x8a\x0e\x85\xd7\x6c\xae\xc9\x89\xc0\x33\x63\x90\x46\x9e\x67'
b'\x84\x40\x2e\xc5\x09\xe4\x36\x0c\x35\xc9\x8c\x4c\x50\x9f\x66\x84'
b'\xb0\x6e\x84\x61\x42\x79\x20\x19\x63\xfe\xfa\x25\xe7\x3f\xa0\xac'
b'\xb3\x16\x03\x03\x00\x04\x0e\x00\x00\x00'
)
def test_records_length(self):
assert (len(self.p.records) == 1)
assert (len(self.p2.records) == 4)
def test_record_type(self):
assert (self.p.records[0].type == 22)
assert (all([rec.type == 22 for rec in self.p2.records]))
def test_record_version(self):
assert (self.p.records[0].version == 768)
assert (all([rec.version == 771 for rec in self.p2.records]))
class TestTLSRecord(object):
"""
Test basic TLSRecord functionality
    For this test, the contents of the record don't matter, since we're not parsing the next layer.
"""
@classmethod
def setup_class(cls):
# add some extra data, to make sure length is parsed correctly
cls.p = TLSRecord(b'\x17\x03\x01\x00\x08abcdefghzzzzzzzzzzz')
def test_content_type(self):
assert (self.p.type == 23)
def test_version(self):
assert (self.p.version == 0x0301)
def test_length(self):
assert (self.p.length == 8)
def test_data(self):
assert (self.p.data == b'abcdefgh')
def test_initial_flags(self):
assert (self.p.compressed is True)
assert (self.p.encrypted is True)
def test_repack(self):
p2 = TLSRecord(type=23, version=0x0301, data=b'abcdefgh')
assert (p2.type == 23)
assert (p2.version == 0x0301)
assert (p2.length == 8)
assert (p2.data == b'abcdefgh')
assert (p2.pack() == self.p.pack())
def test_total_length(self):
# that len(p) includes header
assert (len(self.p) == 13)
def test_raises_need_data_when_buf_is_short(self):
import pytest
pytest.raises(dpkt.NeedData, TLSRecord, b'\x16\x03\x01\x00\x10abc')
class TestTLSChangeCipherSpec(object):
"""It's just a byte. This will be quick, I promise"""
@classmethod
def setup_class(cls):
cls.p = TLSChangeCipherSpec(b'\x01')
def test_parses(self):
assert (self.p.type == 1)
def test_total_length(self):
assert (len(self.p) == 1)
class TestTLSAppData(object):
"""AppData is basically just a string"""
def test_value(self):
d = TLSAppData('abcdefgh')
assert (d == 'abcdefgh')
class TestTLSHandshake(object):
@classmethod
def setup_class(cls):
cls.h = TLSHandshake(b'\x00\x00\x00\x01\xff')
def test_created_inside_message(self):
assert (isinstance(self.h.data, TLSHelloRequest) is True)
def test_length(self):
assert (self.h.length == 0x01)
def test_raises_need_data(self):
import pytest
pytest.raises(dpkt.NeedData, TLSHandshake, b'\x00\x00\x01\x01')
class TestClientHello(object):
"""This data is extracted from and verified by Wireshark"""
@classmethod
def setup_class(cls):
cls.data = _hexdecode(
b"01000199" # handshake header
b"0301" # version
b"5008220ce5e0e78b6891afe204498c9363feffbe03235a2d9e05b7d990eb708d" # rand
b"2009bc0192e008e6fa8fe47998fca91311ba30ddde14a9587dc674b11c3d3e5ed1" # session id
# cipher suites
b"005200ffc00ac0140088008700390038c00fc00500840035c007c009c011c0130045004400330032"
b"c00cc00ec002c0040096004100050004002fc008c01200160013c00dc003000ac006c010c00bc00100020001"
b"0100" # compression methods
# extensions
b"00fc0000000e000c0000096c6f63616c686f7374000a00080006001700180019000b000201000023"
b"00d0a50b2e9f618a9ea9bf493ef49b421835cd2f6b05bbe1179d8edf70d58c33d656e8696d36d7e7"
b"e0b9d3ecc0e4de339552fa06c64c0fcb550a334bc43944e2739ca342d15a9ebbe981ac87a0d38160"
b"507d47af09bdc16c5f0ee4cdceea551539382333226048a026d3a90a0535f4a64236467db8fee22b"
b"041af986ad0f253bc369137cd8d8cd061925461d7f4d7895ca9a4181ab554dad50360ac31860e971"
b"483877c9335ac1300c5e78f3e56f3b8e0fc16358fcaceefd5c8d8aaae7b35be116f8832856ca6114"
b"4fcdd95e071b94d0cf7233740000"
b"FFFFFFFFFFFFFFFF") # random garbage
cls.p = TLSHandshake(cls.data)
def test_client_hello_constructed(self):
"""Make sure the correct class was constructed"""
# print self.p
assert (isinstance(self.p.data, TLSClientHello) is True)
# def testClientDateCorrect(self):
# self.assertEqual(self.p.random_unixtime, 1342710284)
def test_client_random_correct(self):
assert (self.p.data.random == _hexdecode(b'5008220ce5e0e78b6891afe204498c9363feffbe03235a2d9e05b7d990eb708d'))
def test_ciphersuites(self):
assert (tuple([c.code for c in self.p.data.ciphersuites]) == struct.unpack('!{0}H'.format(
len(self.p.data.ciphersuites)), _hexdecode(
b'00ffc00ac0140088008700390038c00fc00500840035c007c009c011c0130045004400330032c00c'
b'c00ec002c0040096004100050004002fc008c01200160013c00dc003000ac006c010c00bc00100020001')))
assert (len(self.p.data.ciphersuites) == 41)
def test_session_id(self):
assert (self.p.data.session_id == _hexdecode(b'09bc0192e008e6fa8fe47998fca91311ba30ddde14a9587dc674b11c3d3e5ed1'))
def test_compression_methods(self):
assert (list(self.p.data.compression_methods) == [0x00, ])
def test_total_length(self):
assert (len(self.p) == 413)
class TestServerHello(object):
"""Again, from Wireshark"""
@classmethod
def setup_class(cls):
cls.data = _hexdecode(
b'0200004d03015008220c8ec43c5462315a7c99f5d5b6bff009ad285b51dc18485f352e9fdecd2009'
b'bc0192e008e6fa8fe47998fca91311ba30ddde14a9587dc674b11c3d3e5ed10002000005ff01000100')
cls.p = TLSHandshake(cls.data)
def test_constructed(self):
assert (isinstance(self.p.data, TLSServerHello) is True)
# def testDateCorrect(self):
# self.assertEqual(self.p.random_unixtime, 1342710284)
def test_random_correct(self):
assert (self.p.data.random == _hexdecode(b'5008220c8ec43c5462315a7c99f5d5b6bff009ad285b51dc18485f352e9fdecd'))
def test_ciphersuite(self):
assert (self.p.data.ciphersuite.name == 'TLS_RSA_WITH_NULL_SHA')
assert (self.p.data.cipher_suite.name == 'TLS_RSA_WITH_NULL_SHA') # deprecated; still test for coverage
def test_compression_method(self):
assert (self.p.data.compression_method == 0)
assert (self.p.data.compression == 0) # deprecated; still test for coverage
def test_total_length(self):
assert (len(self.p) == 81)
class TestTLSCertificate(object):
"""We use a 2016 certificate record from iana.org as test data."""
@classmethod
def setup_class(cls):
cls.p = TLSHandshake(
b'\x0b\x00\x0b\x45\x00\x0b\x42\x00\x06\x87\x30\x82\x06\x83\x30\x82\x05\x6b\xa0\x03\x02\x01\x02\x02\x10\x09\xca'
b'\xbb\xe2\x19\x1c\x8f\x56\x9d\xd4\xb6\xdd\x25\x0f\x21\xd8\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01\x0b'
b'\x05\x00\x30\x70\x31\x0b\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55\x53\x31\x15\x30\x13\x06\x03\x55\x04\x0a\x13'
b'\x0c\x44\x69\x67\x69\x43\x65\x72\x74\x20\x49\x6e\x63\x31\x19\x30\x17\x06\x03\x55\x04\x0b\x13\x10\x77\x77\x77'
b'\x2e\x64\x69\x67\x69\x63\x65\x72\x74\x2e\x63\x6f\x6d\x31\x2f\x30\x2d\x06\x03\x55\x04\x03\x13\x26\x44\x69\x67'
b'\x69\x43\x65\x72\x74\x20\x53\x48\x41\x32\x20\x48\x69\x67\x68\x20\x41\x73\x73\x75\x72\x61\x6e\x63\x65\x20\x53'
b'\x65\x72\x76\x65\x72\x20\x43\x41\x30\x1e\x17\x0d\x31\x34\x31\x30\x32\x37\x30\x30\x30\x30\x30\x30\x5a\x17\x0d'
b'\x31\x38\x30\x31\x30\x33\x31\x32\x30\x30\x30\x30\x5a\x30\x81\xa3\x31\x0b\x30\x09\x06\x03\x55\x04\x06\x13\x02'
b'\x55\x53\x31\x13\x30\x11\x06\x03\x55\x04\x08\x13\x0a\x43\x61\x6c\x69\x66\x6f\x72\x6e\x69\x61\x31\x14\x30\x12'
b'\x06\x03\x55\x04\x07\x13\x0b\x4c\x6f\x73\x20\x41\x6e\x67\x65\x6c\x65\x73\x31\x3c\x30\x3a\x06\x03\x55\x04\x0a'
b'\x13\x33\x49\x6e\x74\x65\x72\x6e\x65\x74\x20\x43\x6f\x72\x70\x6f\x72\x61\x74\x69\x6f\x6e\x20\x66\x6f\x72\x20'
b'\x41\x73\x73\x69\x67\x6e\x65\x64\x20\x4e\x61\x6d\x65\x73\x20\x61\x6e\x64\x20\x4e\x75\x6d\x62\x65\x72\x73\x31'
b'\x16\x30\x14\x06\x03\x55\x04\x0b\x13\x0d\x49\x54\x20\x4f\x70\x65\x72\x61\x74\x69\x6f\x6e\x73\x31\x13\x30\x11'
b'\x06\x03\x55\x04\x03\x0c\x0a\x2a\x2e\x69\x61\x6e\x61\x2e\x6f\x72\x67\x30\x82\x02\x22\x30\x0d\x06\x09\x2a\x86'
b'\x48\x86\xf7\x0d\x01\x01\x01\x05\x00\x03\x82\x02\x0f\x00\x30\x82\x02\x0a\x02\x82\x02\x01\x00\x9d\xbd\xfd\xde'
b'\xb5\xca\xe5\x3a\x55\x97\x47\xe2\xfd\xa6\x37\x28\xe4\xab\xa6\x0f\x18\xb7\x9a\x69\xf0\x33\x10\xbf\x01\x64\xe5'
b'\xee\x7d\xb6\xb1\x5b\xf5\x6d\xf2\x3f\xdd\xba\xe6\xa1\xbb\x38\x44\x9b\x8c\x88\x3f\x18\x10\x2b\xbd\x8b\xb6\x55'
b'\xac\x0e\x2d\xac\x2e\xe3\xed\x5c\xf4\x31\x58\x68\xd2\xc5\x98\x06\x82\x84\x85\x4b\x24\x89\x4d\xcd\x4b\xd3\x78'
b'\x11\xf0\xad\x3a\x28\x2c\xd4\xb4\xe5\x99\xff\xd0\x7d\x8d\x2d\x3f\x24\x78\x55\x4f\x81\x02\x0b\x32\x0e\xe1\x2f'
b'\x44\x94\x8e\x2e\xa1\xed\xbc\x99\x0b\x83\x0c\xa5\xcc\xa6\xb4\xa8\x39\xfb\x27\xb5\x18\x50\xc9\x84\x7e\xac\x74'
b'\xf2\x66\x09\xeb\x24\x36\x5b\x97\x51\xfb\x1c\x32\x08\xf5\x69\x13\xba\xcb\xca\xe4\x92\x01\x34\x7c\x78\xb7\xe5'
b'\x4a\x9d\x99\x97\x94\x04\xc3\x7f\x00\xfb\x65\xdb\x84\x9f\xd7\x5e\x3a\x68\x77\x0c\x30\xf2\xab\xe6\x5b\x33\x25'
b'\x6f\xb5\x9b\x45\x00\x50\xb0\x0d\x81\x39\xd4\xd8\x0d\x36\xf7\xbc\x46\xda\xf3\x03\xe4\x8f\x0f\x07\x91\xb2\xfd'
b'\xd7\x2e\xc6\x0b\x2c\xb3\xad\x53\x3c\x3f\x28\x8c\x9c\x19\x4e\x49\x33\x7a\x69\xc4\x96\x73\x1f\x08\x6d\x4f\x1f'
b'\x98\x25\x90\x07\x13\xe2\xa5\x51\xd0\x5c\xb6\x05\x75\x67\x85\x0d\x91\xe6\x00\x1c\x4c\xe2\x71\x76\xf0\x95\x78'
b'\x73\xa9\x5b\x88\x0a\xcb\xec\x19\xe7\xbd\x9b\xcf\x12\x86\xd0\x45\x2b\x73\x78\x9c\x41\x90\x5d\xd4\x70\x97\x1c'
b'\xd7\x3a\xea\x52\xc7\x7b\x08\x0c\xd7\x79\xaf\x58\x23\x4f\x33\x72\x25\xc2\x6f\x87\xa8\xc1\x3e\x2a\x65\xe9\xdd'
b'\x4e\x03\xa5\xb4\x1d\x7e\x06\xb3\x35\x3f\x38\x12\x9b\x23\x27\xa5\x31\xec\x96\x27\xa2\x1d\xc4\x23\x73\x3a\xa0'
b'\x29\xd4\x98\x94\x48\xba\x33\x22\x89\x1c\x1a\x56\x90\xdd\xf2\xd2\x5c\x8e\xc8\xaa\xa8\x94\xb1\x4a\xa9\x21\x30'
b'\xc6\xb6\xd9\x69\xa2\x1f\xf6\x71\xb6\x0c\x4c\x92\x3a\x94\xa9\x3e\xa1\xdd\x04\x92\xc9\x33\x93\xca\x6e\xdd\x61'
b'\xf3\x3c\xa7\x7e\x92\x08\xd0\x1d\x6b\xd1\x51\x07\x66\x2e\xc0\x88\x73\x3d\xf4\xc8\x76\xa7\xe1\x60\x8b\x82\x97'
b'\x3a\x0f\x75\x92\xe8\x4e\xd1\x55\x79\xd1\x81\xe7\x90\x24\xae\x8a\x7e\x4b\x9f\x00\x78\xeb\x20\x05\xb2\x3f\x9d'
b'\x09\xa1\xdf\x1b\xbc\x7d\xe2\xa5\xa6\x08\x5a\x36\x46\xd9\xfa\xdb\x0e\x9d\xa2\x73\xa5\xf4\x03\xcd\xd4\x28\x31'
b'\xce\x6f\x0c\xa4\x68\x89\x58\x56\x02\xbb\x8b\xc3\x6b\xb3\xbe\x86\x1f\xf6\xd1\xa6\x2e\x35\x02\x03\x01\x00\x01'
b'\xa3\x82\x01\xe3\x30\x82\x01\xdf\x30\x1f\x06\x03\x55\x1d\x23\x04\x18\x30\x16\x80\x14\x51\x68\xff\x90\xaf\x02'
b'\x07\x75\x3c\xcc\xd9\x65\x64\x62\xa2\x12\xb8\x59\x72\x3b\x30\x1d\x06\x03\x55\x1d\x0e\x04\x16\x04\x14\xc7\xd0'
b'\xac\xef\x89\x8b\x20\xe4\xb9\x14\x66\x89\x33\x03\x23\x94\xf6\xbf\x3a\x61\x30\x1f\x06\x03\x55\x1d\x11\x04\x18'
b'\x30\x16\x82\x0a\x2a\x2e\x69\x61\x6e\x61\x2e\x6f\x72\x67\x82\x08\x69\x61\x6e\x61\x2e\x6f\x72\x67\x30\x0e\x06'
b'\x03\x55\x1d\x0f\x01\x01\xff\x04\x04\x03\x02\x05\xa0\x30\x1d\x06\x03\x55\x1d\x25\x04\x16\x30\x14\x06\x08\x2b'
b'\x06\x01\x05\x05\x07\x03\x01\x06\x08\x2b\x06\x01\x05\x05\x07\x03\x02\x30\x75\x06\x03\x55\x1d\x1f\x04\x6e\x30'
b'\x6c\x30\x34\xa0\x32\xa0\x30\x86\x2e\x68\x74\x74\x70\x3a\x2f\x2f\x63\x72\x6c\x33\x2e\x64\x69\x67\x69\x63\x65'
b'\x72\x74\x2e\x63\x6f\x6d\x2f\x73\x68\x61\x32\x2d\x68\x61\x2d\x73\x65\x72\x76\x65\x72\x2d\x67\x33\x2e\x63\x72'
b'\x6c\x30\x34\xa0\x32\xa0\x30\x86\x2e\x68\x74\x74\x70\x3a\x2f\x2f\x63\x72\x6c\x34\x2e\x64\x69\x67\x69\x63\x65'
b'\x72\x74\x2e\x63\x6f\x6d\x2f\x73\x68\x61\x32\x2d\x68\x61\x2d\x73\x65\x72\x76\x65\x72\x2d\x67\x33\x2e\x63\x72'
b'\x6c\x30\x42\x06\x03\x55\x1d\x20\x04\x3b\x30\x39\x30\x37\x06\x09\x60\x86\x48\x01\x86\xfd\x6c\x01\x01\x30\x2a'
b'\x30\x28\x06\x08\x2b\x06\x01\x05\x05\x07\x02\x01\x16\x1c\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x64'
b'\x69\x67\x69\x63\x65\x72\x74\x2e\x63\x6f\x6d\x2f\x43\x50\x53\x30\x81\x83\x06\x08\x2b\x06\x01\x05\x05\x07\x01'
b'\x01\x04\x77\x30\x75\x30\x24\x06\x08\x2b\x06\x01\x05\x05\x07\x30\x01\x86\x18\x68\x74\x74\x70\x3a\x2f\x2f\x6f'
b'\x63\x73\x70\x2e\x64\x69\x67\x69\x63\x65\x72\x74\x2e\x63\x6f\x6d\x30\x4d\x06\x08\x2b\x06\x01\x05\x05\x07\x30'
b'\x02\x86\x41\x68\x74\x74\x70\x3a\x2f\x2f\x63\x61\x63\x65\x72\x74\x73\x2e\x64\x69\x67\x69\x63\x65\x72\x74\x2e'
b'\x63\x6f\x6d\x2f\x44\x69\x67\x69\x43\x65\x72\x74\x53\x48\x41\x32\x48\x69\x67\x68\x41\x73\x73\x75\x72\x61\x6e'
b'\x63\x65\x53\x65\x72\x76\x65\x72\x43\x41\x2e\x63\x72\x74\x30\x0c\x06\x03\x55\x1d\x13\x01\x01\xff\x04\x02\x30'
b'\x00\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01\x0b\x05\x00\x03\x82\x01\x01\x00\x70\x31\x4c\x38\xe7\xc0'
b'\x2f\xd8\x08\x10\x50\x0b\x9d\xf6\xda\xe8\x5d\xe9\xb2\x3e\x29\xfb\xd6\x8b\xfd\xb5\xf2\x34\x11\xc8\x9a\xcf\xaf'
b'\x9a\xe0\x5a\xf9\x12\x3a\x8a\xa6\xbc\xe6\x95\x4a\x4e\x68\xdc\x7c\xfc\x48\x0a\x65\xd7\x6f\x22\x9c\x4b\xd5\xf5'
b'\x67\x4b\x0c\x9a\xc6\xd0\x6a\x37\xa1\xa1\xc1\x45\xc3\x95\x61\x20\xb8\xef\xe6\x7c\x88\x7a\xb4\xff\x7d\x6a\xa9'
b'\x50\xff\x36\x98\xf2\x7c\x4a\x19\xd5\x9d\x93\xa3\x9a\xca\x5a\x7b\x6d\x6c\x75\xe3\x49\x74\xe5\x0f\x5a\x59\x00'
b'\x05\xb3\xcb\x66\x5d\xdb\xd7\x07\x4f\x9f\xcb\xcb\xf9\xc5\x02\x28\xd5\xe2\x55\x96\xb6\x4a\xda\x16\x0b\x48\xf7'
b'\x7a\x93\xaa\xce\xd2\x26\x17\xbf\xe0\x05\xe0\x0f\xe2\x0a\x53\x2a\x0a\xdc\xb8\x18\xc8\x78\xdc\x5d\x66\x49\x27'
b'\x77\x77\xca\x1a\x81\x4e\x21\xd0\xb5\x33\x08\xaf\x40\x78\xbe\x45\x54\x71\x5e\x4c\xe4\x82\x8b\x01\x2f\x25\xff'
b'\xa1\x3a\x6c\xeb\x30\xd2\x0a\x75\xde\xba\x8a\x34\x4e\x41\xd6\x27\xfa\x63\x8f\xef\xf3\x8a\x30\x63\xa0\x18\x75'
b'\x19\xb3\x9b\x05\x3f\x71\x34\xd9\xcd\x83\xe6\x09\x1a\xcc\xf5\xd2\xe3\xa0\x5e\xdf\xa1\xdf\xbe\x18\x1a\x87\xad'
b'\x86\xba\x24\xfe\x6b\x97\xfe\x00\x04\xb5\x30\x82\x04\xb1\x30\x82\x03\x99\xa0\x03\x02\x01\x02\x02\x10\x04\xe1'
b'\xe7\xa4\xdc\x5c\xf2\xf3\x6d\xc0\x2b\x42\xb8\x5d\x15\x9f\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01\x0b'
b'\x05\x00\x30\x6c\x31\x0b\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55\x53\x31\x15\x30\x13\x06\x03\x55\x04\x0a\x13'
b'\x0c\x44\x69\x67\x69\x43\x65\x72\x74\x20\x49\x6e\x63\x31\x19\x30\x17\x06\x03\x55\x04\x0b\x13\x10\x77\x77\x77'
b'\x2e\x64\x69\x67\x69\x63\x65\x72\x74\x2e\x63\x6f\x6d\x31\x2b\x30\x29\x06\x03\x55\x04\x03\x13\x22\x44\x69\x67'
b'\x69\x43\x65\x72\x74\x20\x48\x69\x67\x68\x20\x41\x73\x73\x75\x72\x61\x6e\x63\x65\x20\x45\x56\x20\x52\x6f\x6f'
b'\x74\x20\x43\x41\x30\x1e\x17\x0d\x31\x33\x31\x30\x32\x32\x31\x32\x30\x30\x30\x30\x5a\x17\x0d\x32\x38\x31\x30'
b'\x32\x32\x31\x32\x30\x30\x30\x30\x5a\x30\x70\x31\x0b\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55\x53\x31\x15\x30'
b'\x13\x06\x03\x55\x04\x0a\x13\x0c\x44\x69\x67\x69\x43\x65\x72\x74\x20\x49\x6e\x63\x31\x19\x30\x17\x06\x03\x55'
b'\x04\x0b\x13\x10\x77\x77\x77\x2e\x64\x69\x67\x69\x63\x65\x72\x74\x2e\x63\x6f\x6d\x31\x2f\x30\x2d\x06\x03\x55'
b'\x04\x03\x13\x26\x44\x69\x67\x69\x43\x65\x72\x74\x20\x53\x48\x41\x32\x20\x48\x69\x67\x68\x20\x41\x73\x73\x75'
b'\x72\x61\x6e\x63\x65\x20\x53\x65\x72\x76\x65\x72\x20\x43\x41\x30\x82\x01\x22\x30\x0d\x06\x09\x2a\x86\x48\x86'
b'\xf7\x0d\x01\x01\x01\x05\x00\x03\x82\x01\x0f\x00\x30\x82\x01\x0a\x02\x82\x01\x01\x00\xb6\xe0\x2f\xc2\x24\x06'
b'\xc8\x6d\x04\x5f\xd7\xef\x0a\x64\x06\xb2\x7d\x22\x26\x65\x16\xae\x42\x40\x9b\xce\xdc\x9f\x9f\x76\x07\x3e\xc3'
b'\x30\x55\x87\x19\xb9\x4f\x94\x0e\x5a\x94\x1f\x55\x56\xb4\xc2\x02\x2a\xaf\xd0\x98\xee\x0b\x40\xd7\xc4\xd0\x3b'
b'\x72\xc8\x14\x9e\xef\x90\xb1\x11\xa9\xae\xd2\xc8\xb8\x43\x3a\xd9\x0b\x0b\xd5\xd5\x95\xf5\x40\xaf\xc8\x1d\xed'
b'\x4d\x9c\x5f\x57\xb7\x86\x50\x68\x99\xf5\x8a\xda\xd2\xc7\x05\x1f\xa8\x97\xc9\xdc\xa4\xb1\x82\x84\x2d\xc6\xad'
b'\xa5\x9c\xc7\x19\x82\xa6\x85\x0f\x5e\x44\x58\x2a\x37\x8f\xfd\x35\xf1\x0b\x08\x27\x32\x5a\xf5\xbb\x8b\x9e\xa4'
b'\xbd\x51\xd0\x27\xe2\xdd\x3b\x42\x33\xa3\x05\x28\xc4\xbb\x28\xcc\x9a\xac\x2b\x23\x0d\x78\xc6\x7b\xe6\x5e\x71'
b'\xb7\x4a\x3e\x08\xfb\x81\xb7\x16\x16\xa1\x9d\x23\x12\x4d\xe5\xd7\x92\x08\xac\x75\xa4\x9c\xba\xcd\x17\xb2\x1e'
b'\x44\x35\x65\x7f\x53\x25\x39\xd1\x1c\x0a\x9a\x63\x1b\x19\x92\x74\x68\x0a\x37\xc2\xc2\x52\x48\xcb\x39\x5a\xa2'
b'\xb6\xe1\x5d\xc1\xdd\xa0\x20\xb8\x21\xa2\x93\x26\x6f\x14\x4a\x21\x41\xc7\xed\x6d\x9b\xf2\x48\x2f\xf3\x03\xf5'
b'\xa2\x68\x92\x53\x2f\x5e\xe3\x02\x03\x01\x00\x01\xa3\x82\x01\x49\x30\x82\x01\x45\x30\x12\x06\x03\x55\x1d\x13'
b'\x01\x01\xff\x04\x08\x30\x06\x01\x01\xff\x02\x01\x00\x30\x0e\x06\x03\x55\x1d\x0f\x01\x01\xff\x04\x04\x03\x02'
b'\x01\x86\x30\x1d\x06\x03\x55\x1d\x25\x04\x16\x30\x14\x06\x08\x2b\x06\x01\x05\x05\x07\x03\x01\x06\x08\x2b\x06'
b'\x01\x05\x05\x07\x03\x02\x30\x34\x06\x08\x2b\x06\x01\x05\x05\x07\x01\x01\x04\x28\x30\x26\x30\x24\x06\x08\x2b'
b'\x06\x01\x05\x05\x07\x30\x01\x86\x18\x68\x74\x74\x70\x3a\x2f\x2f\x6f\x63\x73\x70\x2e\x64\x69\x67\x69\x63\x65'
b'\x72\x74\x2e\x63\x6f\x6d\x30\x4b\x06\x03\x55\x1d\x1f\x04\x44\x30\x42\x30\x40\xa0\x3e\xa0\x3c\x86\x3a\x68\x74'
b'\x74\x70\x3a\x2f\x2f\x63\x72\x6c\x34\x2e\x64\x69\x67\x69\x63\x65\x72\x74\x2e\x63\x6f\x6d\x2f\x44\x69\x67\x69'
b'\x43\x65\x72\x74\x48\x69\x67\x68\x41\x73\x73\x75\x72\x61\x6e\x63\x65\x45\x56\x52\x6f\x6f\x74\x43\x41\x2e\x63'
b'\x72\x6c\x30\x3d\x06\x03\x55\x1d\x20\x04\x36\x30\x34\x30\x32\x06\x04\x55\x1d\x20\x00\x30\x2a\x30\x28\x06\x08'
b'\x2b\x06\x01\x05\x05\x07\x02\x01\x16\x1c\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x64\x69\x67\x69\x63'
b'\x65\x72\x74\x2e\x63\x6f\x6d\x2f\x43\x50\x53\x30\x1d\x06\x03\x55\x1d\x0e\x04\x16\x04\x14\x51\x68\xff\x90\xaf'
b'\x02\x07\x75\x3c\xcc\xd9\x65\x64\x62\xa2\x12\xb8\x59\x72\x3b\x30\x1f\x06\x03\x55\x1d\x23\x04\x18\x30\x16\x80'
b'\x14\xb1\x3e\xc3\x69\x03\xf8\xbf\x47\x01\xd4\x98\x26\x1a\x08\x02\xef\x63\x64\x2b\xc3\x30\x0d\x06\x09\x2a\x86'
b'\x48\x86\xf7\x0d\x01\x01\x0b\x05\x00\x03\x82\x01\x01\x00\x18\x8a\x95\x89\x03\xe6\x6d\xdf\x5c\xfc\x1d\x68\xea'
b'\x4a\x8f\x83\xd6\x51\x2f\x8d\x6b\x44\x16\x9e\xac\x63\xf5\xd2\x6e\x6c\x84\x99\x8b\xaa\x81\x71\x84\x5b\xed\x34'
b'\x4e\xb0\xb7\x79\x92\x29\xcc\x2d\x80\x6a\xf0\x8e\x20\xe1\x79\xa4\xfe\x03\x47\x13\xea\xf5\x86\xca\x59\x71\x7d'
b'\xf4\x04\x96\x6b\xd3\x59\x58\x3d\xfe\xd3\x31\x25\x5c\x18\x38\x84\xa3\xe6\x9f\x82\xfd\x8c\x5b\x98\x31\x4e\xcd'
b'\x78\x9e\x1a\xfd\x85\xcb\x49\xaa\xf2\x27\x8b\x99\x72\xfc\x3e\xaa\xd5\x41\x0b\xda\xd5\x36\xa1\xbf\x1c\x6e\x47'
b'\x49\x7f\x5e\xd9\x48\x7c\x03\xd9\xfd\x8b\x49\xa0\x98\x26\x42\x40\xeb\xd6\x92\x11\xa4\x64\x0a\x57\x54\xc4\xf5'
b'\x1d\xd6\x02\x5e\x6b\xac\xee\xc4\x80\x9a\x12\x72\xfa\x56\x93\xd7\xff\xbf\x30\x85\x06\x30\xbf\x0b\x7f\x4e\xff'
b'\x57\x05\x9d\x24\xed\x85\xc3\x2b\xfb\xa6\x75\xa8\xac\x2d\x16\xef\x7d\x79\x27\xb2\xeb\xc2\x9d\x0b\x07\xea\xaa'
b'\x85\xd3\x01\xa3\x20\x28\x41\x59\x43\x28\xd2\x81\xe3\xaa\xf6\xec\x7b\x3b\x77\xb6\x40\x62\x80\x05\x41\x45\x01'
b'\xef\x17\x06\x3e\xde\xc0\x33\x9b\x67\xd3\x61\x2e\x72\x87\xe4\x69\xfc\x12\x00\x57\x40\x1e\x70\xf5\x1e\xc9\xb4'
)
def test_num_certs(self):
assert (len(self.p.data.certificates) == 2)
class TestTLSMultiFactory(object):
"""Made up test data"""
@classmethod
def setup_class(cls):
cls.data = _hexdecode(b'1703010010' # header 1
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' # data 1
b'1703010010' # header 2
b'BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB' # data 2
b'1703010010' # header 3
b'CCCCCCCC') # data 3 (incomplete)
cls.msgs, cls.bytes_parsed = tls_multi_factory(cls.data)
def test_num_messages(self):
# only complete messages should be parsed, incomplete ones left
# in buffer
assert (len(self.msgs) == 2)
def test_bytes_parsed(self):
assert (self.bytes_parsed == (5 + 16) * 2)
def test_first_msg_data(self):
assert (self.msgs[0].data == _hexdecode(b'AA' * 16))
def test_second_msg_data(self):
assert (self.msgs[1].data == _hexdecode(b'BB' * 16))
def test_incomplete(self):
import pytest
msgs, n = tls_multi_factory(_hexdecode(b'17'))
assert (len(msgs) == 0)
assert (n == 0)
msgs, n = tls_multi_factory(_hexdecode(b'1703'))
assert (len(msgs) == 0)
assert (n == 0)
msgs, n = tls_multi_factory(_hexdecode(b'170301'))
assert (len(msgs) == 0)
assert (n == 0)
msgs, n = tls_multi_factory(_hexdecode(b'17030100'))
assert (len(msgs) == 0)
assert (n == 0)
msgs, n = tls_multi_factory(_hexdecode(b'1703010000'))
assert (len(msgs) == 1)
assert (n == 5)
with pytest.raises(SSL3Exception, match='Bad TLS version in buf: '):
tls_multi_factory(_hexdecode(b'000000000000'))
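# Usage sketch (illustrative, not part of the test suite): tls_multi_factory
# is designed to be fed a reassembled TCP stream. It returns the complete TLS
# records plus the number of bytes consumed, so a caller can keep the
# incomplete tail for the next read; `stream_buf` is a hypothetical name here.
#
#   msgs, n = tls_multi_factory(stream_buf)
#   stream_buf = stream_buf[n:]  # retain unparsed bytes until more data arrives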
def test_ssl2():
from binascii import unhexlify
buf_padding = unhexlify(
'0001' # len
'02' # padlen
'03' # msg
'0405' # pad
'0607' # data
)
ssl2 = SSL2(buf_padding)
assert ssl2.len == 1
assert ssl2.msg == b'\x03'
assert ssl2.pad == b'\x04\x05'
assert ssl2.data == b'\x06\x07'
buf_no_padding = unhexlify(
'8001' # len
'03' # msg
'0607' # data
)
ssl2 = SSL2(buf_no_padding)
assert ssl2.len == 1
assert ssl2.msg == b'\x03'
assert ssl2.data == b'\x06\x07'
def test_clienthello_invalidcipher():
# NOTE: this test relies on ciphersuite 0x001c not being in ssl_ciphersuites.py CIPHERSUITES.
# IANA has reserved this value to avoid conflict with SSLv3, but if it gets reassigned,
# a new value should be chosen to fix this test.
from binascii import unhexlify
buf = unhexlify(
'0301' # version
'0000000000000000000000000000000000000000000000000000000000000000' # random
'01' # session_id length
'02' # session_id
'0002' # ciphersuites len
        '001c'  # ciphersuite (reserved; not implemented)
        '00'  # compression methods length (none)
)
th = TLSClientHello(buf)
assert th.ciphersuites[0].name == 'Unknown'
def test_serverhello_invalidcipher():
# NOTE: this test relies on ciphersuite 0x001c not being in ssl_ciphersuites.py CIPHERSUITES.
# IANA has reserved this value to avoid conflict with SSLv3, but if it gets reassigned,
# a new value should be chosen to fix this test.
import pytest
from binascii import unhexlify
buf = unhexlify(
'0301' # version
'0000000000000000000000000000000000000000000000000000000000000000' # random
'01' # session_id length
'02' # session_id
        '001c'  # ciphersuite (reserved; not implemented)
        '00'  # compression method
)
th = TLSServerHello(buf)
assert th.ciphersuite.name == 'Unknown'
# remove the final byte from the ciphersuite so it will fail unpacking
buf = buf[:-1]
with pytest.raises(dpkt.NeedData):
TLSServerHello(buf)
def test_tlscertificate_unpacking_error():
import pytest
from binascii import unhexlify
buf = unhexlify(
'000003' # certs len
'0000' # certs (invalid, as size < 3)
)
with pytest.raises(dpkt.NeedData):
TLSCertificate(buf)
def test_tlshandshake_invalid_type():
import pytest
from binascii import unhexlify
buf = unhexlify(
'7b' # type (invalid)
'000000' # length_bytes
)
with pytest.raises(SSL3Exception, match='Unknown or invalid handshake type 123'):
TLSHandshake(buf)
def test_sslfactory():
from binascii import unhexlify
buf_tls31 = unhexlify(
'00' # type
'0301' # version
'0000' # length
)
tls = SSLFactory(buf_tls31)
assert isinstance(tls, TLSRecord)
buf_ssl2 = unhexlify(
'00' # type
'0000' # not an SSL3+ version
)
ssl2 = SSLFactory(buf_ssl2)
assert isinstance(ssl2, SSL2)
def test_extensions():
from binascii import unhexlify
buf = unhexlify(
b"010000e0" # handshake header
b"0303" # version
b"60b92b07b6b0e1dffd0ac313788a6d54056d24f73c4d7425631e29b11be97b22" # rand
b"20b3330000ab415e3356226b305993bfb76b2d50bfaeb5298549723b594c999479" # session id
# cipher suites
b"0026c02cc02bc030c02fc024c023c028c027c00ac009c014c013009d009c003d003c0035002f000a"
b"0100" # compression methods
# extensions
b"006d00000023002100001e73656c662e6576656e74732e646174612e6d6963726f736f66742e636f"
b"6d000500050100000000000a00080006001d00170018000b00020100000d001a0018080408050806"
b"0401050102010403050302030202060106030023000000170000ff01000100"
b"ffeeddcc" # extra 4 bytes
)
handshake = TLSHandshake(buf)
hello = handshake.data
assert len(hello.extensions) == 8
assert hello.extensions[-1] == (65281, b'\x00')
| 49,787 | 46.014164 | 123 |
py
|
dpkt
|
dpkt-master/dpkt/h225.py
|
# $Id: h225.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""ITU-T H.225.0 Call Signaling."""
from __future__ import print_function
from __future__ import absolute_import
import struct
from . import dpkt
from . import tpkt
# H225 Call Signaling
#
# Call messages and information elements (IEs) are defined by Q.931:
# http://cvsup.de.openbsd.org/historic/comp/doc/standards/itu/Q/Q.931.ps.gz
#
# The User-to-User IEs of H225 are encoded by PER of ASN.1.
# Call Establishment Messages
ALERTING = 1
CALL_PROCEEDING = 2
CONNECT = 7
CONNECT_ACKNOWLEDGE = 15
PROGRESS = 3
SETUP = 5
SETUP_ACKNOWLEDGE = 13
# Call Information Phase Messages
RESUME = 38
RESUME_ACKNOWLEDGE = 46
RESUME_REJECT = 34
SUSPEND = 37
SUSPEND_ACKNOWLEDGE = 45
SUSPEND_REJECT = 33
USER_INFORMATION = 32
# Call Clearing Messages
DISCONNECT = 69
RELEASE = 77
RELEASE_COMPLETE = 90
RESTART = 70
RESTART_ACKNOWLEDGE = 78
# Miscellaneous Messages
SEGMENT = 96
CONGESTION_CONTROL = 121
INFORMATION = 123
NOTIFY = 110
STATUS = 125
STATUS_ENQUIRY = 117
# Type 1 Single Octet Information Element IDs
RESERVED = 128
SHIFT = 144
CONGESTION_LEVEL = 176
REPEAT_INDICATOR = 208
# Type 2 Single Octet Information Element IDs
MORE_DATA = 160
SENDING_COMPLETE = 161
# Variable Length Information Element IDs
SEGMENTED_MESSAGE = 0
BEARER_CAPABILITY = 4
CAUSE = 8
CALL_IDENTITY = 16
CALL_STATE = 20
CHANNEL_IDENTIFICATION = 24
PROGRESS_INDICATOR = 30
NETWORK_SPECIFIC_FACILITIES = 32
NOTIFICATION_INDICATOR = 39
DISPLAY = 40
DATE_TIME = 41
KEYPAD_FACILITY = 44
SIGNAL = 52
INFORMATION_RATE = 64
END_TO_END_TRANSIT_DELAY = 66
TRANSIT_DELAY_SELECTION_AND_INDICATION = 67
PACKET_LAYER_BINARY_PARAMETERS = 68
PACKET_LAYER_WINDOW_SIZE = 69
PACKET_SIZE = 70
CLOSED_USER_GROUP = 71
REVERSE_CHARGE_INDICATION = 74
CALLING_PARTY_NUMBER = 108
CALLING_PARTY_SUBADDRESS = 109
CALLED_PARTY_NUMBER = 112
CALLED_PARTY_SUBADDRESS = 113
REDIRECTING_NUMBER = 116
TRANSIT_NETWORK_SELECTION = 120
RESTART_INDICATOR = 121
LOW_LAYER_COMPATIBILITY = 124
HIGH_LAYER_COMPATIBILITY = 125
USER_TO_USER = 126
ESCAPE_FOR_EXTENSION = 127
class H225(dpkt.Packet):
"""ITU-T H.225.0 Call Signaling.
H.225.0 is a key protocol in the H.323 VoIP architecture defined by ITU-T. H.225.0 describes how audio, video,
data and control information on a packet based network can be managed to provide conversational services in H.323
equipment. H.225.0 has two major parts: Call signaling and RAS (Registration, Admission and Status).
Attributes:
__hdr__: Header fields of H225.
proto: (int): Protocol Discriminator. The Protocol Discriminator identifies the Layer 3 protocol. (1 byte)
ref_len: (int): Call Reference Value. Contains the length of the Call Reference Value (CRV) field. (1 byte)
"""
__hdr__ = (
('proto', 'B', 8),
('ref_len', 'B', 2)
)
def unpack(self, buf):
# TPKT header
self.tpkt = tpkt.TPKT(buf)
if self.tpkt.v != 3:
raise dpkt.UnpackError('invalid TPKT version')
if self.tpkt.rsvd != 0:
raise dpkt.UnpackError('invalid TPKT reserved value')
n = self.tpkt.len - self.tpkt.__hdr_len__
if n > len(self.tpkt.data):
raise dpkt.UnpackError('invalid TPKT length')
buf = self.tpkt.data
# Q.931 payload
dpkt.Packet.unpack(self, buf)
buf = buf[self.__hdr_len__:]
self.ref_val = buf[:self.ref_len]
buf = buf[self.ref_len:]
self.type = struct.unpack('B', buf[:1])[0]
buf = buf[1:]
# Information Elements
l_ = []
while buf:
ie = self.IE(buf)
l_.append(ie)
buf = buf[len(ie):]
self.data = l_
def __len__(self):
return self.tpkt.__hdr_len__ + self.__hdr_len__ + sum(map(len, self.data))
def __bytes__(self):
return self.tpkt.pack_hdr() + self.pack_hdr() + self.ref_val + \
struct.pack('B', self.type) + b''.join(map(bytes, self.data))
class IE(dpkt.Packet):
__hdr__ = (
('type', 'B', 0),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
buf = buf[self.__hdr_len__:]
# single-byte IE
if self.type & 0x80:
self.len = 0
self.data = b''
# multi-byte IE
else:
# special PER-encoded UUIE
if self.type == USER_TO_USER:
self.len = struct.unpack('>H', buf[:2])[0]
buf = buf[2:]
# normal TLV-like IE
else:
self.len = struct.unpack('B', buf[:1])[0]
buf = buf[1:]
self.data = buf[:self.len]
def __len__(self):
if self.type & 0x80:
n = 0
else:
if self.type == USER_TO_USER:
n = 2
else:
n = 1
return self.__hdr_len__ + self.len + n
def __bytes__(self):
if self.type & 0x80:
length_str = b''
else:
if self.type == USER_TO_USER:
length_str = struct.pack('>H', self.len)
else:
length_str = struct.pack('B', self.len)
return struct.pack('B', self.type) + length_str + self.data
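# Quick sketch of the IE length rules implemented above (values mirror
# test_unpack_ie below): a type octet with the high bit set is a single-octet
# IE carrying no length field and no payload.
#
# >>> ie = H225.IE(b'\x80')
# >>> (ie.len, ie.data, len(ie))
# (0, b'', 1)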
__s = (
b'\x03\x00\x04\x11\x08\x02\x54\x2b\x05\x04\x03\x88\x93\xa5\x28\x0e\x4a\x6f\x6e\x20\x4f\x62\x65\x72\x68\x65\x69\x64\x65'
b'\x00\x7e\x03\xf0\x05\x20\xb8\x06\x00\x08\x91\x4a\x00\x04\x01\x40\x0c\x00\x4a\x00\x6f\x00\x6e\x00\x20\x00\x4f\x00\x62'
b'\x00\x65\x00\x72\x00\x68\x00\x65\x00\x69\x00\x64\x00\x65\x22\xc0\x09\x00\x00\x3d\x06\x65\x6b\x69\x67\x61\x00\x00\x14'
b'\x32\x2e\x30\x2e\x32\x20\x28\x4f\x50\x41\x4c\x20\x76\x32\x2e\x32\x2e\x32\x29\x00\x00\x00\x01\x40\x15\x00\x74\x00\x63'
b'\x00\x70\x00\x24\x00\x68\x00\x33\x00\x32\x00\x33\x00\x2e\x00\x76\x00\x6f\x00\x78\x00\x67\x00\x72\x00\x61\x00\x74\x00'
b'\x69\x00\x61\x00\x2e\x00\x6f\x00\x72\x00\x67\x00\x42\x87\x23\x2c\x06\xb8\x00\x6a\x8b\x1d\x0c\xb7\x06\xdb\x11\x9e\xca'
b'\x00\x10\xa4\x89\x6d\x6a\x00\xc5\x1d\x80\x04\x07\x00\x0a\x00\x01\x7a\x75\x30\x11\x00\x5e\x88\x1d\x0c\xb7\x06\xdb\x11'
b'\x9e\xca\x00\x10\xa4\x89\x6d\x6a\x82\x2b\x0e\x30\x40\x00\x00\x06\x04\x01\x00\x4c\x10\x09\x00\x00\x3d\x0f\x53\x70\x65'
b'\x65\x78\x20\x62\x73\x34\x20\x57\x69\x64\x65\x36\x80\x11\x1c\x00\x01\x00\x98\xa0\x26\x41\x13\x8a\x00\x98\xa0\x26\x41'
b'\x13\x8b\x26\x00\x00\x64\x0c\x10\x09\x00\x00\x3d\x0f\x53\x70\x65\x65\x78\x20\x62\x73\x34\x20\x57\x69\x64\x65\x36\x80'
b'\x0b\x0d\x00\x01\x00\x98\xa0\x26\x41\x13\x8b\x00\x2a\x40\x00\x00\x06\x04\x01\x00\x4c\x10\x09\x00\x00\x3d\x09\x69\x4c'
b'\x42\x43\x2d\x31\x33\x6b\x33\x80\x11\x1c\x00\x01\x00\x98\xa0\x26\x41\x13\x8a\x00\x98\xa0\x26\x41\x13\x8b\x20\x00\x00'
b'\x65\x0c\x10\x09\x00\x00\x3d\x09\x69\x4c\x42\x43\x2d\x31\x33\x6b\x33\x80\x0b\x0d\x00\x01\x00\x98\xa0\x26\x41\x13\x8b'
b'\x00\x20\x40\x00\x00\x06\x04\x01\x00\x4e\x0c\x03\x00\x83\x00\x80\x11\x1c\x00\x01\x00\x98\xa0\x26\x41\x13\x8a\x00\x98'
b'\xa0\x26\x41\x13\x8b\x16\x00\x00\x66\x0e\x0c\x03\x00\x83\x00\x80\x0b\x0d\x00\x01\x00\x98\xa0\x26\x41\x13\x8b\x00\x4b'
b'\x40\x00\x00\x06\x04\x01\x00\x4c\x10\xb5\x00\x53\x4c\x2a\x02\x00\x00\x00\x00\x00\x40\x01\x00\x00\x40\x01\x02\x00\x08'
b'\x00\x00\x00\x00\x00\x31\x00\x01\x00\x40\x1f\x00\x00\x59\x06\x00\x00\x41\x00\x00\x00\x02\x00\x40\x01\x00\x00\x80\x11'
b'\x1c\x00\x01\x00\x98\xa0\x26\x41\x13\x8a\x00\x98\xa0\x26\x41\x13\x8b\x41\x00\x00\x67\x0c\x10\xb5\x00\x53\x4c\x2a\x02'
b'\x00\x00\x00\x00\x00\x40\x01\x00\x00\x40\x01\x02\x00\x08\x00\x00\x00\x00\x00\x31\x00\x01\x00\x40\x1f\x00\x00\x59\x06'
b'\x00\x00\x41\x00\x00\x00\x02\x00\x40\x01\x00\x00\x80\x0b\x0d\x00\x01\x00\x98\xa0\x26\x41\x13\x8b\x00\x32\x40\x00\x00'
b'\x06\x04\x01\x00\x4c\x10\x09\x00\x00\x3d\x11\x53\x70\x65\x65\x78\x20\x62\x73\x34\x20\x4e\x61\x72\x72\x6f\x77\x33\x80'
b'\x11\x1c\x00\x01\x00\x98\xa0\x26\x41\x13\x8a\x00\x98\xa0\x26\x41\x13\x8b\x28\x00\x00\x68\x0c\x10\x09\x00\x00\x3d\x11'
b'\x53\x70\x65\x65\x78\x20\x62\x73\x34\x20\x4e\x61\x72\x72\x6f\x77\x33\x80\x0b\x0d\x00\x01\x00\x98\xa0\x26\x41\x13\x8b'
b'\x00\x1d\x40\x00\x00\x06\x04\x01\x00\x4c\x60\x1d\x80\x11\x1c\x00\x01\x00\x98\xa0\x26\x41\x13\x8a\x00\x98\xa0\x26\x41'
b'\x13\x8b\x13\x00\x00\x69\x0c\x60\x1d\x80\x0b\x0d\x00\x01\x00\x98\xa0\x26\x41\x13\x8b\x00\x1d\x40\x00\x00\x06\x04\x01'
b'\x00\x4c\x20\x1d\x80\x11\x1c\x00\x01\x00\x98\xa0\x26\x41\x13\x8a\x00\x98\xa0\x26\x41\x13\x8b\x13\x00\x00\x6a\x0c\x20'
b'\x1d\x80\x0b\x0d\x00\x01\x00\x98\xa0\x26\x41\x13\x8b\x00\x01\x00\x01\x00\x01\x00\x01\x00\x81\x03\x02\x80\xf8\x02\x70'
b'\x01\x06\x00\x08\x81\x75\x00\x0b\x80\x13\x80\x01\xf4\x00\x01\x00\x00\x01\x00\x00\x01\x00\x00\x0c\xc0\x01\x00\x01\x80'
b'\x0b\x80\x00\x00\x20\x20\x09\x00\x00\x3d\x0f\x53\x70\x65\x65\x78\x20\x62\x73\x34\x20\x57\x69\x64\x65\x36\x80\x00\x01'
b'\x20\x20\x09\x00\x00\x3d\x09\x69\x4c\x42\x43\x2d\x31\x33\x6b\x33\x80\x00\x02\x24\x18\x03\x00\xe6\x00\x80\x00\x03\x20'
b'\x20\xb5\x00\x53\x4c\x2a\x02\x00\x00\x00\x00\x00\x40\x01\x00\x00\x40\x01\x02\x00\x08\x00\x00\x00\x00\x00\x31\x00\x01'
b'\x00\x40\x1f\x00\x00\x59\x06\x00\x00\x41\x00\x00\x00\x02\x00\x40\x01\x00\x00\x80\x00\x04\x20\x20\x09\x00\x00\x3d\x11'
b'\x53\x70\x65\x65\x78\x20\x62\x73\x34\x20\x4e\x61\x72\x72\x6f\x77\x33\x80\x00\x05\x20\xc0\xef\x80\x00\x06\x20\x40\xef'
b'\x80\x00\x07\x08\xe0\x03\x51\x00\x80\x01\x00\x80\x00\x08\x08\xd0\x03\x51\x00\x80\x01\x00\x80\x00\x09\x83\x01\x50\x80'
b'\x00\x0a\x83\x01\x10\x80\x00\x0b\x83\x01\x40\x00\x80\x01\x03\x06\x00\x00\x00\x01\x00\x02\x00\x03\x00\x04\x00\x05\x00'
b'\x06\x01\x00\x07\x00\x08\x00\x00\x09\x01\x00\x0a\x00\x0b\x07\x01\x00\x32\x80\xa6\xff\x4c\x02\x80\x01\x80'
)
def test_pack():
h = H225(__s)
assert (__s == bytes(h))
assert len(h) == 1038 # len(__s) == 1041
def test_unpack():
h = H225(__s)
assert (h.tpkt.v == 3)
assert (h.tpkt.rsvd == 0)
assert (h.tpkt.len == 1041)
assert (h.proto == 8)
assert (h.type == SETUP)
assert (len(h.data) == 3)
ie = h.data[0]
assert (ie.type == BEARER_CAPABILITY)
assert (ie.len == 3)
ie = h.data[1]
assert (ie.type == DISPLAY)
assert (ie.len == 14)
ie = h.data[2]
assert (ie.type == USER_TO_USER)
assert (ie.len == 1008)
def test_tpkt_unpack_errors():
import pytest
from binascii import unhexlify
# invalid version
buf_tpkt_version0 = unhexlify(
'00' # v
'00' # rsvd
'0000' # len
)
with pytest.raises(dpkt.UnpackError, match="invalid TPKT version"):
H225(buf_tpkt_version0)
# invalid reserved value
buf_tpkt_rsvd = unhexlify(
'03' # v
'ff' # rsvd
'0000' # len
)
with pytest.raises(dpkt.UnpackError, match="invalid TPKT reserved value"):
H225(buf_tpkt_rsvd)
# invalid len
buf_tpkt_len = unhexlify(
'03' # v
'00' # rsvd
'ffff' # len
)
with pytest.raises(dpkt.UnpackError, match="invalid TPKT length"):
H225(buf_tpkt_len)
def test_unpack_ie():
ie = H225.IE(b'\x80')
assert ie.len == 0
assert ie.data == b''
assert len(ie) == 1
assert bytes(ie) == b'\x80'
| 11,388 | 36.837209 | 123 |
py
|
dpkt
|
dpkt-master/dpkt/netbios.py
|
# $Id: netbios.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Network Basic Input/Output System."""
from __future__ import absolute_import
import struct
from . import dpkt
from . import dns
from .compat import compat_ord
def encode_name(name):
"""
Return the NetBIOS first-level encoded name.
14.1. FIRST LEVEL ENCODING
The first level representation consists of two parts:
- NetBIOS name
- NetBIOS scope identifier
The 16 byte NetBIOS name is mapped into a 32 byte wide field using a
reversible, half-ASCII, biased encoding. Each half-octet of the
NetBIOS name is encoded into one byte of the 32 byte field. The
first half octet is encoded into the first byte, the second half-
octet into the second byte, etc.
Each 4-bit, half-octet of the NetBIOS name is treated as an 8-bit,
    right-adjusted, zero-filled binary number. This number is added to the
    value of the ASCII character 'A' (hexadecimal 41). The resulting 8-
bit number is stored in the appropriate byte. The following diagram
demonstrates this procedure:
0 1 2 3 4 5 6 7
+-+-+-+-+-+-+-+-+
|a b c d|w x y z| ORIGINAL BYTE
+-+-+-+-+-+-+-+-+
| |
+--------+ +--------+
| | SPLIT THE NIBBLES
v v
0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7
+-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+
|0 0 0 0 a b c d| |0 0 0 0 w x y z|
+-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+
| |
+ + ADD 'A'
| |
0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7
+-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+
|0 1 0 0 0 0 0 1| |0 1 0 0 0 0 0 1|
+-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+
This encoding results in a NetBIOS name being represented as a
sequence of 32 ASCII, upper-case characters from the set
{A,B,C...N,O,P}.
The NetBIOS scope identifier is a valid domain name (without a
leading dot).
An ASCII dot (2E hexadecimal) and the scope identifier are appended
to the encoded form of the NetBIOS name, the result forming a valid
domain name.
"""
l_ = []
for c in struct.pack('16s', name.encode()):
c = compat_ord(c)
l_.append(chr((c >> 4) + 0x41))
l_.append(chr((c & 0xf) + 0x41))
return ''.join(l_)
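# Worked example of the first-level encoding above (illustrative; mirrors
# test_encode_name below): 'T' is 0x54, so its nibbles 0x5 and 0x4 map to
# chr(0x41 + 0x5) == 'F' and chr(0x41 + 0x4) == 'E'.
#
# >>> encode_name('The NetBIOS name')
# 'FEGIGFCAEOGFHEECEJEPFDCAGOGBGNGF'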
def decode_name(nbname):
"""
Return the NetBIOS first-level decoded nbname.
"""
if len(nbname) != 32:
return nbname
l_ = []
for i in range(0, 32, 2):
l_.append(
chr(
((ord(nbname[i]) - 0x41) << 4) |
((ord(nbname[i + 1]) - 0x41) & 0xf)
)
)
return ''.join(l_).split('\x00', 1)[0]
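# Round-trip sketch (mirrors test_decode_name below): decoding reverses the
# half-ASCII mapping and drops anything after the first NUL introduced by the
# 16-byte '16s' padding in encode_name.
#
# >>> decode_name(encode_name('The NetBIOS name'))
# 'The NetBIOS name'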
# RR types
NS_A = 0x01 # IP address
NS_NS = 0x02 # Name Server
NS_NULL = 0x0A # NULL
NS_NB = 0x20 # NetBIOS general Name Service
NS_NBSTAT = 0x21 # NetBIOS NODE STATUS
# RR classes
NS_IN = 1
# NBSTAT name flags
NS_NAME_G = 0x8000 # group name (as opposed to unique)
NS_NAME_DRG = 0x1000 # deregister
NS_NAME_CNF = 0x0800 # conflict
NS_NAME_ACT = 0x0400 # active
NS_NAME_PRM = 0x0200 # permanent
# NBSTAT service names
nbstat_svcs = {
# (service, unique): list of ordered (name prefix, service name) tuples
(0x00, 0): [('', 'Domain Name')],
(0x00, 1): [('IS~', 'IIS'), ('', 'Workstation Service')],
(0x01, 0): [('__MSBROWSE__', 'Master Browser')],
(0x01, 1): [('', 'Messenger Service')],
(0x03, 1): [('', 'Messenger Service')],
(0x06, 1): [('', 'RAS Server Service')],
(0x1B, 1): [('', 'Domain Master Browser')],
(0x1C, 0): [('INet~Services', 'IIS'), ('', 'Domain Controllers')],
(0x1D, 1): [('', 'Master Browser')],
(0x1E, 0): [('', 'Browser Service Elections')],
(0x1F, 1): [('', 'NetDDE Service')],
(0x20, 1): [('Forte_$ND800ZA', 'DCA IrmaLan Gateway Server Service'),
('', 'File Server Service')],
(0x21, 1): [('', 'RAS Client Service')],
(0x22, 1): [('', 'Microsoft Exchange Interchange(MSMail Connector)')],
(0x23, 1): [('', 'Microsoft Exchange Store')],
(0x24, 1): [('', 'Microsoft Exchange Directory')],
(0x2B, 1): [('', 'Lotus Notes Server Service')],
(0x2F, 0): [('IRISMULTICAST', 'Lotus Notes')],
(0x30, 1): [('', 'Modem Sharing Server Service')],
(0x31, 1): [('', 'Modem Sharing Client Service')],
(0x33, 0): [('IRISNAMESERVER', 'Lotus Notes')],
(0x43, 1): [('', 'SMS Clients Remote Control')],
(0x44, 1): [('', 'SMS Administrators Remote Control Tool')],
(0x45, 1): [('', 'SMS Clients Remote Chat')],
(0x46, 1): [('', 'SMS Clients Remote Transfer')],
(0x4C, 1): [('', 'DEC Pathworks TCPIP service on Windows NT')],
(0x52, 1): [('', 'DEC Pathworks TCPIP service on Windows NT')],
(0x87, 1): [('', 'Microsoft Exchange MTA')],
(0x6A, 1): [('', 'Microsoft Exchange IMC')],
(0xBE, 1): [('', 'Network Monitor Agent')],
(0xBF, 1): [('', 'Network Monitor Application')]
}
def node_to_service_name(name_service_flags):
name, service, flags = name_service_flags
try:
unique = int(flags & NS_NAME_G == 0)
for namepfx, svcname in nbstat_svcs[(service, unique)]:
if name.startswith(namepfx):
return svcname
except KeyError:
pass
return ''
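# Example lookup (mirrors test_node_to_service_name below): flags 0x0800 have
# NS_NAME_G clear, so the name is unique and resolves via
# nbstat_svcs[(0x00, 1)]; 'ISS' lacks the 'IS~' prefix and falls through to
# the empty-prefix entry.
#
# >>> node_to_service_name(('ISS', 0x00, 0x0800))
# 'Workstation Service'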
class NS(dns.DNS):
"""
NetBIOS Name Service.
RFC1002: https://tools.ietf.org/html/rfc1002
"""
class Q(dns.DNS.Q):
pass
class RR(dns.DNS.RR):
"""NetBIOS resource record.
RFC1001: 14. REPRESENTATION OF NETBIOS NAMES
NetBIOS names as seen across the client interface to NetBIOS are
exactly 16 bytes long. Within the NetBIOS-over-TCP protocols, a
longer representation is used.
There are two levels of encoding. The first level maps a NetBIOS
name into a domain system name. The second level maps the domain
system name into the "compressed" representation required for
interaction with the domain name system.
Except in one packet, the second level representation is the only
NetBIOS name representation used in NetBIOS-over-TCP packet formats.
The exception is the RDATA field of a NODE STATUS RESPONSE packet.
"""
_node_name_struct = struct.Struct('>15s B H')
_node_name_len = _node_name_struct.size
def unpack_rdata(self, buf, off):
if self.type == NS_A:
self.ip = self.rdata
elif self.type == NS_NBSTAT:
num_names = compat_ord(self.rdata[0])
self.nodenames = [
self._node_name_struct.unpack_from(
self.rdata, 1+idx*self._node_name_len
) for idx in range(num_names)
]
# XXX - skip stats
class Session(dpkt.Packet):
"""NetBIOS Session Service."""
__hdr__ = (
('type', 'B', 0),
('flags', 'B', 0),
('len', 'H', 0)
)
SSN_MESSAGE = 0
SSN_REQUEST = 1
SSN_POSITIVE = 2
SSN_NEGATIVE = 3
SSN_RETARGET = 4
SSN_KEEPALIVE = 5
class Datagram(dpkt.Packet):
"""NetBIOS Datagram Service."""
__hdr__ = (
('type', 'B', 0),
('flags', 'B', 0),
('id', 'H', 0),
('src', 'I', 0),
('sport', 'H', 0),
('len', 'H', 0),
('off', 'H', 0)
)
DGRAM_UNIQUE = 0x10
DGRAM_GROUP = 0x11
DGRAM_BROADCAST = 0x12
DGRAM_ERROR = 0x13
DGRAM_QUERY = 0x14
DGRAM_POSITIVE = 0x15
DGRAM_NEGATIVE = 0x16
def test_encode_name():
assert encode_name('The NetBIOS name') == 'FEGIGFCAEOGFHEECEJEPFDCAGOGBGNGF'
# rfc1002
assert encode_name('FRED ') == 'EGFCEFEECACACACACACACACACACACACA'
# https://github.com/kbandla/dpkt/issues/458
assert encode_name('*') == 'CKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
def test_decode_name():
assert decode_name('FEGIGFCAEOGFHEECEJEPFDCAGOGBGNGF') == 'The NetBIOS name'
# original botched example from rfc1001
assert decode_name('FEGHGFCAEOGFHEECEJEPFDCAHEGBGNGF') == 'Tge NetBIOS tame'
assert decode_name('CKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA') == '*'
# decode a name which is not 32 chars long
assert decode_name('CKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABB') == 'CKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABB'
def test_node_to_service_name():
svcname = node_to_service_name(("ISS", 0x00, 0x0800))
assert svcname == "Workstation Service"
def test_node_to_service_name_keyerror():
svcname = node_to_service_name(("ISS", 0xff, 0x0800))
assert svcname == ""
def test_rr():
import pytest
from binascii import unhexlify
rr = NS.RR()
with pytest.raises(NotImplementedError):
len(rr)
buf = unhexlify(''.join([
'01', # A record
'0001', # DNS_IN
'00000000', # TTL
'0000', # rlen
]))
rr.unpack_rdata(buf, 0)
assert rr.ip == rr.rdata
def test_rr_nbstat():
from binascii import unhexlify
buf = unhexlify(''.join([
'41' * 1025, # Name
'0033', # NS_NBSTAT
'0001', # DNS_IN
'00000000', # TTL
'0004', # rlen
]))
rdata = (
b'\x02' # NUM_NAMES
b'ABCDEFGHIJKLMNO\x2f\x01\x02'
b'PQRSTUVWXYZABCD\x43\x03\x04'
)
rr = NS.RR(
type=NS_NBSTAT,
rdata=rdata,
)
assert rr.type == NS_NBSTAT
rr.unpack_rdata(buf, 0)
assert rr.nodenames == [
(b'ABCDEFGHIJKLMNO', 0x2f, 0x0102),
(b'PQRSTUVWXYZABCD', 0x43, 0x0304),
]
def test_ns():
from binascii import unhexlify
ns = NS()
correct = unhexlify(
'0000'
'0100'
'0000000000000000'
)
assert bytes(ns) == correct
| 10,061 | 29.583587 | 100 |
py
|
dpkt
|
dpkt-master/dpkt/asn1.py
|
# $Id: asn1.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Abstract Syntax Notation #1."""
from __future__ import absolute_import
from __future__ import print_function
import struct
from calendar import timegm
from . import dpkt
from .compat import compat_ord
# Type class
CLASSMASK = 0xc0
UNIVERSAL = 0x00
APPLICATION = 0x40
CONTEXT = 0x80
PRIVATE = 0xc0
# Constructed (vs. primitive)
CONSTRUCTED = 0x20
# Universal-class tags
TAGMASK = 0x1f
INTEGER = 2
BIT_STRING = 3 # arbitrary bit string
OCTET_STRING = 4 # arbitrary octet string
NULL = 5
OID = 6 # object identifier
SEQUENCE = 16 # ordered collection of types
SET = 17 # unordered collection of types
PRINT_STRING = 19 # printable string
T61_STRING = 20 # T.61 (8-bit) character string
IA5_STRING = 22 # ASCII
UTC_TIME = 23
def utctime(buf):
"""Convert ASN.1 UTCTime string to UTC float.
TODO: Long description here.
Args:
buf: A buffer with format "yymnddhhmm"
Returns:
A floating point number, indicates seconds since the Epoch.
"""
yy = int(buf[:2])
mn = int(buf[2:4])
dd = int(buf[4:6])
hh = int(buf[6:8])
mm = int(buf[8:10])
try:
ss = int(buf[10:12])
buf = buf[12:]
    except (TypeError, ValueError):  # seconds field absent; int('') raises ValueError
ss = 0
buf = buf[10:]
if buf[0] == '+':
hh -= int(buf[1:3])
mm -= int(buf[3:5])
elif buf[0] == '-':
hh += int(buf[1:3])
mm += int(buf[3:5])
return timegm((2000 + yy, mn, dd, hh, mm, ss, 0, 0, 0))
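# Worked example (mirrors test_utctime below): '201005012345+1234' is
# 2020-10-05 01:23:45 with a +12:34 zone suffix, so 12h34m are subtracted
# before the timegm() conversion.
#
# >>> utctime('201005012345+1234')
# 1601815785.0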
def decode(buf):
"""Sleazy ASN.1 decoder.
TODO: Long description here.
Args:
buf: A buffer with Sleazy ASN.1 data.
Returns:
A list of (id, value) tuples from ASN.1 BER/DER encoded buffer.
Raises:
UnpackError: An error occurred the ASN.1 length exceed.
"""
msg = []
while buf:
t = compat_ord(buf[0])
constructed = t & CONSTRUCTED
tag = t & TAGMASK
l_ = compat_ord(buf[1])
c = 0
if constructed and l_ == 128:
# XXX - constructed, indefinite length
msg.append((t, decode(buf[2:])))
elif l_ >= 128:
c = l_ & 127
if c == 1:
l_ = compat_ord(buf[2])
elif c == 2:
l_ = struct.unpack('>H', buf[2:4])[0]
            elif c == 3:
                # XXX - quirky 3-byte case: unpacks four bytes starting at the
                # length marker itself, masks to 12 bits, and drops c to 2, so
                # only small lengths survive this (see test_decode)
                l_ = struct.unpack('>I', buf[1:5])[0] & 0xfff
                c = 2
elif c == 4:
l_ = struct.unpack('>I', buf[2:6])[0]
else:
# XXX - can be up to 127 bytes, but...
raise dpkt.UnpackError('excessive long-form ASN.1 length %d' % l_)
# Skip type, length
buf = buf[2 + c:]
# Parse content
if constructed:
msg.append((t, decode(buf)))
elif tag == INTEGER:
if l_ == 0:
n = 0
elif l_ == 1:
n = compat_ord(buf[0])
elif l_ == 2:
n = struct.unpack('>H', buf[:2])[0]
elif l_ == 3:
n = struct.unpack('>I', buf[:4])[0] >> 8
elif l_ == 4:
n = struct.unpack('>I', buf[:4])[0]
else:
raise dpkt.UnpackError('excessive integer length > %d bytes' % l_)
msg.append((t, n))
elif tag == UTC_TIME:
msg.append((t, utctime(buf[:l_])))
else:
msg.append((t, buf[:l_]))
# Skip content
buf = buf[l_:]
return msg
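# Decoding sketch (values mirror test_decode below): a long-form length octet
# 0x81 announces one following length byte, here wrapping a 4-byte INTEGER.
#
# >>> from binascii import unhexlify
# >>> decode(unhexlify('02810412345678'))
# [(2, 305419896)]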
def test_asn1():
s = (
b'0\x82\x02Q\x02\x01\x0bc\x82\x02J\x04xcn=Douglas J Song 1, ou=Information Technology Division,'
b' ou=Faculty and Staff, ou=People, o=University of Michigan, c=US\n\x01\x00\n\x01\x03\x02\x01'
b'\x00\x02\x01\x00\x01\x01\x00\x87\x0bobjectclass0\x82\x01\xb0\x04\rmemberOfGroup\x04\x03acl'
b'\x04\x02cn\x04\x05title\x04\rpostalAddress\x04\x0ftelephoneNumber\x04\x04mail\x04\x06member'
b'\x04\thomePhone\x04\x11homePostalAddress\x04\x0bobjectClass\x04\x0bdescription\x04\x18'
b'facsimileTelephoneNumber\x04\x05pager\x04\x03uid\x04\x0cuserPassword\x04\x08joinable\x04\x10'
b'associatedDomain\x04\x05owner\x04\x0erfc822ErrorsTo\x04\x08ErrorsTo\x04\x10rfc822RequestsTo\x04\n'
b'RequestsTo\x04\tmoderator\x04\nlabeledURL\x04\nonVacation\x04\x0fvacationMessage\x04\x05drink\x04\x0e'
b'lastModifiedBy\x04\x10lastModifiedTime\x04\rmodifiersname\x04\x0fmodifytimestamp\x04\x0ccreatorsname'
b'\x04\x0fcreatetimestamp'
)
assert decode(s) == [
(48, [
(2, 11),
(99, [
(4, (
b'cn=Douglas J Song 1, '
b'ou=Information Technology Division, '
b'ou=Faculty and Staff, '
b'ou=People, '
b'o=University of Michigan, '
b'c=US'
)),
(10, b'\x00'),
(10, b'\x03'),
(2, 0),
(2, 0),
(1, b'\x00'),
(135, b'objectclass'),
(48, [
(4, b'memberOfGroup'),
(4, b'acl'),
(4, b'cn'),
(4, b'title'),
(4, b'postalAddress'),
(4, b'telephoneNumber'),
(4, b'mail'),
(4, b'member'),
(4, b'homePhone'),
(4, b'homePostalAddress'),
(4, b'objectClass'),
(4, b'description'),
(4, b'facsimileTelephoneNumber'),
(4, b'pager'),
(4, b'uid'),
(4, b'userPassword'),
(4, b'joinable'),
(4, b'associatedDomain'),
(4, b'owner'),
(4, b'rfc822ErrorsTo'),
(4, b'ErrorsTo'),
(4, b'rfc822RequestsTo'),
(4, b'RequestsTo'),
(4, b'moderator'),
(4, b'labeledURL'),
(4, b'onVacation'),
(4, b'vacationMessage'),
(4, b'drink'),
(4, b'lastModifiedBy'),
(4, b'lastModifiedTime'),
(4, b'modifiersname'),
(4, b'modifytimestamp'),
(4, b'creatorsname'),
(4, b'createtimestamp'),
])
])
])
]
def test_utctime():
buf = (
'201005' # yymndd
'012345' # hhmmss
'+1234' # +hhmm
)
assert utctime(buf) == 1601815785.0
buf = (
'201005' # yymndd
'012345' # hhmmss
'-1234' # -hhmm
)
assert utctime(buf) == 1601906265.0
def test_decode():
import pytest
from binascii import unhexlify
buf = unhexlify(
'20' # CONSTRUCTED
'80' # 128 | 0
)
assert decode(buf) == [(32, []), (32, [])]
# unpacking UTC_TIME
buf = unhexlify(
'17' # t: code: UTC_TIME
        '81' # l_: 128 | 1 (long form, one length octet)
'22' # data len
'3230313030353031323334352b30303030'
)
assert decode(buf) == [(23, 1601861025.0)]
# unpacking 2-byte size; zero-length integer
buf = unhexlify(
'02' # t: INTEGER
'82' # l_: 128 | 2
'0000' # new l_
)
assert decode(buf) == [(2, 0)]
# unpacking 3-byte size
buf = unhexlify(
'02' # t: INTEGER
'83' # l_: 128 | 3
'000001' # new l_
)
assert decode(buf) == [(2, 1)]
# unpacking 4-byte size
buf = unhexlify(
'02' # t: INTEGER
'84' # l_: 128 | 4
'00000002' # new l_
'abcd'
)
assert decode(buf) == [(2, 43981)]
    # unpacking 5-byte size (unsupported, raises)
buf = unhexlify(
'02' # t: INTEGER
'85' # l_: 128 | 5
)
with pytest.raises(dpkt.UnpackError, match="excessive long-form ASN.1 length 133"):
decode(buf)
# unpacking 1-byte size; 4-byte integer
buf = unhexlify(
'02' # t: INTEGER
'81' # l_: 128 | 1
'04' # new l_
'12345678' # integer
)
assert decode(buf) == [(2, 305419896)]
    # unpacking 1-byte size; 5-byte integer (unsupported, raises)
buf = unhexlify(
'02' # t: INTEGER
'81' # l_: 128 | 1
'05' # new l_
)
with pytest.raises(dpkt.UnpackError, match="excessive integer length > 5 bytes"):
decode(buf)
# unpacking 1-byte size; 3-byte integer
buf = unhexlify(
'02' # t: INTEGER
'81' # l_: 128 | 1
'03' # new l_
'123456' # integer
'02' # t: INTEGER
'81' # l_: 128 | 1
'00' # new l_
)
assert decode(buf) == [
(2, 1193046),
(2, 0),
]
| 9,011 | 27.884615 | 112 |
py
|
dpkt
|
dpkt-master/dpkt/pcapng.py
|
"""pcap Next Generation file format"""
# Spec: https://pcapng.github.io/pcapng/
# pylint: disable=no-member
# pylint: disable=attribute-defined-outside-init
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division # so python 2 doesn't do integer division
from struct import pack as struct_pack, unpack as struct_unpack
from time import time
import sys
from . import dpkt
from .compat import BytesIO, intround
BYTE_ORDER_MAGIC = 0x1A2B3C4D
BYTE_ORDER_MAGIC_LE = 0x4D3C2B1A
PCAPNG_VERSION_MAJOR = 1
PCAPNG_VERSION_MINOR = 0
# Block types
PCAPNG_BT_IDB = 0x00000001 # Interface Description Block
PCAPNG_BT_PB = 0x00000002 # Packet Block (deprecated)
PCAPNG_BT_SPB = 0x00000003 # Simple Packet Block
PCAPNG_BT_EPB = 0x00000006 # Enhanced Packet Block
PCAPNG_BT_SHB = 0x0A0D0D0A # Section Header Block
# Options
PCAPNG_OPT_ENDOFOPT = 0 # end of options
PCAPNG_OPT_COMMENT = 1 # comment
# SHB options
PCAPNG_OPT_SHB_HARDWARE = 2 # description of the hardware
PCAPNG_OPT_SHB_OS = 3 # name of the operating system
PCAPNG_OPT_SHB_USERAPPL = 4 # name of the application
# IDB options
PCAPNG_OPT_IF_NAME = 2 # interface name
PCAPNG_OPT_IF_DESCRIPTION = 3 # interface description
PCAPNG_OPT_IF_IPV4ADDR = 4 # IPv4 network address and netmask for the interface
PCAPNG_OPT_IF_IPV6ADDR = 5 # IPv6 network address and prefix length for the interface
PCAPNG_OPT_IF_MACADDR = 6 # interface hardware MAC address
PCAPNG_OPT_IF_EUIADDR = 7 # interface hardware EUI address
PCAPNG_OPT_IF_SPEED = 8 # interface speed in bits/s
PCAPNG_OPT_IF_TSRESOL = 9 # timestamp resolution
PCAPNG_OPT_IF_TZONE = 10 # time zone
PCAPNG_OPT_IF_FILTER = 11 # capture filter
PCAPNG_OPT_IF_OS = 12 # operating system
PCAPNG_OPT_IF_FCSLEN = 13 # length of the Frame Check Sequence in bits
PCAPNG_OPT_IF_TSOFFSET = 14 # offset (in seconds) that must be added to packet timestamp
# <copied from pcap.py>
DLT_NULL = 0
DLT_EN10MB = 1
DLT_EN3MB = 2
DLT_AX25 = 3
DLT_PRONET = 4
DLT_CHAOS = 5
DLT_IEEE802 = 6
DLT_ARCNET = 7
DLT_SLIP = 8
DLT_PPP = 9
DLT_FDDI = 10
DLT_PFSYNC = 18
DLT_IEEE802_11 = 105
DLT_LINUX_SLL = 113
DLT_PFLOG = 117
DLT_IEEE802_11_RADIO = 127
if sys.platform.find('openbsd') != -1:
DLT_LOOP = 12
DLT_RAW = 14
else:
DLT_LOOP = 108
DLT_RAW = 12
dltoff = {DLT_NULL: 4, DLT_EN10MB: 14, DLT_IEEE802: 22, DLT_ARCNET: 6,
DLT_SLIP: 16, DLT_PPP: 4, DLT_FDDI: 21, DLT_PFLOG: 48, DLT_PFSYNC: 4,
DLT_LOOP: 4, DLT_LINUX_SLL: 16}
# </copied from pcap.py>
def _swap32b(i):
"""Swap endianness of an uint32"""
return struct_unpack('<I', struct_pack('>I', i))[0]
def _align32b(i):
"""Return int `i` aligned to the 32-bit boundary"""
r = i % 4
return i if not r else i + 4 - r
def _padded(s):
"""Return bytes `s` padded with zeroes to align to the 32-bit boundary"""
return struct_pack('%ss' % _align32b(len(s)), s)
def _padded_tolen(s, tolen):
"""Return bytes `s` padded with `tolen` zeroes to align to the 32-bit boundary"""
return struct_pack('%ss' % tolen, s)
def _padlen(s):
"""Return size of padding required to align str `s` to the 32-bit boundary"""
return _align32b(len(s)) - len(s)
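# Sanity examples for the alignment helpers above (illustrative only):
#
# >>> _align32b(5)  # next 32-bit boundary
# 8
# >>> _padlen(b'abcde')  # zero bytes needed to reach it
# 3
# >>> _padded(b'abcde')
# b'abcde\x00\x00\x00'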
class _PcapngBlock(dpkt.Packet):
"""Base class for a pcapng block with Options"""
__hdr__ = (
('type', 'I', 0), # block type
('len', 'I', 12), # block total length: total size of this block, in octets
# ( body, variable size )
('_len', 'I', 12), # dup of len
)
def unpack_hdr(self, buf):
dpkt.Packet.unpack(self, buf)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.len > len(buf):
raise dpkt.NeedData
self._do_unpack_options(buf)
def _do_unpack_options(self, buf, oo=None):
self.opts = []
        self.data = b''
oo = oo or self.__hdr_len__ - 4 # options offset
ol = self.len - oo - 4 # length
opts_buf = buf[oo:oo + ol]
while opts_buf:
opt = (PcapngOptionLE(opts_buf) if self.__hdr_fmt__[0] == '<'
else PcapngOption(opts_buf))
self.opts.append(opt)
opts_buf = opts_buf[len(opt):]
if opt.code == PCAPNG_OPT_ENDOFOPT:
break
# duplicate total length field
self._len = struct_unpack(self.__hdr_fmt__[0] + 'I', buf[-4:])[0]
if self._len != self.len:
raise dpkt.UnpackError('length fields do not match')
def _do_pack_options(self):
if not getattr(self, 'opts', None):
return b''
if self.opts[-1].code != PCAPNG_OPT_ENDOFOPT:
raise dpkt.PackError('options must end with opt_endofopt')
return b''.join(bytes(o) for o in self.opts)
def __bytes__(self):
opts_buf = self._do_pack_options()
n = len(opts_buf) + self.__hdr_len__
self.len = n
self._len = n
hdr_buf = self._pack_hdr(self.type, n, n)
return b''.join([hdr_buf[:-4], opts_buf, hdr_buf[-4:]])
def __len__(self):
if not getattr(self, 'opts', None):
return self.__hdr_len__
opts_len = sum(len(o) for o in self.opts)
return self.__hdr_len__ + opts_len
class PcapngBlockLE(_PcapngBlock):
__byte_order__ = '<'
class PcapngOption(dpkt.Packet):
"""A single Option"""
__hdr__ = (
('code', 'H', PCAPNG_OPT_ENDOFOPT),
('len', 'H', 0),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = buf[self.__hdr_len__:self.__hdr_len__ + self.len]
# decode comment
if self.code == PCAPNG_OPT_COMMENT:
try:
self.text = self.data.decode('utf-8')
except UnicodeDecodeError as ude:
if b'\x00' in self.data:
self.text = self.data[:self.data.index(b'\x00')].decode('ascii')
else:
raise ude
def __bytes__(self):
# encode comment
if self.code == PCAPNG_OPT_COMMENT:
text = getattr(self, 'text', self.data)
self.data = text.encode('utf-8') if not isinstance(text, bytes) else text
self.len = len(self.data)
hdr = self._pack_hdr(self.code, self.len)
return hdr + _padded(self.data)
def __len__(self):
return self.__hdr_len__ + len(self.data) + _padlen(self.data)
def __repr__(self):
if self.code == PCAPNG_OPT_ENDOFOPT:
return '{0}(opt_endofopt)'.format(self.__class__.__name__)
else:
return dpkt.Packet.__repr__(self)
class PcapngOptionLE(PcapngOption):
__byte_order__ = '<'
class SectionHeaderBlock(_PcapngBlock):
"""Section Header block"""
__hdr__ = (
('type', 'I', PCAPNG_BT_SHB),
('len', 'I', 28),
('bom', 'I', BYTE_ORDER_MAGIC),
('v_major', 'H', PCAPNG_VERSION_MAJOR),
('v_minor', 'H', PCAPNG_VERSION_MINOR),
('sec_len', 'q', -1), # section length, -1 = auto
# ( options, variable size )
('_len', 'I', 28)
)
def __bytes__(self):
opts_buf = self._do_pack_options()
n = len(opts_buf) + self.__hdr_len__
self.len = n
self._len = n
hdr_buf = self._pack_hdr(
self.type,
n,
self.bom,
self.v_major,
self.v_minor,
self.sec_len,
n,
)
return b''.join([hdr_buf[:-4], opts_buf, hdr_buf[-4:]])
class SectionHeaderBlockLE(SectionHeaderBlock):
__byte_order__ = '<'
class InterfaceDescriptionBlock(_PcapngBlock):
"""Interface Description block"""
__hdr__ = (
('type', 'I', PCAPNG_BT_IDB),
('len', 'I', 20),
('linktype', 'H', DLT_EN10MB),
('_reserved', 'H', 0),
('snaplen', 'I', 1500),
# ( options, variable size )
('_len', 'I', 20)
)
def __bytes__(self):
opts_buf = self._do_pack_options()
n = len(opts_buf) + self.__hdr_len__
self.len = n
self._len = n
hdr_buf = self._pack_hdr(
self.type,
n,
self.linktype,
self._reserved,
self.snaplen,
n,
)
return b''.join([hdr_buf[:-4], opts_buf, hdr_buf[-4:]])
class InterfaceDescriptionBlockLE(InterfaceDescriptionBlock):
__byte_order__ = '<'
class EnhancedPacketBlock(_PcapngBlock):
"""Enhanced Packet block"""
__hdr__ = (
('type', 'I', PCAPNG_BT_EPB),
('len', 'I', 64),
('iface_id', 'I', 0),
('ts_high', 'I', 0), # timestamp high
('ts_low', 'I', 0), # timestamp low
('caplen', 'I', 0), # captured len, size of pkt_data
('pkt_len', 'I', 0), # actual packet len
# ( pkt_data, variable size )
# ( options, variable size )
('_len', 'I', 64)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.len > len(buf):
raise dpkt.NeedData
# packet data
po = self.__hdr_len__ - 4 # offset of pkt_data
self.pkt_data = buf[po:po + self.caplen]
# skip padding between pkt_data and options
opts_offset = po + _align32b(self.caplen)
self._do_unpack_options(buf, opts_offset)
def __bytes__(self):
pkt_buf = self.pkt_data
pkt_len = len(pkt_buf)
self.caplen = pkt_len
self.pkt_len = pkt_len
opts_buf = self._do_pack_options()
n = self.__hdr_len__ + _align32b(self.caplen) + len(opts_buf)
self.len = n
self._len = n
hdr_buf = self._pack_hdr(
self.type,
n,
self.iface_id,
self.ts_high,
self.ts_low,
pkt_len,
pkt_len,
n
)
return b''.join([hdr_buf[:-4], _padded(pkt_buf), opts_buf, hdr_buf[-4:]])
def __len__(self):
opts_len = sum(len(o) for o in self.opts)
return self.__hdr_len__ + _align32b(self.caplen) + opts_len
class EnhancedPacketBlockLE(EnhancedPacketBlock):
__byte_order__ = '<'
class PacketBlock(EnhancedPacketBlock):
"""Packet block (deprecated)"""
__hdr__ = (
('type', 'I', PCAPNG_BT_PB),
('len', 'I', 64),
('iface_id', 'H', 0),
('drops_count', 'H', 0), # local drop counter
('ts_high', 'I', 0), # timestamp high
('ts_low', 'I', 0), # timestamp low
('caplen', 'I', 0), # captured len, size of pkt_data
('pkt_len', 'I', 0), # actual packet len
# ( pkt_data, variable size )
# ( options, variable size )
('_len', 'I', 64)
)
def __bytes__(self):
pkt_buf = self.pkt_data
pkt_len = len(pkt_buf)
self.caplen = pkt_len
self.pkt_len = pkt_len
opts_buf = self._do_pack_options()
n = self.__hdr_len__ + _align32b(self.caplen) + len(opts_buf)
self.len = n
self._len = n
hdr_buf = self._pack_hdr(
self.type,
n,
self.iface_id,
self.drops_count,
self.ts_high,
self.ts_low,
pkt_len,
pkt_len,
n
)
return b''.join([hdr_buf[:-4], _padded(pkt_buf), opts_buf, hdr_buf[-4:]])
class PacketBlockLE(PacketBlock):
__byte_order__ = '<'
class Writer(object):
"""Simple pcapng dumpfile writer."""
__le = sys.byteorder == 'little'
def __init__(self, fileobj, snaplen=1500, linktype=DLT_EN10MB, shb=None, idb=None):
"""
Create a pcapng dumpfile writer for the given fileobj.
shb can be an instance of SectionHeaderBlock(LE)
idb can be an instance of InterfaceDescriptionBlock(LE) (or sequence of them)
"""
self.__f = fileobj
self._precision_multiplier = 1000000
if shb:
self._validate_block('shb', shb, SectionHeaderBlock)
if idb:
try:
for idb_ in idb:
self._validate_block('idb', idb_, InterfaceDescriptionBlock)
            except (TypeError, ValueError):  # not iterable, or _validate_block failed
self._validate_block('idb', idb, InterfaceDescriptionBlock)
idb = [idb]
if self.__le:
shb = shb or SectionHeaderBlockLE()
idb = idb or [InterfaceDescriptionBlockLE(snaplen=snaplen, linktype=linktype)]
self._kls = EnhancedPacketBlockLE
else:
shb = shb or SectionHeaderBlock()
idb = idb or [InterfaceDescriptionBlock(snaplen=snaplen, linktype=linktype)]
self._kls = EnhancedPacketBlock
self.__f.write(bytes(shb))
for idb_ in idb:
self.__f.write(bytes(idb_))
def _validate_block(self, arg_name, blk, expected_cls):
"""Check a user-defined block for correct type and endianness"""
if not isinstance(blk, expected_cls):
raise ValueError('{0}: expecting class {1}'.format(
arg_name, expected_cls.__name__))
if self.__le and blk.__hdr_fmt__[0] == '>':
raise ValueError('{0}: expecting class {1}LE on a little-endian system'.format(
arg_name, expected_cls.__name__))
if not self.__le and blk.__hdr_fmt__[0] == '<':
raise ValueError('{0}: expecting class {1} on a big-endian system'.format(
arg_name, expected_cls.__name__.replace('LE', '')))
def writepkt(self, pkt, ts=None):
"""
Write a single packet with an optional timestamp.
Args:
pkt: buffer or instance of EnhancedPacketBlock(LE)
ts: Unix timestamp in seconds since Epoch (e.g. 1454725786.99)
"""
if isinstance(pkt, EnhancedPacketBlock):
self._validate_block('pkt', pkt, EnhancedPacketBlock)
if ts is not None: # ts as an argument gets precedence
ts = intround(ts * self._precision_multiplier)
elif pkt.ts_high == pkt.ts_low == 0:
ts = intround(time() * self._precision_multiplier)
if ts is not None:
pkt.ts_high = ts >> 32
pkt.ts_low = ts & 0xffffffff
self.__f.write(bytes(pkt))
return
# pkt is a buffer - wrap it into an EPB
if ts is None:
ts = time()
self.writepkt_time(pkt, ts)
def writepkt_time(self, pkt, ts):
"""
Write a single packet with a mandatory timestamp.
Args:
pkt: a buffer
ts: Unix timestamp in seconds since Epoch (e.g. 1454725786.99)
"""
ts = intround(ts * self._precision_multiplier) # to int microseconds
s = pkt
n = len(s)
epb = self._kls(
ts_high=ts >> 32,
ts_low=ts & 0xffffffff,
caplen=n,
pkt_len=n,
pkt_data=s
)
self.__f.write(bytes(epb))
def writepkts(self, pkts):
"""
Take an iterable of (ts, pkt), and write to file.
"""
kls = self._kls()
ph = kls._pack_hdr
fd = self.__f
iface_id = kls.iface_id
pkt_type = kls.type
opts_buf = kls._do_pack_options()
opts_len = len(opts_buf)
hdr_len = kls.__hdr_len__
precalc_n = hdr_len + opts_len
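        # per-packet size example (illustrative): a 4-byte packet pads to
        # _align32b(4) = 4, so with no options n = 32 + 4 = 36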
for ts, pkt in pkts:
ts = intround(ts * self._precision_multiplier) # int microseconds
pkt_len = len(pkt)
pkt_len_align = _align32b(pkt_len)
n = precalc_n + pkt_len_align
hdr_buf = ph(
pkt_type,
n,
iface_id,
ts >> 32,
ts & 0xffffffff,
pkt_len,
pkt_len,
n
)
buf = b''.join([
hdr_buf[:-4],
_padded_tolen(pkt, pkt_len_align),
opts_buf,
hdr_buf[-4:]
])
fd.write(buf)
def close(self):
self.__f.close()
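# Minimal Writer usage sketch (illustrative; the BytesIO sink and the sample
# timestamp are placeholders only):
#
#   from dpkt.compat import BytesIO
#   fobj = BytesIO()
#   w = Writer(fobj, snaplen=0x1000, linktype=DLT_EN10MB)
#   w.writepkt(b'\xde\xad\xbe\xef', ts=1454725786.52)  # wrapped into an EPB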
class Reader(object):
"""Simple pypcap-compatible pcapng file reader."""
def __init__(self, fileobj):
self.name = getattr(fileobj, 'name', '<{0}>'.format(fileobj.__class__.__name__))
self.__f = fileobj
shb = SectionHeaderBlock()
buf = self.__f.read(shb.__hdr_len__)
if len(buf) < shb.__hdr_len__:
raise ValueError('invalid pcapng header')
# unpack just the header since endianness is not known
shb.unpack_hdr(buf)
if shb.type != PCAPNG_BT_SHB:
raise ValueError('invalid pcapng header: not a SHB')
# determine the correct byte order and reload full SHB
if shb.bom == BYTE_ORDER_MAGIC_LE:
self.__le = True
buf += self.__f.read(_swap32b(shb.len) - shb.__hdr_len__)
shb = SectionHeaderBlockLE(buf)
elif shb.bom == BYTE_ORDER_MAGIC:
self.__le = False
buf += self.__f.read(shb.len - shb.__hdr_len__)
shb = SectionHeaderBlock(buf)
else:
raise ValueError('unknown endianness')
# check if this version is supported
if shb.v_major != PCAPNG_VERSION_MAJOR:
raise ValueError('unknown pcapng version {0}.{1}'.format(shb.v_major, shb.v_minor,))
# look for a mandatory IDB
idb = None
while 1:
buf = self.__f.read(8)
if len(buf) < 8:
break
blk_type, blk_len = struct_unpack('<II' if self.__le else '>II', buf)
buf += self.__f.read(blk_len - 8)
if blk_type == PCAPNG_BT_IDB:
idb = (InterfaceDescriptionBlockLE(buf) if self.__le
else InterfaceDescriptionBlock(buf))
break
# just skip other blocks
if idb is None:
raise ValueError('IDB not found')
# set timestamp resolution and offset
self._divisor = 1000000 # defaults
self._tsoffset = 0
for opt in idb.opts:
if opt.code == PCAPNG_OPT_IF_TSRESOL:
                    # if MSB=0, the remaining bits are a negative power of 10 (e.g. 6 means microsecs)
                    # if MSB=1, the remaining bits are a negative power of 2 (e.g. 10 means 1/1024 of a second)
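                    # worked examples (illustrative): opt.data b'\x06' -> divisor 10**6
                    # (microseconds); b'\x8a' (MSB set, low bits 10) -> 2**10 = 1024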
opt_val = struct_unpack('b', opt.data)[0]
pow_num = 2 if opt_val & 0b10000000 else 10
self._divisor = pow_num ** (opt_val & 0b01111111)
elif opt.code == PCAPNG_OPT_IF_TSOFFSET:
# 64-bit int that specifies an offset (in seconds) that must be added to the
# timestamp of each packet
self._tsoffset = struct_unpack('<q' if self.__le else '>q', opt.data)[0]
if idb.linktype in dltoff:
self.dloff = dltoff[idb.linktype]
else:
self.dloff = 0
self.idb = idb
self.snaplen = idb.snaplen
self.filter = ''
self.__iter = iter(self)
@property
def fd(self):
return self.__f.fileno()
def fileno(self):
return self.fd
def datalink(self):
return self.idb.linktype
def setfilter(self, value, optimize=1):
raise NotImplementedError
def readpkts(self):
return list(self)
def __next__(self):
return next(self.__iter)
next = __next__ # Python 2 compat
def dispatch(self, cnt, callback, *args):
"""Collect and process packets with a user callback.
Return the number of packets processed, or 0 for a savefile.
Arguments:
cnt -- number of packets to process;
or 0 to process all packets until EOF
callback -- function with (timestamp, pkt, *args) prototype
*args -- optional arguments passed to callback on execution
"""
processed = 0
if cnt > 0:
for _ in range(cnt):
try:
ts, pkt = next(iter(self))
except StopIteration:
break
callback(ts, pkt, *args)
processed += 1
else:
for ts, pkt in self:
callback(ts, pkt, *args)
processed += 1
return processed
def loop(self, callback, *args):
self.dispatch(0, callback, *args)
def __iter__(self):
while 1:
buf = self.__f.read(8)
if len(buf) < 8:
break
blk_type, blk_len = struct_unpack('<II' if self.__le else '>II', buf)
buf += self.__f.read(blk_len - 8)
if blk_type == PCAPNG_BT_EPB:
epb = EnhancedPacketBlockLE(buf) if self.__le else EnhancedPacketBlock(buf)
ts = self._tsoffset + (((epb.ts_high << 32) | epb.ts_low) / self._divisor)
yield (ts, epb.pkt_data)
elif blk_type == PCAPNG_BT_PB:
pb = PacketBlockLE(buf) if self.__le else PacketBlock(buf)
ts = self._tsoffset + (((pb.ts_high << 32) | pb.ts_low) / self._divisor)
yield (ts, pb.pkt_data)
# just ignore other blocks
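# Minimal Reader usage sketch (illustrative; 'capture.pcapng' is a placeholder):
#
#   with open('capture.pcapng', 'rb') as f:
#       for ts, buf in Reader(f):
#           print(ts, len(buf))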
#########
# TESTS #
#########
def test_shb():
"""Test SHB with options"""
buf = (
b'\x0a\x0d\x0d\x0a\x58\x00\x00\x00\x4d\x3c\x2b\x1a\x01\x00\x00\x00\xff\xff\xff\xff\xff\xff'
b'\xff\xff\x04\x00\x31\x00\x54\x53\x68\x61\x72\x6b\x20\x31\x2e\x31\x30\x2e\x30\x72\x63\x32'
b'\x20\x28\x53\x56\x4e\x20\x52\x65\x76\x20\x34\x39\x35\x32\x36\x20\x66\x72\x6f\x6d\x20\x2f'
b'\x74\x72\x75\x6e\x6b\x2d\x31\x2e\x31\x30\x29\x00\x00\x00\x00\x00\x00\x00\x58\x00\x00\x00')
opt_buf = b'\x04\x00\x31\x00TShark 1.10.0rc2 (SVN Rev 49526 from /trunk-1.10)\x00\x00\x00'
# block unpacking
shb = SectionHeaderBlockLE(buf)
assert shb.type == PCAPNG_BT_SHB
assert shb.bom == BYTE_ORDER_MAGIC
assert shb.v_major == 1
assert shb.v_minor == 0
assert shb.sec_len == -1
    assert shb.data == b''
# options unpacking
assert len(shb.opts) == 2
assert shb.opts[0].code == PCAPNG_OPT_SHB_USERAPPL
assert shb.opts[0].data == b'TShark 1.10.0rc2 (SVN Rev 49526 from /trunk-1.10)'
assert shb.opts[0].len == len(shb.opts[0].data)
assert shb.opts[1].code == PCAPNG_OPT_ENDOFOPT
assert shb.opts[1].len == 0
# option packing
assert str(shb.opts[0]) == str(opt_buf)
assert len(shb.opts[0]) == len(opt_buf)
assert bytes(shb.opts[1]) == b'\x00\x00\x00\x00'
# block packing
assert bytes(shb) == bytes(buf)
assert str(shb) == str(buf)
assert len(shb) == len(buf)
def test_idb():
"""Test IDB with options"""
buf = (
b'\x01\x00\x00\x00\x20\x00\x00\x00\x01\x00\x00\x00\xff\xff\x00\x00\x09\x00\x01\x00\x06\x00'
b'\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00')
# block unpacking
idb = InterfaceDescriptionBlockLE(buf)
assert idb.type == PCAPNG_BT_IDB
assert idb.linktype == DLT_EN10MB
assert idb.snaplen == 0xffff
    assert idb.data == b''
# options unpacking
assert len(idb.opts) == 2
assert idb.opts[0].code == PCAPNG_OPT_IF_TSRESOL
assert idb.opts[0].len == 1
assert idb.opts[0].data == b'\x06'
assert idb.opts[1].code == PCAPNG_OPT_ENDOFOPT
assert idb.opts[1].len == 0
# option packing
assert bytes(idb.opts[0]) == b'\x09\x00\x01\x00\x06\x00\x00\x00'
assert len(idb.opts[0]) == 8
assert bytes(idb.opts[1]) == b'\x00\x00\x00\x00'
# block packing
assert bytes(idb) == bytes(buf)
assert str(idb) == str(buf)
assert len(idb) == len(buf)
def test_epb():
"""Test EPB with a non-ascii comment option"""
buf = (
b'\x06\x00\x00\x00\x80\x00\x00\x00\x00\x00\x00\x00\x73\xe6\x04\x00\xbe\x37\xe2\x19\x4a\x00'
b'\x00\x00\x4a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x45\x00'
b'\x00\x3c\x5d\xb3\x40\x00\x40\x06\xdf\x06\x7f\x00\x00\x01\x7f\x00\x00\x01\x98\x34\x11\x4e'
b'\x95\xcb\x2d\x3a\x00\x00\x00\x00\xa0\x02\xaa\xaa\xfe\x30\x00\x00\x02\x04\xff\xd7\x04\x02'
b'\x08\x0a\x05\x8f\x70\x89\x00\x00\x00\x00\x01\x03\x03\x07\x00\x00\x01\x00\x0a\x00\xd0\xbf'
b'\xd0\xb0\xd0\xba\xd0\xb5\xd1\x82\x00\x00\x00\x00\x00\x00\x80\x00\x00\x00')
# block unpacking
epb = EnhancedPacketBlockLE(buf)
assert epb.type == PCAPNG_BT_EPB
assert epb.caplen == len(epb.pkt_data)
assert epb.pkt_len == len(epb.pkt_data)
assert epb.caplen == 74
assert epb.ts_high == 321139
assert epb.ts_low == 434255806
    assert epb.data == b''
# options unpacking
assert len(epb.opts) == 2
assert epb.opts[0].code == PCAPNG_OPT_COMMENT
assert epb.opts[0].text == u'\u043f\u0430\u043a\u0435\u0442'
assert epb.opts[1].code == PCAPNG_OPT_ENDOFOPT
assert epb.opts[1].len == 0
# option packing
assert bytes(epb.opts[0]) == b'\x01\x00\x0a\x00\xd0\xbf\xd0\xb0\xd0\xba\xd0\xb5\xd1\x82\x00\x00'
assert len(epb.opts[0]) == 16
assert bytes(epb.opts[1]) == b'\x00\x00\x00\x00'
# block packing
assert bytes(epb) == bytes(buf)
assert str(epb) == str(buf)
assert len(epb) == len(buf)
def test_pb():
"""Test PB with a non-ascii comment option"""
buf = (
b'\x02\x00\x00\x00\x80\x00\x00\x00\x00\x00\x00\x00\x73\xe6\x04\x00\xbe\x37\xe2\x19\x4a\x00'
b'\x00\x00\x4a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x45\x00'
b'\x00\x3c\x5d\xb3\x40\x00\x40\x06\xdf\x06\x7f\x00\x00\x01\x7f\x00\x00\x01\x98\x34\x11\x4e'
b'\x95\xcb\x2d\x3a\x00\x00\x00\x00\xa0\x02\xaa\xaa\xfe\x30\x00\x00\x02\x04\xff\xd7\x04\x02'
b'\x08\x0a\x05\x8f\x70\x89\x00\x00\x00\x00\x01\x03\x03\x07\x00\x00\x01\x00\x0a\x00\xd0\xbf'
b'\xd0\xb0\xd0\xba\xd0\xb5\xd1\x82\x00\x00\x00\x00\x00\x00\x80\x00\x00\x00')
# block unpacking
pb = PacketBlockLE(buf)
assert pb.type == PCAPNG_BT_PB
assert pb.caplen == len(pb.pkt_data)
assert pb.iface_id == 0
assert pb.drops_count == 0
assert pb.pkt_len == len(pb.pkt_data)
assert pb.caplen == 74
assert pb.ts_high == 321139
assert pb.ts_low == 434255806
    assert pb.data == b''
# options unpacking
assert len(pb.opts) == 2
assert pb.opts[0].code == PCAPNG_OPT_COMMENT
assert pb.opts[0].text == u'\u043f\u0430\u043a\u0435\u0442'
assert pb.opts[1].code == PCAPNG_OPT_ENDOFOPT
assert pb.opts[1].len == 0
# option packing
assert bytes(pb.opts[0]) == b'\x01\x00\x0a\x00\xd0\xbf\xd0\xb0\xd0\xba\xd0\xb5\xd1\x82\x00\x00'
assert len(pb.opts[0]) == 16
assert bytes(pb.opts[1]) == b'\x00\x00\x00\x00'
# block packing
assert bytes(pb) == bytes(buf)
assert str(pb) == str(buf)
assert len(pb) == len(buf)
def test_pb_read():
""" Test PB parsing as part of file """
pb_packet = (
b'\x02\x00\x00\x00\x80\x00\x00\x00\x00\x00\x00\x00\x73\xe6\x04\x00\xbe\x37\xe2\x19\x4a\x00'
b'\x00\x00\x4a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x45\x00'
b'\x00\x3c\x5d\xb3\x40\x00\x40\x06\xdf\x06\x7f\x00\x00\x01\x7f\x00\x00\x01\x98\x34\x11\x4e'
b'\x95\xcb\x2d\x3a\x00\x00\x00\x00\xa0\x02\xaa\xaa\xfe\x30\x00\x00\x02\x04\xff\xd7\x04\x02'
b'\x08\x0a\x05\x8f\x70\x89\x00\x00\x00\x00\x01\x03\x03\x07\x00\x00\x01\x00\x0a\x00\xd0\xbf'
b'\xd0\xb0\xd0\xba\xd0\xb5\xd1\x82\x00\x00\x00\x00\x00\x00\x80\x00\x00\x00')
buf = define_testdata().valid_pcapng + pb_packet
fobj = BytesIO(buf)
# test reading
reader = Reader(fobj)
# first packet is EPB and comes from define_testdata().valid_pcapng
ts, buf1 = next(iter(reader))
assert ts == 1442984653.210838
# second packet is concatenated PB, pb_packet defined above
ts, buf2 = next(iter(reader))
assert ts == 1379281936.72595
def test_epb_ascii_comment_option():
"""Test EPB with an ascii comment option"""
buf = (
b'\x06\x00\x00\x00\x7c\x00\x00\x00\x01\x00\x00\x00\xff\xff\xff\xff\x79\xd2\xdf\xe1\x44\x00'
b'\x00\x00\x44\x00\x00\x00\x00\x00\x00\x01\x00\x06\x00\x0b\xdb\x43\xe7\x4b\xf6\x7f\x08\x00'
b'\x45\x00\x00\x34\x2b\x1f\x40\x00\x40\x06\x15\x63\x82\xd9\xfa\x81\x82\xd9\xfa\x0d\x17\x70'
b'\xec\x3e\x02\xba\x94\x38\x81\x52\x4a\x39\x80\x10\xbb\x5d\x53\x0d\x00\x00\x01\x01\x08\x0a'
b'\x03\xf9\xc7\xbf\x04\x02\x38\x28\x01\x00\x0f\x00\x50\x61\x63\x6b\x65\x74\x20\x23\x31\x00'
b'\x78\x4d\x39\x87\x0c\x00\x00\x00\x00\x00\x7c\x00\x00\x00')
# block unpacking
epb = EnhancedPacketBlockLE(buf)
# options unpacking
assert len(epb.opts) == 2
assert epb.opts[0].code == PCAPNG_OPT_COMMENT
assert epb.opts[0].text == 'Packet #1'
assert epb.opts[1].code == PCAPNG_OPT_ENDOFOPT
assert epb.opts[1].len == 0
# option packing
assert bytes(epb.opts[0]) == b'\x01\x00\x09\x00\x50\x61\x63\x6b\x65\x74\x20\x23\x31\x00\x00\x00'
assert len(epb.opts[0]) == 16
assert bytes(epb.opts[1]) == b'\x00\x00\x00\x00'
def test_epb_invalid_utf8_comment_option():
"""Test EPB with an invalid (non UTF-8, non-zero terminated ascii) comment option"""
buf = (
b'\x06\x00\x00\x00\x7c\x00\x00\x00\x01\x00\x00\x00\xff\xff\xff\xff\x79\xd2\xdf\xe1\x44\x00'
b'\x00\x00\x44\x00\x00\x00\x00\x00\x00\x01\x00\x06\x00\x0b\xdb\x43\xe7\x4b\xf6\x7f\x08\x00'
b'\x45\x00\x00\x34\x2b\x1f\x40\x00\x40\x06\x15\x63\x82\xd9\xfa\x81\x82\xd9\xfa\x0d\x17\x70'
b'\xec\x3e\x02\xba\x94\x38\x81\x52\x4a\x39\x80\x10\xbb\x5d\x53\x0d\x00\x00\x01\x01\x08\x0a'
b'\x03\xf9\xc7\xbf\x04\x02\x38\x28\x01\x00\x0f\x00\x50\x61\x63\x6b\x65\x74\x20\x23\x31\x20'
b'\x78\x4d\x39\x87\x0c\x00\x00\x00\x00\x00\x7c\x00\x00\x00')
try:
EnhancedPacketBlockLE(buf)
except Exception as e:
assert isinstance(e, UnicodeDecodeError)
def test_simple_write_read():
"""Test writing a basic pcapng and then reading it"""
fobj = BytesIO()
writer = Writer(fobj, snaplen=0x2000, linktype=DLT_LINUX_SLL)
writer.writepkt(b'foo', ts=1454725786.526401)
fobj.flush()
fobj.seek(0)
reader = Reader(fobj)
assert reader.snaplen == 0x2000
assert reader.datalink() == DLT_LINUX_SLL
ts, buf1 = next(iter(reader))
assert ts == 1454725786.526401
assert buf1 == b'foo'
# test dispatch()
fobj.seek(0)
reader = Reader(fobj)
assert reader.dispatch(1, lambda ts, pkt: None) == 1
assert reader.dispatch(1, lambda ts, pkt: None) == 0
fobj.close()
def test_pcapng_header():
"""Reading an empty file will fail as the header length is incorrect"""
fobj = BytesIO()
try:
Reader(fobj)
except Exception as e:
assert isinstance(e, ValueError)
def define_testdata():
class TestData(object):
def __init__(self):
self.valid_shb_le = SectionHeaderBlockLE(opts=[
PcapngOptionLE(code=3, data=b'64-bit Windows 8.1, build 9600'),
PcapngOptionLE(code=4, data=b'Dumpcap 1.12.7 (v1.12.7-0-g7fc8978 from master-1.12)'),
PcapngOptionLE()
])
self.valid_shb_be = SectionHeaderBlock(opts=[
PcapngOption(code=3, data=b'64-bit Windows 8.1, build 9600'),
PcapngOption(code=4, data=b'Dumpcap 1.12.7 (v1.12.7-0-g7fc8978 from master-1.12)'),
PcapngOption()
])
self.valid_idb_le = InterfaceDescriptionBlockLE(snaplen=0x40000, opts=[
PcapngOptionLE(code=2, data=b'\\Device\\NPF_{3BBF21A7-91AE-4DDB-AB2C-C782999C22D5}'),
PcapngOptionLE(code=9, data=b'\x06'),
PcapngOptionLE(code=12, data=b'64-bit Windows 8.1, build 9600'),
PcapngOptionLE()
])
self.valid_idb_be = InterfaceDescriptionBlock(snaplen=0x40000, opts=[
PcapngOption(code=2, data=b'\\Device\\NPF_{3BBF21A7-91AE-4DDB-AB2C-C782999C22D5}'),
PcapngOption(code=9, data=b'\x06'),
PcapngOption(code=12, data=b'64-bit Windows 8.1, build 9600'),
PcapngOption()
])
self.valid_pcapng = (
b'\x0a\x0d\x0d\x0a\x7c\x00\x00\x00\x4d\x3c\x2b\x1a\x01\x00\x00'
b'\x00\xff\xff\xff\xff\xff\xff\xff\xff\x03\x00\x1e\x00\x36\x34'
b'\x2d\x62\x69\x74\x20\x57\x69\x6e\x64\x6f\x77\x73\x20\x38\x2e'
b'\x31\x2c\x20\x62\x75\x69\x6c\x64\x20\x39\x36\x30\x30\x00\x00'
b'\x04\x00\x34\x00\x44\x75\x6d\x70\x63\x61\x70\x20\x31\x2e\x31'
b'\x32\x2e\x37\x20\x28\x76\x31\x2e\x31\x32\x2e\x37\x2d\x30\x2d'
b'\x67\x37\x66\x63\x38\x39\x37\x38\x20\x66\x72\x6f\x6d\x20\x6d'
b'\x61\x73\x74\x65\x72\x2d\x31\x2e\x31\x32\x29\x00\x00\x00\x00'
b'\x7c\x00\x00\x00\x01\x00\x00\x00\x7c\x00\x00\x00\x01\x00\x00'
b'\x00\x00\x00\x04\x00\x02\x00\x32\x00\x5c\x44\x65\x76\x69\x63'
b'\x65\x5c\x4e\x50\x46\x5f\x7b\x33\x42\x42\x46\x32\x31\x41\x37'
b'\x2d\x39\x31\x41\x45\x2d\x34\x44\x44\x42\x2d\x41\x42\x32\x43'
b'\x2d\x43\x37\x38\x32\x39\x39\x39\x43\x32\x32\x44\x35\x7d\x00'
b'\x00\x09\x00\x01\x00\x06\x00\x00\x00\x0c\x00\x1e\x00\x36\x34'
b'\x2d\x62\x69\x74\x20\x57\x69\x6e\x64\x6f\x77\x73\x20\x38\x2e'
b'\x31\x2c\x20\x62\x75\x69\x6c\x64\x20\x39\x36\x30\x30\x00\x00'
b'\x00\x00\x00\x00\x7c\x00\x00\x00\x06\x00\x00\x00\x84\x00\x00'
b'\x00\x00\x00\x00\x00\x63\x20\x05\x00\xd6\xc4\xab\x0b\x4a\x00'
b'\x00\x00\x4a\x00\x00\x00\x08\x00\x27\x96\xcb\x7c\x52\x54\x00'
b'\x12\x35\x02\x08\x00\x45\x00\x00\x3c\xa4\x40\x00\x00\x1f\x01'
b'\x27\xa2\xc0\xa8\x03\x28\x0a\x00\x02\x0f\x00\x00\x56\xf0\x00'
b'\x01\x00\x6d\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c'
b'\x4d\x4e\x4f\x50\x51\x52\x53\x54\x55\x56\x57\x41\x42\x43\x44'
b'\x45\x46\x47\x48\x49\x00\x00\x01\x00\x0f\x00\x64\x70\x6b\x74'
b'\x20\x69\x73\x20\x61\x77\x65\x73\x6f\x6d\x65\x00\x00\x00\x00'
b'\x00\x84\x00\x00\x00'
)
self.valid_pkts = [
(1442984653.210838,
(b"\x08\x00'\x96\xcb|RT\x00\x125\x02\x08\x00E\x00\x00<\xa4@"
b"\x00\x00\x1f\x01'\xa2\xc0\xa8\x03(\n\x00\x02\x0f\x00\x00V"
b"\xf0\x00\x01\x00mABCDEFGHIJKLMNOPQRSTUVWABCDEFGHI"))
]
self.valid_epb_be = EnhancedPacketBlock(opts=[
PcapngOption(code=1, text=b'dpkt is awesome'),
PcapngOption()
], pkt_data=(
b'\x08\x00\x27\x96\xcb\x7c\x52\x54\x00\x12\x35\x02\x08\x00\x45'
b'\x00\x00\x3c\xa4\x40\x00\x00\x1f\x01\x27\xa2\xc0\xa8\x03\x28'
b'\x0a\x00\x02\x0f\x00\x00\x56\xf0\x00\x01\x00\x6d\x41\x42\x43'
b'\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f\x50\x51\x52'
b'\x53\x54\x55\x56\x57\x41\x42\x43\x44\x45\x46\x47\x48\x49'
))
self.valid_epb_le = EnhancedPacketBlockLE(opts=[
PcapngOptionLE(code=1, text=b'dpkt is awesome'),
PcapngOptionLE()
], pkt_data=(
b'\x08\x00\x27\x96\xcb\x7c\x52\x54\x00\x12\x35\x02\x08\x00\x45'
b'\x00\x00\x3c\xa4\x40\x00\x00\x1f\x01\x27\xa2\xc0\xa8\x03\x28'
b'\x0a\x00\x02\x0f\x00\x00\x56\xf0\x00\x01\x00\x6d\x41\x42\x43'
b'\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f\x50\x51\x52'
b'\x53\x54\x55\x56\x57\x41\x42\x43\x44\x45\x46\x47\x48\x49'
))
@property
def shb_idb_epb_le(self):
return self.valid_shb_le, self.valid_idb_le, self.valid_epb_le
@property
def shb_idb_epb_be(self):
return self.valid_shb_be, self.valid_idb_be, self.valid_epb_be
@property
def shb_idb_epb(self):
return self.shb_idb_epb_le if sys.byteorder == 'little' else self.shb_idb_epb_be
return TestData()
def pre_test(f):
def wrapper(*args, **kwargs):
fobj = BytesIO()
f.__globals__['fobj'] = fobj
ret = f(*args, **kwargs)
fobj.flush()
fobj.seek(0)
return ret
return wrapper
class WriterTestWrap:
"""
Decorate a writer test function with an instance of this class.
The test will be provided with a writer object, which it should write some pkts to.
After the test has run, the BytesIO object will be passed to a Reader,
which will compare each pkt to the return value of the test.
"""
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def __call__(self, f, *args, **kwargs):
def wrapper(*args, **kwargs):
from .compat import BytesIO
for little_endian in [True, False]:
fobj = BytesIO()
_sysle = Writer._Writer__le
Writer._Writer__le = little_endian
f.__globals__['writer'] = Writer(fobj, **self.kwargs.get('writer', {}))
f.__globals__['fobj'] = fobj
pkts = f(*args, **kwargs)
fobj.flush()
fobj.seek(0)
assert pkts, "You must return the input data from the test"
for (ts_out, pkt_out), (ts_in, pkt_in) in zip(pkts, iter(Reader(fobj))):
assert ts_out == ts_in
assert pkt_out == pkt_in
# 'noqa' for flake8 to ignore these since writer and fobj were injected into globals
writer.close() # noqa
Writer._Writer__le = _sysle
del f.__globals__['writer']
del f.__globals__['fobj']
return wrapper
class PostTest:
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def __call__(self, f, *args, **kwargs):
def wrapper(*args, **kwargs):
ret = f(*args, **kwargs)
fobj = f.__globals__['fobj']
test_type = self.kwargs.get('test')
if test_type == 'assertion':
isexception = False
try:
Reader(fobj)
except Exception as e:
isexception = True
assert isinstance(e, self.kwargs['type'])
assert str(e) == self.kwargs['msg']
assert isexception, "No assertion raised!"
elif test_type == 'compare_property':
prop = self.kwargs['property']
reader = Reader(fobj)
assert bytes(ret) == bytes(getattr(reader, prop))
elif test_type == 'compare_method':
method = self.kwargs['method']
reader = Reader(fobj)
comp = getattr(reader, method)()
assert comp == ret
else:
raise Exception("No test type specified")
return wrapper
@PostTest(test='assertion', type=ValueError, msg='invalid pcapng header: not a SHB')
@pre_test
def test_shb_header():
shb = define_testdata().valid_shb_le
shb.type = 123456666
fobj.write(bytes(shb)) # noqa
@PostTest(test='assertion', type=ValueError, msg='unknown endianness')
@pre_test
def test_shb_bom():
shb = define_testdata().valid_shb_le
shb.bom = 12345666
fobj.write(bytes(shb)) # noqa
@PostTest(test='assertion', type=ValueError, msg='unknown pcapng version 123.45')
@pre_test
def test_shb_version():
shb = define_testdata().valid_shb_le
shb.v_major = 123
shb.v_minor = 45
fobj.write(bytes(shb)) # noqa
@PostTest(test='assertion', type=ValueError, msg='IDB not found')
@pre_test
def test_no_idb():
shb = define_testdata().valid_shb_le
fobj.write(bytes(shb)+b'aaaa') # noqa
@PostTest(test='compare_property', property='idb')
@pre_test
def test_idb_opt_offset():
"""Test that the timestamp offset is correctly written and read"""
shb = define_testdata().valid_shb_le
idb = define_testdata().valid_idb_le
idb.opts.insert(0, PcapngOptionLE(
code=PCAPNG_OPT_IF_TSOFFSET,
data=struct_pack('<q', 123456666))
)
fobj.write(bytes(shb)+bytes(idb)) # noqa
return idb
@PostTest(test='compare_property', property='dloff')
@pre_test
def test_idb_linktype():
"""Test that if the idb.linktype is not in dloff, dloff is set to 0"""
shb = define_testdata().valid_shb_le
idb = define_testdata().valid_idb_le
idb.linktype = 3456
fobj.write(bytes(shb)+bytes(idb)) # noqa
return 0
def test_repr():
"""check the __repr__ method for Packet subclass.
The __repr__ method currently includes the b'' in the string. This means that python2 and python3 will differ.
"""
real = repr(define_testdata().valid_shb_le)
python2 = (
"SectionHeaderBlockLE(opts=[PcapngOptionLE(code=3, data='64-bit Windows 8.1, build 9600'),"
" PcapngOptionLE(code=4, data='Dumpcap 1.12.7 (v1.12.7-0-g7fc8978 from master-1.12)'),"
" PcapngOptionLE(opt_endofopt)])")
python3 = (
"SectionHeaderBlockLE(opts=[PcapngOptionLE(code=3, data=b'64-bit Windows 8.1, build 9600'),"
" PcapngOptionLE(code=4, data=b'Dumpcap 1.12.7 (v1.12.7-0-g7fc8978 from master-1.12)'),"
" PcapngOptionLE(opt_endofopt)])")
assert real in [python2, python3]
@pre_test
def test_filter():
buf = define_testdata().valid_pcapng
fobj.write(buf) # noqa
fobj.flush() # noqa
fobj.seek(0) # noqa
reader = Reader(fobj) # noqa
try:
reader.setfilter(None, None)
except Exception as e:
assert isinstance(e, NotImplementedError)
@PostTest(test='compare_method', method='readpkts')
@pre_test
def test_readpkts():
fobj.write(define_testdata().valid_pcapng) # noqa
return define_testdata().valid_pkts
@PostTest(test='compare_method', method='next')
@pre_test
def test_next():
fobj.write(define_testdata().valid_pcapng) # noqa
return define_testdata().valid_pkts[0]
@pre_test
def test_dispatch():
fobj.write(define_testdata().valid_pcapng) # noqa
fobj.flush() # noqa
fobj.seek(0) # noqa
def callback(timestamp, pkt, *args):
assert (timestamp, pkt) == define_testdata().valid_pkts[0]
reader = Reader(fobj) # noqa
assert 1 == reader.dispatch(0, callback)
@pre_test
def test_loop():
fobj.write(define_testdata().valid_pcapng) # noqa
fobj.flush() # noqa
fobj.seek(0) # noqa
def callback(timestamp, pkt, *args):
assert (timestamp, pkt) == define_testdata().valid_pkts[0]
reader = Reader(fobj) # noqa
reader.loop(callback)
def test_idb_opt_err():
"""Test that options end with opt_endofopt"""
idb = define_testdata().valid_idb_le
del idb.opts[-1]
try:
bytes(idb)
except Exception as e:
assert isinstance(e, dpkt.PackError)
assert str(e) == 'options must end with opt_endofopt'
def test_custom_read_write():
"""Test a full pcapng file with 1 ICMP packet"""
buf = define_testdata().valid_pcapng
fobj = BytesIO(buf)
# test reading
reader = Reader(fobj)
assert reader.snaplen == 0x40000
assert reader.datalink() == DLT_EN10MB
assert reader.idb.opts[0].data.decode('utf-8') == '\\Device\\NPF_{3BBF21A7-91AE-4DDB-AB2C-C782999C22D5}'
assert reader.idb.opts[2].data.decode('utf-8') == '64-bit Windows 8.1, build 9600'
ts, buf1 = next(iter(reader))
assert ts == 1442984653.2108380
assert len(buf1) == 74
assert buf1.startswith(b'\x08\x00\x27\x96')
assert buf1.endswith(b'FGHI')
fobj.close()
# test pcapng customized writing
shb, idb, epb = define_testdata().shb_idb_epb
fobj = BytesIO()
writer = Writer(fobj, shb=shb, idb=idb)
writer.writepkt(epb, ts=1442984653.210838)
# .valid_pcapng buf was collected on a little endian system
if sys.byteorder == 'little':
assert fobj.getvalue() == buf
fobj.close()
# same with timestamps defined inside EPB
epb.ts_high = 335971
epb.ts_low = 195806422
fobj = BytesIO()
writer = Writer(fobj, shb=shb, idb=idb)
writer.writepkt(epb)
if sys.byteorder == 'little':
assert fobj.getvalue() == buf
fobj.close()
def test_multi_idb_writer():
"""Test writing multiple interface description blocks into pcapng and read it"""
fobj = BytesIO()
shb, idb, epb = define_testdata().shb_idb_epb
writer = Writer(fobj, shb=shb, idb=[idb, idb])
writer.writepkt(epb)
fobj.flush()
fobj.seek(0)
Reader(fobj)
fobj.close()
@pre_test
def test_writer_validate_instance():
"""System endianness and shb endianness should match"""
shb = 10
try:
writer = Writer(fobj, shb=shb) # noqa
except Exception as e:
assert isinstance(e, ValueError)
assert str(e) == 'shb: expecting class SectionHeaderBlock'
@pre_test
def test_writepkt_epb_ts():
"""writepkt should assign ts_high/low for epb if they are 0"""
global time
shb, idb, epb = define_testdata().shb_idb_epb
writer = Writer(fobj, shb=shb, idb=idb) # noqa
epb.ts_high = epb.ts_low = 0
ts = 1454725786.526401
_tmp = time
def time():
return ts
writer.writepkt(epb)
time = _tmp
ts_high, ts_low = 338704, 3183502017
assert epb.ts_high == ts_high
assert epb.ts_low == ts_low
@pre_test
def test_writer_validate_le():
"""System endianness and shb endianness should match"""
shb = define_testdata().valid_shb_be
_sysle = Writer._Writer__le
Writer._Writer__le = True
try:
writer = Writer(fobj, shb=shb) # noqa
except Exception as e:
assert isinstance(e, ValueError)
assert str(e) == 'shb: expecting class SectionHeaderBlockLE on a little-endian system'
Writer._Writer__le = _sysle
@pre_test
def test_writer_validate_be():
"""System endianness and shb endianness should match"""
shb = define_testdata().valid_shb_le
_sysle = Writer._Writer__le
Writer._Writer__le = False
try:
writer = Writer(fobj, shb=shb) # noqa
except Exception as e:
assert isinstance(e, ValueError)
assert str(e) == 'shb: expecting class SectionHeaderBlock on a big-endian system'
Writer._Writer__le = _sysle
@WriterTestWrap()
def test_writepkt_no_time():
global time
ts, pkt = 1454725786.526401, b'foooo'
_tmp = time
def time():
return ts
writer.writepkt(pkt) # noqa
time = _tmp
return [(ts, pkt)]
@WriterTestWrap(writer={'snaplen': 10})
def test_writepkt_snaplen():
ts, pkt = 1454725786.526401, b'foooo' * 100
writer.writepkt(pkt, ts) # noqa
return [(ts, pkt)]
@WriterTestWrap()
def test_writepkt_with_time():
ts, pkt = 1454725786.526401, b'foooo'
writer.writepkt(pkt, ts) # noqa
return [(ts, pkt)]
@WriterTestWrap()
def test_writepkts():
"""writing multiple packets from a list"""
pkts = [
(1454725786.526401, b"fooo"),
(1454725787.526401, b"barr"),
(3243204320.093211, b"grill"),
(1454725789.526401, b"lol"),
]
writer.writepkts(pkts) # noqa
return pkts
def test_pcapng_block_pack():
assert bytes(_PcapngBlock())
def test_pcapng_block_unpack():
block = _PcapngBlock()
buf = b'012345678901'
try:
block.unpack(buf)
except Exception as e:
assert isinstance(e, dpkt.NeedData)
def test_epb_unpack():
"""EnhancedPacketBlock can only unpack data >64 bytes, the length of their header"""
shb, idb, epb = define_testdata().shb_idb_epb
buf = b'quite-long-but-not-long-enough-at-least-32'
try:
epb.unpack(buf)
except Exception as e:
assert isinstance(e, dpkt.NeedData)
def test_epb_unpack_length_mismatch():
"""Force calculated len to be 0 when unpacking epb, this should fail when unpacking"""
shb, idb, epb = define_testdata().shb_idb_epb
unpackme = bytes(epb)
unpackme = unpackme[:-4] + b'\x00' * 4
try:
epb.unpack(unpackme)
except Exception as e:
assert isinstance(e, dpkt.UnpackError)
assert str(e) == 'length fields do not match'
def test_pcapng_block_len_no_opts():
"""_PcapngBlock should return its own header __len__ if it has no opts"""
block = _PcapngBlock()
assert len(block) == 12
def test_reader_file_descriptor():
"""Reader has .fd and .fileno() convenience members. Compare them to the actual fobj that was passed in"""
pcapng = define_testdata().valid_pcapng
import tempfile
with tempfile.TemporaryFile() as fobj:
fobj.write(pcapng)
fobj.seek(0)
reader = Reader(fobj)
assert reader.fd == fobj.fileno()
assert reader.fileno() == fobj.fileno()
def test_posttest():
"""Check that PostTest wrapper doesn't fail silently"""
@PostTest()
@pre_test
def fun():
pass
try:
fun()
except Exception as e:
assert str(e) == 'No test type specified'
| 49,325 | 31.366142 | 114 |
py
|
dpkt
|
dpkt-master/dpkt/igmp.py
|
# $Id: igmp.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Internet Group Management Protocol."""
from __future__ import absolute_import
from . import dpkt
class IGMP(dpkt.Packet):
"""Internet Group Management Protocol.
TODO: Longer class information....
Attributes:
__hdr__: Header fields of IGMP.
TODO.
"""
__hdr__ = (
('type', 'B', 0),
('maxresp', 'B', 0),
('sum', 'H', 0),
('group', '4s', b'\x00' * 4)
)
def __bytes__(self):
if not self.sum:
self.sum = dpkt.in_cksum(dpkt.Packet.__bytes__(self))
return dpkt.Packet.__bytes__(self)
def test_construction_no_sum():
igmp = IGMP()
assert igmp.type == 0
assert igmp.maxresp == 0
assert igmp.sum == 0
assert igmp.group == b'\x00' * 4
assert bytes(igmp) == b'\x00\x00' + b'\xff\xff' + b'\x00' * 4
def test_construction_sum_set():
igmp = IGMP(sum=1)
assert igmp.type == 0
assert igmp.maxresp == 0
assert igmp.sum == 1
assert igmp.group == b'\x00' * 4
assert bytes(igmp) == b'\x00\x00\x00\x01' + b'\x00' * 4
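def test_parse_membership_query():
    # Illustrative sketch: parse a hand-built 8-byte IGMPv2 membership query
    # (type 0x11, max response time 100); for a valid packet the
    # ones-complement checksum over the whole buffer verifies to zero.
    buf = b'\x11\x64\xee\x9b\x00\x00\x00\x00'
    igmp = IGMP(buf)
    assert igmp.type == 0x11
    assert igmp.maxresp == 100
    assert igmp.sum == 0xee9b
    assert igmp.group == b'\x00' * 4
    assert dpkt.in_cksum(buf) == 0  # checksum verifies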
| 1,135 | 21.72 | 65 |
py
|
dpkt
|
dpkt-master/dpkt/rtcp.py
|
# $Id: rtcp.py 23 2023-01-22 11:22:33Z pajarom $
# -*- coding: utf-8 -*-
# RFC3550 and RFC3611
"""RTP Control Protocol."""
from __future__ import absolute_import
from . import dpkt
from .dpkt import Packet
import math
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# header |V=2|P| RC | PT=SR=200 | length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | SSRC of sender |
# +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# sender | NTP timestamp, most significant word |
# info +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | NTP timestamp, least significant word |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | RTP timestamp |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | sender's packet count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | sender's octet count |
# +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# report | SSRC_1 (SSRC of first source) |
# block +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# 1 | fraction lost | cumulative number of packets lost |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | extended highest sequence number received |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | interarrival jitter |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | last SR (LSR) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | delay since last SR (DLSR) |
# +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# report | SSRC_2 (SSRC of second source) |
# block +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# 2 : ... :
# +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# | profile-specific extensions |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
class SRInfo(dpkt.Packet):
"""RTCP Sender Info"""
__hdr__ = (
("ssrc", "I", 0),
("ntp_ts_msw", "I", 0),
("ntp_ts_lsw", "I", 0),
("rtp_ts", "I", 0),
("pkts", "I", 0),
("octs", "I", 0),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = b""
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# header |V=2|P| RC | PT=RR=201 | length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | SSRC of packet sender |
# +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# report | SSRC_1 (SSRC of first source) |
# block +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# 1 | fraction lost | cumulative number of packets lost |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | extended highest sequence number received |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | interarrival jitter |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | last SR (LSR) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | delay since last SR (DLSR) |
# +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# report | SSRC_2 (SSRC of second source) |
# block +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# 2 : ... :
# +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# | profile-specific extensions |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
class RRInfo(dpkt.Packet):
"""RTCP Receiver Info"""
__hdr__ = (("ssrc", "I", 0),)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = b""
class Report(dpkt.Packet):
"""RTCP Report Sender"""
__hdr__ = (
("ssrc", "I", 0),
("_lossfrac_losscumm", "I", 0),
("seq", "I", 0),
("jitter", "I", 0),
("lsr", "I", 0),
("dlsr", "I", 0),
)
__bit_fields__ = {
"_lossfrac_losscumm": (
("lossfrac", 8), # first byte
("losscumm", 24), # lower 3 bytes
),
}
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = b""
def __bytes__(self):
return self.pack_hdr()
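# Report bit-field packing example (illustrative): lossfrac=5, losscumm=1000
# packs _lossfrac_losscumm as (5 << 24) | 1000 = 0x050003E8.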
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# header |V=2|P| SC | PT=SDES=202 | length |
# +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# chunk | SSRC/CSRC_1 |
# 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | SDES items |
# | ... |
# +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# chunk | SSRC/CSRC_2 |
# 2 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | SDES items |
# | ... |
# +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# header |V=2|P| SC | PT=BYE=203 | length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | SSRC/CSRC |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# : ... :
# +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# (opt) | length | reason for leaving ...
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# header +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |V=2|P| subtype | PT=APP=204 | length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | SSRC/CSRC |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | name (ASCII) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | application-dependent data ...
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |V=2|P|reserved | PT=XR=207 | length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | SSRC |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# : report blocks :
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
BT_LOSS = 1 # Loss RLE Report Block
BT_DUPL = 2 # Duplicate RLE Report Block
BT_RCVT = 3 # Packet Receipt Times Report Block
BT_RCVR = 4 # Receiver Reference Time Report Block
BT_DLRR = 5 # DLRR Report Block
BT_STAT = 6 # Statistics Summary Report Block
BT_VOIP = 7 # VoIP Metrics Report Block
class XBlockLoss(dpkt.Packet):
"""RTCP Extended Loss RLE Report Block"""
__hdr__ = (("ssrc", "I", 0),)
# def unpack(self, buf):
# super(XBlockLoss, self).unpack(buf)
    # self.data = buf[self.__hdr_len__:]
class XBlockDupl(dpkt.Packet):
"""RTCP Extended Duplicate RLE Report Block"""
__hdr__ = (("ssrc", "I", 0),)
# def unpack(self, buf):
# super(XBlockDupl, self).unpack(buf)
    # self.data = buf[self.__hdr_len__:]
class XBlockRcvt(dpkt.Packet):
"""RTCP Extended Packet Receipt Times Report Block"""
__hdr__ = (("ssrc", "I", 0),)
# def unpack(self, buf):
# super(XBlockRcvt, self).unpack(buf)
    # self.data = buf[self.__hdr_len__:]
class XBlockRcvr(dpkt.Packet):
"""RTCP Extended Receiver Reference Time Report Block"""
__hdr__ = (("ntp_ts_msw", "I", 0), ("ntp_ts_lsw", "I", 0))
class XBlockDlrr(dpkt.Packet):
"""RTCP Extended DLRR Report Block"""
__hdr__ = ()
def unpack(self, buf):
self.data = buf
class XBlockStat(dpkt.Packet):
"""RTCP Extended Statistics Summary Report Block"""
__hdr__ = (
("ssrc", "I", 0),
("beg_seq", "H", 0),
("end_seq", "H", 0),
("loss", "I", 0),
("dupl", "I", 0),
("min_jitter", "I", 0),
("max_jitter", "I", 0),
("avg_jitter", "I", 0),
("dev_jitter", "I", 0),
("min_ttl_or_hl", "B", 0),
("max_ttl_or_hl", "B", 0),
("mean_ttl_or_hl", "B", 0),
("dev_ttl_or_hl", "B", 0),
)
class XBlockVoip(dpkt.Packet):
"""RTCP Extended Info"""
__hdr__ = (
("ssrc", "I", 0),
("loss_rate", "B", 0),
("disc_rate", "B", 0),
("burst_density", "B", 0),
("gap_density", "B", 0),
("burst_duration", "H", 0),
("gap_duration", "H", 0),
("rtt", "H", 0),
("end_sys_delay", "H", 0),
("signal_level", "B", 0),
("noise_level", "B", 0),
("RERL", "B", 0),
("Gmin", "B", 0),
("RFactor", "B", 0),
("ext_RFactor", "B", 0),
("MOS_LQ", "B", 0),
("MOS_CQ", "B", 0),
("RX_config", "B", 0),
("reserved", "B", 0),
("nominal_jitter", "H", 0),
("max_jitter", "H", 0),
("abs_max_jitter", "H", 0),
)
class XReportBlock(dpkt.Packet):
"""RTCP Extended VoIP Metrics Report Block"""
__hdr__ = (("type", "B", 0), ("spec", "B", 0), ("len", "H", 0))
def setBlock(self, block):
self.block = block
if isinstance(block, XBlockLoss):
self.type = BT_LOSS
elif isinstance(block, XBlockDupl):
self.type = BT_DUPL
elif isinstance(block, XBlockRcvt):
self.type = BT_RCVT
elif isinstance(block, XBlockRcvr):
self.type = BT_RCVR
elif isinstance(block, XBlockDlrr):
self.type = BT_DLRR
elif isinstance(block, XBlockStat):
self.type = BT_STAT
elif isinstance(block, XBlockVoip):
self.type = BT_VOIP
else:
raise ValueError("Invalid Block Type.")
self.len = math.ceil((block.__hdr_len__ + len(block.data)) / 4)
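    # setBlock length example (illustrative): XBlockRcvr has an 8-byte header
    # and no data, so len = ceil(8 / 4) = 2, matching the RFC 3611 block
    # length for a Receiver Reference Time block.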
def unpack(self, buf):
super(XReportBlock, self).unpack(buf)
self.block = None
buf = self.data
if self.type == BT_LOSS:
self.block = XBlockLoss(buf[0: self.len * 4])
elif self.type == BT_DUPL:
self.block = XBlockDupl(buf[0: self.len * 4])
elif self.type == BT_RCVT:
self.block = XBlockRcvt(buf[0: self.len * 4])
elif self.type == BT_RCVR:
self.block = XBlockRcvr(buf[0: self.len * 4])
elif self.type == BT_DLRR:
self.block = XBlockDlrr(buf[0: self.len * 4])
elif self.type == BT_STAT:
self.block = XBlockStat(buf[0: self.len * 4])
elif self.type == BT_VOIP:
self.block = XBlockVoip(buf[0: self.len * 4])
else:
raise ValueError("Invalid Block Type.")
self.data = b""
class XReport(dpkt.Packet):
"""RTCP Extended Info"""
__hdr__ = ()
def __init__(self, *args, **kwargs):
self.blocks = []
super(XReport, self).__init__(*args, **kwargs)
def addBlock(self, block):
self.blocks.append(block)
def unpack(self, buf):
super(XReport, self).unpack(buf)
buf = self.data
self.data = b""
try:
ll = 0
while ll < len(buf):
blck = XReportBlock(buf[ll:])
ll = ll + blck.__hdr_len__ + blck.len * 4
self.blocks.append(blck)
except ValueError:
if len(self.blocks) == 0: # At least one block must be present...
raise ValueError("Invalid Block Type.")
    def __len__(self):
        return sum(blk.__hdr_len__ + blk.len * 4 for blk in self.blocks)
    def __bytes__(self):
        # no data at this level by default; each block serializes as its
        # header followed by the typed sub-block's header and data
        return b"".join(
            blk.pack_hdr() + blk.block.pack_hdr() + blk.block.data
            for blk in self.blocks
        )
VERSION = 2
PT_SR = 200
PT_RR = 201
PT_SDES = 202
PT_BYE = 203
PT_APP = 204
PT_XR = 207
# START TODO...
SDES_CNAME = 1
SDES_NAME = 2
SDES_EMAIL = 3
SDES_PHONE = 4
SDES_LOC = 5
SDES_TOOL = 6
SDES_NOTE = 7
SDES_PRIV = 8
# END TODO...
class RTCP(Packet):
"""Real-Time Transport Protocol.
TODO: Longer class information....
Attributes:
__hdr__: Header fields of RTCP.
TODO.
"""
__hdr__ = (("_version_p_cc_pt", "H", 0x8000), ("len", "H", 0))
__bit_fields__ = {
"_version_p_cc_pt": (
("version", 2), # version 1100 0000 0000 0000 ! 0xC000 14
("p", 1), # p 0010 0000 0000 0000 ! 0x2000 13
("cc", 5), # cc 0001 1111 0000 0000 ! 0x1F00 8
("pt", 8), # pt 0000 0000 1111 1111 ! 0x00FF 0
),
}
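    # Header packing example (illustrative): version=2, p=0, cc=1, pt=PT_SR
    # gives (2 << 14) | (1 << 8) | 200 = 0x81C8, the first two header bytes
    # of the SR packets in the tests below.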
def addInfo(self, info):
        if self.pt not in (PT_SR, PT_RR, PT_XR):
raise ValueError("Info property not supported.")
self.info = info
ll = self.__hdr_len__ + self.info.__hdr_len__ + len(self.data)
# Only valid for PT_SR and PT_RR
if len(self.reports) > 0:
if self.pt in (PT_SR, PT_RR):
ll = ll + 24 * self.cc
else:
ll = ll + len(self.reports[0])
self.len = math.ceil((ll - 4) / 4)
def addReport(self, report):
        if self.pt not in (PT_SR, PT_RR, PT_XR):
raise ValueError("Report property not supported.")
self.reports.append(report)
self.cc = len(self.reports)
ll = self.__hdr_len__ + len(self.data)
if self.info:
ll = ll + self.info.__hdr_len__
# Only valid for PT_SR and PT_RR
if self.pt in (PT_SR, PT_RR):
ll = ll + 24 * self.cc
else:
ll = ll + len(self.reports[0])
self.len = math.ceil((ll - 4) / 4)
def addData(self, data):
if self.pt in (PT_RR, PT_XR):
raise ValueError("Data property not supported.")
self.data = data
ll = self.__hdr_len__ + len(self.data)
if self.info:
ll = ll + self.info.__hdr_len__
if self.pt in (PT_SR, PT_RR):
# Only valid for PT_SR and PT_RR
ll = ll + 24 * self.cc
self.len = math.ceil((ll - 4) / 4)
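    # Length bookkeeping example (illustrative): a 52-byte SR block stores
    # len = ceil((52 - 4) / 4) = 12, and __len__ recovers 12 * 4 + 4 = 52.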
def unpack(self, buf):
super(RTCP, self).unpack(buf)
        if self.version != VERSION or self.p != 0:
raise dpkt.UnpackError("invalid %s: %r" % (self.__class__.__name__, buf))
# self.csrc = buf[self.__hdr_len__:self.__hdr_len__ + 4]
buf = self.data
if self.pt == PT_SR:
self.info = SRInfo(buf)
buf = buf[self.info.__hdr_len__:]
for _ in range(self.cc):
sr = Report(buf)
buf = buf[sr.__hdr_len__:]
self.reports.append(sr)
self.data = buf[
0: len(self) - self.__hdr_len__ - self.info.__hdr_len__ - self.cc * 24
]
elif self.pt == PT_RR:
self.info = RRInfo(buf)
buf = buf[self.info.__hdr_len__:]
self.reports = []
for _ in range(self.cc):
rr = Report(buf)
buf = buf[rr.__hdr_len__:]
self.reports.append(rr)
self.data = b""
elif self.pt == PT_SDES:
# TODO
self.data = buf[0: len(self) - self.__hdr_len__]
elif self.pt == PT_BYE:
# TODO
self.data = buf[0: len(self) - self.__hdr_len__]
elif self.pt == PT_APP:
# TODO
self.data = buf[0: len(self) - self.__hdr_len__]
elif self.pt == PT_XR:
            self.info = RRInfo(buf)  # only ssrc in info...
buf = buf[self.info.__hdr_len__:]
xr = XReport(
buf[0: len(self) - self.info.__hdr_len__]
) # Limiting buffer length is important in this case to determine the number of blocks.
self.reports.append(xr)
self.data = b""
else:
raise dpkt.UnpackError("invalid %s: %r" % (self.__class__.__name__, buf))
def __init__(self, *args, **kwargs):
self.info = None
self.reports = []
self.data = b""
super(RTCP, self).__init__(*args, **kwargs)
def __len__(self):
return self.len * 4 + 4
def __bytes__(self):
bb = self.pack_hdr()
if self.info:
bb = bb + self.info.pack_hdr()
if len(self.reports) > 0:
for _ in range(self.cc):
bb = bb + bytes(self.reports[_])
return bb + self.data
def test_RTCP_SR():
RTCP_SR = RTCP(
b"\x81\xc8\x00\x0c\x28\xaa\x34\x78\xe6\xa2\x5f\xa9\x29\x03\xd3\x2f"
b"\x00\x00\x87\x00\x00\x00\x00\x09\x00\x00\x00\xd2\x58\xfe\xf5\x57"
b"\x00\x00\x00\x00\x00\x00\x3a\xb4\x00\x00\x03\x11\x5f\xa8\x87\x09"
b"\x00\x00\x6b\x75"
)
assert RTCP_SR.version == 2
assert RTCP_SR.p == 0
assert RTCP_SR.cc == 1
assert RTCP_SR.pt == PT_SR
assert RTCP_SR.len == 12
assert len(RTCP_SR) == 52
assert RTCP_SR.info
assert RTCP_SR.info.ssrc == 0x28AA3478
assert RTCP_SR.info.ntp_ts_msw == 3869401001
assert RTCP_SR.info.ntp_ts_lsw == 688116527
assert RTCP_SR.info.rtp_ts == 34560
assert RTCP_SR.info.pkts == 9
assert RTCP_SR.info.octs == 210
assert len(RTCP_SR.reports) == 1
assert RTCP_SR.reports[0].ssrc == 0x58FEF557
assert RTCP_SR.reports[0].lossfrac == 0
assert RTCP_SR.reports[0].losscumm == 0
assert RTCP_SR.reports[0].seq == 15028
assert RTCP_SR.reports[0].jitter == 785
assert RTCP_SR.reports[0].lsr == 1604880137
assert RTCP_SR.reports[0].dlsr == 27509
assert RTCP_SR.data == b""
assert bytes(RTCP_SR) == (
b"\x81\xc8\x00\x0c\x28\xaa\x34\x78\xe6\xa2\x5f\xa9\x29\x03\xd3\x2f"
b"\x00\x00\x87\x00\x00\x00\x00\x09\x00\x00\x00\xd2\x58\xfe\xf5\x57"
b"\x00\x00\x00\x00\x00\x00\x3a\xb4\x00\x00\x03\x11\x5f\xa8\x87\x09"
b"\x00\x00\x6b\x75"
)
def test_build_RTCP_SR():
RTCP_SR = RTCP(pt=PT_SR)
RTCP_SR.addInfo(
SRInfo(
ssrc=0x28AA3478,
ntp_ts_msw=3869401001,
ntp_ts_lsw=688116527,
rtp_ts=34560,
pkts=9,
octs=210,
)
)
RTCP_SR.addReport(
Report(
ssrc=0x58FEF557,
lossfrac=0,
losscumm=0,
seq=15028,
jitter=785,
lsr=1604880137,
dlsr=27509,
)
)
assert len(RTCP_SR.reports) == 1
assert bytes(RTCP_SR) == (
b"\x81\xc8\x00\x0c\x28\xaa\x34\x78\xe6\xa2\x5f\xa9\x29\x03\xd3\x2f"
b"\x00\x00\x87\x00\x00\x00\x00\x09\x00\x00\x00\xd2\x58\xfe\xf5\x57"
b"\x00\x00\x00\x00\x00\x00\x3a\xb4\x00\x00\x03\x11\x5f\xa8\x87\x09"
b"\x00\x00\x6b\x75"
)
def test_RTCP_RR():
RTCP_RR = RTCP(
b"\x81\xc9\x00\x07\x28\xaa\x34\x78\x58\xfe\xf5\x57\x00\x00\x00\x00"
b"\x00\x00\x3a\xaa\x00\x00\x00\x00\x5f\xa8\x0b\xa7\x00\x00\x50\x37"
)
assert RTCP_RR.version == 2
assert RTCP_RR.p == 0
assert RTCP_RR.cc == 1
assert RTCP_RR.pt == PT_RR
assert RTCP_RR.len == 7
assert len(RTCP_RR) == 32
assert RTCP_RR.info
assert RTCP_RR.info.ssrc == 0x28AA3478
assert len(RTCP_RR.reports) == 1
assert RTCP_RR.reports[0].ssrc == 0x58FEF557
assert RTCP_RR.reports[0].lossfrac == 0
assert RTCP_RR.reports[0].losscumm == 0
assert RTCP_RR.reports[0].seq == 15018
assert RTCP_RR.reports[0].jitter == 0
assert RTCP_RR.reports[0].lsr == 1604848551
assert RTCP_RR.reports[0].dlsr == 20535
assert RTCP_RR.data == b""
assert bytes(RTCP_RR) == (
b"\x81\xc9\x00\x07\x28\xaa\x34\x78\x58\xfe\xf5\x57\x00\x00\x00\x00"
b"\x00\x00\x3a\xaa\x00\x00\x00\x00\x5f\xa8\x0b\xa7\x00\x00\x50\x37"
)
def test_build_RTCP_RR():
RTCP_RR = RTCP(pt=PT_RR)
RTCP_RR.addInfo(RRInfo(ssrc=0x28AA3478))
RTCP_RR.addReport(
Report(
ssrc=0x58FEF557,
lossfrac=0,
losscumm=0,
seq=15018,
jitter=0,
lsr=1604848551,
dlsr=20535,
)
)
assert len(RTCP_RR.reports) == 1
assert bytes(RTCP_RR) == (
b"\x81\xc9\x00\x07\x28\xaa\x34\x78\x58\xfe\xf5\x57\x00\x00\x00\x00"
b"\x00\x00\x3a\xaa\x00\x00\x00\x00\x5f\xa8\x0b\xa7\x00\x00\x50\x37"
)
def test_RTCP_SDES():
RTCP_SDES = RTCP(
b"\x81\xca\x00\x06\x28\xaa\x34\x78\x01\x10\x35\x36\x38\x30\x65\x39"
b"\x30\x61\x36\x62\x37\x63\x38\x34\x36\x37\x00\x00"
)
assert RTCP_SDES.version == 2
assert RTCP_SDES.p == 0
assert RTCP_SDES.cc == 1
assert RTCP_SDES.pt == PT_SDES
assert RTCP_SDES.len == 6
assert len(RTCP_SDES) == 28
assert not RTCP_SDES.info
assert len(RTCP_SDES.reports) == 0
assert RTCP_SDES.data == (
b"\x28\xaa\x34\x78\x01\x10\x35\x36\x38\x30\x65\x39"
b"\x30\x61\x36\x62\x37\x63\x38\x34\x36\x37\x00\x00"
)
assert bytes(RTCP_SDES) == (
b"\x81\xca\x00\x06\x28\xaa\x34\x78\x01\x10\x35\x36\x38\x30\x65\x39"
b"\x30\x61\x36\x62\x37\x63\x38\x34\x36\x37\x00\x00"
)
def test_build_RTCP_SDES():
RTCP_SDES = RTCP(
        pt=PT_SDES, cc=1  # chunk decoding not implemented; hardcode the count
)
RTCP_SDES.addData(
(
b"\x28\xaa\x34\x78\x01\x10\x35\x36\x38\x30\x65\x39"
b"\x30\x61\x36\x62\x37\x63\x38\x34\x36\x37\x00\x00"
)
)
assert not RTCP_SDES.info
assert len(RTCP_SDES.reports) == 0
assert bytes(RTCP_SDES) == (
b"\x81\xca\x00\x06\x28\xaa\x34\x78\x01\x10\x35\x36\x38\x30\x65\x39"
b"\x30\x61\x36\x62\x37\x63\x38\x34\x36\x37\x00\x00"
)
def test_RTCP_XR():
RTCP_XR = RTCP(
b"\x81\xcf\x00\x1b\x58\xfe\xf5\x57\x04\x00\x00\x02\xe6\xa2\x5f\xaa"
b"\x71\x4e\x01\xaf\x05\x00\x00\x03\x28\xaa\x34\x78\x5f\xa9\x29\x04"
b"\x00\x01\x69\x35\x06\xe0\x00\x09\x28\xaa\x34\x78\x2a\x13\x2a\x1a"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x51\x00\x00\x02\xff"
b"\x00\x00\x01\xbf\x00\x00\x00\xdd\x00\x00\x00\x00\x07\x00\x00\x08"
b"\x28\xaa\x34\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00\x48\x01\xaa"
b"\x7f\x7f\x7f\x10\x7f\x7f\x7f\x7f\xb7\x00\x00\x78\x00\x78\x05\xdc"
)
assert RTCP_XR.version == 2
assert RTCP_XR.p == 0
assert RTCP_XR.cc == 1
assert RTCP_XR.pt == PT_XR
assert RTCP_XR.len == 27
assert len(RTCP_XR) == 112
assert RTCP_XR.info
assert RTCP_XR.info.ssrc == 0x58FEF557
assert len(RTCP_XR.reports) == 1
assert len(RTCP_XR.reports[0].blocks) == 4
assert RTCP_XR.reports[0].blocks[0].type == BT_RCVR
assert RTCP_XR.reports[0].blocks[0].spec == 0
assert RTCP_XR.reports[0].blocks[0].len == 2
assert RTCP_XR.reports[0].blocks[0].block.ntp_ts_msw == 0xE6A25FAA
assert RTCP_XR.reports[0].blocks[0].block.ntp_ts_lsw == 0x714E01AF
assert RTCP_XR.reports[0].blocks[1].type == BT_DLRR
assert RTCP_XR.reports[0].blocks[1].spec == 0
assert RTCP_XR.reports[0].blocks[1].len == 3
assert (
RTCP_XR.reports[0].blocks[1].block.data
== b"\x28\xaa\x34\x78\x5f\xa9\x29\x04\x00\x01\x69\x35"
)
assert RTCP_XR.reports[0].blocks[2].type == BT_STAT
assert RTCP_XR.reports[0].blocks[2].spec == 0xE0
assert RTCP_XR.reports[0].blocks[2].len == 9
assert RTCP_XR.reports[0].blocks[2].block.ssrc == 0x28AA3478
assert RTCP_XR.reports[0].blocks[2].block.beg_seq == 10771
assert RTCP_XR.reports[0].blocks[2].block.end_seq == 10778
assert RTCP_XR.reports[0].blocks[2].block.loss == 0
assert RTCP_XR.reports[0].blocks[2].block.dupl == 0
assert RTCP_XR.reports[0].blocks[2].block.min_jitter == 81
assert RTCP_XR.reports[0].blocks[2].block.max_jitter == 767
assert RTCP_XR.reports[0].blocks[2].block.avg_jitter == 447
assert RTCP_XR.reports[0].blocks[2].block.dev_jitter == 221
assert RTCP_XR.reports[0].blocks[2].block.min_ttl_or_hl == 0
assert RTCP_XR.reports[0].blocks[2].block.max_ttl_or_hl == 0
assert RTCP_XR.reports[0].blocks[2].block.mean_ttl_or_hl == 0
assert RTCP_XR.reports[0].blocks[2].block.dev_ttl_or_hl == 0
assert RTCP_XR.reports[0].blocks[3].type == BT_VOIP
assert RTCP_XR.reports[0].blocks[3].spec == 0
assert RTCP_XR.reports[0].blocks[3].len == 8
assert RTCP_XR.reports[0].blocks[3].block.ssrc == 0x28AA3478
assert RTCP_XR.reports[0].blocks[3].block.loss_rate == 0
assert RTCP_XR.reports[0].blocks[3].block.disc_rate == 0
assert RTCP_XR.reports[0].blocks[3].block.burst_density == 0
assert RTCP_XR.reports[0].blocks[3].block.gap_density == 0
assert RTCP_XR.reports[0].blocks[3].block.burst_duration == 0
assert RTCP_XR.reports[0].blocks[3].block.gap_duration == 0
assert RTCP_XR.reports[0].blocks[3].block.rtt == 72
assert RTCP_XR.reports[0].blocks[3].block.end_sys_delay == 426
assert RTCP_XR.reports[0].blocks[3].block.signal_level == 127
assert RTCP_XR.reports[0].blocks[3].block.noise_level == 127
assert RTCP_XR.reports[0].blocks[3].block.RERL == 127
assert RTCP_XR.reports[0].blocks[3].block.Gmin == 16
assert RTCP_XR.reports[0].blocks[3].block.RFactor == 127
assert RTCP_XR.reports[0].blocks[3].block.ext_RFactor == 127
assert RTCP_XR.reports[0].blocks[3].block.MOS_LQ == 127
assert RTCP_XR.reports[0].blocks[3].block.MOS_CQ == 127
assert RTCP_XR.reports[0].blocks[3].block.RX_config == 0xB7
assert RTCP_XR.reports[0].blocks[3].block.reserved == 0
assert RTCP_XR.reports[0].blocks[3].block.nominal_jitter == 120
assert RTCP_XR.reports[0].blocks[3].block.max_jitter == 120
assert RTCP_XR.reports[0].blocks[3].block.abs_max_jitter == 1500
assert bytes(RTCP_XR) == (
b"\x81\xcf\x00\x1b\x58\xfe\xf5\x57\x04\x00\x00\x02\xe6\xa2\x5f\xaa"
b"\x71\x4e\x01\xaf\x05\x00\x00\x03\x28\xaa\x34\x78\x5f\xa9\x29\x04"
b"\x00\x01\x69\x35\x06\xe0\x00\x09\x28\xaa\x34\x78\x2a\x13\x2a\x1a"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x51\x00\x00\x02\xff"
b"\x00\x00\x01\xbf\x00\x00\x00\xdd\x00\x00\x00\x00\x07\x00\x00\x08"
b"\x28\xaa\x34\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00\x48\x01\xaa"
b"\x7f\x7f\x7f\x10\x7f\x7f\x7f\x7f\xb7\x00\x00\x78\x00\x78\x05\xdc"
)
def test_build_RTCP_XR():
RTCP_XR = RTCP(pt=PT_XR)
RTCP_XR.addInfo(RRInfo(ssrc=0x58FEF557))
xr = XReport()
blk = XReportBlock()
blk.setBlock(XBlockRcvr(ntp_ts_msw=0xE6A25FAA, ntp_ts_lsw=0x714E01AF))
xr.addBlock(blk)
blk = XReportBlock()
blk.setBlock(XBlockDlrr(data=b"\x28\xaa\x34\x78\x5f\xa9\x29\x04\x00\x01\x69\x35"))
xr.addBlock(blk)
blk = XReportBlock(spec=0xE0)
blk.setBlock(
XBlockStat(
ssrc=0x28AA3478,
beg_seq=10771,
end_seq=10778,
loss=0,
dupl=0,
min_jitter=81,
max_jitter=767,
avg_jitter=447,
dev_jitter=221,
min_ttl_or_hl=0,
max_ttl_or_hl=0,
mean_ttl_or_hl=0,
dev_ttl_or_hl=0,
)
)
xr.addBlock(blk)
blk = XReportBlock()
blk.setBlock(
XBlockVoip(
ssrc=0x28AA3478,
loss_rate=0,
disc_rate=0,
burst_density=0,
gap_density=0,
burst_duration=0,
gap_duration=0,
rtt=72,
end_sys_delay=426,
signal_level=127,
noise_level=127,
RERL=127,
Gmin=16,
RFactor=127,
ext_RFactor=127,
MOS_LQ=127,
MOS_CQ=127,
RX_config=0xB7,
nominal_jitter=120,
max_jitter=120,
abs_max_jitter=1500,
)
)
xr.addBlock(blk)
RTCP_XR.addReport(xr)
assert len(RTCP_XR.reports) == 1
assert len(RTCP_XR.reports[0].blocks) == 4
assert bytes(RTCP_XR) == (
b"\x81\xcf\x00\x1b\x58\xfe\xf5\x57\x04\x00\x00\x02\xe6\xa2\x5f\xaa"
b"\x71\x4e\x01\xaf\x05\x00\x00\x03\x28\xaa\x34\x78\x5f\xa9\x29\x04"
b"\x00\x01\x69\x35\x06\xe0\x00\x09\x28\xaa\x34\x78\x2a\x13\x2a\x1a"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x51\x00\x00\x02\xff"
b"\x00\x00\x01\xbf\x00\x00\x00\xdd\x00\x00\x00\x00\x07\x00\x00\x08"
b"\x28\xaa\x34\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00\x48\x01\xaa"
b"\x7f\x7f\x7f\x10\x7f\x7f\x7f\x7f\xb7\x00\x00\x78\x00\x78\x05\xdc"
)
def test_build_RTCP_XR_Blocks():
blk = XReportBlock()
blk.setBlock(XBlockLoss())
assert blk.type == BT_LOSS
blk.setBlock(XBlockDupl())
assert blk.type == BT_DUPL
blk.setBlock(XBlockRcvt())
assert blk.type == BT_RCVT
try:
assert blk.setBlock(XReportBlock()) and False
except ValueError:
pass
blk = XReportBlock(
b"\x01\x00\x00\x03\x28\xaa\x34\x78\x00\x00\x00\x00\x00\x00\x00\x00"
)
assert isinstance(blk.block, XBlockLoss)
blk = XReportBlock(
b"\x02\x00\x00\x03\x28\xaa\x34\x78\x00\x00\x00\x00\x00\x00\x00\x00"
)
assert isinstance(blk.block, XBlockDupl)
blk = XReportBlock(
b"\x03\x00\x00\x03\x28\xaa\x34\x78\x00\x00\x00\x00\x00\x00\x00\x00"
)
assert isinstance(blk.block, XBlockRcvt)
try:
assert (
XReportBlock(
b"\x22\x00\x00\x03\x28\xaa\x34\x78\x00\x00\x00\x00\x00\x00\x00\x00"
)
and False
)
except ValueError:
pass
def test_build_RTCP_XR_Report():
try:
assert (
XReport(b"\x22\x00\x00\x03\x28\xaa\x34\x78\x00\x00\x00\x00\x00\x00\x00\x00")
and False
)
except ValueError:
pass
buf = (
b"\x03\x00\x00\x03\x28\xaa\x34\x78\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x22\x00\x00\x03\x28\xaa\x34\x78\x00\x00\x00\x00\x00\x00\x00\x00"
)
xr = XReport(buf)
assert len(xr.blocks) == 1
assert (
buf[len(xr):]
== b"\x22\x00\x00\x03\x28\xaa\x34\x78\x00\x00\x00\x00\x00\x00\x00\x00"
)
def test_build_RTCP_addInfo():
RTCP_SDES = RTCP(pt=PT_SDES)
try:
assert RTCP_SDES.addInfo(RRInfo(ssrc=0x28AA3478)) and False
except ValueError:
pass
RTCP_BYE = RTCP(pt=PT_BYE)
try:
assert RTCP_BYE.addInfo(RRInfo(ssrc=0x28AA3478)) and False
except ValueError:
pass
RTCP_APP = RTCP(pt=PT_APP)
try:
assert RTCP_APP.addInfo(RRInfo(ssrc=0x28AA3478)) and False
except ValueError:
pass
RTCP_SR = RTCP(pt=PT_SR)
RTCP_SR.addReport(
Report(
ssrc=0x58FEF557,
lossfrac=0,
losscumm=0,
seq=15028,
jitter=785,
lsr=1604880137,
dlsr=27509,
)
)
assert len(RTCP_SR.reports) == 1
RTCP_SR.addInfo(
SRInfo(
ssrc=0x28AA3478,
ntp_ts_msw=3869401001,
ntp_ts_lsw=688116527,
rtp_ts=34560,
pkts=9,
octs=210,
)
)
assert bytes(RTCP_SR) == (
b"\x81\xc8\x00\x0c\x28\xaa\x34\x78\xe6\xa2\x5f\xa9\x29\x03\xd3\x2f"
b"\x00\x00\x87\x00\x00\x00\x00\x09\x00\x00\x00\xd2\x58\xfe\xf5\x57"
b"\x00\x00\x00\x00\x00\x00\x3a\xb4\x00\x00\x03\x11\x5f\xa8\x87\x09"
b"\x00\x00\x6b\x75"
)
RTCP_XR = RTCP(pt=PT_XR)
xr = XReport()
blk = XReportBlock()
blk.setBlock(XBlockRcvr(ntp_ts_msw=0xE6A25FAA, ntp_ts_lsw=0x714E01AF))
xr.addBlock(blk)
blk = XReportBlock()
blk.setBlock(XBlockDlrr(data=b"\x28\xaa\x34\x78\x5f\xa9\x29\x04\x00\x01\x69\x35"))
xr.addBlock(blk)
blk = XReportBlock(spec=0xE0)
blk.setBlock(
XBlockStat(
ssrc=0x28AA3478,
beg_seq=10771,
end_seq=10778,
loss=0,
dupl=0,
min_jitter=81,
max_jitter=767,
avg_jitter=447,
dev_jitter=221,
min_ttl_or_hl=0,
max_ttl_or_hl=0,
mean_ttl_or_hl=0,
dev_ttl_or_hl=0,
)
)
xr.addBlock(blk)
blk = XReportBlock()
blk.setBlock(
XBlockVoip(
ssrc=0x28AA3478,
loss_rate=0,
disc_rate=0,
burst_density=0,
gap_density=0,
burst_duration=0,
gap_duration=0,
rtt=72,
end_sys_delay=426,
signal_level=127,
noise_level=127,
RERL=127,
Gmin=16,
RFactor=127,
ext_RFactor=127,
MOS_LQ=127,
MOS_CQ=127,
RX_config=0xB7,
nominal_jitter=120,
max_jitter=120,
abs_max_jitter=1500,
)
)
xr.addBlock(blk)
RTCP_XR.addReport(xr)
assert len(RTCP_XR.reports) == 1
assert len(RTCP_XR.reports[0].blocks) == 4
RTCP_XR.addInfo(RRInfo(ssrc=0x58FEF557))
assert bytes(RTCP_XR) == (
b"\x81\xcf\x00\x1b\x58\xfe\xf5\x57\x04\x00\x00\x02\xe6\xa2\x5f\xaa"
b"\x71\x4e\x01\xaf\x05\x00\x00\x03\x28\xaa\x34\x78\x5f\xa9\x29\x04"
b"\x00\x01\x69\x35\x06\xe0\x00\x09\x28\xaa\x34\x78\x2a\x13\x2a\x1a"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x51\x00\x00\x02\xff"
b"\x00\x00\x01\xbf\x00\x00\x00\xdd\x00\x00\x00\x00\x07\x00\x00\x08"
b"\x28\xaa\x34\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00\x48\x01\xaa"
b"\x7f\x7f\x7f\x10\x7f\x7f\x7f\x7f\xb7\x00\x00\x78\x00\x78\x05\xdc"
)
def test_build_RTCP_addReport():
RTCP_SDES = RTCP(pt=PT_SDES)
try:
assert RTCP_SDES.addReport(Report()) and False
except ValueError:
pass
RTCP_BYE = RTCP(pt=PT_BYE)
try:
assert RTCP_BYE.addReport(Report()) and False
except ValueError:
pass
RTCP_APP = RTCP(pt=PT_APP)
try:
assert RTCP_APP.addReport(Report()) and False
except ValueError:
pass
def test_build_RTCP_addData():
RTCP_RR = RTCP(pt=PT_RR)
try:
assert (
RTCP_RR.addData(
b"\x22\x00\x00\x03\x28\xaa\x34\x78\x00\x00\x00\x00\x00\x00\x00\x00"
)
and False
)
except ValueError:
pass
RTCP_XR = RTCP(pt=PT_XR)
try:
assert (
RTCP_XR.addData(
b"\x22\x00\x00\x03\x28\xaa\x34\x78\x00\x00\x00\x00\x00\x00\x00\x00"
)
and False
)
except ValueError:
pass
RTCP_SR = RTCP(pt=PT_SR)
RTCP_SR.addReport(
Report(
ssrc=0x58FEF557,
lossfrac=0,
losscumm=0,
seq=15028,
jitter=785,
lsr=1604880137,
dlsr=27509,
)
)
assert len(RTCP_SR.reports) == 1
RTCP_SR.addInfo(
SRInfo(
ssrc=0x28AA3478,
ntp_ts_msw=3869401001,
ntp_ts_lsw=688116527,
rtp_ts=34560,
pkts=9,
octs=210,
)
)
RTCP_SR.addData(b"\x22\x00\x00\x03\x28\xaa\x34\x78\x00\x00\x00\x00\x00\x00\x00\x00")
assert bytes(RTCP_SR) == (
b"\x81\xc8\x00\x10\x28\xaa\x34\x78\xe6\xa2\x5f\xa9\x29\x03\xd3\x2f"
b"\x00\x00\x87\x00\x00\x00\x00\x09\x00\x00\x00\xd2\x58\xfe\xf5\x57"
b"\x00\x00\x00\x00\x00\x00\x3a\xb4\x00\x00\x03\x11\x5f\xa8\x87\x09"
b"\x00\x00\x6b\x75"
b"\x22\x00\x00\x03\x28\xaa\x34\x78\x00\x00\x00\x00\x00\x00\x00\x00"
)
def test_RTCP_version_padding():
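    # First byte 0x41 encodes RTCP version 1 (this parser requires version 2),
    # and 0xA1 sets the padding bit; both packets below should be rejected
    # with dpkt.UnpackError.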
try:
assert (
RTCP(
b"\x41\xca\x00\x06\x28\xaa\x34\x78\x01\x10\x35\x36\x38\x30\x65\x39"
b"\x30\x61\x36\x62\x37\x63\x38\x34\x36\x37\x00\x00"
)
and False
)
except dpkt.UnpackError:
pass
try:
assert (
RTCP(
b"\xa1\xca\x00\x06\x28\xaa\x34\x78\x01\x10\x35\x36\x38\x30\x65\x39"
b"\x30\x61\x36\x62\x37\x63\x38\x34\x36\x37\x00\x00"
)
and False
)
except dpkt.UnpackError:
pass
def test_RTCP_BYE():
RTCP_BYE = RTCP(b"\x81\xcb\x00\x01\x58\xfe\xf5\x57")
assert RTCP_BYE.version == 2
assert RTCP_BYE.p == 0
assert RTCP_BYE.cc == 1
assert RTCP_BYE.pt == PT_BYE
assert RTCP_BYE.len == 1
assert len(RTCP_BYE) == 8
assert not RTCP_BYE.info
assert len(RTCP_BYE.reports) == 0
assert RTCP_BYE.data == (b"\x58\xfe\xf5\x57")
assert bytes(RTCP_BYE) == (b"\x81\xcb\x00\x01\x58\xfe\xf5\x57")
def test_RTCP_APP():
RTCP_APP = RTCP(b"\x81\xcc\x00\x01\x58\xfe\xf5\x57")
assert RTCP_APP.version == 2
assert RTCP_APP.p == 0
assert RTCP_APP.cc == 1
assert RTCP_APP.pt == PT_APP
assert RTCP_APP.len == 1
assert len(RTCP_APP) == 8
assert not RTCP_APP.info
assert len(RTCP_APP.reports) == 0
assert RTCP_APP.data == (b"\x58\xfe\xf5\x57")
assert bytes(RTCP_APP) == (b"\x81\xcc\x00\x01\x58\xfe\xf5\x57")
def test_RTCP_FF():
try:
assert RTCP(b"\x81\xff\x00\x01\x58\xfe\xf5\x57") and False
except dpkt.UnpackError:
pass
| 40,027 | 34.204925 | 100 |
py
|
dpkt
|
dpkt-master/dpkt/vrrp.py
|
# $Id: vrrp.py 88 2013-03-05 19:43:17Z [email protected] $
# -*- coding: utf-8 -*-
"""Virtual Router Redundancy Protocol."""
from __future__ import print_function
from __future__ import absolute_import
from . import dpkt
class VRRP(dpkt.Packet):
"""Virtual Router Redundancy Protocol.
TODO: Longer class information....
Attributes:
__hdr__: Header fields of VRRP.
TODO.
"""
__hdr__ = (
('_v_type', 'B', 0x21),
('vrid', 'B', 0),
('priority', 'B', 0),
('count', 'B', 0),
('atype', 'B', 0),
('advtime', 'B', 0),
('sum', 'H', 0),
)
__bit_fields__ = {
'_v_type': (
('v', 4),
('type', 4),
)
}
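    # _v_type packs the 4-bit protocol version in the high nibble and the
    # 4-bit packet type in the low nibble (default 0x21 = version 2, type 1).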
addrs = ()
    auth = b''
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
l_ = []
off = 0
for off in range(0, 4 * self.count, 4):
l_.append(self.data[off:off + 4])
self.addrs = l_
self.auth = self.data[off + 4:]
        self.data = b''
def __len__(self):
return self.__hdr_len__ + (4 * self.count) + len(self.auth)
def __bytes__(self):
data = b''.join(self.addrs) + self.auth
if not self.sum:
self.sum = dpkt.in_cksum(self.pack_hdr() + data)
return self.pack_hdr() + data
def test_vrrp():
# no addresses
s = b'\x00\x00\x00\x00\x00\x00\xff\xff'
v = VRRP(s)
assert v.sum == 0xffff
assert bytes(v) == s
# have address
s = b'\x21\x01\x64\x01\x00\x01\xba\x52\xc0\xa8\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00'
v = VRRP(s)
assert v.count == 1
assert v.addrs == [b'\xc0\xa8\x00\x01'] # 192.168.0.1
assert bytes(v) == s
# test checksum generation
v.sum = 0
assert bytes(v) == s
# test length
assert len(v) == len(s)
# test getters
assert v.v == 2
assert v.type == 1
# test setters
v.v = 3
v.type = 2
assert bytes(v)[0] == b'\x32'[0]
| 1,990 | 21.625 | 91 |
py
|
APNN-TC
|
APNN-TC-master/cutlass_kernel/run-gemm.py
|
#!/usr/bin/env python
import os
B = 64
N_K_list = [
128,
256,
384,
512,
640,
768,
896,
1024
]
for N_K in N_K_list:
os.system("./bench_gemm.bin {} {} {}".format(B, N_K, N_K))
| 212 | 10.833333 | 62 |
py
|
APNN-TC
|
APNN-TC-master/cutlass_kernel/run-conv.py
|
#!/usr/bin/env python
import os
os.system("./bench_conv.bin --benchmark --iteration 1000")
| 91 | 22 | 58 |
py
|
APNN-TC
|
APNN-TC-master/APNN-TC-lib/bmmaTensorCoreGemm/others/bit_and_utility.py
|
# w = "574528ee 5d2aa02a 4cf15010 52a214fa 32e7ba61 2d357b57 184882df 47a35144 4d75bd23 2a0eaeec 2f31ba80 217dedaf 1a861652 0129747a 3d4f71a6 1e565b6a 12715a44 5c692df2 1f6f4752 65504cc9 4e5785aa 042408ac 2ab32c6b 25521f4a 16a4fb2f 4f3509ca 0ca1a2ad 584a5056 2a39fb9d 52a99e65 2a827cbc 017f248c 2fd43e8f 7773cccd 54213986 62bbf8f0"
# x = "7fdcc233 1befd79f 41a7c4c9 6b68079a 4e6afb66 25e45d32 519b500d 431bd7b7 3f2dba31 7c83e458 257130a3 62bbd95a 189a769b 54e49eb4 71f32454 2ca88611 0836c40e 02901d82 3a95f874 08138641 1e7ff521 7c3dbd3d 737b8ddc 6ceaf087 2463b9ea 5e884adc 51ead36b 2d517796 580bd78f 153ea438 3855585c 70a64e2a 6a2342ec 2a487cb0 1d4ed43b 725a06fb"
# x = "6b8b4567 327b23c6 643c9869 66334873 74b0dc51 19495cff 2ae8944a 625558ec 238e1f29 46e87ccd 3d1b58ba 507ed7ab 2eb141f2 41b71efb 79e2a9e3 7545e146 515f007c 5bd062c2 12200854 4db127f8 0216231b 1f16e9e8 1190cde7 66ef438d 140e0f76 3352255a 109cf92e 0ded7263 7fdcc233 1befd79f 41a7c4c9 6b68079a 4e6afb66 25e45d32 519b500d 431bd7b7 3f2dba31 7c83e458 257130a3 62bbd95a 436c6125 628c895d 333ab105 721da317 2443a858 2d1d5ae9 6763845e 75a2a8d4 32fff902 684a481a 579478fe 749abb43 3dc240fb 1ba026fa 79a1deaa 75c6c33a 12e685fb 70c6a529 520eedd1 374a3fe6 4f4ef005 23f9c13c 649bb77c 275ac794 39386575 1cf10fd8 180115be 235ba861 47398c89 354fe9f9 15b5af5c 741226bb 0d34b6a8 10233c99 3f6ab60f 61574095 7e0c57b1 77ae35eb 579be4f1 310c50b3 5ff87e05 2f305def 25a70bf7 1dbabf00 4ad084e9 1f48eaa1 1381823a 5db70ae5 100f8fca 6590700b 15014acb 5f5e7fd0 098a3148 799d0247 06b94764 42c296bd 5fb8370b 50801ee1 0488ac1a 5fb8011c 6aa78f7f 7672bd23 6fc75af8 6a5f7029 7d5e18f8 5f3534a4 73a1821b 7de67713 555c55b5 3fa62aca 14fce74e 6a3dd3e8 71c91298 09daf632 53299938 1fbfe8e0 5092ca79 1d545c4d 59adea3d 288f1a34 2a155dbc 1d9f6e5f 097e1b4e 51088277 1ca0c5fa 53584bcb 415e286c 7c58fd05 23d86aac 45e6d486 5c10fe21 0e7ffa2b 3c5991aa 4bd8591a 78df6a55 39b7aaa2 2b0d8dbe 6c80ec70 379e21b5 0069e373 2c27173b 4c9b0904 6aa7b75c 1df029d3"
# w = "631e22c1 603ea557 6a4376e0 5f69c330 26bba08c 1f494e05 4a495ddd 523e20ef 12c5229d 6dd2367d 274cd717 602a6fa5 3d2d3cbc 02ca49a6 6a355d28 01882c9d 4d159234 773c8b3e 033d51b3 4b98dbea 55e7c3e7 49d2139e 001b01f4 55dba830 5a48d2f8 06f7ce9c 2abbd4c6 1ebeb57c 12622f5a 4b0b780f 6a342c8b 7580521b 2b4a1d67 5477a36b 54ea154b 5205bdf3 73c0f171 1f337328 2443dee2 0686140e 0d05a9a5 4b90b5f9 66b083b3 4a32e662 4e5affa0 50e5e0db 4bbb12ff 1b7091d4 48226c1a 4ef864b2 67096dbe 1e0a3001 18ca7850 67246fb2 73e5d831 73134b48 6e1c3e4e 1ea1acf7 11d200c5 007e6da8 69ad2507 7c062d50 75febfc3 14f7426e 507dd0bb 4ae8d50e 66fd0061 443ec22c 6a1c4836 0b40df43 4ac4d63a 7721f1dc 56d1953d 317559ed 4154d83e 252c94dd 025b3ac9 0d0feb3d 409d26b1 4a7da6e3 5c084fef 27a6946f 6887d6e4 74d2c83f 0ecb0421 5c6daf16 67e61387 7ce7426f 7b0f5c0d 79b8144c 7d65b017 64bc8114 75be419c 73646fda 79b3c382 463c1258 3e4d44e8 60b0c3e3 0a7ad484 28698d1e 6bf1a327 553faabf 1f8b7efa 42c33864 06b504ac 60e05738 67efcd41 09103f75 6df04275 288cf3f2 538de658 49f89264 50338861 3c15bd3d 3ecb5aa3 5efe8c82 18836c53 26b16e2b 5be5cef1 1392c860 20698277 594b7f08 784f4975 1627c414 4cafeee2 72030cf7 5c63d66c 0afd33ca 52b3d0db 66deaaf0 3366c0e8 3ea57402 3c1e55af 52f23fe3 0168ac66 42d35a5c 33d2971b 695879a7 4be399d1 21c2d991 11e56d99 1f71802a 6bbb6bf5 6218f5fa"
W = ["6b8b4567 327b23c6 643c9869 66334873", "5d888a08 2a082c70 5ec6afd4 19e21bb2", "4695ae95 777a4eaa 3f48b982 46ba8fca", "2c06dcf3 2e129658 059f0446 030df306"]
X = ["ffffffff ffffffff ffffffff ffffffff"]*4
NIBBLE_BITS = {'0': '0000', '1': '0001', '2': '0010', '3': '0011', '4': '0100', '5': '0101', '6': '0110', '7': '0111', '8': '1000', '9': '1001', 'a': '1010', 'b': '1011', 'c': '1100', 'd': '1101', 'e': '1110', 'f': '1111'}
def compute(w, x):
val = 0
for i in range(len(x)):
if w[i] == ' ':
continue
        tmp0 = NIBBLE_BITS[w[i]]
        tmp1 = NIBBLE_BITS[x[i]]
for j in range(4):
val += int(tmp0[j])*int(tmp1[j])
return val
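# Sanity checks (illustrative, added): each hex digit expands to 4 bits via
# the lookup table above, so compute() counts the bit positions where both
# hex-encoded operands have a 1 bit, i.e. popcount(w & x):
assert compute("f", "f") == 4
assert compute("a", "5") == 0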
import numpy as np
data = np.zeros((4,4))
for i in range(4):
for j in range(4):
data[i,j] = compute(W[i], X[j])
print(data)
final_val = 0
for i in range(4):
for j in range(4):
final_val += data[i,j]*(2**(i+j))
print(final_val)
| 4,195 | 101.341463 | 1,309 |
py
|
APNN-TC
|
APNN-TC-master/APNN-TC-lib/bmmaTensorCoreGemm/others/speed.py
|
Batch = 8
H = 32
W = 32
CIN = 128
COUT = 128
bmma_ms_avg = 0.0165
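# Explanatory note (added): the TOPS estimate below counts
# FLOPs = Batch * 9 (3x3 kernel taps) * CIN * H * W * COUT * 2 (one multiply
# plus one add), divided by the measured kernel time bmma_ms_avg, which is in
# milliseconds and hence converted to seconds.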
TOPS = ((Batch * 9.0 * CIN * H * W * COUT * 2)/(bmma_ms_avg/1000.0)) / 1e12
print("Conv2: ", TOPS)
Batch = 8
H = 16
W = 16
CIN = 128
COUT = 256
bmma_ms_avg = 0.0248
TOPS = ((Batch * 9.0 * CIN * H * W * COUT * 2)/(bmma_ms_avg/1000.0)) / 1e12
print("Conv3: ", TOPS)
| 337 | 13.083333 | 75 |
py
|
APNN-TC
|
APNN-TC-master/cutlass_nn/analysis.py
|
#!/usr/bin/env python3
import re
import sys
if len(sys.argv) < 2:
raise ValueError("Usage: ./1_analysis.py result.log")
fp = open(sys.argv[1], "r")
dataset_li = []
time_li = []
for line in fp:
if "(ms):" in line:
        time = re.findall(r'[0-9]+\.[0-9]+', line)[0]
print(time)
time_li.append(float(time))
fp.close()
print("{} (ms): {:.3f}".format(sys.argv[1].strip(".log"), sum(time_li)))
# fout = open(sys.argv[1].strip(".log")+".csv", 'w')
# fout.write("dataset,Avg.Epoch (ms)\n")
# for data, time in zip(dataset_li, time_li):
# fout.write("{},{}\n".format(data, time))
# fout.close()
| 618 | 25.913043 | 72 |
py
|
OPTMLSTM
|
OPTMLSTM-main/example_OPTM_LSTM.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Adam Ntakaris ([email protected], @gmail.com)
"""
from keras.layers import Dense
import keras
import numpy as np
import OPTMCell
# Note: Random data example for illustration purposes only
# OPTM-LSTM is a narrow artificial intelligence model
# Input Dim --> (num_of_data_samples, 1, 41)
# 41 = 40 LOB Price and Volume levels + Current mid_price/Guarantor
three_dim_inpt = np.random.rand(600, 1, 41)
# Regression Labels --> [mid_prices,]
lbls = np.random.rand(600,)
batch_size = 1
num_of_hidden_units = 8
input_1 = keras.Input(batch_shape = (batch_size, 1, 41))
layer_1 = keras.layers.RNN(OPTMCell.OPTMLSTMCell(num_of_hidden_units),
return_sequences=True, stateful=False)(input_1)
output_1 = Dense(1)(layer_1)
model = keras.Model(inputs=input_1, outputs=output_1)
model.compile(optimizer='adam', loss='mean_squared_error', metrics=['mse'])
model.fit(three_dim_inpt, lbls, batch_size=1, epochs=5)
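# Illustrative follow-up (not part of the original example): with
# return_sequences=True and a Dense(1) head, predictions keep the
# (samples, timesteps, 1) shape.
preds = model.predict(three_dim_inpt, batch_size=1)
print(preds.shape)  # expected: (600, 1, 1)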
| 1,004 | 27.714286 | 75 |
py
|
OPTMLSTM
|
OPTMLSTM-main/OPTMCell.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Adam Ntakaris ([email protected], @gmail.com)
Important: This is an extension based on
https://github.com/keras-team/keras/blob/v2.10.0/keras/layers/rnn/lstm.py
"""
import tensorflow.compat.v2 as tf
from keras import activations
from keras import backend
from keras import constraints
from keras import initializers
from keras import regularizers
from keras.layers.recurrent import DropoutRNNCellMixin
from keras.utils import tf_utils
from keras.engine.base_layer import Layer
# isort: off
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
# allow Numpy
from tensorflow.python.ops.numpy_ops import np_config
np_config.enable_numpy_behavior()
RECURRENT_DROPOUT_WARNING_MSG = (
'RNN `implementation=2` is not supported when `recurrent_dropout` is set. '
'Using `implementation=1`.')
@keras_export(v1=['keras.layers.LSTMCell'])
class OPTMLSTMCell(DropoutRNNCellMixin, Layer):
""" Optimum Output Long Short-Term Memory Layer
This is an optimized extension of the current TensorFlow LSTM layer.
This class processes each individual LOB time-step input.
Args (similar to prototype LSTM layer):
units: Positive integer, dimensionality of the output space.
Input Placeholder (updated based on the Revised LSTM layer):
inputs: A 3D tensor, with shape of `[batch=1, timesteps, features +
guarantor]`.
"""
def __init__(
self,
units ,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
**kwargs
):
if units < 0:
raise ValueError(
f'Received an invalid value for argument `units`, '
f'expected a positive integer, got {units}.'
)
# By default use cached variable under v2 mode, see b/143699808.
if tf.compat.v1.executing_eagerly_outside_functions():
self._enable_caching_device = kwargs.pop(
'enable_caching_device', True
)
else:
self._enable_caching_device = kwargs.pop(
'enable_caching_device', False
)
super(OPTMLSTMCell, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.unit_forget_bias = unit_forget_bias
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
implementation = kwargs.pop('implementation', 1)
if self.recurrent_dropout != 0 and implementation != 1:
logging.debug(RECURRENT_DROPOUT_WARNING_MSG)
self.implementation = 1
else:
self.implementation = implementation
self.state_size = [self.units, self.units]
self.output_size = self.units
@tf_utils.shape_type_conversion
def build(self, input_shape):
input_dim = input_shape[-1]-1
self.kernel = self.add_weight(
shape=(input_dim, self.units * 4),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units * 4),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint,
)
if self.use_bias:
if self.unit_forget_bias:
def bias_initializer(_, *args, **kwargs):
return backend.concatenate(
[
self.bias_initializer(
(self.units,), *args, **kwargs
),
initializers.get('ones')(
(self.units,), *args, **kwargs
),
self.bias_initializer(
(self.units * 2,), *args, **kwargs
),
]
)
else:
bias_initializer = self.bias_initializer
self.bias = self.add_weight(
shape=(self.units * 4,),
name='bias',
initializer=bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
)
else:
self.bias = None
self.built = True
def _compute_carry_and_output(self, x, h_tm1, c_tm1):
"""Computes carry and output using split kernels."""
x_i, x_f, x_c, x_o = x
h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o = h_tm1
        i = self.recurrent_activation(
            x_i + backend.dot(h_tm1_i, self.recurrent_kernel[:, :self.units])
        )
f = self.recurrent_activation(
x_f
+ backend.dot(
h_tm1_f, self.recurrent_kernel[:, self.units:self.units * 2]
)
)
c_t = self.activation(
x_c
+ backend.dot(
h_tm1_c,
self.recurrent_kernel[:, self.units * 2:self.units * 3]
)
)
c = f * c_tm1 + i * c_t
o = self.recurrent_activation(
x_o + backend.dot(
h_tm1_o, self.recurrent_kernel[:, self.units * 3:]
)
)
return c, o, i, f, c_t
def _compute_carry_and_output_fused(self, z, c_tm1):
"""Computes carry and output using fused kernels."""
z0, z1, z2, z3 = z
i = self.recurrent_activation(z0)
f = self.recurrent_activation(z1)
c = f * c_tm1 + i * self.activation(z2)
o = self.recurrent_activation(z3)
return c, o
def call(self, inputs, states, training=None):
h_tm1 = states[0] # previous memory state
c_tm1 = states[1] # previous carry state
inputs_1 = inputs[0][0:-1].reshape(1, -1)
dp_mask = self.get_dropout_mask_for_cell(inputs, training, count=4)
rec_dp_mask = self.get_recurrent_dropout_mask_for_cell(
h_tm1, training, count=4)
if self.implementation == 1:
if 0 < self.dropout < 1.:
inputs_i = inputs_1 * dp_mask[0]
inputs_f = inputs_1 * dp_mask[1]
inputs_c = inputs_1 * dp_mask[2]
inputs_o = inputs_1 * dp_mask[3]
else:
inputs_i = inputs_1
inputs_f = inputs_1
inputs_c = inputs_1
inputs_o = inputs_1
k_i, k_f, k_c, k_o = tf.split(
self.kernel, num_or_size_splits=4, axis=1)
x_i = backend.dot(inputs_i, k_i)
x_f = backend.dot(inputs_f, k_f)
x_c = backend.dot(inputs_c, k_c)
x_o = backend.dot(inputs_o, k_o)
if self.use_bias:
b_i, b_f, b_c, b_o = tf.split(
self.bias, num_or_size_splits=4, axis=0)
x_i = backend.bias_add(x_i, b_i)
x_f = backend.bias_add(x_f, b_f)
x_c = backend.bias_add(x_c, b_c)
x_o = backend.bias_add(x_o, b_o)
if 0 < self.recurrent_dropout < 1.:
h_tm1_i = h_tm1 * rec_dp_mask[0]
h_tm1_f = h_tm1 * rec_dp_mask[1]
h_tm1_c = h_tm1 * rec_dp_mask[2]
h_tm1_o = h_tm1 * rec_dp_mask[3]
else:
h_tm1_i = h_tm1
h_tm1_f = h_tm1
h_tm1_c = h_tm1
h_tm1_o = h_tm1
x = (x_i, x_f, x_c, x_o)
h_tm1 = (h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o)
c, o, i, f, c_t = self._compute_carry_and_output(x, h_tm1, c_tm1)
else:
if 0. < self.dropout < 1.:
inputs_1 = inputs_1 * dp_mask[0]
z = backend.dot(inputs_1, self.kernel)
z += backend.dot(h_tm1, self.recurrent_kernel)
if self.use_bias:
z = backend.bias_add(z, self.bias)
z = tf.split(z, num_or_size_splits=4, axis=1)
c, o = self._compute_carry_and_output_fused(z, c_tm1)
h_temp = o * self.activation(c)
# Guarantor
copies = 1 # Curse of Dimensionality Helper
gated_vector = tf.concat([i, f, c_t, c, o, h_temp], axis=1)
gated_vector_copy = tf.tile(gated_vector, (copies, 1))
gated_labels = tf.tile(inputs[0][-1].reshape(1, -1), (copies, 1))
n_epoch = 13
learning_rate = 0.0001
# Gradient Descent
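        # Descriptive note (added): this is plain batch gradient descent on a
        # least-squares fit of the concatenated gate/state vector to the
        # guarantor label; the learned weights are read off below as per-unit
        # importance scores.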
theta_1 = tf.ones([self.units*6, 1])
for epoch in range(n_epoch):
y_pred = tf.matmul(gated_vector_copy, theta_1)
error = y_pred - gated_labels
gradients = 2/copies * tf.matmul(
tf.transpose(gated_vector_copy), tf.cast(error, tf.float32)
)
theta_1 = theta_1 - learning_rate * gradients
importance = theta_1
# Collect Gates and States
i_gate_out = importance[:self.units, :]
f_gate_out = importance[self.units:self.units*2, :]
can_gate_out = importance[self.units*2:self.units*3, :]
c_gate_out = importance[self.units*3:self.units*4, :]
o_gate_out = importance[self.units*4:self.units*5, :]
h_gate_out = importance[self.units*5:self.units*6, :]
# Importance Score
        importance_i = tf.math.reduce_mean(i_gate_out, axis=0)
        importance_f = tf.math.reduce_mean(f_gate_out, axis=0)
        importance_can = tf.math.reduce_mean(can_gate_out, axis=0)
        importance_c = tf.math.reduce_mean(c_gate_out, axis=0)
        importance_o = tf.math.reduce_mean(o_gate_out, axis=0)
        importance_h = tf.math.reduce_mean(h_gate_out, axis=0)
        # Final/optimized output
        merge_output = tf.stack(
            [importance_i,
importance_f,
importance_can,
importance_c,
importance_o,
importance_h],
axis=0
)
result = tf.where(
merge_output == tf.math.reduce_max(merge_output, axis=0)
)
# Best Gate Filter
if result[0][0] == 0:
h = tf.transpose(i_gate_out)
elif result[0][0] == 1:
h = tf.transpose(f_gate_out)
elif result[0][0] == 2:
h = tf.transpose(can_gate_out)
elif result[0][0] == 3:
h = tf.transpose(c_gate_out)
elif result[0][0] == 4:
h = tf.transpose(o_gate_out)
else:
h = tf.transpose(h_gate_out)
return h, [h, c]
def get_config(self):
config = {
'units':
self.units,
'activation':
activations.serialize(self.activation),
'recurrent_activation':
activations.serialize(self.recurrent_activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'unit_forget_bias':
self.unit_forget_bias,
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint),
'dropout':
self.dropout,
'recurrent_dropout':
self.recurrent_dropout,
'implementation':
self.implementation
}
        base_config = super(OPTMLSTMCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| 13,682 | 35.782258 | 79 |
py
|
simdutf
|
simdutf-master/singleheader/amalgamate.py
|
#!/usr/bin/env python3
#
# Creates the amalgamated source files.
#
import sys
import os.path
import subprocess
import os
import re
import shutil
import datetime
if sys.version_info[0] < 3:
sys.stdout.write("Sorry, requires Python 3.x or better\n")
sys.exit(1)
SCRIPTPATH = os.path.dirname(os.path.abspath(sys.argv[0]))
PROJECTPATH = os.path.dirname(SCRIPTPATH)
print(f"SCRIPTPATH={SCRIPTPATH} PROJECTPATH={PROJECTPATH}")
print("We are about to amalgamate all simdutf files into one source file.")
print("See https://www.sqlite.org/amalgamation.html and https://en.wikipedia.org/wiki/Single_Compilation_Unit for rationale.")
if "AMALGAMATE_SOURCE_PATH" not in os.environ:
AMALGAMATE_SOURCE_PATH = os.path.join(PROJECTPATH, "src")
else:
AMALGAMATE_SOURCE_PATH = os.environ["AMALGAMATE_SOURCE_PATH"]
if "AMALGAMATE_INCLUDE_PATH" not in os.environ:
AMALGAMATE_INCLUDE_PATH = os.path.join(PROJECTPATH, "include")
else:
AMALGAMATE_INCLUDE_PATH = os.environ["AMALGAMATE_INCLUDE_PATH"]
if "AMALGAMATE_OUTPUT_PATH" not in os.environ:
AMALGAMATE_OUTPUT_PATH = os.path.join(SCRIPTPATH)
else:
AMALGAMATE_OUTPUT_PATH = os.environ["AMALGAMATE_OUTPUT_PATH"]
# this list excludes the "src/generic headers"
ALLCFILES = ["simdutf.cpp"]
# order matters
ALLCHEADERS = ["simdutf.h"]
found_includes = []
current_implementation=''
def doinclude(fid, file, line):
p = os.path.join(AMALGAMATE_INCLUDE_PATH, file)
pi = os.path.join(AMALGAMATE_SOURCE_PATH, file)
if os.path.exists(p):
# generic includes are included multiple times
if re.match('.*generic/.*.h', file):
dofile(fid, AMALGAMATE_INCLUDE_PATH, file)
# begin/end_implementation are also included multiple times
elif re.match('.*/begin.h', file):
dofile(fid, AMALGAMATE_INCLUDE_PATH, file)
elif re.match('.*/end.h', file):
dofile(fid, AMALGAMATE_INCLUDE_PATH, file)
elif file not in found_includes:
found_includes.append(file)
dofile(fid, AMALGAMATE_INCLUDE_PATH, file)
else:
pass
elif os.path.exists(pi):
        # generic includes are included multiple times
if re.match('.*generic/.*.h', file):
dofile(fid, AMALGAMATE_SOURCE_PATH, file)
# begin/end_implementation are also included multiple times
elif re.match('.*/begin.h', file):
dofile(fid, AMALGAMATE_SOURCE_PATH, file)
elif re.match('.*/end.h', file):
dofile(fid, AMALGAMATE_SOURCE_PATH, file)
elif file not in found_includes:
found_includes.append(file)
dofile(fid, AMALGAMATE_SOURCE_PATH, file)
else:
pass
else:
# If we don't recognize it, just emit the #include
print(line, file=fid)
def dofile(fid, prepath, filename):
global current_implementation
print(f"// dofile: invoked with prepath={prepath}, filename={filename}",file=fid)
file = os.path.join(prepath, filename)
RELFILE = os.path.relpath(file, PROJECTPATH)
    # The last line is always ignored; files should end with an empty line.
print(f"/* begin file {RELFILE} */", file=fid)
    includepattern = re.compile(r'\s*#\s*include "(.*)"')
    redefines_simdutf_implementation = re.compile(r'^#define\s+SIMDUTF_IMPLEMENTATION\s+(.*)')
    undefines_simdutf_implementation = re.compile(r'^#undef\s+SIMDUTF_IMPLEMENTATION\s*$')
uses_simdutf_implementation = re.compile('SIMDUTF_IMPLEMENTATION([^_a-zA-Z0-9]|$)')
with open(file, 'r') as fid2:
for line in fid2:
line = line.rstrip('\n')
s = includepattern.search(line)
if s:
includedfile = s.group(1)
# include all from simdutf.cpp except simdutf.h
if includedfile == "simdutf.h" and filename == "simdutf.cpp":
print(line, file=fid)
continue
if includedfile.startswith('../'):
includedfile = includedfile[2:]
# we explicitly include simdutf headers, one time each (unless they are generic, in which case multiple times is fine)
doinclude(fid, includedfile, line)
else:
# does it contain a redefinition of SIMDUTF_IMPLEMENTATION ?
s=redefines_simdutf_implementation.search(line)
if s:
current_implementation=s.group(1)
print(f"// redefining SIMDUTF_IMPLEMENTATION to \"{current_implementation}\"\n// {line}", file=fid)
elif undefines_simdutf_implementation.search(line):
# Don't include #undef SIMDUTF_IMPLEMENTATION since we're handling it ourselves
# print(f"// {line}")
pass
else:
# copy the line, with SIMDUTF_IMPLEMENTATION replace to what it is currently defined to
print(uses_simdutf_implementation.sub(current_implementation+"\\1",line), file=fid)
print(f"/* end file {RELFILE} */", file=fid)
# Get the generation date from git, so the output is reproducible.
# The %ci specifier gives the unambiguous ISO 8601 format, and
# does not change with locale and timezone at time of generation.
# Forcing it to be UTC is difficult, because it needs to be portable
# between gnu date and busybox date.
try:
timestamp = subprocess.run(['git', 'show', '-s', '--format=%ci', 'HEAD'],
stdout=subprocess.PIPE).stdout.decode('utf-8').strip()
except Exception:
print("git not found, timestamp based on current time")
timestamp = str(datetime.datetime.now())
print(f"timestamp is {timestamp}")
os.makedirs(AMALGAMATE_OUTPUT_PATH, exist_ok=True)
AMAL_H = os.path.join(AMALGAMATE_OUTPUT_PATH, "simdutf.h")
AMAL_C = os.path.join(AMALGAMATE_OUTPUT_PATH, "simdutf.cpp")
DEMOCPP = os.path.join(AMALGAMATE_OUTPUT_PATH, "amalgamation_demo.cpp")
README = os.path.join(AMALGAMATE_OUTPUT_PATH, "README.md")
print(f"Creating {AMAL_H}")
amal_h = open(AMAL_H, 'w')
print(f"/* auto-generated on {timestamp}. Do not edit! */", file=amal_h)
for h in ALLCHEADERS:
doinclude(amal_h, h, f"ERROR {h} not found")
amal_h.close()
print()
print()
print(f"Creating {AMAL_C}")
amal_c = open(AMAL_C, 'w')
print(f"/* auto-generated on {timestamp}. Do not edit! */", file=amal_c)
for c in ALLCFILES:
doinclude(amal_c, c, f"ERROR {c} not found")
amal_c.close()
# copy the README and DEMOCPP
if SCRIPTPATH != AMALGAMATE_OUTPUT_PATH:
shutil.copy2(os.path.join(SCRIPTPATH,"amalgamation_demo.cpp"),AMALGAMATE_OUTPUT_PATH)
shutil.copy2(os.path.join(SCRIPTPATH,"README.md"),AMALGAMATE_OUTPUT_PATH)
import zipfile
zf = zipfile.ZipFile(os.path.join(AMALGAMATE_OUTPUT_PATH,'singleheader.zip'), 'w', zipfile.ZIP_DEFLATED)
zf.write(os.path.join(AMALGAMATE_OUTPUT_PATH,"simdutf.cpp"), "simdutf.cpp")
zf.write(os.path.join(AMALGAMATE_OUTPUT_PATH,"simdutf.h"), "simdutf.h")
zf.write(os.path.join(AMALGAMATE_OUTPUT_PATH,"amalgamation_demo.cpp"), "amalgamation_demo.cpp")
print("Done with all files generation.")
print(f"Files have been written to directory: {AMALGAMATE_OUTPUT_PATH}/")
print("Done with all files generation.")
#
# Instructions to create demo
#
print("\nGiving final instructions:")
with open(README) as r:
for line in r:
print(line)
| 7,400 | 38.57754 | 134 |
py
|
simdutf
|
simdutf-master/benchmarks/dataset/scripts/utf8type.py
|
#!/usr/bin/env python
import sys
if(len(sys.argv)<2):
print("please provide a file name")
sys.exit(-1)
assert sys.version_info >= (3, 0)
filename = sys.argv[1]
with open(filename, "rb") as file_content:
array = file_content.read()
maxv = max(array)
minv = min(array)
if(minv < 0): print("bug")
    if(maxv >= 0b11110000):
        print("four bytes")
    elif(maxv >= 0b11100000):
        print("three bytes")
    elif(maxv >= 0b11000000):
        print("two bytes")
    else:
        print("ascii")
counter = [0, 0, 0, 0]
    for x in array:
        if(x >= 0b11110000):
            counter[3] += 1
        elif(x >= 0b11100000):
            counter[2] += 1
        elif(x >= 0b11000000):
            counter[1] += 1
        elif(x < 0b10000000):
            counter[0] += 1
        else:
            # we have a continuation byte
            pass
print("ASCII: {} 2-Bytes: {} 3-Bytes: {} 4-Bytes: {}".format(*counter))
l = len(array)
counter = [c * 100.0 / l for c in counter]
print("ASCII: {}% 2-Bytes: {}% 3-Bytes: {}% 4-Bytes: {}%".format(*counter))
| 1,153 | 27.146341 | 81 |
py
|
simdutf
|
simdutf-master/benchmarks/dataset/wikipedia_mars/convert_to_utf6.py
|
#!/usr/bin/env python3
from pathlib import Path
def main():
def input_files():
for path in Path('.').glob('*.*'):
if path.suffix in ('.html', '.txt'):
yield path
for path in input_files():
text = path.read_text(encoding='utf8')
dstpath = path.parent / (path.name + '.utf16')
print("Writing %s" % dstpath)
dstpath.write_text(text, encoding='utf16')
if __name__ == '__main__':
main()
| 476 | 21.714286 | 54 |
py
|
simdutf
|
simdutf-master/benchmarks/competition/inoue2008/script.py
|
## prefix_to_length_table
## We do
## const uint8_t prefix = (input[position] >> 5);
## so
## 00000000 becomes 000
## 10000000 becomes 100
## 11000000 becomes 110
## 11100000 becomes 111
prefix_to_length_table = []
for i in range(0b111 + 1):
if(i < 0b100):
# ascii
prefix_to_length_table.append(1)
elif(i<0b110):
# continuation
prefix_to_length_table.append(0)
elif(i<0b111):
prefix_to_length_table.append(2)
else:
prefix_to_length_table.append(3)
print("prefix_to_length_table")
print(prefix_to_length_table)
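# Sanity check (illustrative, added): 3-bit prefixes 000-011 are ASCII
# (length 1), 100-101 are continuation bytes (0), 110 starts a 2-byte
# sequence and 111 a 3-byte one (a 4-byte lead also maps to 111 in this
# 3-bit scheme).
assert prefix_to_length_table == [1, 1, 1, 1, 0, 0, 2, 3]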
def decode(i):
answer = []
for j in range(8):
rem = (i % 3) + 1
answer.append(rem)
i = i // 3
answer.reverse()
return answer
table_pattern1=[]
table_pattern2=[]
for i in range(3**8):
x = decode(i)
# pattern1 captures the second and third position
# pattern2 captures the first position
pattern1 = []
pattern2 = []
pos = 0
    for length in x:
        pattern2.append(pos + length - 1)
        pattern2.append(0xFF)
        if(length == 1):
            pattern1.append(0xFF)
            pattern1.append(0xFF)
        elif(length == 2):
            pattern1.append(pos)
            pattern1.append(0xFF)
        elif(length == 3):
            pattern1.append(pos + 1)
            pattern1.append(pos)
        else:
            print("BUG")
        pos += length
table_pattern1.append(pattern1)
table_pattern2.append(pattern2)
assert(len(pattern1) == 16)
assert(len(pattern2) == 16)
print("const static uint8_t pattern1["+str(len(table_pattern1))+"][16]={")
for x in table_pattern1:
assert(len(x) == 16)
print('{%s},' % (', '.join(map(str, x))))
print("};")
print("const static uint8_t pattern2["+str(len(table_pattern1))+"][16]={")
for x in table_pattern2:
assert(len(x) == 16)
print('{%s},' % (', '.join(map(str, x))))
print("};")
| 1,872 | 24.310811 | 74 |
py
|
simdutf
|
simdutf-master/benchmarks/competition/u8u16/proto/u8u16.py
|
#!/usr/bin/python
# u8u16.py
#
# Python prototype implementation
# Robert D. Cameron
# revised August 5, 2009 - use generated UTF-8 definitions
#
#
#
#----------------------------------------------------------------------------
#
# We use python's unlimited precision integers for unbounded bit streams.
# This permits simple logical operations on the entire stream.
# Assumption: bitstreams are little-endian (e.g., as on x86).
#
#----------------------------------------------------------------------------
#
# Utility functions for demo purposes. Slowwwww, but simple.
#
def readfile(filename):
f = open(filename)
contents = f.read()
f.close()
return contents
def count_leading_zeroes(strm):
zeroes = 0
while (strm & 0xFFFFFFFF) == 0:
zeroes += 32
strm >>= 32
while (strm & 1) == 0:
zeroes += 1
strm >>= 1
return zeroes
#
def transpose_streams(s):
b = []
mask = 128
index = 0
while index < 8:
current = 0
cursor = 1
for c in s:
if isinstance(c,str):
val = ord(c)
else:
val = c
if (val & mask != 0):
current += cursor
cursor <<= 1
index+=1
mask>>=1
b.append(current)
return b
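# Example (illustrative, added): transposing "ab" yields one little-endian
# integer per bit position, MSB first.  'a' = 0x61 = 01100001 and
# 'b' = 0x62 = 01100010, so stream 1 (the 64s bit) is set for both bytes:
#   transpose_streams("ab")[1] == 0b11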
def inverse_transpose(bitset, len):
bytestream=""
cursor = 1
for i in range(0, len):
byteval = 0
for j in range(0,8):
if bitset[j] & cursor != 0:
byteval += 128 >> j
bytestream += chr(byteval)
cursor += cursor
return bytestream
def filter_bytes(bytestream, delmask):
newstream=""
cursor = 1
for c in bytestream:
if delmask & cursor == 0:
newstream += c
cursor += cursor
return newstream
def merge_bytes(stream1, stream2):
s = ""
for i in range(len(stream1)):
s += stream1[i]
s += stream2[i]
return s
def bitstream2string(stream, lgth):
str = ""
for i in range(lgth):
if stream & 1 == 1: str += '1'
else: str += '_'
stream >>= 1
return str
#
# Advance all cursors by one position.
def Advance(stream):
return stream + stream
def ShiftBack(stream):
return stream >> 1
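# Note (added for illustration): with the little-endian bitstream convention,
# stream + stream shifts every marker bit one position forward, e.g.
#   Advance(0b0101) == 0b1010
# moving each cursor from a byte to its successor; ShiftBack is the inverse
# single-position shift.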
def u8_streams(u8bit):
u8 = {}
u8['unibyte'] = ~u8bit[0]
u8['prefix'] = u8bit[0] & u8bit[1]
u8['suffix'] = u8bit[0] & ~u8bit[1]
u8prefix3or4 = u8['prefix'] & u8bit[2]
u8['prefix2'] = u8['prefix'] & ~u8bit[2]
u8['prefix3'] = u8prefix3or4 & ~u8bit[3]
u8['prefix4'] = u8prefix3or4 & u8bit[3]
u8['scope22'] = Advance(u8['prefix2'])
u8['scope32'] = Advance(u8['prefix3'])
u8['scope42'] = Advance(u8['prefix4'])
u8['scope33'] = Advance(u8['scope32'])
u8['scope43'] = Advance(u8['scope42'])
u8['scope44'] = Advance(u8['scope43'])
u8lastscope = u8['scope22'] | u8['scope33'] | u8['scope44']
u8anyscope = u8lastscope | u8['scope32'] | u8['scope42'] | u8['scope43']
# C0-C1 are illegal
error_mask = u8['prefix2'] &~ (u8bit[3] | u8bit[4] | u8bit[5] | u8bit[6])
prefix_E0ED = u8['prefix3'] &~ ((u8bit[6] | (u8bit[4] ^ u8bit[7])) | (u8bit[4] ^ u8bit[5]))
E0ED_constraint = Advance(u8bit[5]) ^ u8bit[2]
error_mask |= Advance(prefix_E0ED) &~ E0ED_constraint
prefix_F5FF = u8['prefix4'] & (u8bit[4] | (u8bit[5] & (u8bit[6] | u8bit[7])))
error_mask |= prefix_F5FF
prefix_F0F4 = u8['prefix4'] &~ (u8bit[4] | u8bit[6] | u8bit[7])
F0F4_constraint = Advance(u8bit[5]) ^ (u8bit[2] | u8bit[3])
error_mask |= Advance(prefix_F0F4) &~ F0F4_constraint
error_mask |= u8anyscope ^ u8['suffix']
u8['error'] = error_mask
return u8
#
# Generated version using chardeflist2py(DefinitionSet['UTF8'])
#
def u8_streams_generated(bit):
unibyte = (~bit[0]);
prefix = (bit[0] & bit[1]);
prefix2 = (prefix &~ bit[2]);
temp1 = (bit[2] &~ bit[3]);
prefix3 = (prefix & temp1);
temp2 = (bit[2] & bit[3]);
prefix4 = (prefix & temp2);
suffix = (bit[0] &~ bit[1]);
temp3 = (bit[2] | bit[3]);
temp4 = (prefix &~ temp3);
temp5 = (bit[4] | bit[5]);
temp6 = (temp5 | bit[6]);
temp7 = (temp4 &~ temp6);
temp8 = (bit[6] | bit[7]);
temp9 = (bit[5] & temp8);
temp10 = (bit[4] | temp9);
temp11 = (prefix4 & temp10);
badprefix = (temp7 | temp11);
temp12 = (temp5 | temp8);
xE0 = (prefix3 &~ temp12);
temp13 = (bit[4] & bit[5]);
temp14 = (bit[7] &~ bit[6]);
temp15 = (temp13 & temp14);
xED = (prefix3 & temp15);
xF0 = (prefix4 &~ temp12);
temp16 = (bit[5] &~ bit[4]);
temp17 = (temp16 &~ temp8);
xF4 = (prefix4 & temp17);
xA0_xBF = (suffix & bit[2]);
x80_x9F = (suffix &~ bit[2]);
x90_xBF = (suffix & temp3);
x80_x8F = (suffix &~ temp3);
#
# End of generated code
u8 = {}
u8['unibyte'] = unibyte
u8['prefix'] = prefix
u8['suffix'] = suffix
u8['prefix2'] = prefix2
u8['prefix3'] = prefix3
u8['prefix4'] = prefix4
u8['scope22'] = Advance(u8['prefix2'])
u8['scope32'] = Advance(u8['prefix3'])
u8['scope42'] = Advance(u8['prefix4'])
u8['scope33'] = Advance(u8['scope32'])
u8['scope43'] = Advance(u8['scope42'])
u8['scope44'] = Advance(u8['scope43'])
u8lastscope = u8['scope22'] | u8['scope33'] | u8['scope44']
u8anyscope = u8lastscope | u8['scope32'] | u8['scope42'] | u8['scope43']
# C0-C1 and F5-FF are illegal
error_mask = badprefix
error_mask |= Advance(xE0) & x80_x9F
error_mask |= Advance(xED) & xA0_xBF
error_mask |= Advance(xF0) & x80_x8F
error_mask |= Advance(xF4) & x90_xBF
error_mask |= u8anyscope ^ u8['suffix']
u8['error'] = error_mask
return u8
#
# The following calculation of UTF-16 bit streams is consistent
# with the original u8u16, calculating streams at u8scope42 and
# u8scope44 positions.
#
def u16_streams(u8, u8bit):
u16hi = [0,0,0,0,0,0,0,0]
u16lo = [0,0,0,0,0,0,0,0]
u8lastscope = u8['scope22'] | u8['scope33'] | u8['scope44']
u8lastbyte = u8['unibyte'] | u8lastscope
u16lo[2] = u8lastbyte & u8bit[2]
u16lo[3] = u8lastbyte & u8bit[3]
u16lo[4] = u8lastbyte & u8bit[4]
u16lo[5] = u8lastbyte & u8bit[5]
u16lo[6] = u8lastbyte & u8bit[6]
u16lo[7] = u8lastbyte & u8bit[7]
u16lo[1] = (u8['unibyte'] & u8bit[1]) | (u8lastscope & Advance(u8bit[7]))
u16lo[0] = u8lastscope & Advance(u8bit[6])
u16hi[5] = u8lastscope & Advance(u8bit[3])
u16hi[6] = u8lastscope & Advance(u8bit[4])
u16hi[7] = u8lastscope & Advance(u8bit[5])
u16hi[0] = u8['scope33'] & Advance(Advance(u8bit[4]))
u16hi[1] = u8['scope33'] & Advance(Advance(u8bit[5]))
u16hi[2] = u8['scope33'] & Advance(Advance(u8bit[6]))
u16hi[3] = u8['scope33'] & Advance(Advance(u8bit[7]))
u16hi[4] = u8['scope33'] & Advance(u8bit[2])
u8surrogate = u8['scope42'] | u8['scope44']
u16hi[0] = u16hi[0] | u8surrogate
u16hi[1] = u16hi[1] | u8surrogate
u16hi[3] = u16hi[3] | u8surrogate
u16hi[4] = u16hi[4] | u8surrogate
u16hi[5] = u16hi[5] | u8['scope44']
s42lo1 = ~u8bit[3] # subtract 1
u16lo[1] = u16lo[1] | (u8['scope42'] & s42lo1)
s42lo0 = u8bit[2] ^ s42lo1 # borrow *
u16lo[0] = u16lo[0] | (u8['scope42'] & s42lo0)
borrow1 = s42lo1 & ~u8bit[2]
s42hi7 = Advance(u8bit[7]) ^ borrow1
u16hi[7]= u16hi[7] | (u8['scope42'] & s42hi7)
borrow2 = borrow1 & ~Advance(u8bit[7])
s42hi6 = Advance(u8bit[6]) ^ borrow2
u16hi[6] = u16hi[6] | (u8['scope42'] & s42hi6)
u16lo[2] = u16lo[2] | (u8['scope42'] & u8bit[4])
u16lo[3] = u16lo[3] | (u8['scope42'] & u8bit[5])
u16lo[4] = u16lo[4] | (u8['scope42'] & u8bit[6])
u16lo[5] = u16lo[5] | (u8['scope42'] & u8bit[7])
u16lo[6] = u16lo[6] | (u8['scope42'] & ShiftBack(u8bit[2]))
u16lo[7] = u16lo[7] | (u8['scope42'] & ShiftBack(u8bit[3]))
delmask = u8['prefix'] | u8['scope32'] | u8['scope43']
return (u16hi, u16lo, delmask)
#
# The following calculation of UTF-16 bit streams uses the
# u8scope43 position rather than the u8scope42 position for
# the bits of the first UTF-16 code unit of a surrogate pair.
# This requires more shifting than with the use of u8scope42,
# but has the advantage that all shifts are in the forward
# direction only and can hence be implemented using addition
# on little-endian architecture.
#
def u16_streams_fwdonly(u8, u8bit):
u16hi = [0,0,0,0,0,0,0,0]
u16lo = [0,0,0,0,0,0,0,0]
u8lastscope = u8['scope22'] | u8['scope33'] | u8['scope44']
u8lastbyte = u8['unibyte'] | u8lastscope
u16lo[2] = u8lastbyte & u8bit[2]
u16lo[3] = u8lastbyte & u8bit[3]
u16lo[4] = u8lastbyte & u8bit[4]
u16lo[5] = u8lastbyte & u8bit[5]
u16lo[6] = u8lastbyte & u8bit[6]
u16lo[7] = u8lastbyte & u8bit[7]
u16lo[1] = (u8['unibyte'] & u8bit[1]) | (u8lastscope & Advance(u8bit[7]))
u16lo[0] = u8lastscope & Advance(u8bit[6])
u16hi[5] = u8lastscope & Advance(u8bit[3])
u16hi[6] = u8lastscope & Advance(u8bit[4])
u16hi[7] = u8lastscope & Advance(u8bit[5])
u16hi[0] = u8['scope33'] & Advance(Advance(u8bit[4]))
u16hi[1] = u8['scope33'] & Advance(Advance(u8bit[5]))
u16hi[2] = u8['scope33'] & Advance(Advance(u8bit[6]))
u16hi[3] = u8['scope33'] & Advance(Advance(u8bit[7]))
u16hi[4] = u8['scope33'] & Advance(u8bit[2])
u8surrogate = u8['scope43'] | u8['scope44']
u16hi[0] = u16hi[0] | u8surrogate
u16hi[1] = u16hi[1] | u8surrogate
u16hi[3] = u16hi[3] | u8surrogate
u16hi[4] = u16hi[4] | u8surrogate
u16hi[5] = u16hi[5] | u8['scope44']
s42lo1 = ~u8bit[3] # subtract 1
u16lo[1] = u16lo[1] | (u8['scope43'] & Advance(s42lo1))
s42lo0 = u8bit[2] ^ s42lo1 # borrow *
u16lo[0] = u16lo[0] | (u8['scope43'] & Advance(s42lo0))
borrow1 = s42lo1 & ~u8bit[2]
advance_bit7 = Advance(u8bit[7])
s42hi7 = advance_bit7 ^ borrow1
u16hi[7]= u16hi[7] | (u8['scope43'] & Advance(s42hi7))
borrow2 = borrow1 & ~advance_bit7
s42hi6 = Advance(u8bit[6]) ^ borrow2
u16hi[6] = u16hi[6] | (u8['scope43'] & Advance(s42hi6))
u16lo[2] = u16lo[2] | (u8['scope43'] & Advance(u8bit[4]))
u16lo[3] = u16lo[3] | (u8['scope43'] & Advance(u8bit[5]))
u16lo[4] = u16lo[4] | (u8['scope43'] & Advance(u8bit[6]))
u16lo[5] = u16lo[5] | (u8['scope43'] & Advance(u8bit[7]))
u16lo[6] = u16lo[6] | (u8['scope43'] & u8bit[2])
u16lo[7] = u16lo[7] | (u8['scope43'] & u8bit[3])
delmask = u8['prefix'] | u8['scope32'] | u8['scope42']
return (u16hi, u16lo, delmask)
#
# Messages to duplicate u8u16 error reporting.
#
def IllegalSequenceMessage(pos):
return "Illegal UTF-8 sequence at position %i in source.\n" % pos
def IncompleteSequenceMessage(pos):
return "EOF with incomplete UTF-8 sequence at position %i in source.\n" % pos
import sys
def main():
if len(sys.argv) < 2:
sys.stderr.write("Usage: u8u16.py u8file [u16file]\n")
        sys.exit(1)
if len(sys.argv) == 3:
outfile = open(sys.argv[2],"w")
else: outfile = sys.stdout
u8data = readfile(sys.argv[1])
u8len = len(u8data)
u8bit = transpose_streams(u8data)
# u8 = u8_streams(u8bit)
u8 = u8_streams_generated(u8bit)
if u8['error'] != 0:
err_pos = count_leading_zeroes(u8['error'])
at_EOF = err_pos == len(u8data)
if (err_pos >= 1) and ord(u8data[err_pos-1]) >= 0xC0: err_pos -= 1
elif err_pos >= 2 and ord(u8data[err_pos-2]) >= 0xE0: err_pos -= 2
elif err_pos >= 3 and ord(u8data[err_pos-3]) >= 0xF0: err_pos -= 3
if at_EOF:
sys.stderr.write(IncompleteSequenceMessage(err_pos))
else:
sys.stderr.write(IllegalSequenceMessage(err_pos))
u8len = err_pos
# Originally, we used the u16_streams version.
# (u16hi, u16lo, delmask) = u16_streams(u8, u8bit)
(u16hi, u16lo, delmask) = u16_streams_fwdonly(u8, u8bit)
U16H = filter_bytes(inverse_transpose(u16hi, u8len), delmask)
U16L = filter_bytes(inverse_transpose(u16lo, u8len), delmask)
U16final = merge_bytes(U16H, U16L)
outfile.write(U16final)
outfile.close()
if __name__ == "__main__": main()
| 11,320 | 27.953964 | 92 |
py
|
simdutf
|
simdutf-master/benchmarks/competition/u8u16/lib/libgen/make_test.py
|
#
# make_simd_operation_test.py
#
# Copyright (C) 2007 Dan Lin, Robert D. Cameron
# Licensed to International Characters Inc. and Simon Fraser University
# under the Academic Free License version 3.0
# Licensed to the public under the Open Software License version 3.0.
# make_simd_operation_test.py generates test file simd_operation_test.c
# which compares the result computed by simd operations and
# result simulated by this test file
from random import randint
ops = ["rotl"]
mods = ["x","h","l"]
fws = [32]
merge_fws = fws[:-1]
bitBlock_size = 64
r1=0
r2=0
def modified_operand(N, n, operand, modifier):
if modifier == "x": return operand
elif modifier == "h":
for i in range(0,N/n):
operand[i] = operand[i] >> ((n+1)/2)
return operand
else:
for i in range(0,N/n):
operand[i] = operand[i] % (2** ((n+1)/2))
return operand
def split_up(r,N,n):
remainder = r
result = []
for i in range(0,N/n):
quotient = remainder / (2**(N-n*(i+1)))
remainder = remainder % (2**(N-n*(i+1)))
result.append(quotient)
return result
def join_up(r,N,n):
result = 0
for i in range(0,N/n):
result = result + (r[i] * (2**(N-n*(i+1))))
return result
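# Example (illustrative, added): splitting a 64-bit register value into
# 16-bit fields and joining them back are inverses:
#   split_up(0x0001000200030004, 64, 16) == [1, 2, 3, 4]
#   join_up([1, 2, 3, 4], 64, 16) == 0x0001000200030004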
def gen_const(c,N,n):
return c*(2**N-1)/(2**n-1)
def get_mask(n):
count = 1
temp = n
while (n/2 != 1):
n = n/2
count = count + 1
return count
def simulate_add (N, n, m1, m2, r1, r2):
r1_field = modified_operand(N, n, split_up(r1,N,n), m1)
r2_field = modified_operand(N, n, split_up(r2,N,n), m2)
r3_field = []
for i in range(0, N/n):
r3_field.append((r1_field[i] + r2_field[i])%(2**n))
r3 = join_up(r3_field, N, n)
return r3
def simulate_sub (N, n, m1, m2, r1, r2):
r1_field = modified_operand(N, n, split_up(r1,N,n), m1)
r2_field = modified_operand(N, n, split_up(r2,N,n), m2)
r3_field = []
for i in range(0, N/n):
r3_field.append((r1_field[i] - r2_field[i] + 2**n)%(2**n))
r3 = join_up(r3_field, N, n)
return r3
def simulate_srl (N, n, m1, m2, r1, r2):
r1_field = modified_operand(N, n, split_up(r1,N,n), m1)
r2_field = modified_operand(N, n, split_up(r2,N,n), m2)
r3_field = []
for i in range(0, N/n):
r3_field.append(r1_field[i] >> (r2_field[i] % 2**get_mask(n)))
r3 = join_up(r3_field, N, n)
return r3
def simulate_sll (N, n, m1, m2, r1, r2):
r1_field = modified_operand(N, n, split_up(r1,N,n), m1)
r2_field = modified_operand(N, n, split_up(r2,N,n), m2)
r3_field = []
for i in range(0, N/n):
r3_field.append((r1_field[i] << (r2_field[i] % 2**get_mask(n)))% (2**n))
r3 = join_up(r3_field, N, n)
return r3
def simulate_sra (N, n, m1, m2, r1, r2):
r1_field = modified_operand(N, n, split_up(r1,N,n), m1)
r2_field = modified_operand(N, n, split_up(r2,N,n), m2)
r3_field = []
for i in range(0, N/n):
supplement = 0
shift = r2_field[i] % (2**get_mask(n))
for j in range(1, shift+1):
supplement = supplement + (r1_field[i]/(2**(n-1)))*2**(n-j)
r3_field.append((r1_field[i] >> shift) + supplement)
r3 = join_up(r3_field, N, n)
return r3
def simulate_rotl (N, n, m1, m2, r1, r2):
r1_field = modified_operand(N, n, split_up(r1,N,n), m1)
r2_field = modified_operand(N, n, split_up(r2,N,n), m2)
r3_field = []
for i in range(0, N/n):
shift = r2_field[i] % 2**get_mask(n)
r3_field.append((r1_field[i] << shift) % 2**n + ((r1_field[i] >> (n - shift))% (2**n)))
r3 = join_up(r3_field, N, n)
return r3
def simulate_mergeh (N, n, m1, m2, r1, r2):
r1_field = modified_operand(N, n, split_up(r1,N,n), m1)
r2_field = modified_operand(N, n, split_up(r2,N,n), m2)
r3_field = []
for i in range(0, N/n/2):
r3_field.append(r1_field[i])
r3_field.append(r2_field[i])
r3 = join_up(r3_field, N, n)
return r3
def simulate_mergel (N, n, m1, m2, r1, r2):
r1_field = modified_operand(N, n, split_up(r1,N,n), m1)
r2_field = modified_operand(N, n, split_up(r2,N,n), m2)
r3_field = []
for i in range(N/n/2, N/n):
r3_field.append(r1_field[i])
r3_field.append(r2_field[i])
r3 = join_up(r3_field, N, n)
return r3
def simulate_pack (N, n, m1, m2, r1, r2):
r1_field = modified_operand(N, n, split_up(r1,N,n), m1)
r2_field = modified_operand(N, n, split_up(r2,N,n), m2)
r3_field = []
for i in range(0, N/n):
r3_field.append(r1_field[i] % (2** (n/2)))
for j in range(0, N/n):
r3_field.append(r2_field[j] % (2** (n/2)))
r3 = join_up(r3_field, N, n/2)
return r3
def make_modified_operand(fw, operand, modifier):
if modifier == "x": return operand
elif modifier == "h": return "simd_srli_"+str(fw)+"(%s, %i)" % (operand, fw/2)
else: return "simd_andc(%s, simd_himask_%i)" % (operand, fw)
def split_64to16(r):
result = []
result_hi = r/2**32
result_lo = r%2**32
result.append(result_hi/2**16)
result.append(result_hi%2**16)
result.append(result_lo/2**16)
result.append(result_lo%2**16)
return result
def reverse(n):
mask = 255
result = 0
for i in range(0,8):
temp = (n & mask)>>(i*8)
result = (result | temp)<< 8
mask = mask << 8
return result>>8
def make_test(op, fw, m1, m2, r1, r2):
test_operation = "simd_%s_%i_%s%s (a, b)"
template1 = "\ttest_rslt = %s;\n" % test_operation
template1 += "\tif (!(simd_all_eq_8(test_rslt, simd_if(simd_himask_64,simd_if(simd_himask_32,simd_const_16(%s),simd_const_16(%s)),simd_if(simd_himask_32,simd_const_16(%s),simd_const_16(%s)))))) {\n"
err_stmts = "\t\t\tprintf(\"error in %s \\n\");\n" % test_operation
err_stmts += "\t\t\tprint_bit_block(\"Computed:\", test_rslt);\n"
err_stmts += "\t\t\tprintf(\"\\tExpected: %s\\n\");\n"
err_stmts += "\t\t}\n"
template = template1 + err_stmts
if op == "add":
r = simulate_add(bitBlock_size,fw,m1,m2,r1,r2)
result = split_64to16(r)
return template % (op, fw, m1, m2,result[0],result[1],result[2],result[3],op,fw,m1,m2,hex(reverse(r)))
elif op == "sub":
r = simulate_sub(bitBlock_size,fw,m1,m2,r1,r2)
result = split_64to16(r)
return template % (op, fw, m1, m2,result[0],result[1],result[2],result[3],op,fw,m1,m2,hex(reverse(r)))
elif op == "pack":
r = simulate_pack(bitBlock_size,fw,m1,m2,r1,r2)
result = split_64to16(r)
return template % (op, fw, m1, m2,result[0],result[1],result[2],result[3],op,fw,m1,m2,hex(reverse(r)))
elif op == "mergel":
r = simulate_mergel(bitBlock_size,fw,m1,m2,r1,r2)
result = split_64to16(r)
return template % (op, fw, m1, m2,result[0],result[1],result[2],result[3],op,fw,m1,m2,hex(reverse(r)))
elif op == "mergeh":
r = simulate_mergeh(bitBlock_size,fw,m1,m2,r1,r2)
result = split_64to16(r)
return template % (op, fw, m1, m2,result[0],result[1],result[2],result[3],op,fw,m1,m2,hex(reverse(r)))
elif op == "srl":
r = simulate_srl(bitBlock_size,fw,m1,m2,r1,r2)
result = split_64to16(r)
return template % (op, fw, m1, m2,result[0],result[1],result[2],result[3],op,fw,m1,m2,hex(reverse(r)))
elif op == "sll":
r = simulate_sll(bitBlock_size,fw,m1,m2,r1,r2)
result = split_64to16(r)
return template % (op, fw, m1, m2,result[0],result[1],result[2],result[3],op,fw,m1,m2,hex(reverse(r)))
elif op == "sra":
r = simulate_sra(bitBlock_size,fw,m1,m2,r1,r2)
result = split_64to16(r)
return template % (op, fw, m1, m2,result[0],result[1],result[2],result[3],op,fw,m1,m2,hex(reverse(r)))
elif op == "rotl":
r = simulate_rotl(bitBlock_size,fw,m1,m2,r1,r2)
result = split_64to16(r)
return template % (op, fw, m1, m2,result[0],result[1],result[2],result[3],op,fw,m1,m2,hex(reverse(r)))
else:
return template % (op, fw, m1, m2, "0x0","0x0","0x0","0x0",op,fw,m1,m2,"0x0")
def make_all_for_fw(fw,r1,r2):
test_list = ''
for m1 in mods:
for m2 in mods:
for op in ops:
test_list += make_test(op, fw, m1, m2, r1, r2)
return test_list
def gen_operand_sequence(fw):
if fw == 2:
test_operand = [0x0, 0x1, 0x2, 0x3]
elif fw == 4:
test_operand = [0x0, 0x7, 0x8, 0xf]
test_operand.append(randint(0x1, 0xe))
elif fw == 8:
test_operand = [0x0, 0x7f, 0x80, 0xff]
test_operand.append(randint(0x1, 0xfe))
elif fw == 16:
test_operand = [0x0, 0x7fff, 0x8000, 0xffff]
test_operand.append(randint(0x1, 0xfffe))
elif fw == 32:
test_operand = [0x0, 0x7fffffff, 0x80000000, 0xffffffff]
test_operand.append(randint(0x1, 0xfffffffe))
return test_operand
def generate_and_write_versions(filename, ops,fws):
test_list = r"""#include <stdio.h>
#include "%s.h"
int main() {
SIMD_type a, b,test_rslt;
"""
test_list = test_list % filename
for fw in fws:
for r1 in gen_operand_sequence(fw):
for r2 in gen_operand_sequence(fw):
test_list +='\ta=simd_const_'+str(fw)+'('+hex(r1)+');\n\tb=simd_const_'+str(fw)+'('+hex(r2)+');\n'
test_list += make_all_for_fw (fw,gen_const(r1,64,fw),gen_const(r2,64,fw))
test_list += '\treturn 0;\n}\n'
file = open(filename+"_test.c", 'w')
file.write(test_list)
file.close()
import sys
if __name__ == "__main__":
if len(sys.argv) < 2: library_under_test = "mmx_simd"
else: library_under_test = sys.argv[1]
generate_and_write_versions(library_under_test, ops, fws)
| 9,550 | 34.243542 | 202 |
py
|
simdutf
|
simdutf-master/benchmarks/competition/u8u16/lib/libgen/make_basic_ops.py
|
#
# make_basic_ops.py
#
# Copyright (C) 2007 Dan Lin, Robert D. Cameron
# Licensed to International Characters Inc. and Simon Fraser University
# under the Academic Free License version 3.0
# Licensed to the public under the Open Software License version 3.0.
# make_basic_ops.py generates inline definitions for
# idealized basic SIMD operations that are not included
# in the built-in functions.
ops = ["sub", "add", "sll", "srl", "sra", "pack", "mergeh", "mergel", "rotl"]
# 1 for "add" and "sub"
# 2 for "sll", "srl" and "sra"
# 3 for "pack"
# 4 for "mergel" and "mergeh"
# Note: For new operations added into this file,
# if different fieldwidths are needed, add more to the following list
# otherwise share with the operations above
fws = { 1: [4], 2: [32,16,8,4], 3: [8,4,2], 4: [4,2,1]}
ops_immediateshift= ["sll","srl"]
fws_immediateshift = [2,4,8]
# make the structure of an inline function
def make_inline(op,fw,body):
operation = "simd_%s_%i" % (op,fw)
return "#ifndef %s\ninline SIMD_type %s(SIMD_type a,SIMD_type b){\n\treturn %s;}\n#endif\n\n" % (operation,operation,body)
# this is the main function that generate simd operations in n bit field
# by using simd operations in 2n bit field
#
# Considering that simd_const_64() is not included in simd_built_in operations
# we use _mm_cvtsi32_si64() as the mask for shift operations in 32 bit field
# instead of masking the operand twice with simd_himask_32 and smid_const_32()
#
# Note: "rotl" has not been tested
# "srl", "sll" and "sra" in 4 bit field might not be efficient
# by using 8 bit field operations
def make_halfsize_defn(op,fw):
template = "simd_if(simd_himask_"+str(fw*2)+", simd_"+op+"_%i(%s,%s)\n\t,"+"simd_"+op+"_%i(%s,%s))"
if (op == "add") or (op == "sub"):
return template % (fw*2, "simd_and(a,simd_himask_"+str(fw*2)+")",
"simd_and(b,simd_himask_"+str(fw*2)+")",
fw*2,"simd_andc(a,simd_himask_"+str(fw*2)+")",
"simd_andc(b,simd_himask_"+str(fw*2)+")")
elif op == "srl":
common_part = template % (fw*2,"a","simd_and(simd_const_"+str(fw)+"("+str(fw-1)+"),simd_srli_"+str(fw*2)+"(b,"+str(fw)+"))",
fw*2,"simd_andc(a,simd_himask_"+str(fw*2)+")","simd_and(%s)")
if fw == 32:
return common_part % "_mm_cvtsi32_si64(31),b"
else:
return common_part % ("b,simd_const_"+str(fw*2)+"("+str(fw-1)+")")
elif op == "sll":
common_part = template % (fw*2, "simd_and(a,simd_himask_"+str(fw*2)+")",
"simd_and(simd_const_"+str(fw)+"("+str(fw-1)+"),simd_srli_"+str(fw*2)+"(b,"+str(fw)+"))",
fw*2,"a", "simd_and(%s)")
if fw == 32:
return common_part % "_mm_cvtsi32_si64(31),b"
else:
return common_part % ("b,simd_const_"+str(fw*2)+"("+str(fw-1)+")")
elif op == "sra":
if fw == 32:
return """simd_if(simd_himask_64(),
_mm_sra_pi32(a, simd_and(simd_const_32(31),simd_srli_64(b,32))),
_mm_sra_pi32(a, simd_and(_mm_cvtsi32_si64(31), b)))"""
else:
return template % (fw*2,"simd_and(a,simd_himask_"+str(fw*2)+")",
"simd_and(sisd_srli(b,"+str(fw)+"),simd_const_"+str(fw*2)+"("+str(fw-1)+"))",
fw*2,"simd_srai_"+str(fw*2)+"(sisd_slli(a,"+str(fw)+"),"+str(fw)+")",
"simd_and(b,simd_const_"+str(fw*2)+"("+str(fw-1)+"))")
elif op == "rotl":
return "simd_or(simd_sll_"+str(fw)+"(a,b),simd_srl_"+str(fw)+"(a,simd_sub_"+str(fw)+"(simd_const_"+str(fw)+"("+str(fw)+"),b)))"
elif op == "pack":
return "simd_pack_"+str(fw*2)+"(%s,\n\t%s)" % ("simd_if(simd_himask_"+str(fw)+",sisd_srli(a,"+str(fw/2)+"),a)",
"simd_if(simd_himask_"+str(fw)+",sisd_srli(b,"+str(fw/2)+"),b)")
elif op == "mergeh" or op == "mergel":
return "simd_"+op+"_"+str(fw*2)+"(%s,\n\t%s)" % ("simd_if(simd_himask_"+str(fw*2)+",a,sisd_srli(b,"+str(fw)+"))",
"simd_if(simd_himask_"+str(fw*2)+",sisd_slli(a,"+str(fw)+"),b)")
else:
raise Exception("Bad operator %s" % op)
def make_immediateshift_defn(op,fw):
template = "inline SIMD_type simd_"+op+"i_"+str(fw)+"(SIMD_type r, int sh){\n\t return %s"
if op== "sll":
return template % "simd_and(sisd_"+op+"i(r,sh),simd_const_"+str(fw)+"(("+str(2**fw-1)+"<<sh)&"+str(2**fw-1)+"));}\n"
else:
return template % "simd_and(sisd_"+op+"i(r,sh),simd_const_"+str(fw)+"("+str(2**fw-1)+">>sh));}\n"
def make_all_half_fieldwidth_versions (ops, fws):
defn_list = ''
for op in ops_immediateshift:
for fw in fws_immediateshift:
defn_list += make_immediateshift_defn(op, fw)
for op in ops:
if (op=="add" or op=="sub"):
for fw in fws[1]:
defn_list += make_inline(op, fw, make_halfsize_defn(op,fw))
elif (op=="sll" or op=="srl" or op=="sra" or op=="rotl"):
for fw in fws[2]:
defn_list += make_inline(op, fw, make_halfsize_defn(op,fw))
elif (op=="pack"):
for fw in fws[3]:
defn_list += make_inline(op, fw, make_halfsize_defn(op,fw))
elif (op=="mergeh" or op=="mergel"):
for fw in fws[4]:
defn_list += make_inline(op, fw, make_halfsize_defn(op,fw))
else: raise Exception("Bad operator %s" % op)
return defn_list
def generate_and_write_versions(filename, ops, fws):
defn_list = make_all_half_fieldwidth_versions (ops, fws)
    with open(filename, 'w') as f:
        f.write(defn_list)
if __name__ == "__main__":
generate_and_write_versions("mmx_simd_basic.h", ops, fws)
| 5,447 | 44.024793 | 135 |
py
|
simdutf
|
simdutf-master/benchmarks/competition/u8u16/lib/libgen/make_half_operand_versions.py
|
#
# make_half_operand_versions.py
#
# Copyright (C) 2007 Robert D. Cameron, Dan Lin
# Licensed to International Characters Inc. and Simon Fraser University
# under the Academic Free License version 3.0
# Licensed to the public under the Open Software License version 3.0.
#
# make_halfoperand_versions.py generates macro definitions for
# the half-operand versions of idealized SIMD operations.
#
# The half-operand versions of a (binary) operation on n-bit fields
# are named by extending the operation name with an underscore and
# two operand modification letters for the two operands:
# operand modifier "h" indicates that the high n/2 bits of fields
# are taken as the operand values (shifted right by n/2).
# operand modifier "l" indicates that the low n/2 bits of fields
# masking off the high n/2 bits.
# operand modifier "x" indicates that all n bits of the field are
# used as the operand value without modification.
#
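# For example, simd_add_8_hl(v1, v2) performs simd_add_8 on the high halves
# of v1 (simd_srli_8(v1, 4)) and the low halves of v2
# (simd_andc(v2, simd_himask_8)); the bodies are built by
# make_modified_operand below.
#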
ops = ["sub", "add", "pack", "mergeh", "mergel", "sll", "srl", "sra", "rotl"]
mods = ["x", "l", "h"]
fws = [2, 4, 8, 16, 32]
# The generic approach to defining these operations is to simply
# include code to modify each of the operands and then perform
# the desired operation. However, this program generates more
# efficient code when certain optimizations may apply.
#
# 1. Lo-Mask Optimization.
# The masking for an "l" operand can be eliminated if
# that operation ignores the high n/2 bits. The
# ignores_operand_hi_half property declares this.
# 2. Hi-Mask Optimization
# The shifting of an "h" operand can be optimized if the
# operation ignores the high n/2 bits of the operand. In this
# case, it is possible to replace a simulated shift for n bit
# fields with any built-in shift for fields of a multiple of n.
# 3. Double-High Shift Optimization
# In some cases, the two shift operations for a pair of "h" operands
# may be eliminated in favor of a single shift operation after
# the basic operation is performed (support_hh_postshift property).
# 4. LH Add Optimization
# If both operands are modified (either "h" or "l" modifiers),
# a simulated add on small field widths can be replaced by a
# built-in add operation for fields of a multiple of n.
#
##########################################################################
#
# Operation Properties to Support Optimization
#
# 1. Lo-Mask Optimization.
#
# An operation satisfies the ignores_operand_hi_half property
# for a particular operand if the high n/2 bits of that
# operand play no role in the operation applied to n bit
# fields. This property applies for the second operand
# of all shifts and rotates, as well as for both operands
# of packs.
#
ignores_operand_hi_half = { 1: ["pack"], 2: ["sll", "srl", "sra", "rotl", "pack"]}
#
# 2. Hi-Mask Optimization
#
# Shifts of 2, 4 or 8 bits are simulated on common architectures. Replace
# if possible with 16-bit shifts, which are built-in on common architectures.
#
simulated_shift_replacement = {2: 16, 4: 16, 8: 16}
#
# 3. Double-High Shift Optimization
#
support_hh_postshift = ["mergeh", "mergel"]
#
# 4. LH Add Optimization
#
# Adds of 2-bit or 4-bit fields are simulated on common architectures. Replace
# if possible with 8-bit adds, which are built-in on common architectures.
#
simulated_add_replacement = {2: 8, 4: 8}
#
# Generate a definition that won't override any hand-coded version.
#
defn_mode = "INLINE"
def define_macro_if_undefined(op_name, body):
return "#ifndef %s\n#define %s(v1, v2) %s\n#endif\n\n" % (op_name, op_name, body)
def define_inline_function_if_undefined(op_name, body):
prototype = "inline SIMD_type %s(SIMD_type v1, SIMD_type v2)" % (op_name)
return "#ifndef %s\n%s {\n return %s;\n}\n#endif\n\n" % (op_name, prototype, body)
def define_if_undefined(op_name, body):
global defn_mode
if defn_mode == "MACRO": return define_macro_if_undefined(op_name, body)
else: return define_inline_function_if_undefined(op_name, body)
def operand_name(operand_no):
return "v%i" % operand_no
def make_modified_operand(operation, fw, operand_no, modifier):
operand = operand_name(operand_no)
if modifier == "x": return operand
elif operation in ignores_operand_hi_half[operand_no]:
if modifier == "l": return operand
elif modifier == "h":
if fw in simulated_shift_replacement.keys():
shft_op = "simd_srli_%i" % (simulated_shift_replacement[fw])
else: shft_op = "simd_srli_%i" % (fw)
return "%s(%s, %i)" % (shft_op, operand, fw/2)
else: raise Exception("Bad modifier %s" % modifier)
elif modifier == "h": return "simd_srli_%i(%s, %i)" % (fw, operand, fw/2)
elif modifier == "l": return "simd_andc(%s, simd_himask_%i)" % (operand, fw)
else: raise Exception("Bad modifier %s" % modifier)
def make_optimized_defn(op, fw, m1, m2):
base_operation = "simd_%s_%i" % (op, fw)
op_name = base_operation + "_" + m1 + m2
operand1 = make_modified_operand(op, fw, 1, m1)
operand2 = make_modified_operand(op, fw, 2, m2)
if (m1 == "h") and (m2 == "h") and (op in support_hh_postshift):
code = "%s(%s, %s)" % (base_operation, operand1, operand2)
return define_if_undefined(op_name, code)
if (op == "add") and (m1 != "x") and (m2 != "x") and (fw in simulated_add_replacement.keys()):
base_operation = "simd_%s_%i" % (op, simulated_add_replacement[fw])
return define_if_undefined(op_name, "%s(%s, %s)" % (base_operation, operand1, operand2))
def make_all_for_op_fw(op, fw):
defn_list = ''
for m1 in mods:
for m2 in mods:
defn_list += make_optimized_defn(op, fw, m1, m2)
return defn_list
#
# Usage: make_all_half_operand_versions(ops, fws) to generate
# a complete file of all the "half-operand" modified versions
# of a set of simd operations for each operation op within
# the list ops and each field width fw in the list of field
# widths.
#
def make_all_half_operand_versions (ops, fws):
defn_list = ''
for op in ops:
for fw in fws:
defn_list += make_all_for_op_fw(op, fw)
return defn_list
def generate_and_write_versions(filename, ops, fws):
defn_list = make_all_half_operand_versions (ops, fws)
    with open(filename, 'w') as f:
        f.write(defn_list)
if __name__ == "__main__":
generate_and_write_versions("mmx_simd_modified.h", ops, fws)
| 6,451 | 39.578616 | 98 |
py
|
simdutf
|
simdutf-master/benchmarks/competition/utf8lut/scripts/measure.py
|
import subprocess, os, sys, re, shutil, json
RunsCount = 10000
ConvertToUtf16 = "FileConverter_msvc %s %s"
Solutions_Decode = {
"trivial": "FileConverter_msvc %s temp.out -b=0 -k={0} --small",
"utf8lut:1S": "FileConverter_msvc %s temp.out -b=3 -k={0} --small",
"utf8lut:4S": "FileConverter_msvc %s temp.out -b=3 -k={0}",
"u8u16": "u8u16_ssse3 %s temp.out {0}",
"utf8sse4": "utf8sse4 %s temp.out {0}",
}
Solutions_Encode = {
"trivial": "FileConverter_msvc -s=utf16 -d=utf8 %s temp.out -b=0 -k={0} --small",
"utf8lut:1S": "FileConverter_msvc -s=utf16 -d=utf8 %s temp.out -b=3 -k={0} --small",
"utf8lut:4S": "FileConverter_msvc -s=utf16 -d=utf8 %s temp.out -b=3 -k={0}",
}
encode = (len(sys.argv) > 1 and sys.argv[1] == 'encode')  # default to decode when no argument is given
Solutions = (Solutions_Encode if encode else Solutions_Decode)
for k,v in Solutions.items():
Solutions[k] = v.format(RunsCount)
Tests = {
"[rnd1111:400000]": "rnd1111_utf8.txt",
"[rnd1110:500000]": "rnd1110_utf8.txt",
"chinese": "chinese_book.txt",
"russian": "war_and_piece.fb2",
"english": "english_book.txt",
"unicode": "unicode_table.html",
}
def log_name_of(sol_name, test_name):
log_name = sol_name + "__" + test_name
log_name = "logs/" + re.sub(r'[\W]', '_', log_name) + ".log"
return log_name
def run_sol_test(sol_name, test_name, encode=False):
if encode:
subprocess.run((ConvertToUtf16 % (Tests[test_name], 'temp.in')).split(), stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
cmd = Solutions[sol_name] % 'temp.in'
else:
cmd = Solutions[sol_name] % Tests[test_name]
log_name = log_name_of(sol_name, test_name)
with open(log_name, 'wt') as f:
subprocess.run(cmd.split(), stdout=f, stderr=f)
return log_name
def parse_log(log_name):
with open(log_name, 'rt') as f:
text = f.read()
external_result = None
internal_result = None
# typical overall measurement, added to every solution by myself
m = re.search(r'From total time\s+:\s+([\d.]+)\s+cyc/el\n', text)
if m:
external_result = float(m.group(1))
# internal timings in utf8lut
for m in re.finditer(r'slot \d+\s+(DECODE|ENCODE)\s+:\s+([\d.]+) cyc/el\s+(\d+) elems\n', text):
if int(m.group(3)) > 0:
internal_result = float(m.group(2))
# internal timings in u8u16
for m in re.finditer(r'BOM \d+: \d+ \(avg time: \d+ cyc/kElem\) Cumulative: \d+ \(avg: (\d+) cyc/kElem\)', text):
internal_result = float(m.group(1)) / 1000.0
return internal_result, external_result
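# Illustrative line shapes the regexes above match (spacing is flexible):
#   'From total time : 12.34 cyc/el'
#   'slot 0 DECODE : 1.23 cyc/el 1000 elems'
#   'BOM 1: 42 (avg time: 100 cyc/kElem) Cumulative: 42 (avg: 100 cyc/kElem)'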
def main():
shutil.rmtree('logs', ignore_errors=True)
os.mkdir('logs')
#print(parse_log(run_sol_test("utf8lut:1S", "chinese")))
#print(parse_log(run_sol_test("u8u16", "chinese")))
#print(parse_log(run_sol_test("utf8sse4", "chinese")))
jsonall = {}
jsonall['solutions'] = list(Solutions.keys())
jsonall['tests'] = list(Tests.keys())
data = jsonall['xdata'] = {}
for test in Tests:
data[test] = {}
for sol in Solutions:
print("Test %s, Sol %s:" % (test, sol), end="", flush=True)
log_name = run_sol_test(sol, test, encode)
print(" finished: ", end="", flush=True)
result = parse_log(log_name)
print(result[0], '/', result[1], flush=True)
data[test][sol] = (result[0], result[1], log_name)
with open("logs/results.json", "wt") as f:
json.dump(jsonall, f, indent=2, sort_keys=True)
if __name__ == "__main__":
main()
| 3,519 | 36.052632 | 134 |
py
|
simdutf
|
simdutf-master/benchmarks/competition/utf8lut/scripts/html_table.py
|
import json, sys
with open(sys.argv[1] + "/results.json", 'rt') as f:
data = json.load(f)
tests = data['tests']
solutions = data['solutions']
print('<tr>')
print(' <th></th>')
for sol in solutions:
print(' <th>%s</th>' % sol)
print('</tr>')
for test in tests:
print('<tr>')
print(' <th>%s</th>' % test)
for sol in solutions:
values = data['xdata'][test][sol]
for i in range(2):
if values[i] is None:
values[i] = '???'
else:
values[i] = '%.2f' % values[i]
print(' <td>%s / %s</td>' % (values[0], values[1]))
print('</tr>')
| 633 | 23.384615 | 60 |
py
|
simdutf
|
simdutf-master/benchmarks/competition/utf8lut/scripts/resize.py
|
import sys, os
filename = sys.argv[1]
want_size = int(sys.argv[2])
buff_size = 2**20
remains = want_size
with open(filename + '.tmp', 'wb') as fo:
while remains > 0:
with open(filename, 'rb') as fi:
while remains > 0:
chunk = fi.read(buff_size)
if len(chunk) == 0:
break
if len(chunk) > remains:
chunk = chunk[:remains]
fo.write(chunk)
remains -= len(chunk)
os.replace(filename + '.tmp', filename)
| 544 | 26.25 | 43 |
py
|
simdutf
|
simdutf-master/scripts/sse_validate_utf16le_testcases.py
|
from itertools import product
from random import randint, seed
from sse_validate_utf16le_proof import bitmask
# This is a copy of mask() from sse_validate_utf16le_proof.py with
# the bitmask widened for 16-word input
def mask(words):
L = bitmask(words, 'L')
H = bitmask(words, 'H')
V = (~(L | H)) & 0xffff
a = L & (H >> 1)
b = a << 1
c = V | a | b
return c
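# Worked example: for the 16 words 'L', 'H' followed by 14 x 'V':
# L = 0x0001, H = 0x0002, V = 0xfffc,
# a = L & (H >> 1) = 0x0001, b = a << 1 = 0x0002,
# c = V | a | b = 0xffff -- the whole chunk is valid.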
class Record:
def __init__(self):
self.words = []
def add(self, word):
self.words.append(word)
@property
def is_valid(self):
c = mask(self.words)
if c == 0xffff:
return True
if c == 0x7fff:
# in test we reject cases when 'L' or 'H' ends a chunk
if self.words[-1] in ('L', 'H'):
return False
else:
return True
return False
def __str__(self):
words = ''.join(self.words)
if self.is_valid:
return 'T' + words
else:
return 'F' + words
def test_words():
collection = set()
for seq in test_words_aux():
collection.add(tuple(seq))
return sorted(collection)
def test_words_aux():
# 1. all valid
yield ['V'] * 16
# 2. only low surrogates
yield ['L'] * 16
# 3. only high surrogates
yield ['H'] * 16
# 4. sole low surrogate
for i in range(16):
seq = ['V'] * 16
seq[i] = 'L'
yield seq
# 5. sole high surrogate
for i in range(16):
seq = ['V'] * 16
seq[i] = 'H'
yield seq
# 6. scattered three surrogates
for i in range(16):
for j in range(16):
for k in range(16):
seq = ['V'] * 16
for a, b, c in product('LH', repeat=3):
seq[i] = a
seq[j] = b
seq[k] = c
yield seq
# To cover all 16-byte inputs we would need 3**16 cases (43'046'721)
# Instead, we cover all possible 6-element combinations (3**6 = 729)
# and move it within 16-element input. This yields 729 * 10 cases.
k = 6
for combination in product('VLH', repeat=k):
for position in range(16 - k):
seq = ['V'] * 16
for i, v in enumerate(combination):
seq[i + position] = v
yield seq
TXT = """# generated by scripts/sse_validate_utf16le_testcases.py
"""
def write_file(file):
file.write(TXT)
for words in test_words():
record = Record()
for word in words:
record.add(word)
file.write(str(record))
file.write('\n')
def main():
seed(0)
with open('validate_utf16_testcases.txt', 'w') as f:
write_file(f)
if __name__ == '__main__':
main()
| 2,787 | 20.446154 | 72 |
py
|
simdutf
|
simdutf-master/scripts/benchmark_print.py
|
import sys
from pathlib import Path
from table import Table
class Input:
def __init__(self):
self.procedure = None
self.input_size = None
self.iterations = None
self.dataset = None
@property
def dataset_name(self):
if self.dataset:
return self.dataset.stem
else:
return 'none'
def __str__(self):
return '<Input: %s, size: %d, iterations: %s, dataset: %s>' % \
(self.procedure, self.input_size, self.iterations, self.dataset)
__repr__ = __str__
class Result:
def __init__(self):
self.instruction_per_byte = None
self.instruction_per_cycle = None
self.speed_gbs = None
self.branch_misses = None
self.cache_misses = None
def __str__(self):
        return '<Result: %f ins/byte, %f ins/cycle, %f GB/s, %f b.misses/byte, %f c.misses/byte>' % \
            (self.instruction_per_byte, self.instruction_per_cycle,
             self.speed_gbs, self.branch_misses, self.cache_misses)
__repr__ = __str__
def parse(file):
result = []
for line in file:
for item in parse_line(line):
if isinstance(item, Input):
result.append(item)
else:
assert isinstance(result[-1], Input)
result[-1] = (result[-1], item)
return result
def parse_line(line):
if 'input size' in line:
yield parse_input(normalize_line(line))
elif 'ins/byte' in line:
yield parse_result(normalize_line(line))
def normalize_line(line):
line = line.replace(',', ' ')
line = line.replace(':', ' ')
line = line.replace('(', ' ')
line = line.replace(')', ' ')
return line.split()
def parse_input(fields):
input = Input()
input.procedure = fields.pop(0)
assert fields.pop(0) == 'input'
assert fields.pop(0) == 'size'
input.input_size = int(fields.pop(0))
assert fields.pop(0) == 'iterations'
input.iterations = int(fields.pop(0))
try:
assert fields.pop(0) == 'dataset'
input.dataset = Path(fields.pop(0))
except IndexError:
pass
return input
def parse_result(fields):
result = Result()
result.instruction_per_byte = float(fields.pop(0))
assert fields.pop(0) == 'ins/byte'
fields.pop(0)
fields.pop(0)
result.speed_gbs = float(fields.pop(0))
assert fields.pop(0) == 'GB/s'
fields.pop(0)
fields.pop(0)
result.instruction_per_cycle = float(fields.pop(0))
assert fields.pop(0) == 'ins/cycle'
result.branch_misses = float(fields.pop(0))
assert fields.pop(0) == 'b.misses/byte'
result.cache_misses = float(fields.pop(0))
assert fields.pop(0) == 'c.mis/byte'
return result
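# For reference, parse_input/parse_result consume tokens in this order (after
# normalize_line strips commas, colons and parentheses); the exact punctuation
# of the benchmark output may differ, this only shows the token sequence the
# parsers rely on:
#
#   <procedure> input size <int> iterations <int> [dataset <path>]
#   <float> ins/byte <tok> <tok> <float> GB/s <tok> <tok> <float> ins/cycle
#   <float> b.misses/byte <float> c.mis/byte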
def print_speed_comparison(data):
procedures = set()
datasets = set()
results = {}
for input, result in data:
procedures.add(input.procedure)
datasets.add(input.dataset_name)
results[(input.procedure, input.dataset_name)] = result
datasets = list(sorted(datasets))
procedures = list(sorted(procedures))
def by_procedure():
table = Table()
table.set_header(['procedure'] + datasets)
for procedure in procedures:
row = []
row.append(procedure)
for dataset in datasets:
try:
result = results[(procedure, dataset)]
row.append('%0.3f GB/s' % (result.speed_gbs))
except KeyError:
row.append('--')
table.add_row(row)
return table
def by_dataset():
table = Table()
table.set_header(['dataset'] + procedures)
for dataset in datasets:
row = []
row.append(dataset)
for procedure in procedures:
try:
result = results[(procedure, dataset)]
row.append('%0.3f GB/s' % (result.speed_gbs))
except KeyError:
row.append('--')
table.add_row(row)
return table
if len(procedures) >= len(datasets):
print(by_procedure())
else:
print(by_dataset())
def main():
if len(sys.argv) < 2:
print("No input files")
print("Provide output from the benchmark utility")
return
for path in sys.argv[1:]:
with open(path, 'rt') as f:
data = parse(f)
print_speed_comparison(data)
if __name__ == '__main__':
main()
| 4,564 | 23.281915 | 98 |
py
|
simdutf
|
simdutf-master/scripts/table.py
|
class TableBase(object):
def __init__(self):
self.headers = []
self.rows = []
def set_header(self, header):
assert len(header) > 0
self.headers = [self.normalize(header)]
def add_header(self, header):
assert len(header) > 0
self.headers.append(self.normalize(header))
def add_row(self, row):
assert len(row) > 0
self.rows.append(self.normalize(row))
def normalize(self, row):
tmp = []
for text, count in self.iter_spans(row):
assert count >= 1
if count == 1:
tmp.append(text)
else:
tmp.append((text, count))
return tmp
def is_raw(self, row):
return all(type(val) == str for val in row)
def iter_spans(self, row):
for item in row:
if type(item) is tuple:
text = item[0]
count = item[1]
else:
text = item
count = 1
yield (text, count)
class TableValidator(object):
def __init__(self, table):
self.table = table
self.columns = self.get_table_columns_count()
if self.columns is None:
raise ValueError("Table doesn't have header which define column layout")
self.validate()
def get_table_columns_count(self):
# only from headers
for header in self.table.headers:
if self.table.is_raw(header):
return len(header)
def validate(self):
for i, header in enumerate(self.table.headers):
n = self.get_columns_count(header)
if n != self.columns:
raise ValueError("header #%d has %d column(s), expected %d: %s" % (i, n, self.columns, header))
for i, row in enumerate(self.table.rows):
n = self.get_columns_count(row)
if n != self.columns:
raise ValueError("row #%d has %d column(s), expected %d: %s" % (i, n, self.columns, row))
def get_columns_count(self, row):
n = 0
for _, count in self.table.iter_spans(row):
n += count
return n
ALIGN_RIGHT = '>'
ALIGN_LEFT = '<'
CENTER = '^'
class RestructuredTextTableRenderer(object):
def __init__(self, table):
self.validator = TableValidator(table)
self.table = table
self.padding = 1
self.widths = self._calculate_widths()
self._adjust_widths()
def get_headers(self):
return self.table.headers
def get_rows(self):
return self.table.rows
def _calculate_widths(self):
width = [0] * self.validator.columns
# get width from fixed
for row in self.get_headers() + self.get_rows():
index = 0
for text, count in self.table.iter_spans(row):
if count > 1:
index += count
continue
w = len(text)
width[index] = max(w, width[index])
index += 1
return width
def _adjust_widths(self):
for row in self.get_headers() + self.get_rows():
index = 0
for text, count in self.table.iter_spans(row):
if count == 1:
index += count
continue
width = self._get_columns_width(index, count)
requested = len(text) + 2 * self.padding
if requested <= width:
index += count
continue
def widen(d):
while True:
for i in range(index, index + count):
self.widths[i] += 1
d -= 1
if d == 0:
return
widen(requested - width)
index += count
def _get_columns_width(self, start, count):
assert count >= 1
w = 0
for index in range(start, start + count):
w += self.widths[index]
w += 2 * self.padding
w += (count - 1) # for the columns spacing '|'
return w
def _render_separator(self, row, fill):
assert len(fill) == 1
result = '+'
index = 0
for text, count in self.table.iter_spans(row):
result += fill * self._get_columns_width(index, count)
result += '+'
index += count
return result
def _render_row(self, row, align = None):
if align is not None:
def get_align(text):
return align
else:
def get_align(text):
if is_float_or_int(text):
return ALIGN_RIGHT
else:
return ALIGN_LEFT
result = '|'
index = 0
for text, count in self.table.iter_spans(row):
width = self._get_columns_width(index, count)
width -= 2 * self.padding
result += ' ' * self.padding
result += u'{:{align}{width}}'.format(text, align=get_align(text), width=width)
result += ' ' * self.padding
result += '|'
index += count
return result
def _merge_rendered_separators(self, sep1, sep2):
# sep1 = '+-----+-----+------+'
# sep2 = '+---+-----------+--+'
# res = '+---+-+-----+---+--+'
assert len(sep1) == len(sep2)
def merge(c1, c2):
if c1 == '+' or c2 == '+':
return '+'
assert c1 == c2
return c1
return ''.join(merge(*pair) for pair in zip(sep1, sep2))
def get_image(self): # rest = RestructuredText
lines = []
lines.append(self._render_separator(self.table.headers[0], '-'))
for header in self.table.headers:
prev = lines[-1]
            curr = self._render_separator(header, '-')
lines[-1] = self._merge_rendered_separators(prev, curr)
lines.append(self._render_row(header, CENTER))
lines.append(self._render_separator(header, '-'))
last_header_sep = len(lines) - 1
for row in self.get_rows():
prev = lines[-1]
            curr = self._render_separator(row, '-')  # bug fix: use the current row's span layout, not the header's
lines[-1] = self._merge_rendered_separators(prev, curr)
lines.append(self._render_row(row))
lines.append(self._render_separator(row, '-'))
lines[last_header_sep] = lines[last_header_sep].replace('-', '=')
return '\n'.join(lines)
class Table(TableBase):
def __unicode__(self):
renderer = RestructuredTextTableRenderer(self)
return renderer.get_image()
def __str__(self):
renderer = RestructuredTextTableRenderer(self)
return renderer.get_image()
def is_float_or_int(text):
try:
float(text)
return True
except ValueError:
return False
if __name__ == '__main__':
table = Table()
table.set_header(["procedure", "size", "time"])
table.add_row(["foo", "100", "0.5"])
table.add_row(["bar", "105", "1.5"])
table.add_row(["baz", "111", "0.2"])
print(table)
table2 = Table()
table2.add_header([("The first experiment", 5)])
table2.add_header([("input", 2), ("procedure", 3)])
table2.add_header(["size1", "size2", "proc1", "proc2", "proc3"])
table2.add_row(["1", "2", "a", "b", "c"])
table2.add_row(["9", "3", "A", "B", "C"])
table2.add_row(["42", "-", ("N/A", 3)])
print(table2)
| 7,636 | 25.517361 | 111 |
py
|
simdutf
|
simdutf-master/scripts/release.py
|
#!/usr/bin/env python3
########################################################################
# Generates a new release.
########################################################################
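# Usage: python3 scripts/release.py <major.minor.revision>
# (the version given must follow the last git tag, as validated below)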
import sys
import re
import subprocess
import io
import os
import fileinput
if sys.version_info < (3, 0):
sys.stdout.write("Sorry, requires Python 3.x or better\n")
sys.exit(1)
def colored(r, g, b, text):
return "\033[38;2;{};{};{}m{} \033[38;2;255;255;255m".format(r, g, b, text)
def extractnumbers(s):
    return tuple(map(int, re.findall(r"(\d+)\.(\d+)\.(\d+)", str(s))[0]))
def toversionstring(major, minor, rev):
return str(major)+"."+str(minor)+"."+str(rev)
def topaddedversionstring(major, minor, rev):
return str(major)+str(minor).zfill(3)+str(rev).zfill(3)
pipe = subprocess.Popen(["git", "rev-parse", "--abbrev-ref", "HEAD"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
branchresult = pipe.communicate()[0].decode().strip()
if(branchresult != "master"):
print(colored(255, 0, 0, "We recommend that you release on master, you are on '"+branchresult+"'"))
ret = subprocess.call(["git", "remote", "update"])
if(ret != 0):
sys.exit(ret)
pipe = subprocess.Popen(["git", "log", "HEAD..", "--oneline"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
uptodateresult = pipe.communicate()[0].decode().strip()
if(len(uptodateresult) != 0):
print(uptodateresult)
sys.exit(-1)
pipe = subprocess.Popen(["git", "rev-parse", "--show-toplevel"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
maindir = pipe.communicate()[0].decode().strip()
scriptlocation = os.path.dirname(os.path.abspath(__file__))
print("repository: "+maindir)
pipe = subprocess.Popen(["git", "describe", "--abbrev=0", "--tags"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
versionresult = pipe.communicate()[0].decode().strip()
print("last version: "+versionresult )
try:
currentv = extractnumbers(versionresult)
except:
currentv = [0,0,0]
if(len(sys.argv) != 2):
nextv = (currentv[0],currentv[1], currentv[2]+1)
print ("please specify version number, e.g. "+toversionstring(*nextv))
sys.exit(-1)
try:
newversion = extractnumbers(sys.argv[1])
except:
print("can't parse version number "+sys.argv[1])
sys.exit(-1)
print("checking that new version is valid")
if(newversion[0] != currentv[0]):
assert newversion[0] == currentv[0] + 1
assert newversion[1] == 0
assert newversion[2] == 0
elif (newversion[1] != currentv[1]):
assert newversion[1] == currentv[1] + 1
assert newversion[2] == 0
else :
assert newversion[2] == currentv[2] + 1
atleastminor= (currentv[0] != newversion[0]) or (currentv[1] != newversion[1])
if(atleastminor):
print(colored(0, 255, 0, "This is more than a revision."))
releasefile = maindir + os.sep + "RELEASES.md"
releasedata = open(releasefile).read()
pattern = re.compile("#\s+\d+\.\d+")
m = pattern.search(releasedata)
if(m == None):
print(colored(255, 0, 0, "You are preparing a new minor release and you have not yet updated RELEASES.md."))
sys.exit(-1)
versionfilerel = os.sep + "include" + os.sep + "simdutf" + os.sep + "simdutf_version.h"
versionfile = maindir + versionfilerel
with open(versionfile, 'w') as file:
file.write("// /include/simdutf/simdutf_version.h automatically generated by release.py,\n")
file.write("// do not change by hand\n")
file.write("#ifndef SIMDUTF_SIMDUTF_VERSION_H\n")
file.write("#define SIMDUTF_SIMDUTF_VERSION_H\n")
file.write("\n")
file.write("/** The version of simdutf being used (major.minor.revision) */\n")
file.write("#define SIMDUTF_VERSION \""+toversionstring(*newversion)+"\"\n")
file.write("\n")
file.write("namespace simdutf {\n")
file.write("enum {\n")
file.write(" /**\n")
file.write(" * The major version (MAJOR.minor.revision) of simdutf being used.\n")
file.write(" */\n")
file.write(" SIMDUTF_VERSION_MAJOR = "+str(newversion[0])+",\n")
file.write(" /**\n")
file.write(" * The minor version (major.MINOR.revision) of simdutf being used.\n")
file.write(" */\n")
file.write(" SIMDUTF_VERSION_MINOR = "+str(newversion[1])+",\n")
file.write(" /**\n")
file.write(" * The revision (major.minor.REVISION) of simdutf being used.\n")
file.write(" */\n")
file.write(" SIMDUTF_VERSION_REVISION = "+str(newversion[2])+"\n")
file.write("};\n")
file.write("} // namespace simdutf\n")
file.write("\n")
file.write("#endif // SIMDUTF_SIMDUTF_VERSION_H\n")
print(versionfile + " modified")
newmajorversionstring = str(newversion[0])
newminorversionstring = str(newversion[1])
newrevversionstring = str(newversion[2])
newversionstring = str(newversion[0]) + "." + str(newversion[1]) + "." + str(newversion[2])
cmakefile = maindir + os.sep + "CMakeLists.txt"
sonumber = None
pattern = re.compile("set\(SIMDUTF_LIB_SOVERSION \"(\d+)\" CACHE STRING \"simdutf library soversion\"\)")
with open (cmakefile, 'rt') as myfile:
for line in myfile:
m = pattern.search(line)
if m != None:
sonumber = int(m.group(1))
break
print("so library number "+str(sonumber))
if(atleastminor):
print("Given that we have a minor revision, it seems necessary to bump the so library number")
sonumber += 1
for line in fileinput.input(cmakefile, inplace=1, backup='.bak'):
    line = re.sub(r' VERSION \d+\.\d+\.\d+', ' VERSION '+newmajorversionstring+'.'+newminorversionstring+'.'+newrevversionstring, line.rstrip())
    line = re.sub(r'SIMDUTF_LIB_VERSION "\d+\.\d+\.\d+', 'SIMDUTF_LIB_VERSION "'+newversionstring, line)
    line = re.sub(r'set\(SIMDUTF_LIB_SOVERSION "\d+"', 'set(SIMDUTF_LIB_SOVERSION "'+str(sonumber)+'"', line)
print(line)
print("modified "+cmakefile+", a backup was made")
doxyfile = maindir + os.sep + "Doxyfile"
for line in fileinput.input(doxyfile, inplace=1, backup='.bak'):
    line = re.sub(r'PROJECT_NUMBER = "\d+\.\d+\.\d+', 'PROJECT_NUMBER = "'+newversionstring, line.rstrip())
print(line)
print("modified "+doxyfile+", a backup was made")
cp = subprocess.run([sys.executable, "amalgamate.py"], stdout=subprocess.DEVNULL, cwd=maindir+ os.sep + "singleheader") # doesn't capture output
if(cp.returncode != 0):
print("Failed to run amalgamate")
else:
print("The singleheader/singleheader.zip file has been updated.")
cp = subprocess.run(["doxygen"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, cwd=maindir) # doesn't capture output
if(cp.returncode != 0):
print("Failed to run doxygen")
readmefile = maindir + os.sep + "README.md"
for line in fileinput.input(readmefile, inplace=1, backup='.bak'):
    line = re.sub(r'\s*https://github.com/simdutf/simdutf/releases/download/v(\d+\.\d+\.\d+)/singleheader.zip\s*', 'https://github.com/simdutf/simdutf/releases/download/v'+newversionstring+'/singleheader.zip', line.rstrip())
print(line)
print("modified "+readmefile+", a backup was made")
print("Please run the tests before issuing a release. \n")
print("to issue release, enter \n git commit -a && git push && git tag -a v"+toversionstring(*newversion)+" -m \"version "+toversionstring(*newversion)+"\" && git push --tags \n")
| 7,269 | 37.877005 | 222 |
py
|
simdutf
|
simdutf-master/scripts/common.py
|
import textwrap
import sys
if sys.version_info[0] < 3:
print('You need to run this with Python 3')
sys.exit(1)
indent = ' ' * 4
def fill(text):
tmp = textwrap.fill(text)
return textwrap.indent(tmp, indent)
def filltab(text):
tmp = textwrap.fill(text, width=120)
return textwrap.indent(tmp, '\t')
def cpp_array_initializer(arr):
return '{%s}' % (', '.join(map(str, arr)))
def compose2(f, g):
return lambda x: f(g(x))
def cpp_arrayarray_initializer(arr):
return '{%s}' % (',\n '.join(map(compose2(filltab,cpp_array_initializer), arr)))
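# examples:
#   cpp_array_initializer([1, 2, 3]) -> '{1, 2, 3}'
#   compose2(f, g)(x) -> f(g(x))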
| 576 | 22.08 | 84 |
py
|
simdutf
|
simdutf-master/scripts/sse_utf8_utf16_decode.py
|
#!/usr/bin/env python3
from common import *
def is_bit_set(mask, i):
return (mask & ( 1<<i )) == ( 1<<i )
# computes the location of the 0 bits (index starts at zero)
def compute_locations(mask):
answer = []
i = 0
while( (mask >> i) > 0 ):
if(is_bit_set(mask,i)):
answer.append(i)
i += 1
return answer
# computes the gaps between the 1 bits, assuming an implicit initial 1 just before bit 0
def compute_code_point_size(mask):
positions = compute_locations(mask)
answer = []
oldx = -1
for i in range(len(positions)):
x = positions[i]
answer.append(x-oldx)
oldx = x
return answer
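# e.g. mask 0b101 has 1 bits at positions [0, 2], giving code point
# sizes [1, 2]: a 1-byte sequence starting at byte 0 and a 2-byte
# sequence starting at byte 1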
## check that we have at least 6 code points of 1-2 bytes
def easy_case12(code_point_size):
if(len(code_point_size)<6):
return False
return max(code_point_size[:6])<=2
## check that we have at least 4 code points of 1-3 bytes
def easy_case123(code_point_size):
if(len(code_point_size)<4):
return False
return max(code_point_size[:4])<=3
## check that we have at least 3 code points of 1-4 bytes
def easy_case1234(code_point_size):
if(len(code_point_size)<3):
return False
return max(code_point_size[:3])<=4
def grab_easy_case12_code_point_size(code_point_size):
return code_point_size[:6]
def grab_easy_case123_code_point_size(code_point_size):
return code_point_size[:4]
def grab_easy_case1234_code_point_size(code_point_size):
return code_point_size[:3]
def buildshuf12_twobytes(sizes):
answer = [0 for i in range(16)]
pos = 0
for i in range(len(sizes)):
if(sizes[i] == 1):
answer[2*i] = pos
answer[2*i+1] = 0xff
pos += 1
else:
answer[2*i] = pos + 1
answer[2*i+1] = pos
pos += 2
return answer
def buildshuf123_threebytes(sizes):
answer = [0 for i in range(16)]
pos = 0
for i in range(len(sizes)): # 4 * 4 = 16
if(sizes[i] == 1):
answer[4*i] = pos
answer[4*i+1] = 0xff
answer[4*i+2] = 0xff
answer[4*i+3] = 0xff
pos += 1
elif(sizes[i] == 2):
answer[4*i] = pos + 1
answer[4*i+1] = pos
answer[4*i+2] = 0xff
answer[4*i+3] = 0xff
pos += 2
else: # must be three
answer[4*i] = pos + 2
answer[4*i+1] = pos + 1
answer[4*i+2] = pos
answer[4*i+3] = 0xff
pos += 3
return answer
def buildshuf1234_fourbytes(sizes):
answer = [0 for i in range(16)]
pos = 0
for i in range(len(sizes)): # 3 * 4 = 12
if(sizes[i] == 1):
answer[4*i] = pos
answer[4*i+1] = 0xff
answer[4*i+2] = 0xff
answer[4*i+3] = 0xff
pos += 1
elif(sizes[i] == 2):
answer[4*i] = pos + 1
answer[4*i+1] = pos
answer[4*i+2] = 0xff
answer[4*i+3] = 0xff
pos += 2
elif(sizes[i] == 3):
answer[4*i] = pos + 2
answer[4*i+1] = pos + 1
answer[4*i+2] = pos
answer[4*i+3] = 0xff
pos += 3
else: # must be four
answer[4*i] = pos + 3
answer[4*i+1] = pos + 2
answer[4*i+2] = pos + 1
answer[4*i+3] = pos
pos += 4
return answer
def main():
easycase12 = set()
easycase123 = set()
easycase1234 = set()
for x in range(1<<12):
sizes = compute_code_point_size(x)
if(easy_case12(sizes)):
z1 = grab_easy_case12_code_point_size(sizes)
easycase12.add(tuple(z1))
elif(easy_case123(sizes)):
z1 = grab_easy_case123_code_point_size(sizes)
easycase123.add(tuple(z1))
elif(easy_case1234(sizes)):
z1 = grab_easy_case1234_code_point_size(sizes)
easycase1234.add(tuple(z1))
easycase12sorted = [x for x in easycase12]
easycase12sorted.sort()
easycase123sorted = [x for x in easycase123]
easycase123sorted.sort()
easycase1234sorted = [x for x in easycase1234]
easycase1234sorted.sort()
print("#include <cstdint>")
allshuf = [buildshuf12_twobytes(z) for z in easycase12sorted] + [buildshuf123_threebytes(z) for z in easycase123sorted] + [buildshuf1234_fourbytes(z) for z in easycase1234sorted]
print("const uint8_t shufutf8["+str(len(easycase12sorted+easycase123sorted+easycase1234sorted))+"][16] = ")
print(cpp_arrayarray_initializer(allshuf), end=";\n")
print("/* number of two bytes : "+ str(len(easycase12sorted))+ " */")
print("/* number of two + three bytes : "+ str(len(easycase12sorted+easycase123sorted))+ " */")
print("/* number of two + three + four bytes : "+ str(len(easycase12sorted+easycase123sorted+easycase1234sorted))+ " */")
c = 0
index = {}
for t in easycase12sorted + easycase123sorted + easycase1234sorted:
index[t] = c
c = c + 1
arrg=[]
for x in range(1<<12):
sizes = compute_code_point_size(x)
if(easy_case12(sizes)):
z1 = grab_easy_case12_code_point_size(sizes)
idx = index[tuple(z1)]
s = sum(z1)
arrg.append((idx,s))
elif(easy_case123(sizes)):
z1 = grab_easy_case123_code_point_size(sizes)
idx = index[tuple(z1)]
s = sum(z1)
arrg.append((idx,s))
elif(easy_case1234(sizes)):
z1 = grab_easy_case1234_code_point_size(sizes)
idx = index[tuple(z1)]
s = sum(z1)
arrg.append((idx,s))
else:
# we are in error, use a bogus index
arrg.append((209,12))
print("const uint8_t utf8bigindex["+str(len(arrg))+"][2] = ")
print(cpp_arrayarray_initializer(arrg), end=";\n")
if __name__ == '__main__':
main()
| 5,685 | 29.406417 | 183 |
py
|
simdutf
|
simdutf-master/scripts/sse_validate_utf16le_proof.py
|
# Note: Validation is done for 8-word input, so we only need to check 3^8 = 6561 cases.
# Validation for 16-word inputs requires 3**16 = 43'046'721 checks.
ELEMENTS_COUNT = 8
ALL_MASK = (1 << ELEMENTS_COUNT) - 1
ALL_BUT_ONE_MASK = (ALL_MASK >> 1)
# 'V' - single-word character (always valid)
# 'L' - low surrogate (must be followed by the high surrogate)
# 'H' - high surrogate
def all_sequences():
index = ['V'] * ELEMENTS_COUNT
def increment():
nonlocal index
for i in range(ELEMENTS_COUNT):
if index[i] == 'V':
index[i] = 'L'
return False
if index[i] == 'L':
index[i] = 'H'
return False
if index[i] == 'H':
index[i] = 'V' # wrap around
pass
return True
overflow = False
while not overflow:
yield index
overflow = increment()
def find_error_in_words(words):
prev = None
if words[0] == 'H':
        # We assume that our vector algorithm loads proper data into vectors.
        # This covers the case where a low surrogate was the last item of the
        # previous iteration.
return 'high surrogate must not start a chunk'
for i, kind in enumerate(words):
if kind == 'V':
if prev == 'L':
return f'low surrogate {i - 1} must be followed by high surrogate'
elif kind == 'L':
if prev == 'L':
return f'low surrogate {i - 1} must be followed by high surrogate'
elif kind == 'H':
if prev != 'L':
return f'high surrogate {i} must be preceded by low surrogate'
prev = kind
return ''
def bitmask(words, state):
result = 0
for bit, type in enumerate(words):
if type == state:
# In SSE vector algorithm we compare 2 x 16 higher bytes of input
# words, which yields a 16-bit mask.
result |= 1 << bit
return result
def mask(words):
L = bitmask(words, 'L')
H = bitmask(words, 'H')
V = (~(L | H)) & ALL_MASK
a = L & (H >> 1)
b = a << 1
c = V | a | b
return c
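# Worked example for words = ['L','H','V','V','V','V','V','V']:
# L = 0b00000001, H = 0b00000010, V = 0b11111100,
# a = L & (H >> 1) = 0b00000001, b = a << 1 = 0b00000010,
# c = V | a | b = 0b11111111 = ALL_MASK, i.e. the chunk is valid.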
def dump():
for words in all_sequences():
c = mask(words)
words_image = "[ %s ]" % ' | '.join(words)
error = find_error_in_words(words)
if error == '':
valid_image = 'T'
else:
valid_image = ' '
print(words_image, valid_image, '{:016b} {:04x}'.format(c, c))
def proof():
case1_hit = False
case2_hit = False
for words in all_sequences():
c = mask(words)
if c == ALL_MASK:
case1_hit = True
# all 16 words are valid (either 'V' or pairs 'L', 'H')
assert find_error_in_words(words) == '', (words, find_error_in_words(words))
if c == ALL_BUT_ONE_MASK:
case2_hit = True
# all 15 words are valid (either 'V' or pairs 'L', 'H')
# the last words is either 'L' or 'H' (the word will be
# re-examined in the next iteration of an algorithm)
if words[-1] == 'H':
assert find_error_in_words(words) == 'high surrogate 7 must be preceded by low surrogate'
elif words[-1] == 'L':
assert find_error_in_words(words) == ''
else:
assert False
assert case1_hit
assert case2_hit
print("All OK")
def main():
if 0:
dump()
else:
proof()
if __name__ == '__main__':
main()
| 3,541 | 24.666667 | 105 |
py
|
simdutf
|
simdutf-master/scripts/sse_convert_utf16_to_utf8.py
|
#!/usr/bin/env python3
import sys
def format_array(array):
result = []
for value in array:
if value < 0 or value == 0x80:
result.append('0x80')
else:
result.append(str(value))
return ', '.join(result)
def assure_array_length(array, size, value = 0x80):
while len(array) < size:
array.append(value)
CPP_1_2 = """
// 1 byte for length, 16 bytes for mask
const uint8_t pack_1_2_utf8_bytes[256][17] = {
%(rows)s
};
"""
# For all patterns the 0th element of shuffle is 0.
# We may reuse that entry to store length, but it would
# require some changes in C++ code.
def shuffle_for_conversion_1_or_2_utf8_bytes(file):
rows = []
indent = (' ' * 4)
for shuffle, size in shuffle_for_conversion_1_or_2_utf8_bytes_aux():
array_str = []
for value in [size] + shuffle:
if value == 0x80:
array_str.append('0x80')
else:
array_str.append(str(value))
array = ','.join(array_str)
rows.append(f'{indent}{{{array}}}')
file.write(CPP_1_2 % {'rows': ',\n'.join(rows)})
def shuffle_for_conversion_1_or_2_utf8_bytes_aux():
    # We process 8 x 16-bit words:
    # a one bit indicates a word with values 0x00..0x7f (produces a single UTF-8 byte)
    # a zero bit indicates a word with values 0x0080..0x7ff (produces two UTF-8 bytes)
# Our input is a 16-bit word in form hhggffeeddccbbaa -- the bits are doubled
# (h - MSB, a - LSB). In a C++ code we transform it using the following formula:
#
# in = hhggffeeddccbbaa
# t0 = in & 0x5555 // t0 = 0h0g0f0e0d0c0b0a
# t1 = t0 >> 7 // t1 = 00000000h0g0f0e0
# t2 = (t0 | t1) & 0xff // t2 = hdgcfbea
for mask in range(256):
def getbit(k):
return (mask & (1 << k) != 0)
a = getbit(0)
b = getbit(2)
c = getbit(4)
d = getbit(6)
e = getbit(1)
f = getbit(3)
g = getbit(5)
h = getbit(7)
shuffle = []
for word_index, bit in enumerate([a, b, c, d, e, f, g, h]):
if bit: # 1 byte
shuffle.append(word_index * 2)
else: # 2 bytes
shuffle.append(word_index * 2 + 1)
shuffle.append(word_index * 2)
output_bytes = len(shuffle)
while (len(shuffle) < 16):
shuffle.append(0x80)
yield (shuffle, output_bytes)
CPP_1_2_3 = """
// 1 byte for length, 16 bytes for mask
const uint8_t pack_1_2_3_utf8_bytes[256][17] = {
%(rows)s
};
"""
def shuffle_for_conversion_1_2_3_utf8_bytes(file):
rows = []
indent = (' ' * 4)
for shuffle, size in shuffle_for_conversion_1_2_3_utf8_bytes_aux():
array_str = []
for value in [size] + shuffle:
if value == 0x80:
array_str.append('0x80')
else:
array_str.append(str(value))
array = ','.join(array_str)
rows.append(f'{indent}{{{array}}}')
file.write(CPP_1_2_3 % {'rows': ',\n'.join(rows)})
def shuffle_for_conversion_1_2_3_utf8_bytes_aux():
# There are two 8-bit bitmask telling how many bytes each word produces (1, 2 or 3).
# mask1 = ddccbbaa -- output exactly one byte (d - MSB, a - LSB)
# mask2 = hhggffee -- output one or two bytes
# Please note that each bit is duplicated. In final form these bits are interleaved:
# mask = (mask1 & 0x5555) | (mask2 & 0xaaaa)
# = hdgcfbea
# Each two-bit subword decides how many bytes will be copied from a 32-bit word of register:
# | e | a | ea |
# +---+---+----+-------
# | 0 | 0 | 0 | 3 bytes
    # | 0 | 1 | 1  | -- such a combination will never come from the C++ code; it makes no sense
# | 1 | 0 | 2 | 2 bytes
# | 1 | 1 | 3 | 1 byte
for mask in range(256):
empty = 0x80
shuffle = []
for i in range(4):
subword = mask & 0b11
mask >>= 2
if subword == 0:
shuffle.append(i*4 + 2)
shuffle.append(i*4 + 3)
shuffle.append(i*4 + 1)
elif subword == 3:
shuffle.append(i*4 + 0)
elif subword == 2:
shuffle.append(i*4 + 3)
shuffle.append(i*4 + 1)
output_bytes = len(shuffle)
while (len(shuffle) < 16):
shuffle.append(empty)
yield (shuffle, output_bytes)
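# e.g. for mask 0xff every two-bit subword is 3 (one UTF-8 byte per word), so
# the yielded shuffle is [0, 4, 8, 12] padded with 0x80 and output_bytes == 4;
# for mask 0x00 every subword is 0 (three bytes each), giving 12 output bytes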
CPP_EXPAND_SURROGATES = """
// 2x16 bytes for masks, dwords_consumed
const uint8_t expand_surrogates[256][33] = {
%(rows)s
};
"""
def shuffle_for_expanding_surrogate_pairs(file):
rows = []
indent = (' ' * 4)
for shuffle, dwords_consumed in shuffle_for_expanding_surrogate_pairs_aux():
        # If we consume, say, 6 dwords of 8, then the C++ conversion routine
        # still converts the 2 extra (zeroed) dwords into 2 UTF-8 bytes. Thus
# we have to subtract this zero_dwords from saved bytes, to get
# the real number of output bytes.
        zero_dwords = 8 - dwords_consumed
assert len(shuffle) == 32
rows.append('%s{%s}' % (indent, format_array(shuffle + [zero_dwords])))
file.write(CPP_EXPAND_SURROGATES % {'rows': ',\n'.join(rows)})
# Our input 8-bit bitmask informs which word contains a surrogate (low or high one).
# At this point we do not need to know which is which, as we assume that word
# expansion is done after validation. (Let's assume L - low surrogate, H - high
# surrogate, V - any valid non-surrogate word).
#
# Example 1: bitmask 1001'1110 describes a sequence V-L-H-L-H-V-V-? -- the last
# surrogate word might be either L or H, we'll ignore it. Two adjacent bits
# are expected to contain low & high surrogates
#
# Example 2: bitmask 0011'0110 describes a sequence V-L-K-V-L-H-V-V.
#
# Example 3: bitmask 0000'0001 is not valid --- sole surrogate word must not start
# a chunk of string, and C++ takes care not to pass such wrong input.
#
# Example 4: bitmask 0000'1110 is not valid too
#
# We expand all words into 32-bit lanes, spanning two SSE registers.
def shuffle_for_expanding_surrogate_pairs_aux():
def shuffle_mask(mask):
result = []
prev = 'V'
dwords_consumed = 0
for i in range(8):
bit = bool(mask & (1 << i))
if bit:
if prev == 'V':
curr = 'L'
elif prev == 'L':
curr = 'H'
elif prev == 'H':
curr = 'L'
result.append(2*i + 0)
result.append(2*i + 1)
if curr == 'L':
dwords_consumed += 1
else:
if prev == 'V':
curr = 'V'
elif prev == 'L':
raise ValueError('invalid sequence')
elif prev == 'H':
curr = 'V'
result.append(2*i + 0)
result.append(2*i + 1)
result.append(-1)
result.append(-1)
dwords_consumed += 1
prev = curr
#for
if curr == 'L': # a sole low surrogate word at the end, discard it (C++ code deals with this case)
del result[-1]
del result[-1]
dwords_consumed -= 1
while len(result) < 32:
result.append(-1)
return result, dwords_consumed
invalid = 0
# our input is in form: hdgcfbea
# we need bits in seq: hgfedcba
def as_mask(x):
def bit(k):
return int(bool((1 << k) & x))
return bit(0) \
| (bit(2) << 1) \
| (bit(4) << 2) \
| (bit(6) << 3) \
| (bit(1) << 4) \
| (bit(3) << 5) \
| (bit(5) << 6) \
| (bit(7) << 7)
if False:
print('{:08b}'.format(as_mask(0x85)))
shuffle_mask(as_mask(0x85))
sys.exit(1)
for x in range(256):
mask = as_mask(x)
try:
yield shuffle_mask(mask)
except ValueError:
yield (([-1] * 32), 0)
CPP_UCS4_TO_UTF8 = """
struct UCS4_to_UTF8 {
uint8_t shuffle[16];
uint8_t const_bits_mask[16];
uint8_t output_bytes;
};
static_assert(sizeof(UCS4_to_UTF8) == 33, "Structure must be packed");
const UCS4_to_UTF8 ucs4_to_utf8[256] = {
%(rows)s
};
"""
"""
The input is an 8-bit mask laid out as ghcd'efab (see the bit extraction in
ucs4_to_utf8_aux below). The two-bit words ab, cd, ef, gh encode how many
UTF-8 bytes are stored in each dword of an SSE register:
- 00 - 1 byte
- 01 - 2 bytes
- 10 - 3 bytes
- 11 - 4 bytes
We output 3 values:
- a shuffle mask to extract UTF-8 bytes,
- mask to complete UTF-8 format,
- the total number of UTF-8 bytes.
"""
def ucs4_to_utf8(file):
rows = []
indent = (' ' * 4)
for shuffle, const_bits_mask, output_bytes in ucs4_to_utf8_aux():
#print(output_bytes)
rows.append('%s{{%s}, {%s}, %d}' % (indent,
format_array(shuffle),
format_array(const_bits_mask),
output_bytes))
file.write(CPP_UCS4_TO_UTF8 % {'rows': ',\n'.join(rows)})
def ucs4_to_utf8_aux():
for x in range(256):
shuffle = []
utf8bits = []
output_bytes = 0
def bit(k):
return int(bool((1 << k) & x))
def code(bit1, bit0):
return 2*bit1 + bit0
ab = code(bit(1), bit(0))
cd = code(bit(5), bit(4))
ef = code(bit(3), bit(2))
gh = code(bit(7), bit(6))
for i, count in enumerate([ab, cd, ef, gh]):
if count == 0:
shuffle.append(4*i + 0)
utf8bits.append(0x00)
utf8bits.append(0x00)
utf8bits.append(0x00)
utf8bits.append(0x00)
output_bytes += 1
elif count == 1:
shuffle.append(4*i + 1)
shuffle.append(4*i + 0)
utf8bits.append(0b10000000)
utf8bits.append(0b11000000)
utf8bits.append(0x00)
utf8bits.append(0x00)
output_bytes += 2
elif count == 2:
shuffle.append(4*i + 2)
shuffle.append(4*i + 1)
shuffle.append(4*i + 0)
utf8bits.append(0b10000000)
utf8bits.append(0b10000000)
utf8bits.append(0b11100000)
utf8bits.append(0x00)
output_bytes += 3
elif count == 3:
shuffle.append(4*i + 3)
shuffle.append(4*i + 2)
shuffle.append(4*i + 1)
shuffle.append(4*i + 0)
utf8bits.append(0b10000000)
utf8bits.append(0b10000000)
utf8bits.append(0b10000000)
utf8bits.append(0b11110000)
output_bytes += 4
else:
assert False
assure_array_length(shuffle, 16, 0x80)
assert len(utf8bits) == 16
assert len(shuffle) == 16
yield (shuffle, utf8bits, output_bytes)
CPP_HEADER = """// file generated by scripts/sse_convert_utf16_to_utf8.py
#ifndef SIMDUTF_UTF16_TO_UTF8_TABLES_H
#define SIMDUTF_UTF16_TO_UTF8_TABLES_H
namespace simdutf {
namespace {
namespace tables {
namespace utf16_to_utf8 {
"""
CPP_FOOTER = """} // utf16_to_utf8 namespace
} // tables namespace
} // unnamed namespace
} // namespace simdutf
#endif // SIMDUTF_UTF16_TO_UTF8_TABLES_H
"""
def main():
with open('utf16_to_utf8_tables.h', 'wt') as f:
f.write(CPP_HEADER)
shuffle_for_conversion_1_or_2_utf8_bytes(f)
shuffle_for_conversion_1_2_3_utf8_bytes(f)
shuffle_for_expanding_surrogate_pairs(f)
ucs4_to_utf8(f)
f.write(CPP_FOOTER)
if __name__ == '__main__':
main()
| 11,181 | 25.434988 | 102 |
py
|
simdutf
|
simdutf-master/scripts/create_latex_table.py
|
#!/usr/bin/env python3
import sys
import re
import argparse
# Construct an argument parser
all_args = argparse.ArgumentParser()
# Add arguments to the parser
all_args.add_argument("-f", "--file", required=True,
help="file name")
args = vars(all_args.parse_args())
filename = args['file']
with open(filename) as f:
content = f.readlines()
table = []
currentrow = {}
datasets = set()
codecs = set()
for line in content:
if line.startswith("convert"):
codec = re.search(r"\+(\w+)",line).group(1)
rfile = re.search(r"/(\w+)[\.-]",line).group(1)
currentrow["codec"] = codec
currentrow["dataset"] = rfile
datasets.add(rfile)
codecs.add(codec)
m = re.search(r"\s([\.0-9]+) Gc/s",line)
if m:
v = float(m.group(1))
currentrow["result"] = '{:#.2g}'.format(v)
table.append(currentrow)
currentrow = {}
favorite_kernels = ["icu", "llvm", "hoehrmann", "cppcon2018", "u8u16", "utf8sse4", "utf8lut", "haswell", "arm64"]
kernels = []
s = " "
for k in favorite_kernels:
if k in codecs:
kernels.append(k)
s += " & " + k
s += " \\\\"
print(s)
def get(d, k):
for x in table:
if(x['codec'] == k) and (x['dataset'] == d):
return x["result"]
datasets=sorted(datasets)
for dataset in datasets:
s = dataset
for k in kernels:
s += " & " + get(dataset, k)
s += " \\\\"
print(s)
| 1,426 | 22.783333 | 114 |
py
|
InDuDoNet
|
InDuDoNet-main/test_clinic.py
|
import os.path
import os
import os.path
import argparse
import numpy as np
import torch
from CLINIC_metal.preprocess_clinic.preprocessing_clinic import clinic_input_data
from network.indudonet import InDuDoNet
import nibabel
import time
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
parser = argparse.ArgumentParser(description="YU_Test")
parser.add_argument("--model_dir", type=str, default="models", help='path to model and log files')
parser.add_argument("--data_path", type=str, default="CLINIC_metal/test/", help='path to training data')
parser.add_argument("--use_GPU", type=bool, default=True, help='use GPU or not')
parser.add_argument("--save_path", type=str, default="results/CLINIC_metal/", help='path to training data')
parser.add_argument('--num_channel', type=int, default=32, help='the number of dual channels')
parser.add_argument('--T', type=int, default=4, help='the number of ResBlocks in every ProxNet')
parser.add_argument('--S', type=int, default=10, help='the number of total iterative stages')
parser.add_argument('--eta1', type=float, default=1, help='initialization for stepsize eta1')
parser.add_argument('--eta2', type=float, default=5, help='initialization for stepsize eta2')
parser.add_argument('--alpha', type=float, default=0.5, help='initialization for weight factor')
opt = parser.parse_args()
def mkdir(path):
folder = os.path.exists(path)
if not folder:
os.makedirs(path)
print("--- new folder... ---")
print("--- " + path + " ---")
else:
print("--- There exsits folder " + path + " ! ---")
Pred_nii = opt.save_path +'/X_mar/'
mkdir(Pred_nii)
def image_get_minmax():
return 0.0, 1.0
def proj_get_minmax():
return 0.0, 4.0
def normalize(data, minmax):
data_min, data_max = minmax
data = np.clip(data, data_min, data_max)
data = (data - data_min) / (data_max - data_min)
data = data * 255.0
data = data.astype(np.float32)
data = np.expand_dims(np.transpose(np.expand_dims(data, 2), (2, 0, 1)),0)
return data
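# e.g. with image_get_minmax() the input is clipped to [0, 1], rescaled to
# [0, 255] as float32, and reshaped to a 1 x 1 x H x W tensor layout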
def test_image(allXma, allXLI, allM, allSma, allSLI, allTr, vol_idx, slice_idx):
Xma = allXma[vol_idx][...,slice_idx]
XLI = allXLI[vol_idx][...,slice_idx]
M = allM[vol_idx][...,slice_idx]
Sma = allSma[vol_idx][...,slice_idx]
SLI = allSLI[vol_idx][...,slice_idx]
Tr = allTr[vol_idx][...,slice_idx]
Xma = normalize(Xma, image_get_minmax()) # *255
XLI = normalize(XLI, image_get_minmax())
Sma = normalize(Sma, proj_get_minmax())
SLI = normalize(SLI, proj_get_minmax())
Tr = 1-Tr.astype(np.float32)
Tr = np.expand_dims(np.transpose(np.expand_dims(Tr, 2), (2, 0, 1)),0) # 1*1*h*w
Mask = M.astype(np.float32)
Mask = np.expand_dims(np.transpose(np.expand_dims(Mask, 2), (2, 0, 1)),0)
return torch.Tensor(Xma).cuda(), torch.Tensor(XLI).cuda(), torch.Tensor(Mask).cuda(), \
torch.Tensor(Sma).cuda(), torch.Tensor(SLI).cuda(), torch.Tensor(Tr).cuda()
def main():
# Build model
print('Loading model ...\n')
net = InDuDoNet(opt).cuda()
net.load_state_dict(torch.load(os.path.join(opt.model_dir)))
net.eval()
print('--------------load---------------all----------------nii-------------')
allXma, allXLI, allM, allSma, allSLI, allTr, allaffine, allfilename = clinic_input_data(opt.data_path)
print('--------------test---------------all----------------nii-------------')
for vol_idx in range(len(allXma)):
print('test %d th volume.......' % vol_idx)
num_s = allXma[vol_idx].shape[2]
pre_Xout = np.zeros_like(allXma[vol_idx])
pre_name = allfilename[vol_idx]
for slice_idx in range(num_s):
Xma, XLI, M, Sma, SLI, Tr = test_image(allXma, allXLI, allM, allSma, allSLI, allTr, vol_idx, slice_idx)
with torch.no_grad():
if opt.use_GPU:
torch.cuda.synchronize()
start_time = time.time()
ListX, ListS, ListYS= net(Xma, XLI, M, Sma, SLI, Tr)
Xout= ListX[-1] / 255.0
pre_Xout[..., slice_idx] = Xout.data.cpu().numpy().squeeze()
nibabel.save(nibabel.Nifti1Image(pre_Xout, allaffine[vol_idx]), Pred_nii + pre_name)
if __name__ == "__main__":
main()
| 4,225 | 43.484211 | 116 |
py
|
InDuDoNet
|
InDuDoNet-main/train.py
|
# !/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Created on Tue Nov 5 11:56:06 2020
@author: hongwang ([email protected])
MICCAI2021: ``InDuDoNet: An Interpretable Dual Domain Network for CT Metal Artifact Reduction''
paper link: https://arxiv.org/pdf/2109.05298.pdf
"""
from __future__ import print_function
import argparse
import os
import torch
import torch.nn.functional as F
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import time
import matplotlib.pyplot as plt
import numpy as np
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from math import ceil
from deeplesion.Dataset import MARTrainDataset
from network.indudonet import InDuDoNet
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
parser = argparse.ArgumentParser()
parser.add_argument("--data_path", type=str, default="./deep_lesion/", help='txt path to training spa-data')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=0)
parser.add_argument('--batchSize', type=int, default=1, help='input batch size')
parser.add_argument('--patchSize', type=int, default=416, help='the height / width of the input image to network')
parser.add_argument('--niter', type=int, default=100, help='total number of training epochs')
parser.add_argument('--num_channel', type=int, default=32, help='the number of dual channels') # refer to https://github.com/hongwang01/RCDNet for the channel concatenation strategy
parser.add_argument('--T', type=int, default=4, help='the number of ResBlocks in every ProxNet')
parser.add_argument('--S', type=int, default=10, help='the number of total iterative stages')
parser.add_argument('--resume', type=int, default=0, help='continue to train')
parser.add_argument("--milestone", type=int, default=[40, 80], help="When to decay learning rate")
parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate')
parser.add_argument('--log_dir', default='./logs/', help='tensorboard logs')
parser.add_argument('--model_dir', default='./models/', help='saving model')
parser.add_argument('--eta1', type=float, default=1, help='initialization for stepsize eta1')
parser.add_argument('--eta2', type=float, default=5, help='initialization for stepsize eta2')
parser.add_argument('--alpha', type=float, default=0.5, help='initialization for weight factor')
parser.add_argument('--gamma', type=float, default=1e-1, help='hyper-parameter for balancing different loss items')
opt = parser.parse_args()
# create path
try:
os.makedirs(opt.log_dir)
except OSError:
pass
try:
os.makedirs(opt.model_dir)
except OSError:
pass
cudnn.benchmark = True
def train_model(net, optimizer, scheduler, datasets):
data_loader = DataLoader(datasets, batch_size=opt.batchSize, shuffle=True, num_workers=int(opt.workers),
pin_memory=True)
num_data = len(datasets)
num_iter_epoch = ceil(num_data / opt.batchSize)
writer = SummaryWriter(opt.log_dir)
step = 0
for epoch in range(opt.resume, opt.niter):
mse_per_epoch = 0
tic = time.time()
# train stage
lr = optimizer.param_groups[0]['lr']
phase = 'train'
for ii, data in enumerate(data_loader):
Xma, XLI, Xgt, mask, Sma, SLI, Sgt, Tr = [x.cuda() for x in data]
net.train()
optimizer.zero_grad()
ListX, ListS, ListYS= net(Xma, XLI, mask, Sma, SLI, Tr)
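            # deep supervision: penalize both the penultimate stage (index S-2) and the
            # final stage, in the sinogram (YS) and image (X) domains; the image losses
            # are masked by (1 - mask) so the metal region itself is not supervised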
            loss_l2YSmid = 0.1 * F.mse_loss(ListYS[opt.S - 2], Sgt)
            loss_l2Xmid = 0.1 * F.mse_loss(ListX[opt.S - 2] * (1 - mask), Xgt * (1 - mask))
loss_l2YSf = F.mse_loss(ListYS[-1], Sgt)
loss_l2Xf = F.mse_loss(ListX[-1] * (1 - mask), Xgt * (1 - mask))
loss_l2YS = loss_l2YSf + loss_l2YSmid
loss_l2X = loss_l2Xf + loss_l2Xmid
loss = opt.gamma * loss_l2YS + loss_l2X
loss.backward()
optimizer.step()
mse_iter = loss.item()
mse_per_epoch += mse_iter
if ii % 400 == 0:
template = '[Epoch:{:>2d}/{:<2d}] {:0>5d}/{:0>5d}, Loss={:5.2e}, Lossl2YS={:5.2e}, Lossl2X={:5.2e}, lr={:.2e}'
                print(template.format(epoch + 1, opt.niter, ii, num_iter_epoch, mse_iter, loss_l2YS.item(), loss_l2X.item(), lr))
writer.add_scalar('Loss', loss, step)
writer.add_scalar('Loss_YS', loss_l2YS, step)
writer.add_scalar('Loss_X', loss_l2X, step)
step += 1
mse_per_epoch /= (ii + 1)
print('Loss={:+.2e}'.format(mse_per_epoch))
print('-' * 100)
scheduler.step()
# save model
torch.save(net.state_dict(), os.path.join(opt.model_dir, 'InDuDoNet_latest.pt'))
if epoch % 10 == 0:
# save model
model_prefix = 'model_'
save_path_model = os.path.join(opt.model_dir, model_prefix + str(epoch + 1))
torch.save({
'epoch': epoch + 1,
'step': step + 1,
}, save_path_model)
torch.save(net.state_dict(), os.path.join(opt.model_dir, 'InDuDoNet_%d.pt' % (epoch + 1)))
toc = time.time()
print('This epoch take time {:.2f}'.format(toc - tic))
writer.close()
print('Reach the maximal epochs! Finish training')
if __name__ == '__main__':
def print_network(name, net):
num_params = 0
for param in net.parameters():
num_params += param.numel()
print('name={:s}, Total number={:d}'.format(name, num_params))
net = InDuDoNet(opt).cuda()
print_network("InDuDoNet:", net)
    optimizer = optim.Adam(net.parameters(), betas=(0.5, 0.999), lr=opt.lr)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=opt.milestone, gamma=0.5)  # halve the learning rate at each milestone
    # resume training: fast-forward the LR scheduler so the learning rate matches epoch opt.resume
for _ in range(opt.resume):
scheduler.step()
if opt.resume:
net.load_state_dict(torch.load(os.path.join(opt.model_dir, 'InDuDoNet_%d.pt' % (opt.resume))))
print('loaded checkpoints, epoch{:d}'.format(opt.resume))
# load dataset
train_mask = np.load(os.path.join(opt.data_path, 'trainmask.npy'))
train_dataset = MARTrainDataset(opt.data_path, opt.patchSize, train_mask)
# train model
    train_model(net, optimizer, scheduler, train_dataset)
| 6,328 | 44.532374 | 182 |
py
|
InDuDoNet
|
InDuDoNet-main/test_deeplesion.py
|
import os
import os.path
import argparse
import numpy as np
import torch
import time
import matplotlib.pyplot as plt
import h5py
import PIL
from PIL import Image
from network.indudonet import InDuDoNet
from deeplesion.build_gemotry import initialization, build_gemotry
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
parser = argparse.ArgumentParser(description="YU_Test")
parser.add_argument("--model_dir", type=str, default="models", help='path to model and log files')
parser.add_argument("--data_path", type=str, default="deeplesion/test/", help='path to training data')
parser.add_argument("--use_GPU", type=bool, default=True, help='use GPU or not')
parser.add_argument("--save_path", type=str, default="./test_results/", help='path to training data')
parser.add_argument('--num_channel', type=int, default=32, help='the number of dual channels')
parser.add_argument('--T', type=int, default=4, help='the number of ResBlocks in every ProxNet')
parser.add_argument('--S', type=int, default=10, help='the number of total iterative stages')
parser.add_argument('--eta1', type=float, default=1, help='initialization for stepsize eta1')
parser.add_argument('--eta2', type=float, default=5, help='initialization for stepsize eta2')
parser.add_argument('--alpha', type=float, default=0.5, help='initialization for weight factor')
opt = parser.parse_args()
def mkdir(path):
folder = os.path.exists(path)
if not folder:
os.makedirs(path)
print("--- new folder... ---")
print("--- " + path + " ---")
else:
print("--- There exsits folder " + path + " ! ---")
input_dir = opt.save_path + '/Xma/'
gt_dir = opt.save_path + '/Xgt/'
outX_dir = opt.save_path + '/X/'
outYS_dir = opt.save_path + '/YS/'
mkdir(input_dir)
mkdir(gt_dir)
mkdir(outX_dir)
mkdir(outYS_dir)
def image_get_minmax():
return 0.0, 1.0
def proj_get_minmax():
return 0.0, 4.0
def normalize(data, minmax):
data_min, data_max = minmax
data = np.clip(data, data_min, data_max)
data = (data - data_min) / (data_max - data_min)
data = data * 255.0
data = data.astype(np.float32)
data = np.expand_dims(np.transpose(np.expand_dims(data, 2), (2, 0, 1)),0)
return data
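# e.g. normalize(x, image_get_minmax()) clips a 416x416 slice to [0, 1], rescales
# it to [0, 255] and returns a float32 array of shape 1x1x416x416 (NCHW).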
param = initialization()
ray_trafo = build_gemotry(param)
test_mask = np.load(os.path.join(opt.data_path, 'testmask.npy'))
def test_image(data_path, imag_idx, mask_idx):
txtdir = os.path.join(data_path, 'test_640geo_dir.txt')
    with open(txtdir, 'r') as f:
        mat_files = f.readlines()
gt_dir = mat_files[imag_idx]
file_dir = gt_dir[:-6]
data_file = file_dir + str(mask_idx) + '.h5'
abs_dir = os.path.join(data_path, 'test_640geo/', data_file)
gt_absdir = os.path.join(data_path, 'test_640geo/', gt_dir[:-1])
gt_file = h5py.File(gt_absdir, 'r')
Xgt = gt_file['image'][()]
gt_file.close()
file = h5py.File(abs_dir, 'r')
    Xma = file['ma_CT'][()]
Sma = file['ma_sinogram'][()]
XLI = file['LI_CT'][()]
SLI = file['LI_sinogram'][()]
Tr = file['metal_trace'][()]
Sgt = np.asarray(ray_trafo(Xgt))
file.close()
M512 = test_mask[:,:,mask_idx]
M = np.array(Image.fromarray(M512).resize((416, 416), PIL.Image.BILINEAR))
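    # the metal masks are stored at 512x512; bilinear-resize them to the
    # 416x416 reconstruction grid used by the imaging geometry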
Xma = normalize(Xma, image_get_minmax()) # *255
Xgt = normalize(Xgt, image_get_minmax())
XLI = normalize(XLI, image_get_minmax())
Sma = normalize(Sma, proj_get_minmax())
Sgt = normalize(Sgt, proj_get_minmax())
SLI = normalize(SLI, proj_get_minmax())
Tr = 1 - Tr.astype(np.float32)
Tr = np.expand_dims(np.transpose(np.expand_dims(Tr, 2), (2, 0, 1)), 0) # 1*1*h*w
Mask = M.astype(np.float32)
Mask = np.expand_dims(np.transpose(np.expand_dims(Mask, 2), (2, 0, 1)),0)
return torch.Tensor(Xma).cuda(), torch.Tensor(XLI).cuda(), torch.Tensor(Xgt).cuda(), torch.Tensor(Mask).cuda(), \
torch.Tensor(Sma).cuda(), torch.Tensor(SLI).cuda(), torch.Tensor(Sgt).cuda(), torch.Tensor(Tr).cuda()
def print_network(name, net):
num_params = 0
for param in net.parameters():
num_params += param.numel()
print('name={:s}, Total number={:d}'.format(name, num_params))
def main():
print('Loading model ...\n')
net = InDuDoNet(opt).cuda()
print_network("InDuDoNet", net)
net.load_state_dict(torch.load(opt.model_dir))
net.eval()
time_test = 0
count = 0
for imag_idx in range(1): # for demo
print(imag_idx)
for mask_idx in range(10):
Xma, XLI, Xgt, M, Sma, SLI, Sgt, Tr = test_image(opt.data_path, imag_idx, mask_idx)
with torch.no_grad():
if opt.use_GPU:
torch.cuda.synchronize()
start_time = time.time()
                ListX, ListS, ListYS = net(Xma, XLI, M, Sma, SLI, Tr)
end_time = time.time()
dur_time = end_time - start_time
time_test += dur_time
print('Times: ', dur_time)
            Xoutclip = torch.clamp(ListX[-1] / 255.0, 0, 0.5)
            Xgtclip = torch.clamp(Xgt / 255.0, 0, 0.5)
            Xmaclip = torch.clamp(Xma / 255.0, 0, 0.5)
            Xoutnorm = Xoutclip / 0.5
            Xmanorm = Xmaclip / 0.5
            Xgtnorm = Xgtclip / 0.5
            YS = torch.clamp(ListYS[-1] / 255.0, 0, 1)
            idx = imag_idx * 10 + mask_idx + 1
plt.imsave(input_dir + str(idx) + '.png', Xmanorm.data.cpu().numpy().squeeze(), cmap="gray")
plt.imsave(gt_dir + str(idx) + '.png', Xgtnorm.data.cpu().numpy().squeeze(), cmap="gray")
plt.imsave(outX_dir + str(idx) + '.png', Xoutnorm.data.cpu().numpy().squeeze(), cmap="gray")
plt.imsave(outYS_dir + str(idx) + '.png', YS.data.cpu().numpy().squeeze(), cmap="gray")
count += 1
print('Avg.time={:.4f}'.format(time_test/count))
if __name__ == "__main__":
main()
| 5,780 | 39.145833 | 117 |
py
|
InDuDoNet
|
InDuDoNet-main/deeplesion/Dataset.py
|
import os
import os.path
import numpy as np
import random
import h5py
import torch
import torch.utils.data as udata
import PIL.Image as Image
from numpy.random import RandomState
import scipy.io as sio
import PIL
from PIL import Image
from .build_gemotry import initialization, build_gemotry
param = initialization()
ray_trafo = build_gemotry(param)
def image_get_minmax():
return 0.0, 1.0
def proj_get_minmax():
return 0.0, 4.0
def normalize(data, minmax):
data_min, data_max = minmax
data = np.clip(data, data_min, data_max)
data = (data - data_min) / (data_max - data_min)
data = data.astype(np.float32)
data = data*255.0
data = np.transpose(np.expand_dims(data, 2), (2, 0, 1))
return data
class MARTrainDataset(udata.Dataset):
def __init__(self, dir, patchSize, mask):
super().__init__()
self.dir = dir
self.train_mask = mask
self.patch_size = patchSize
self.txtdir = os.path.join(self.dir, 'train_640geo_dir.txt')
        with open(self.txtdir, 'r') as f:
            self.mat_files = f.readlines()
self.file_num = len(self.mat_files)
self.rand_state = RandomState(66)
def __len__(self):
return self.file_num
def __getitem__(self, idx):
gt_dir = self.mat_files[idx]
#random_mask = random.randint(0, 89) # include 89
random_mask = random.randint(0, 9) # for demo
file_dir = gt_dir[:-6]
data_file = file_dir + str(random_mask) + '.h5'
abs_dir = os.path.join(self.dir, 'train_640geo/', data_file)
gt_absdir = os.path.join(self.dir,'train_640geo/', gt_dir[:-1])
gt_file = h5py.File(gt_absdir, 'r')
Xgt = gt_file['image'][()]
gt_file.close()
file = h5py.File(abs_dir, 'r')
        Xma = file['ma_CT'][()]
        Sma = file['ma_sinogram'][()]
        XLI = file['LI_CT'][()]
SLI = file['LI_sinogram'][()]
Tr = file['metal_trace'][()]
file.close()
Sgt = np.asarray(ray_trafo(Xgt))
M512 = self.train_mask[:,:,random_mask]
M = np.array(Image.fromarray(M512).resize((416, 416), PIL.Image.BILINEAR))
Xma = normalize(Xma, image_get_minmax())
Xgt = normalize(Xgt, image_get_minmax())
XLI = normalize(XLI, image_get_minmax())
Sma = normalize(Sma, proj_get_minmax())
Sgt = normalize(Sgt, proj_get_minmax())
SLI = normalize(SLI, proj_get_minmax())
        Tr = 1 - Tr.astype(np.float32)
Tr = np.transpose(np.expand_dims(Tr, 2), (2, 0, 1))
Mask = M.astype(np.float32)
Mask = np.transpose(np.expand_dims(Mask, 2), (2, 0, 1))
return torch.Tensor(Xma), torch.Tensor(XLI), torch.Tensor(Xgt), torch.Tensor(Mask), \
torch.Tensor(Sma), torch.Tensor(SLI), torch.Tensor(Sgt), torch.Tensor(Tr)
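# Usage sketch (illustrative only; the paths follow the defaults in train.py):
#   mask = np.load('deep_lesion/trainmask.npy')
#   ds = MARTrainDataset('deep_lesion/', 416, mask)
#   Xma, XLI, Xgt, M, Sma, SLI, Sgt, Tr = ds[0]  # each a 1xHxW torch.Tensor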
| 2,795 | 34.846154 | 93 |
py
|
InDuDoNet
|
InDuDoNet-main/deeplesion/build_gemotry.py
|
import odl
import numpy as np
## 640geo
class initialization:
def __init__(self):
self.param = {}
self.reso = 512 / 416 * 0.03
# image
self.param['nx_h'] = 416
self.param['ny_h'] = 416
self.param['sx'] = self.param['nx_h']*self.reso
self.param['sy'] = self.param['ny_h']*self.reso
## view
self.param['startangle'] = 0
self.param['endangle'] = 2 * np.pi
self.param['nProj'] = 640
## detector
self.param['su'] = 2*np.sqrt(self.param['sx']**2+self.param['sy']**2)
self.param['nu_h'] = 641
self.param['dde'] = 1075*self.reso
self.param['dso'] = 1075*self.reso
self.param['u_water'] = 0.192
def build_gemotry(param):
reco_space_h = odl.uniform_discr(
min_pt=[-param.param['sx'] / 2.0, -param.param['sy'] / 2.0],
max_pt=[param.param['sx'] / 2.0, param.param['sy'] / 2.0], shape=[param.param['nx_h'], param.param['ny_h']],
dtype='float32')
angle_partition = odl.uniform_partition(param.param['startangle'], param.param['endangle'],
param.param['nProj'])
detector_partition_h = odl.uniform_partition(-(param.param['su'] / 2.0), (param.param['su'] / 2.0),
param.param['nu_h'])
geometry_h = odl.tomo.FanBeamGeometry(angle_partition, detector_partition_h,
src_radius=param.param['dso'],
det_radius=param.param['dde'])
ray_trafo_hh = odl.tomo.RayTransform(reco_space_h, geometry_h, impl='astra_cuda')
return ray_trafo_hh
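# Usage sketch (illustrative; impl='astra_cuda' requires the ASTRA toolbox and a GPU):
#   param = initialization()
#   fp = build_gemotry(param)  # forward projector: 416x416 image -> 640x641 sinogram
#   sino = fp(np.zeros((416, 416), dtype='float32'))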
| 1,687 | 32.76 | 116 |
py
|
InDuDoNet
|
InDuDoNet-main/deeplesion/__init__.py
|
from .Dataset import MARTrainDataset
from .build_gemotry import initialization, build_gemotry
| 94 | 46.5 | 57 |
py
|
InDuDoNet
|
InDuDoNet-main/network/priornet.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class UNet(nn.Module):
def __init__(self, n_channels=2, n_classes=1, n_filter=32):
super(UNet, self).__init__()
self.inc = inconv(n_channels, n_filter)
self.down1 = down(n_filter, n_filter*2)
self.down2 = down(n_filter*2, n_filter*4)
self.down3 = down(n_filter*4, n_filter*8)
self.down4 = down(n_filter*8, n_filter*8)
self.up1 = up(n_filter*16, n_filter*4)
self.up2 = up(n_filter*8, n_filter*2)
self.up3 = up(n_filter*4, n_filter)
self.up4 = up(n_filter*2, n_filter)
self.outc = outconv(n_filter, n_classes)
def forward(self, x):
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x = self.up1(x5, x4)
x = self.up2(x, x3)
x = self.up3(x, x2)
x = self.up4(x, x1)
x = self.outc(x)
return x
class double_conv(nn.Module):
'''(conv => BN => ReLU) * 2'''
def __init__(self, in_ch, out_ch):
super(double_conv, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True),
nn.Conv2d(out_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True)
)
def forward(self, x):
x = self.conv(x)
return x
class inconv(nn.Module):
def __init__(self, in_ch, out_ch):
super(inconv, self).__init__()
self.conv = double_conv(in_ch, out_ch)
def forward(self, x):
x = self.conv(x)
return x
class down(nn.Module):
def __init__(self, in_ch, out_ch):
super(down, self).__init__()
self.mpconv = nn.Sequential(
nn.MaxPool2d(2),
double_conv(in_ch, out_ch)
)
def forward(self, x):
x = self.mpconv(x)
return x
class up(nn.Module):
def __init__(self, in_ch, out_ch, bilinear=True):
super(up, self).__init__()
        # it would be nice if the upsampling could be learned too,
        # but my machine does not have enough memory to handle all those weights
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
else:
self.up = nn.ConvTranspose2d(in_ch // 2, in_ch // 2, 2, stride=2)
self.conv = double_conv(in_ch, out_ch)
def forward(self, x1, x2):
x1 = self.up(x1)
# input is CHW
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
x1 = F.pad(x1, (diffX // 2, diffX - diffX // 2,
diffY // 2, diffY - diffY // 2))
# for padding issues, see
# https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
# https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
x = torch.cat([x2, x1], dim=1)
x = self.conv(x)
return x
class outconv(nn.Module):
def __init__(self, in_ch, out_ch):
super(outconv, self).__init__()
self.conv = nn.Conv2d(in_ch, out_ch, 1)
self.activ = nn.Tanh()
def forward(self, x):
# x = self.activ(self.conv(x))
x = self.conv(x)
return x
| 3,438 | 28.646552 | 122 |
py
|
InDuDoNet
|
InDuDoNet-main/network/indudonet.py
|
"""
MICCAI2021: ``InDuDoNet: An Interpretable Dual Domain Network for CT Metal Artifact Reduction''
paper link: https://arxiv.org/pdf/2109.05298.pdf
"""
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from odl.contrib import torch as odl_torch
from .priornet import UNet
import sys
#sys.path.append("deeplesion/")
from .build_gemotry import initialization, build_gemotry
para_ini = initialization()
fp = build_gemotry(para_ini)
op_modfp = odl_torch.OperatorModule(fp)
op_modpT = odl_torch.OperatorModule(fp.adjoint)
filter = torch.FloatTensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]) / 9 # for initialization
filter = filter.unsqueeze(dim=0).unsqueeze(dim=0)
class InDuDoNet(nn.Module):
def __init__(self, args):
super(InDuDoNet, self).__init__()
        self.S = args.S  # stage number S includes the initialization process
        self.iter = self.S - 1  # does not include the initialization process
self.num_u = args.num_channel + 1 # concat extra 1 term
self.num_f = args.num_channel + 2 # concat extra 2 terms
self.T = args.T
# stepsize
self.eta1const = args.eta1
self.eta2const = args.eta2
self.eta1 = torch.Tensor([self.eta1const]) # initialization for eta1 at all stages
self.eta2 = torch.Tensor([self.eta2const]) # initialization for eta2 at all stages
self.eta1S = self.make_coeff(self.S, self.eta1) # learnable in iterative process
self.eta2S = self.make_coeff(self.S, self.eta2)
# weight factor
self.alphaconst = args.alpha
self.alpha = torch.Tensor([self.alphaconst])
self.alphaS = self.make_coeff(self.S, self.alpha) # learnable in iterative process
# priornet
self.priornet = UNet(n_channels=2, n_classes=1, n_filter=32)
# proxNet for initialization
        self.proxNet_X0 = CTnet(args.num_channel + 1, self.T)    # args.num_channel concatenated channels + 1 gray CT image channel
        self.proxNet_S0 = Projnet(args.num_channel + 1, self.T)  # args.num_channel concatenated channels + 1 gray normalized sinogram channel
# proxNet for iterative process
self.proxNet_Xall = self.make_Xnet(self.S, args.num_channel+1, self.T)
self.proxNet_Sall = self.make_Snet(self.S, args.num_channel+1, self.T)
        # initialize the X- and S-domain auxiliary channels by convolving XLI and SLI, respectively
self.CX_const = filter.expand(args.num_channel, 1, -1, -1)
self.CX = nn.Parameter(self.CX_const, requires_grad=True)
self.CS_const = filter.expand(args.num_channel, 1, -1, -1)
self.CS = nn.Parameter(self.CS_const, requires_grad=True)
self.bn = nn.BatchNorm2d(1)
    def make_coeff(self, iters, const):
        const_dimadd = const.unsqueeze(dim=0)
        const_f = const_dimadd.expand(iters, -1)
        coeff = nn.Parameter(data=const_f, requires_grad=True)
return coeff
    def make_Xnet(self, iters, channel, T):
layers = []
for i in range(iters):
layers.append(CTnet(channel, T))
return nn.Sequential(*layers)
def make_Snet(self, iters, channel, T):
layers = []
for i in range(iters):
layers.append(Projnet(channel, T))
return nn.Sequential(*layers)
def forward(self, Xma, XLI, M, Sma, SLI, Tr):
# save mid-updating results
ListS = [] # saving the reconstructed normalized sinogram
ListX = [] # saving the reconstructed CT image
ListYS = [] # saving the reconstructed sinogram
        # use the channel concatenation and detachment operators (refer to https://github.com/hongwang01/RCDNet) to initialize both domains
XZ00 = F.conv2d(XLI, self.CX, stride=1, padding=1)
input_Xini = torch.cat((XLI, XZ00), dim=1) #channel concatenation
XZ_ini = self.proxNet_X0(input_Xini)
X0 = XZ_ini[:, :1, :, :] #channel detachment
XZ = XZ_ini[:, 1:, :, :] #auxiliary variable in image domain
X = X0 # the initialized CT image
SZ00 = F.conv2d(SLI, self.CS, stride=1, padding=1)
input_Sini = torch.cat((SLI, SZ00), dim=1)
SZ_ini = self.proxNet_S0(input_Sini)
S0 = SZ_ini[:, :1, :, :]
SZ = SZ_ini[:, 1:, :, :] # auxiliary variable in sinogram domain
S = S0 # the initialized normalized sinogram
ListS.append(S)
# PriorNet
prior_input = torch.cat((Xma, XLI), dim=1)
Xs = XLI + self.priornet(prior_input)
Y = op_modfp(F.relu(self.bn(Xs)) / 255)
        Y = Y / 4.0 * 255  # normalize to the [0, 255] sinogram range (raw projection range is [0, 4])
# 1st iteration: Updating X0, S0-->S1
        PX = op_modfp(X / 255) / 4.0 * 255
        GS = Y * (Y * S - PX) + self.alphaS[0] * Tr * Tr * Y * (Y * S - Sma)
        S_next = S - self.eta1S[0] / 10 * GS
inputS = torch.cat((S_next, SZ), dim=1)
outS = self.proxNet_Sall[0](inputS)
        S = outS[:, :1, :, :]  # the updated normalized sinogram at the 1st stage
        SZ = outS[:, 1:, :, :]
        ListS.append(S)
        ListYS.append(Y * S)
        # 1st iteration: Updating X0, S1-->X1
        ESX = PX - Y * S
GX = op_modpT((ESX/255) * 4.0)
X_next = X - self.eta2S[0] / 10 * GX
inputX = torch.cat((X_next, XZ), dim=1)
outX = self.proxNet_Xall[0](inputX)
        X = outX[:, :1, :, :]  # the updated CT image at the 1st stage
XZ = outX[:, 1:, :, :]
ListX.append(X)
for i in range(self.iter):
# updating S
PX = op_modfp(X / 255) / 4.0 * 255
GS = Y * (Y * S - PX) + self.alphaS[i+1] * Tr * Tr * Y * (Y * S - Sma)
S_next = S - self.eta1S[i+1] / 10 * GS
inputS = torch.cat((S_next, SZ), dim=1)
outS = self.proxNet_Sall[i+1](inputS)
S = outS[:, :1, :, :]
SZ = outS[:, 1:, :, :]
ListS.append(S)
ListYS.append(Y * S)
# updating X
ESX = PX - Y * S
GX = op_modpT((ESX / 255) * 4.0)
X_next = X - self.eta2S[i+1] / 10 * GX
inputX = torch.cat((X_next, XZ), dim=1)
outX = self.proxNet_Xall[i+1](inputX)
X = outX[:, :1, :, :]
XZ = outX[:, 1:, :, :]
ListX.append(X)
return ListX, ListS, ListYS
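# Output convention (shapes follow the 640geo setup above): each list holds one
# tensor per stage; ListX[-1] is the final CT estimate (N x 1 x 416 x 416) and
# ListYS[-1] the final reconstructed sinogram (N x 1 x 640 x 641).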
# proxNet_S
class Projnet(nn.Module):
def __init__(self, channel, T):
super(Projnet, self).__init__()
self.channels = channel
self.T = T
self.layer = self.make_resblock(self.T)
def make_resblock(self, T):
layers = []
for i in range(T):
layers.append(
nn.Sequential(nn.Conv2d(self.channels, self.channels, kernel_size=3, stride=1, padding=1, dilation=1),
nn.BatchNorm2d(self.channels),
nn.ReLU(),
nn.Conv2d(self.channels, self.channels, kernel_size=3, stride=1, padding=1, dilation=1),
nn.BatchNorm2d(self.channels),
))
return nn.Sequential(*layers)
def forward(self, input):
S = input
for i in range(self.T):
S = F.relu(S + self.layer[i](S))
return S
# proxNet_X
class CTnet(nn.Module):
def __init__(self, channel, T):
super(CTnet, self).__init__()
self.channels = channel
self.T = T
self.layer = self.make_resblock(self.T)
def make_resblock(self, T):
layers = []
for i in range(T):
layers.append(nn.Sequential(
nn.Conv2d(self.channels, self.channels, kernel_size=3, stride=1, padding=1, dilation=1),
nn.BatchNorm2d(self.channels),
nn.ReLU(),
nn.Conv2d(self.channels, self.channels, kernel_size=3, stride=1, padding=1, dilation=1),
nn.BatchNorm2d(self.channels),
))
return nn.Sequential(*layers)
def forward(self, input):
X = input
for i in range(self.T):
X = F.relu(X + self.layer[i](X))
return X
| 8,679 | 41.54902 | 168 |
py
|
InDuDoNet
|
InDuDoNet-main/network/build_gemotry.py
|
import odl
import numpy as np
## 640geo
class initialization:
def __init__(self):
self.param = {}
self.reso = 512 / 416 * 0.03
# image
self.param['nx_h'] = 416
self.param['ny_h'] = 416
self.param['sx'] = self.param['nx_h']*self.reso
self.param['sy'] = self.param['ny_h']*self.reso
## view
self.param['startangle'] = 0
self.param['endangle'] = 2 * np.pi
self.param['nProj'] = 640
## detector
self.param['su'] = 2*np.sqrt(self.param['sx']**2+self.param['sy']**2)
self.param['nu_h'] = 641
self.param['dde'] = 1075*self.reso
self.param['dso'] = 1075*self.reso
self.param['u_water'] = 0.192
def build_gemotry(param):
reco_space_h = odl.uniform_discr(
min_pt=[-param.param['sx'] / 2.0, -param.param['sy'] / 2.0],
max_pt=[param.param['sx'] / 2.0, param.param['sy'] / 2.0], shape=[param.param['nx_h'], param.param['ny_h']],
dtype='float32')
angle_partition = odl.uniform_partition(param.param['startangle'], param.param['endangle'],
param.param['nProj'])
detector_partition_h = odl.uniform_partition(-(param.param['su'] / 2.0), (param.param['su'] / 2.0),
param.param['nu_h'])
geometry_h = odl.tomo.FanBeamGeometry(angle_partition, detector_partition_h,
src_radius=param.param['dso'],
det_radius=param.param['dde'])
ray_trafo_hh = odl.tomo.RayTransform(reco_space_h, geometry_h, impl='astra_cuda')
return ray_trafo_hh
| 1,687 | 32.76 | 116 |
py
|
InDuDoNet
|
InDuDoNet-main/network/__init__.py
|
from .indudonet import InDuDoNet
from .priornet import UNet
from .build_gemotry import initialization, build_gemotry
| 119 | 39 | 57 |
py
|
InDuDoNet
|
InDuDoNet-main/CLINIC_metal/preprocess_clinic/preprocessing_clinic.py
|
# Given clinical Xma, generate data,including: XLI, M, Sma, SLI, Tr for infering InDuDoNet
import nibabel
import numpy as np
import os
from scipy.interpolate import interp1d
from .utils import get_config
from .build_gemotry import initialization, imaging_geo
import PIL
from PIL import Image
config = get_config('CLINIC_metal/preprocess_clinic/dataset_py_640geo.yaml')
CTpara = config['CTpara'] # CT imaging parameters
mask_thre = 2500 / 1000 * 0.192 + 0.192  # take 2500 HU as the threshold for segmenting the metal region
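# i.e. HU values are mapped to linear attenuation coefficients via
# mu = HU / 1000 * mu_water + mu_water, with mu_water = 0.192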
param = initialization()
ray_trafo, FBPOper = imaging_geo(param) # CT imaging geometry, ray_trafo is fp, FBPoper is fbp
allXma = []
allXLI = []
allM = []
allSma = []
allSLI = []
allTr = []
allaffine = []
allfilename=[]
# process and save all the to-be-tested volumes
def clinic_input_data(test_path):
for file_name in os.listdir(test_path):
        file_path = test_path + '/' + file_name
img = nibabel.load(file_path)
imag = img.get_fdata() # imag with pixel as HU unit
affine = img.affine
allaffine.append(affine)
num_s = imag.shape[2]
M = np.zeros((CTpara['imPixNum'], CTpara['imPixNum'], num_s), dtype='float32')
Xma = np.zeros_like(M)
XLI = np.zeros_like(M)
Tr = np.zeros((CTpara['sinogram_size_x'], CTpara['sinogram_size_y'], num_s), dtype='float32')
Sma = np.zeros_like(Tr)
SLI = np.zeros_like(Tr)
for i in range(num_s):
image = np.array(Image.fromarray(imag[:,:,i]).resize((CTpara['imPixNum'], CTpara['imPixNum']), PIL.Image.BILINEAR))
image[image < -1000] = -1000
image = image / 1000 * 0.192 + 0.192
Xma[...,i] = image
[rowindex, colindex] = np.where(image > mask_thre)
M[rowindex, colindex, i] = 1
Pmetal_kev = np.asarray(ray_trafo(M[:,:,i]))
Tr[...,i] = Pmetal_kev > 0
Sma[...,i] = np.asarray(ray_trafo(image))
SLI[...,i] = interpolate_projection(Sma[...,i], Tr[...,i])
XLI[...,i] = np.asarray(FBPOper(SLI[...,i]))
allXma.append(Xma)
allXLI.append(XLI)
allM.append(M)
allSma.append(Sma)
allSLI.append(SLI)
allTr.append(Tr)
allfilename.append(file_name)
return allXma, allXLI, allM, allSma, allSLI, allTr, allaffine, allfilename
def interpolate_projection(proj, metalTrace):
# projection linear interpolation
# Input:
# proj: uncorrected projection
# metalTrace: metal trace in projection domain (binary image)
# Output:
# Pinterp: linear interpolation corrected projection
Pinterp = proj.copy()
for i in range(Pinterp.shape[0]):
mslice = metalTrace[i]
pslice = Pinterp[i]
metalpos = np.nonzero(mslice==1)[0]
nonmetalpos = np.nonzero(mslice==0)[0]
pnonmetal = pslice[nonmetalpos]
pslice[metalpos] = interp1d(nonmetalpos,pnonmetal)(metalpos)
Pinterp[i] = pslice
return Pinterp
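# Minimal sketch (synthetic shapes, illustrative only):
#   proj = np.tile(np.arange(10, dtype=np.float32), (4, 1))
#   trace = np.zeros_like(proj); trace[:, 4:6] = 1
#   interpolate_projection(proj, trace)  # columns 4-5 are re-filled linearly from their neighbors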
| 3,013 | 37.151899 | 127 |
py
|
InDuDoNet
|
InDuDoNet-main/CLINIC_metal/preprocess_clinic/build_gemotry.py
|
import odl # https://github.com/odlgroup/odl
import numpy as np
## 640geo
class initialization:
def __init__(self):
self.param = {}
self.reso = 512 / 416 * 0.03
# image
self.param['nx_h'] = 416
self.param['ny_h'] = 416
self.param['sx'] = self.param['nx_h']*self.reso
self.param['sy'] = self.param['ny_h']*self.reso
## view
self.param['startangle'] = 0
self.param['endangle'] = 2 * np.pi
self.param['nProj'] = 640
## detector
self.param['su'] = 2*np.sqrt(self.param['sx']**2+self.param['sy']**2)
self.param['nu_h'] = 641
self.param['dde'] = 1075*self.reso
self.param['dso'] = 1075*self.reso
self.param['u_water'] = 0.192 #0.0205
def imaging_geo(param):
reco_space_h = odl.uniform_discr(
min_pt=[-param.param['sx'] / 2.0, -param.param['sy'] / 2.0],
max_pt=[param.param['sx'] / 2.0, param.param['sy'] / 2.0], shape=[param.param['nx_h'], param.param['ny_h']],
dtype='float32')
angle_partition = odl.uniform_partition(param.param['startangle'], param.param['endangle'],
param.param['nProj'])
detector_partition_h = odl.uniform_partition(-(param.param['su'] / 2.0), (param.param['su'] / 2.0),
param.param['nu_h'])
geometry_h = odl.tomo.FanBeamGeometry(angle_partition, detector_partition_h,
src_radius=param.param['dso'],
det_radius=param.param['dde'])
ray_trafo_hh = odl.tomo.RayTransform(reco_space_h, geometry_h, impl='astra_cuda') #https://github.com/astra-toolbox/astra-toolbox
FBPOper_hh = odl.tomo.fbp_op(ray_trafo_hh, filter_type='Ram-Lak', frequency_scaling=1.0)
return ray_trafo_hh, FBPOper_hh
| 1,878 | 38.978723 | 134 |
py
|
InDuDoNet
|
InDuDoNet-main/CLINIC_metal/preprocess_clinic/utils/torch.py
|
"""Helper functions for torch
"""
__all__ = [
"get_device", "is_cuda", "copy_model", "find_layer", "to_npy", "get_last_checkpoint",
"print_model", "save_graph", "backprop_on", "backprop_off", "add_post", "flatten_model",
"FunctionModel"]
import os
import os.path as path
import numpy as np
import torch
import torch.nn as nn
from copy import copy
from .misc import read_dir
from collections import OrderedDict
def get_device(model):
return next(model.parameters()).device
def is_cuda(model):
return next(model.parameters()).is_cuda
def copy_model(model):
"""shallow copy a model
"""
if len(list(model.children())) == 0: return model
model_ = copy(model)
model_._modules = copy(model_._modules)
for k, m in model._modules.items():
model_._modules[k] = copy_model(m)
return model_
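# e.g. copy_model(net) yields a module tree whose submodules can be swapped out
# independently, while the parameter tensors remain shared with net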
def find_layer(module, filter_fcn):
def find_layer_(module, found):
for k, m in module.named_children():
if filter_fcn(m): found.append((module, k))
else: find_layer_(m, found)
found = []
find_layer_(module, found)
return found
class FunctionModel(nn.Module):
def __init__(self, fcn):
super(FunctionModel, self).__init__()
self.fcn = fcn
def forward(self, *inputs):
return self.fcn(*inputs)
def to_npy(*tensors, squeeze=False):
if len(tensors) == 1:
if squeeze: return tensors[0].detach().cpu().numpy().squeeze()
else: return tensors[0].detach().cpu().numpy()
else:
if squeeze: return [t.detach().cpu().numpy().squeeze() for t in tensors]
else: return [t.detach().cpu().numpy() for t in tensors]
def set_requires_grad(*nets, requires_grad=False):
for net in nets:
if net is not None:
for param in net.parameters():
param.requires_grad = requires_grad
def backprop_on(*nets): set_requires_grad(*nets, requires_grad=True)
def backprop_off(*nets): set_requires_grad(*nets, requires_grad=False)
def get_last_checkpoint(checkpoint_dir, predicate=None, pattern=None):
if predicate is None:
predicate = lambda x: x.endswith('pth') or x.endswith('pt')
checkpoints = read_dir(checkpoint_dir, predicate)
if len(checkpoints) == 0:
return "", 0
checkpoints = sorted(checkpoints, key=lambda x: path.getmtime(x))
checkpoint = checkpoints[-1]
if pattern is None:
pattern = lambda x: int(path.basename(x).split('_')[-1].split('.')[0])
return checkpoint, pattern(checkpoint)
def print_model(model): print(get_graph(model))
def save_graph(model, graph_file):
with open(graph_file, 'w') as f: f.write(get_graph(model))
def get_graph(model):
def get_graph_(model, param_cnts):
model_str = ""
if hasattr(model, 'parameters'):
model_str += model.__repr__() + "\n"
parameters = [p for p in model.parameters() if p.requires_grad]
num_parameters = sum([np.prod(p.size()) for p in parameters])
param_cnts.append(num_parameters)
else:
for k in model.__dir__():
if not k.startswith("_"):
v = getattr(model, k)
if hasattr(v, 'parameters'):
model_str += k + ":\n"
model_str += get_graph_(v, param_cnts)
return model_str
model_str = ""
param_cnts = []
model_str += '============ Model Initialized ============\n'
model_str += get_graph_(model, param_cnts)
model_str += '===========================================\n'
model_str += "Number of parameters: {:.4e}\n".format(sum(param_cnts))
return model_str
def add_post(loader, post_fcn):
class LoaderWrapper(object):
def __init__(self, loader, post_fcn):
self.loader = loader
self.post_fcn = post_fcn
def __getattribute__(self, name):
if not name.startswith("__") and name not in object.__getattribute__(self, "__dict__") :
return getattr(object.__getattribute__(self, "loader"), name)
return object.__getattribute__(self, name)
def __len__(self): return len(self.loader)
def __iter__(self):
for data in self.loader:
yield self.post_fcn(data)
return LoaderWrapper(loader, post_fcn)
def flatten_model(model):
def flatten_model_(model, output):
model_list = list(model.children())
if len(model_list) == 1: model = model_list[0]
if type(model) is nn.Sequential:
for m in model.children():
flatten_model_(m, output)
else:
output.append(model)
output = []
flatten_model_(model, output)
return nn.Sequential(*output)
| 4,786 | 28.549383 | 100 |
py
|
InDuDoNet
|
InDuDoNet-main/CLINIC_metal/preprocess_clinic/utils/misc.py
|
__all__ = ["read_dir", "get_config", "update_config", "save_config",
"convert_coefficient2hu", "convert_hu2coefficient", "arange", "get_connected_components",
"EasyDict"]
import os
import os.path as path
import yaml
import numpy as np
class EasyDict(object):
def __init__(self, opt): self.opt = opt
def __getattribute__(self, name):
if name == 'opt' or name.startswith("_") or name not in self.opt:
return object.__getattribute__(self, name)
else: return self.opt[name]
def __setattr__(self, name, value):
if name == 'opt': object.__setattr__(self, name, value)
else: self.opt[name] = value
def __getitem__(self, name):
return self.opt[name]
def __setitem__(self, name, value):
self.opt[name] = value
def __contains__(self, item):
return item in self.opt
def __repr__(self):
return self.opt.__repr__()
def keys(self):
return self.opt.keys()
def values(self):
return self.opt.values()
def items(self):
return self.opt.items()
def resolve_expression(config):
if type(config) is dict:
new_config = {}
for k, v in config.items():
if type(v) is str and v.startswith("!!python"):
v = eval(v[8:])
elif type(v) is dict:
v = resolve_expression(v)
new_config[k] = v
config = new_config
return config
def get_config(config_file, config_names=[]):
''' load config from file
'''
with open(config_file) as f:
config = resolve_expression(yaml.load(f, Loader=yaml.FullLoader))
if type(config_names) == str: return EasyDict(config[config_names])
while len(config_names) != 0:
config_name = config_names.pop(0)
if config_name not in config:
raise ValueError("Invalid config name: {}".format(config_name))
config = config[config_name]
return EasyDict(config)
def update_config(config, args):
''' rewrite default config with user input
'''
if args is None: return
if hasattr(args, "__dict__"): args = args.__dict__
for arg, val in args.items():
# if not (val is None or val is False) and arg in config: config[arg] = val
# TODO: this may cause bugs for other programs
if arg in config: config[arg] = val
for _, val in config.items():
if type(val) == dict: update_config(val, args)
def save_config(config, config_file, print_opts=True):
config_str = yaml.dump(config, default_flow_style=False)
with open(config_file, 'w') as f: f.write(config_str)
print('================= Options =================')
print(config_str[:-1])
print('===========================================')
def read_dir(dir_path, predicate=None, name_only=False, recursive=False):
if type(predicate) is str:
if predicate in {'dir', 'file'}:
predicate = {
'dir': lambda x: path.isdir(path.join(dir_path, x)),
'file':lambda x: path.isfile(path.join(dir_path, x))
}[predicate]
else:
ext = predicate
predicate = lambda x: ext in path.splitext(x)[-1]
elif type(predicate) is list:
exts = predicate
predicate = lambda x: path.splitext(x)[-1][1:] in exts
def read_dir_(output, dir_path, predicate, name_only, recursive):
if not path.isdir(dir_path): return
for f in os.listdir(dir_path):
d = path.join(dir_path, f)
if predicate is None or predicate(f):
output.append(f if name_only else d)
if recursive and path.isdir(d):
read_dir_(output, d, predicate, name_only, recursive)
output = []
read_dir_(output, dir_path, predicate, name_only, recursive)
return sorted(output)
def convert_coefficient2hu(image):
image = (image - 0.192) / 0.192 * 1000
return image
def convert_hu2coefficient(image):
image = image * 0.192 / 1000 + 0.192
return image
def arange(start, stop, step):
""" Matlab-like arange
"""
r = np.arange(start, stop, step).tolist()
if r[-1] + step == stop:
r.append(stop)
return np.array(r)
def get_connected_components(points):
def get_neighbors(point):
p0, p1 = point
neighbors = [
(p0 - 1, p1 - 1), (p0 - 1, p1), (p0 - 1, p1 + 1),
(p0 + 1, p1 - 1), (p0 + 1, p1), (p0 + 1, p1 + 1),
(p0, p1 - 1), (p0, p1 + 1)]
return neighbors
components = []
while points:
component = []
unchecked = [points.pop()]
while unchecked:
point = unchecked.pop(0)
neighbors = get_neighbors(point)
for n in neighbors:
if n in points:
points.remove(n)
unchecked.append(n)
component.append(point)
components.append(component)
return components
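if __name__ == "__main__":
    # minimal self-check (illustrative): two 8-connected pixel clusters
    # should yield exactly two components
    pts = {(0, 0), (0, 1), (1, 1), (5, 5)}
    assert len(get_connected_components(pts)) == 2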
| 4,990 | 28.886228 | 93 |
py
|
InDuDoNet
|
InDuDoNet-main/CLINIC_metal/preprocess_clinic/utils/log.py
|
import os
import os.path as path
import csv
import numpy as np
import yaml
from PIL import Image
from tqdm import tqdm
from collections import defaultdict, OrderedDict
class Logger(object):
def __init__(self, log_dir, epoch=0, name="log"):
self.log_dir = log_dir
self.epoch = epoch
self.name = name if name != "" else "log"
self.iter_visual_freq = float('inf')
self.loss_freq = float('inf')
self.save_freq = float('inf')
self.format_float = \
lambda x: np.format_float_scientific(x, exp_digits=1, precision=2)
def _to_dict(self, d):
# TODO: two dicts pointing to each other triggers an infinite recursion
if type(d) is defaultdict:
d = dict(d)
for k, v in d.items():
if type(v) is dict or type(v) is defaultdict:
d[k] = self._to_dict(v)
return d
def reset(self):
if hasattr(self, 'loss'): self.loss = defaultdict(list)
if hasattr(self, 'metrics'): self.metrics = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))
def add_loss_log(self, loss_fcn, loss_freq, window_size=100):
self.loss = defaultdict(list)
self.loss_fcn = loss_fcn
self.loss_freq = loss_freq
self.window_size = window_size
def add_save_log(self, save_fcn, save_freq):
self.save_fcn = save_fcn
self.save_freq = save_freq
if hasattr(self.save_fcn, "__self__"):
model = self.save_fcn.__self__
with open(path.join(self.log_dir, "graph.txt"), "w") as f:
f.write(self.get_graph(model))
def add_eval_log(self, eval_fcn, eval_freq):
self.eval_fcn = eval_fcn
self.eval_freq = eval_freq
def add_metric_log(self, pair_fcn, metrics_fcns, metrics_freq=1):
self.pair_fcn = pair_fcn
self.metrics_cnt = 0
self.metrics = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))
self.metrics_fcns = metrics_fcns
self.metrics_freq = metrics_freq
def add_iter_visual_log(self, iter_visual_fcn, iter_visual_freq, name=""):
self.iter_visual_fcn = iter_visual_fcn
self.iter_visual_freq = iter_visual_freq
self.iter_visual_name = name
def add_epoch_visual_log(self, epoch_visual_fcn, epoch_visual_freq, name=""):
self.epoch_visual_fcn = epoch_visual_fcn
self.epoch_visual_freq = epoch_visual_freq
self.epoch_visual_name = name
def set_progress(self, progress):
desc = '[{}][epoch{}]'.format(self.name, self.epoch)
if hasattr(self, 'loss'):
if len(self.loss) < 5:
loss_str = " ".join(["{} {:.2e}({:.2e})".format(
k, v[-1], np.mean(v)) for k, v in self.loss.items()])
else:
loss_str = " ".join(["{} {}".format(
k, self.format_float(np.mean(v)))
for k, v in self.loss.items()])
desc += loss_str
if hasattr(self, 'metrics'):
res_str = " "
for k, res in self.metrics['mean'].items():
res_str += "{}-> ".format(k)
for j, m in res.items():
res_str += "{}: {:.2e} ".format(j, m)
res_str += " "
desc += res_str
progress.set_description(desc=desc)
def get_graph(self, model):
model_str = ""
if hasattr(model, 'parameters'):
model_str += model.__repr__() + "\n"
else:
for k in model.__dir__():
if not k.startswith("_"):
v = getattr(model, k)
if hasattr(v, 'parameters'):
model_str += k + ":\n"
model_str += self.get_graph(v)
return model_str
def __call__(self, iterable):
progress = tqdm(iterable, bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt}')
for it, obj in enumerate(progress):
yield obj
if hasattr(self, 'loss_fcn') and it % self.loss_freq == 0:
loss = self.loss_fcn()
for k, v in loss.items():
if len(self.loss[k]) > self.window_size:
self.loss[k].pop(0)
self.loss[k].append(v)
log_file = path.join(self.log_dir, 'loss.csv')
with open(log_file, 'a') as f:
writer = csv.writer(f)
writer.writerow([self.epoch, it] + list(loss.values()))
if hasattr(self, 'iter_visual_fcn') and it % self.iter_visual_freq == 0:
for k, v in self.iter_visual_fcn().items():
iter_visual_dir = path.join(self.log_dir, self.iter_visual_name)
if not path.isdir(iter_visual_dir): os.makedirs(iter_visual_dir)
visual_file = path.join(iter_visual_dir,
"epoch{}_iter{}_{}.png".format(self.epoch, it, k))
Image.fromarray(v).convert('RGB').save(visual_file)
if hasattr(self, 'pair_fcn') and it % self.metrics_freq == self.metrics_freq - 1:
pairs, name = self.pair_fcn()
for i in range(len(pairs[0][1][0])):
n = len(self.metrics) - ('mean' in self.metrics)
for j, pair in pairs:
for k, metrics_fcn in self.metrics_fcns:
m = metrics_fcn(pair[0][i], pair[1][i]).tolist()
self.metrics[name[i] if name else n][j][k] = m
self.metrics['mean'][j][k] = (self.metrics['mean'][j][k] * n + m) / (n + 1)
metric_file = path.join(self.log_dir, "metrics_{}.yaml".format(self.epoch))
metrics_str = yaml.dump(self._to_dict(self.metrics), default_flow_style=False)
with open(metric_file, 'w') as f: f.write(metrics_str)
self.set_progress(progress)
if hasattr(self, 'save_fcn') and \
self.epoch % self.save_freq == self.save_freq - 1:
save_file = path.join(self.log_dir, "net_{}.pt".format(self.epoch))
print("[Epoch {}] Saving {}".format(self.epoch, save_file))
self.save_fcn(save_file)
if hasattr(self, 'eval_fcn') and \
self.epoch % self.eval_freq == self.eval_freq - 1:
self.eval_fcn()
if hasattr(self, 'epoch_visual_fcn') and \
self.epoch % self.epoch_visual_freq == self.epoch_visual_freq - 1:
epoch_visual_dir = path.join(self.log_dir, self.epoch_visual_name)
visual_dir = path.join(epoch_visual_dir, "epoch{}".format(self.epoch))
if not path.isdir(visual_dir): os.makedirs(visual_dir)
print("[Epoch {}] Evaluating...".format(self.epoch))
for i, visuals in enumerate(self.epoch_visual_fcn()):
for k, v in visuals.items():
visual_file = path.join(visual_dir,
"epoch{}_{}_{}.png".format(self.epoch, k, i))
Image.fromarray(v).convert('RGB').save(visual_file)
self.epoch += 1
| 7,187 | 41.282353 | 112 |
py
|
InDuDoNet
|
InDuDoNet-main/CLINIC_metal/preprocess_clinic/utils/__init__.py
|
from .misc import *
from .torch import *
from .log import Logger
| 64 | 20.666667 | 23 |
py
|
baselines
|
baselines-master/setup.py
|
import re
from setuptools import setup, find_packages
import sys
if sys.version_info.major != 3:
print('This Python is only compatible with Python 3, but you are running '
'Python {}. The installation will likely fail.'.format(sys.version_info.major))
extras = {
'test': [
'filelock',
'pytest',
'pytest-forked',
'atari-py',
'matplotlib',
'pandas'
],
'mpi': [
'mpi4py'
]
}
all_deps = []
for group_name in extras:
all_deps += extras[group_name]
extras['all'] = all_deps
setup(name='baselines',
packages=[package for package in find_packages()
if package.startswith('baselines')],
install_requires=[
'gym>=0.15.4, <0.16.0',
'scipy',
'tqdm',
'joblib',
'cloudpickle',
'click',
'opencv-python'
],
extras_require=extras,
description='OpenAI baselines: high quality implementations of reinforcement learning algorithms',
author='OpenAI',
url='https://github.com/openai/baselines',
author_email='[email protected]',
version='0.1.6')
# ensure there is some tensorflow build with version above 1.4
import pkg_resources
tf_pkg = None
for tf_pkg_name in ['tensorflow', 'tensorflow-gpu', 'tf-nightly', 'tf-nightly-gpu']:
try:
tf_pkg = pkg_resources.get_distribution(tf_pkg_name)
except pkg_resources.DistributionNotFound:
pass
assert tf_pkg is not None, 'TensorFlow needed, of version above 1.4'
from distutils.version import LooseVersion
assert LooseVersion(re.sub(r'-?rc\d+$', '', tf_pkg.version)) >= LooseVersion('1.4.0')
| 1,670 | 26.393443 | 104 |
py
|
baselines
|
baselines-master/baselines/results_plotter.py
|
import numpy as np
import matplotlib
matplotlib.use('TkAgg') # Can change to 'Agg' for non-interactive mode
import matplotlib.pyplot as plt
plt.rcParams['svg.fonttype'] = 'none'
from baselines.common import plot_util
X_TIMESTEPS = 'timesteps'
X_EPISODES = 'episodes'
X_WALLTIME = 'walltime_hrs'
Y_REWARD = 'reward'
Y_TIMESTEPS = 'timesteps'
POSSIBLE_X_AXES = [X_TIMESTEPS, X_EPISODES, X_WALLTIME]
EPISODES_WINDOW = 100
COLORS = ['blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'purple', 'pink',
'brown', 'orange', 'teal', 'coral', 'lightblue', 'lime', 'lavender', 'turquoise',
'darkgreen', 'tan', 'salmon', 'gold', 'darkred', 'darkblue']
def rolling_window(a, window):
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
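# e.g. rolling_window(np.arange(5), 3) -> [[0 1 2], [1 2 3], [2 3 4]] as a strided
# view (no copy); window_func then reduces each window along the last axis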
def window_func(x, y, window, func):
yw = rolling_window(y, window)
yw_func = func(yw, axis=-1)
return x[window-1:], yw_func
def ts2xy(ts, xaxis, yaxis):
if xaxis == X_TIMESTEPS:
x = np.cumsum(ts.l.values)
elif xaxis == X_EPISODES:
x = np.arange(len(ts))
elif xaxis == X_WALLTIME:
x = ts.t.values / 3600.
else:
raise NotImplementedError
if yaxis == Y_REWARD:
y = ts.r.values
elif yaxis == Y_TIMESTEPS:
y = ts.l.values
else:
raise NotImplementedError
return x, y
def plot_curves(xy_list, xaxis, yaxis, title):
fig = plt.figure(figsize=(8,2))
maxx = max(xy[0][-1] for xy in xy_list)
minx = 0
for (i, (x, y)) in enumerate(xy_list):
color = COLORS[i % len(COLORS)]
plt.scatter(x, y, s=2)
        x, y_mean = window_func(x, y, EPISODES_WINDOW, np.mean)  # so this plots the average of the last EPISODES_WINDOW episodes
plt.plot(x, y_mean, color=color)
plt.xlim(minx, maxx)
plt.title(title)
plt.xlabel(xaxis)
plt.ylabel(yaxis)
plt.tight_layout()
fig.canvas.mpl_connect('resize_event', lambda event: plt.tight_layout())
plt.grid(True)
def split_by_task(taskpath):
return taskpath['dirname'].split('/')[-1].split('-')[0]
def plot_results(dirs, num_timesteps=10e6, xaxis=X_TIMESTEPS, yaxis=Y_REWARD, title='', split_fn=split_by_task):
results = plot_util.load_results(dirs)
plot_util.plot_results(results, xy_fn=lambda r: ts2xy(r['monitor'], xaxis, yaxis), split_fn=split_fn, average_group=True, resample=int(1e6))
# Example usage in jupyter-notebook
# from baselines.results_plotter import plot_results
# %matplotlib inline
# plot_results("./log")
# Here ./log is a directory containing the monitor.csv files
def main():
import argparse
import os
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--dirs', help='List of log directories', nargs = '*', default=['./log'])
parser.add_argument('--num_timesteps', type=int, default=int(10e6))
    parser.add_argument('--xaxis', help='Variable on X-axis', default=X_TIMESTEPS)
    parser.add_argument('--yaxis', help='Variable on Y-axis', default=Y_REWARD)
parser.add_argument('--task_name', help = 'Title of plot', default = 'Breakout')
args = parser.parse_args()
args.dirs = [os.path.abspath(dir) for dir in args.dirs]
plot_results(args.dirs, args.num_timesteps, args.xaxis, args.yaxis, args.task_name)
plt.show()
if __name__ == '__main__':
main()
| 3,455 | 35.378947 | 144 |
py
|
baselines
|
baselines-master/baselines/logger.py
|
import os
import sys
import shutil
import os.path as osp
import json
import time
import datetime
import tempfile
from collections import defaultdict
from contextlib import contextmanager
DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40
DISABLED = 50
class KVWriter(object):
def writekvs(self, kvs):
raise NotImplementedError
class SeqWriter(object):
def writeseq(self, seq):
raise NotImplementedError
class HumanOutputFormat(KVWriter, SeqWriter):
def __init__(self, filename_or_file):
if isinstance(filename_or_file, str):
self.file = open(filename_or_file, 'wt')
self.own_file = True
else:
assert hasattr(filename_or_file, 'read'), 'expected file or str, got %s'%filename_or_file
self.file = filename_or_file
self.own_file = False
def writekvs(self, kvs):
# Create strings for printing
key2str = {}
for (key, val) in sorted(kvs.items()):
if hasattr(val, '__float__'):
valstr = '%-8.3g' % val
else:
valstr = str(val)
key2str[self._truncate(key)] = self._truncate(valstr)
# Find max widths
if len(key2str) == 0:
print('WARNING: tried to write empty key-value dict')
return
else:
keywidth = max(map(len, key2str.keys()))
valwidth = max(map(len, key2str.values()))
# Write out the data
dashes = '-' * (keywidth + valwidth + 7)
lines = [dashes]
for (key, val) in sorted(key2str.items(), key=lambda kv: kv[0].lower()):
lines.append('| %s%s | %s%s |' % (
key,
' ' * (keywidth - len(key)),
val,
' ' * (valwidth - len(val)),
))
lines.append(dashes)
self.file.write('\n'.join(lines) + '\n')
# Flush the output to the file
self.file.flush()
def _truncate(self, s):
maxlen = 30
return s[:maxlen-3] + '...' if len(s) > maxlen else s
def writeseq(self, seq):
seq = list(seq)
for (i, elem) in enumerate(seq):
self.file.write(elem)
if i < len(seq) - 1: # add space unless this is the last one
self.file.write(' ')
self.file.write('\n')
self.file.flush()
def close(self):
if self.own_file:
self.file.close()
class JSONOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, 'wt')
def writekvs(self, kvs):
for k, v in sorted(kvs.items()):
if hasattr(v, 'dtype'):
kvs[k] = float(v)
self.file.write(json.dumps(kvs) + '\n')
self.file.flush()
def close(self):
self.file.close()
class CSVOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, 'w+t')
self.keys = []
self.sep = ','
def writekvs(self, kvs):
# Add our current row to the history
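        # if this row introduces new keys, rewrite the header line in place and
        # pad every previously written row so the CSV stays rectangular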
extra_keys = list(kvs.keys() - self.keys)
extra_keys.sort()
if extra_keys:
self.keys.extend(extra_keys)
self.file.seek(0)
lines = self.file.readlines()
self.file.seek(0)
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(',')
self.file.write(k)
self.file.write('\n')
for line in lines[1:]:
self.file.write(line[:-1])
self.file.write(self.sep * len(extra_keys))
self.file.write('\n')
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(',')
v = kvs.get(k)
if v is not None:
self.file.write(str(v))
self.file.write('\n')
self.file.flush()
def close(self):
self.file.close()
class TensorBoardOutputFormat(KVWriter):
"""
Dumps key/value pairs into TensorBoard's numeric format.
"""
def __init__(self, dir):
os.makedirs(dir, exist_ok=True)
self.dir = dir
self.step = 1
prefix = 'events'
path = osp.join(osp.abspath(dir), prefix)
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
from tensorflow.core.util import event_pb2
from tensorflow.python.util import compat
self.tf = tf
self.event_pb2 = event_pb2
self.pywrap_tensorflow = pywrap_tensorflow
self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))
def writekvs(self, kvs):
def summary_val(k, v):
kwargs = {'tag': k, 'simple_value': float(v)}
return self.tf.Summary.Value(**kwargs)
summary = self.tf.Summary(value=[summary_val(k, v) for k, v in kvs.items()])
event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
event.step = self.step # is there any reason why you'd want to specify the step?
self.writer.WriteEvent(event)
self.writer.Flush()
self.step += 1
def close(self):
if self.writer:
self.writer.Close()
self.writer = None
def make_output_format(format, ev_dir, log_suffix=''):
os.makedirs(ev_dir, exist_ok=True)
if format == 'stdout':
return HumanOutputFormat(sys.stdout)
elif format == 'log':
return HumanOutputFormat(osp.join(ev_dir, 'log%s.txt' % log_suffix))
elif format == 'json':
return JSONOutputFormat(osp.join(ev_dir, 'progress%s.json' % log_suffix))
elif format == 'csv':
return CSVOutputFormat(osp.join(ev_dir, 'progress%s.csv' % log_suffix))
elif format == 'tensorboard':
return TensorBoardOutputFormat(osp.join(ev_dir, 'tb%s' % log_suffix))
else:
raise ValueError('Unknown format specified: %s' % (format,))
# ================================================================
# API
# ================================================================
def logkv(key, val):
"""
Log a value of some diagnostic
Call this once for each diagnostic quantity, each iteration
If called many times, last value will be used.
"""
get_current().logkv(key, val)
def logkv_mean(key, val):
"""
The same as logkv(), but if called many times, values averaged.
"""
get_current().logkv_mean(key, val)
def logkvs(d):
"""
Log a dictionary of key-value pairs
"""
for (k, v) in d.items():
logkv(k, v)
def dumpkvs():
"""
Write all of the diagnostics from the current iteration
"""
return get_current().dumpkvs()
def getkvs():
return get_current().name2val
def log(*args, level=INFO):
"""
Write the sequence of args, with no separators, to the console and output files (if you've configured an output file).
"""
get_current().log(*args, level=level)
def debug(*args):
log(*args, level=DEBUG)
def info(*args):
log(*args, level=INFO)
def warn(*args):
log(*args, level=WARN)
def error(*args):
log(*args, level=ERROR)
def set_level(level):
"""
Set logging threshold on current logger.
"""
get_current().set_level(level)
def set_comm(comm):
get_current().set_comm(comm)
def get_dir():
"""
Get directory that log files are being written to.
will be None if there is no output directory (i.e., if you didn't call start)
"""
return get_current().get_dir()
record_tabular = logkv
dump_tabular = dumpkvs
@contextmanager
def profile_kv(scopename):
logkey = 'wait_' + scopename
tstart = time.time()
try:
yield
finally:
get_current().name2val[logkey] += time.time() - tstart
def profile(n):
"""
Usage:
@profile("my_func")
def my_func(): code
"""
def decorator_with_name(func):
def func_wrapper(*args, **kwargs):
with profile_kv(n):
return func(*args, **kwargs)
return func_wrapper
return decorator_with_name
# ================================================================
# Backend
# ================================================================
def get_current():
if Logger.CURRENT is None:
_configure_default_logger()
return Logger.CURRENT
class Logger(object):
DEFAULT = None # A logger with no output files. (See right below class definition)
# So that you can still log to the terminal without setting up any output files
CURRENT = None # Current logger being used by the free functions above
def __init__(self, dir, output_formats, comm=None):
self.name2val = defaultdict(float) # values this iteration
self.name2cnt = defaultdict(int)
self.level = INFO
self.dir = dir
self.output_formats = output_formats
self.comm = comm
# Logging API, forwarded
# ----------------------------------------
def logkv(self, key, val):
self.name2val[key] = val
def logkv_mean(self, key, val):
oldval, cnt = self.name2val[key], self.name2cnt[key]
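        # incremental running mean: new_mean = (old_mean * cnt + val) / (cnt + 1)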
self.name2val[key] = oldval*cnt/(cnt+1) + val/(cnt+1)
self.name2cnt[key] = cnt + 1
def dumpkvs(self):
if self.comm is None:
d = self.name2val
else:
from baselines.common import mpi_util
d = mpi_util.mpi_weighted_mean(self.comm,
{name : (val, self.name2cnt.get(name, 1))
for (name, val) in self.name2val.items()})
if self.comm.rank != 0:
d['dummy'] = 1 # so we don't get a warning about empty dict
out = d.copy() # Return the dict for unit testing purposes
for fmt in self.output_formats:
if isinstance(fmt, KVWriter):
fmt.writekvs(d)
self.name2val.clear()
self.name2cnt.clear()
return out
def log(self, *args, level=INFO):
if self.level <= level:
self._do_log(args)
# Configuration
# ----------------------------------------
def set_level(self, level):
self.level = level
def set_comm(self, comm):
self.comm = comm
def get_dir(self):
return self.dir
def close(self):
for fmt in self.output_formats:
fmt.close()
# Misc
# ----------------------------------------
def _do_log(self, args):
for fmt in self.output_formats:
if isinstance(fmt, SeqWriter):
fmt.writeseq(map(str, args))
def get_rank_without_mpi_import():
# check environment variables here instead of importing mpi4py
# to avoid calling MPI_Init() when this module is imported
for varname in ['PMI_RANK', 'OMPI_COMM_WORLD_RANK']:
if varname in os.environ:
return int(os.environ[varname])
return 0
def configure(dir=None, format_strs=None, comm=None, log_suffix=''):
"""
If comm is provided, average all numerical stats across that comm
"""
if dir is None:
dir = os.getenv('OPENAI_LOGDIR')
if dir is None:
dir = osp.join(tempfile.gettempdir(),
datetime.datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f"))
assert isinstance(dir, str)
dir = os.path.expanduser(dir)
os.makedirs(os.path.expanduser(dir), exist_ok=True)
rank = get_rank_without_mpi_import()
if rank > 0:
log_suffix = log_suffix + "-rank%03i" % rank
if format_strs is None:
if rank == 0:
format_strs = os.getenv('OPENAI_LOG_FORMAT', 'stdout,log,csv').split(',')
else:
format_strs = os.getenv('OPENAI_LOG_FORMAT_MPI', 'log').split(',')
format_strs = filter(None, format_strs)
output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]
Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm)
if output_formats:
log('Logging to %s'%dir)
def _configure_default_logger():
configure()
Logger.DEFAULT = Logger.CURRENT
def reset():
if Logger.CURRENT is not Logger.DEFAULT:
Logger.CURRENT.close()
Logger.CURRENT = Logger.DEFAULT
log('Reset logger')
@contextmanager
def scoped_configure(dir=None, format_strs=None, comm=None):
prevlogger = Logger.CURRENT
configure(dir=dir, format_strs=format_strs, comm=comm)
try:
yield
finally:
Logger.CURRENT.close()
Logger.CURRENT = prevlogger
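# Hedged usage sketch: scoped_configure temporarily installs a fresh logger and
# restores the previous one on exit (directory name illustrative):
#
#   with scoped_configure(dir='/tmp/eval_run', format_strs=['csv']):
#       logkv('eval_reward', 123.0)
#       dumpkvs()
#   # Logger.CURRENT is back to whatever it was before the block.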
# ================================================================
def _demo():
info("hi")
debug("shouldn't appear")
set_level(DEBUG)
debug("should appear")
dir = "/tmp/testlogging"
if os.path.exists(dir):
shutil.rmtree(dir)
configure(dir=dir)
logkv("a", 3)
logkv("b", 2.5)
dumpkvs()
logkv("b", -2.5)
logkv("a", 5.5)
dumpkvs()
info("^^^ should see a = 5.5")
logkv_mean("b", -22.5)
logkv_mean("b", -44.4)
logkv("a", 5.5)
dumpkvs()
info("^^^ should see b = -33.3")
logkv("b", -2.5)
dumpkvs()
logkv("a", "longasslongasslongasslongasslongasslongassvalue")
dumpkvs()
# ================================================================
# Readers
# ================================================================
def read_json(fname):
import pandas
ds = []
with open(fname, 'rt') as fh:
for line in fh:
ds.append(json.loads(line))
return pandas.DataFrame(ds)
def read_csv(fname):
import pandas
return pandas.read_csv(fname, index_col=None, comment='#')
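# Hedged usage sketch for the readers above (paths illustrative):
#
#   df_json = read_json('/tmp/my_run/progress.json')  # one JSON object per line
#   df_csv = read_csv('/tmp/my_run/progress.csv')     # '#'-comment lines skipped
#   print(df_csv.tail())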
def read_tb(path):
"""
path : a tensorboard file OR a directory, where we will find all TB files
of the form events.*
"""
import pandas
import numpy as np
from glob import glob
import tensorflow as tf
if osp.isdir(path):
fnames = glob(osp.join(path, "events.*"))
elif osp.basename(path).startswith("events."):
fnames = [path]
else:
raise NotImplementedError("Expected tensorboard file or directory containing them. Got %s"%path)
tag2pairs = defaultdict(list)
maxstep = 0
for fname in fnames:
for summary in tf.train.summary_iterator(fname):
if summary.step > 0:
for v in summary.summary.value:
pair = (summary.step, v.simple_value)
tag2pairs[v.tag].append(pair)
maxstep = max(summary.step, maxstep)
data = np.empty((maxstep, len(tag2pairs)))
data[:] = np.nan
tags = sorted(tag2pairs.keys())
for (colidx,tag) in enumerate(tags):
pairs = tag2pairs[tag]
for (step, value) in pairs:
data[step-1, colidx] = value
return pandas.DataFrame(data, columns=tags)
if __name__ == "__main__":
_demo()
| 14802 | 28.429423 | 122 |
py
|
baselines
|
baselines-master/baselines/run.py
|
import sys
import re
import multiprocessing
import os.path as osp
import gym
from collections import defaultdict
import tensorflow as tf
import numpy as np
from baselines.common.vec_env import VecFrameStack, VecNormalize, VecEnv
from baselines.common.vec_env.vec_video_recorder import VecVideoRecorder
from baselines.common.cmd_util import common_arg_parser, parse_unknown_args, make_vec_env, make_env
from baselines.common.tf_util import get_session
from baselines import logger
from importlib import import_module
try:
from mpi4py import MPI
except ImportError:
MPI = None
try:
import pybullet_envs
except ImportError:
pybullet_envs = None
try:
import roboschool
except ImportError:
roboschool = None
_game_envs = defaultdict(set)
for env in gym.envs.registry.all():
# TODO: solve this with regexes
env_type = env.entry_point.split(':')[0].split('.')[-1]
_game_envs[env_type].add(env.id)
# reading benchmark names directly from retro requires
# importing retro here, and for some reason that crashes tensorflow
# in ubuntu
_game_envs['retro'] = {
'BubbleBobble-Nes',
'SuperMarioBros-Nes',
'TwinBee3PokoPokoDaimaou-Nes',
'SpaceHarrier-Nes',
'SonicTheHedgehog-Genesis',
'Vectorman-Genesis',
'FinalFight-Snes',
'SpaceInvaders-Snes',
}
def train(args, extra_args):
env_type, env_id = get_env_type(args)
print('env_type: {}'.format(env_type))
total_timesteps = int(args.num_timesteps)
seed = args.seed
learn = get_learn_function(args.alg)
alg_kwargs = get_learn_function_defaults(args.alg, env_type)
alg_kwargs.update(extra_args)
env = build_env(args)
if args.save_video_interval != 0:
env = VecVideoRecorder(env, osp.join(logger.get_dir(), "videos"), record_video_trigger=lambda x: x % args.save_video_interval == 0, video_length=args.save_video_length)
if args.network:
alg_kwargs['network'] = args.network
else:
if alg_kwargs.get('network') is None:
alg_kwargs['network'] = get_default_network(env_type)
print('Training {} on {}:{} with arguments \n{}'.format(args.alg, env_type, env_id, alg_kwargs))
model = learn(
env=env,
seed=seed,
total_timesteps=total_timesteps,
**alg_kwargs
)
return model, env
def build_env(args):
ncpu = multiprocessing.cpu_count()
if sys.platform == 'darwin': ncpu //= 2
nenv = args.num_env or ncpu
alg = args.alg
seed = args.seed
env_type, env_id = get_env_type(args)
if env_type in {'atari', 'retro'}:
if alg == 'deepq':
env = make_env(env_id, env_type, seed=seed, wrapper_kwargs={'frame_stack': True})
elif alg == 'trpo_mpi':
env = make_env(env_id, env_type, seed=seed)
else:
frame_stack_size = 4
env = make_vec_env(env_id, env_type, nenv, seed, gamestate=args.gamestate, reward_scale=args.reward_scale)
env = VecFrameStack(env, frame_stack_size)
else:
config = tf.ConfigProto(allow_soft_placement=True,
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
config.gpu_options.allow_growth = True
get_session(config=config)
flatten_dict_observations = alg not in {'her'}
env = make_vec_env(env_id, env_type, args.num_env or 1, seed, reward_scale=args.reward_scale, flatten_dict_observations=flatten_dict_observations)
if env_type == 'mujoco':
env = VecNormalize(env, use_tf=True)
return env
def get_env_type(args):
env_id = args.env
if args.env_type is not None:
return args.env_type, env_id
# Re-parse the gym registry, since we could have new envs since last time.
for env in gym.envs.registry.all():
env_type = env.entry_point.split(':')[0].split('.')[-1]
_game_envs[env_type].add(env.id) # This is a set so add is idempotent
if env_id in _game_envs.keys():
env_type = env_id
env_id = [g for g in _game_envs[env_type]][0]
else:
env_type = None
for g, e in _game_envs.items():
if env_id in e:
env_type = g
break
if ':' in env_id:
env_type = re.sub(r':.*', '', env_id)
    assert env_type is not None, 'env_id {} is not recognized in env types {}'.format(env_id, _game_envs.keys())
return env_type, env_id
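# Hedged examples of the resolution above (env ids illustrative):
#   --env=PongNoFrameskip-v4     -> env_type found in the gym registry ('atari')
#   --env=atari                  -> env_type 'atari', env_id an arbitrary registered atari id
#   --env=retro:SomeGame-Genesis -> env_type taken from the prefix before ':'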
def get_default_network(env_type):
if env_type in {'atari', 'retro'}:
return 'cnn'
else:
return 'mlp'
def get_alg_module(alg, submodule=None):
submodule = submodule or alg
try:
# first try to import the alg module from baselines
alg_module = import_module('.'.join(['baselines', alg, submodule]))
except ImportError:
# then from rl_algs
alg_module = import_module('.'.join(['rl_' + 'algs', alg, submodule]))
return alg_module
def get_learn_function(alg):
return get_alg_module(alg).learn
def get_learn_function_defaults(alg, env_type):
try:
alg_defaults = get_alg_module(alg, 'defaults')
kwargs = getattr(alg_defaults, env_type)()
except (ImportError, AttributeError):
kwargs = {}
return kwargs
def parse_cmdline_kwargs(args):
'''
    Convert a list of '='-separated command-line arguments to a dictionary, evaluating Python objects when possible.
'''
def parse(v):
assert isinstance(v, str)
try:
return eval(v)
except (NameError, SyntaxError):
return v
return {k: parse(v) for k,v in parse_unknown_args(args).items()}
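# Hedged sketch of what parse_cmdline_kwargs returns (flag names illustrative):
#
#   unknown args: ['--lr=1e-4', '--network=mlp', '--num_layers=2']
#   result:       {'lr': 0.0001, 'network': 'mlp', 'num_layers': 2}
#
# Values go through eval() when possible, so numbers and tuples come back as
# Python objects while bare words like 'mlp' fall back to plain strings.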
def configure_logger(log_path, **kwargs):
if log_path is not None:
logger.configure(log_path)
else:
logger.configure(**kwargs)
def main(args):
# configure logger, disable logging in child MPI processes (with rank > 0)
arg_parser = common_arg_parser()
args, unknown_args = arg_parser.parse_known_args(args)
extra_args = parse_cmdline_kwargs(unknown_args)
if MPI is None or MPI.COMM_WORLD.Get_rank() == 0:
rank = 0
configure_logger(args.log_path)
else:
rank = MPI.COMM_WORLD.Get_rank()
configure_logger(args.log_path, format_strs=[])
model, env = train(args, extra_args)
if args.save_path is not None and rank == 0:
save_path = osp.expanduser(args.save_path)
model.save(save_path)
if args.play:
logger.log("Running trained model")
obs = env.reset()
state = model.initial_state if hasattr(model, 'initial_state') else None
dones = np.zeros((1,))
episode_rew = np.zeros(env.num_envs) if isinstance(env, VecEnv) else np.zeros(1)
while True:
if state is not None:
actions, _, state, _ = model.step(obs,S=state, M=dones)
else:
actions, _, _, _ = model.step(obs)
obs, rew, done, _ = env.step(actions)
episode_rew += rew
env.render()
done_any = done.any() if isinstance(done, np.ndarray) else done
if done_any:
for i in np.nonzero(done)[0]:
print('episode_rew={}'.format(episode_rew[i]))
episode_rew[i] = 0
env.close()
return model
if __name__ == '__main__':
main(sys.argv)
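# Hedged CLI sketch (flag values illustrative; the flags themselves come from
# common_arg_parser in baselines.common.cmd_util):
#
#   python -m baselines.run --alg=ppo2 --env=PongNoFrameskip-v4 --num_timesteps=1e7
#
# Any unrecognized '--key=value' flags are forwarded to the algorithm's learn()
# via parse_cmdline_kwargs above.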
| 7388 | 28.438247 | 176 |
py
|
baselines
|
baselines-master/baselines/__init__.py
| 0 | 0 | 0 |
py
|
|
baselines
|
baselines-master/baselines/deepq/deepq.py
|
import os
import tempfile
import tensorflow as tf
import zipfile
import cloudpickle
import numpy as np
import baselines.common.tf_util as U
from baselines.common.tf_util import load_variables, save_variables
from baselines import logger
from baselines.common.schedules import LinearSchedule
from baselines.common import set_global_seeds
from baselines import deepq
from baselines.deepq.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer
from baselines.deepq.utils import ObservationInput
from baselines.common.tf_util import get_session
from baselines.deepq.models import build_q_func
class ActWrapper(object):
def __init__(self, act, act_params):
self._act = act
self._act_params = act_params
self.initial_state = None
@staticmethod
def load_act(path):
with open(path, "rb") as f:
model_data, act_params = cloudpickle.load(f)
act = deepq.build_act(**act_params)
sess = tf.Session()
sess.__enter__()
with tempfile.TemporaryDirectory() as td:
arc_path = os.path.join(td, "packed.zip")
with open(arc_path, "wb") as f:
f.write(model_data)
zipfile.ZipFile(arc_path, 'r', zipfile.ZIP_DEFLATED).extractall(td)
load_variables(os.path.join(td, "model"))
return ActWrapper(act, act_params)
def __call__(self, *args, **kwargs):
return self._act(*args, **kwargs)
def step(self, observation, **kwargs):
# DQN doesn't use RNNs so we ignore states and masks
kwargs.pop('S', None)
kwargs.pop('M', None)
return self._act([observation], **kwargs), None, None, None
def save_act(self, path=None):
"""Save model to a pickle located at `path`"""
if path is None:
path = os.path.join(logger.get_dir(), "model.pkl")
with tempfile.TemporaryDirectory() as td:
save_variables(os.path.join(td, "model"))
arc_name = os.path.join(td, "packed.zip")
with zipfile.ZipFile(arc_name, 'w') as zipf:
for root, dirs, files in os.walk(td):
for fname in files:
file_path = os.path.join(root, fname)
if file_path != arc_name:
zipf.write(file_path, os.path.relpath(file_path, td))
with open(arc_name, "rb") as f:
model_data = f.read()
with open(path, "wb") as f:
cloudpickle.dump((model_data, self._act_params), f)
def save(self, path):
save_variables(path)
def load_act(path):
"""Load act function that was returned by learn function.
Parameters
----------
path: str
path to the act function pickle
Returns
-------
act: ActWrapper
function that takes a batch of observations
and returns actions.
"""
return ActWrapper.load_act(path)
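# Hedged round-trip sketch (path illustrative): an ActWrapper returned by
# learn() below can be pickled with save_act() and restored with load_act():
#
#   act = learn(env, network='mlp', total_timesteps=10000)
#   act.save_act('/tmp/model.pkl')
#   act = load_act('/tmp/model.pkl')
#   action = act(obs[None])[0]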
def learn(env,
network,
seed=None,
lr=5e-4,
total_timesteps=100000,
buffer_size=50000,
exploration_fraction=0.1,
exploration_final_eps=0.02,
train_freq=1,
batch_size=32,
print_freq=100,
checkpoint_freq=10000,
checkpoint_path=None,
learning_starts=1000,
gamma=1.0,
target_network_update_freq=500,
prioritized_replay=False,
prioritized_replay_alpha=0.6,
prioritized_replay_beta0=0.4,
prioritized_replay_beta_iters=None,
prioritized_replay_eps=1e-6,
param_noise=False,
callback=None,
load_path=None,
**network_kwargs
):
"""Train a deepq model.
Parameters
-------
env: gym.Env
environment to train on
network: string or a function
neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
(mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
seed: int or None
        PRNG seed. Runs with the same seed should give the same results. If None, no seeding is used.
lr: float
learning rate for adam optimizer
total_timesteps: int
        number of env steps to optimize for
buffer_size: int
size of the replay buffer
exploration_fraction: float
fraction of entire training period over which the exploration rate is annealed
exploration_final_eps: float
final value of random action probability
train_freq: int
update the model every `train_freq` steps.
batch_size: int
size of a batch sampled from replay buffer for training
print_freq: int
how often to print out training progress
set to None to disable printing
checkpoint_freq: int
        how often to save the model. This is so that the best version can be
        restored at the end of training. If you do not wish to restore the best
        version at the end of training, set this variable to None.
learning_starts: int
how many steps of the model to collect transitions for before learning starts
gamma: float
discount factor
target_network_update_freq: int
update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
if True prioritized replay buffer will be used.
prioritized_replay_alpha: float
alpha parameter for prioritized replay buffer
prioritized_replay_beta0: float
initial value of beta for prioritized replay buffer
prioritized_replay_beta_iters: int
number of iterations over which beta will be annealed from initial value
to 1.0. If set to None equals to total_timesteps.
prioritized_replay_eps: float
epsilon to add to the TD errors when updating priorities.
param_noise: bool
whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    callback: (locals, globals) -> bool
        function called at every step with the state of the algorithm.
        If callback returns true, training stops.
load_path: str
path to load the model from. (default: None)
**network_kwargs
additional keyword arguments to pass to the network builder.
Returns
-------
act: ActWrapper
Wrapper over act function. Adds ability to save it and load it.
See header of baselines/deepq/categorical.py for details on the act function.
"""
# Create all the functions necessary to train the model
sess = get_session()
set_global_seeds(seed)
q_func = build_q_func(network, **network_kwargs)
# capture the shape outside the closure so that the env object is not serialized
# by cloudpickle when serializing make_obs_ph
observation_space = env.observation_space
def make_obs_ph(name):
return ObservationInput(observation_space, name=name)
act, train, update_target, debug = deepq.build_train(
make_obs_ph=make_obs_ph,
q_func=q_func,
num_actions=env.action_space.n,
optimizer=tf.train.AdamOptimizer(learning_rate=lr),
gamma=gamma,
grad_norm_clipping=10,
param_noise=param_noise
)
act_params = {
'make_obs_ph': make_obs_ph,
'q_func': q_func,
'num_actions': env.action_space.n,
}
act = ActWrapper(act, act_params)
# Create the replay buffer
if prioritized_replay:
replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
if prioritized_replay_beta_iters is None:
prioritized_replay_beta_iters = total_timesteps
beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
initial_p=prioritized_replay_beta0,
final_p=1.0)
else:
replay_buffer = ReplayBuffer(buffer_size)
beta_schedule = None
# Create the schedule for exploration starting from 1.
exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps),
initial_p=1.0,
final_p=exploration_final_eps)
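    # Worked example of the schedule above using this function's defaults:
    # with total_timesteps=100000, exploration_fraction=0.1 and
    # exploration_final_eps=0.02, epsilon anneals linearly from 1.0 to 0.02
    # over the first 10000 steps and then stays at 0.02.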
# Initialize the parameters and copy them to the target network.
U.initialize()
update_target()
episode_rewards = [0.0]
saved_mean_reward = None
obs = env.reset()
reset = True
with tempfile.TemporaryDirectory() as td:
td = checkpoint_path or td
model_file = os.path.join(td, "model")
model_saved = False
if tf.train.latest_checkpoint(td) is not None:
load_variables(model_file)
logger.log('Loaded model from {}'.format(model_file))
model_saved = True
elif load_path is not None:
load_variables(load_path)
logger.log('Loaded model from {}'.format(load_path))
for t in range(total_timesteps):
if callback is not None:
if callback(locals(), globals()):
break
# Take action and update exploration to the newest value
kwargs = {}
if not param_noise:
update_eps = exploration.value(t)
update_param_noise_threshold = 0.
else:
update_eps = 0.
# Compute the threshold such that the KL divergence between perturbed and non-perturbed
# policy is comparable to eps-greedy exploration with eps = exploration.value(t).
# See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
# for detailed explanation.
update_param_noise_threshold = -np.log(1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n))
kwargs['reset'] = reset
kwargs['update_param_noise_threshold'] = update_param_noise_threshold
kwargs['update_param_noise_scale'] = True
action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0]
env_action = action
reset = False
new_obs, rew, done, _ = env.step(env_action)
# Store transition in the replay buffer.
replay_buffer.add(obs, action, rew, new_obs, float(done))
obs = new_obs
episode_rewards[-1] += rew
if done:
obs = env.reset()
episode_rewards.append(0.0)
reset = True
if t > learning_starts and t % train_freq == 0:
# Minimize the error in Bellman's equation on a batch sampled from replay buffer.
if prioritized_replay:
experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
(obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
else:
obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
weights, batch_idxes = np.ones_like(rewards), None
td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
if prioritized_replay:
new_priorities = np.abs(td_errors) + prioritized_replay_eps
replay_buffer.update_priorities(batch_idxes, new_priorities)
if t > learning_starts and t % target_network_update_freq == 0:
# Update target network periodically.
update_target()
mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
num_episodes = len(episode_rewards)
if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
logger.record_tabular("steps", t)
logger.record_tabular("episodes", num_episodes)
logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
logger.dump_tabular()
if (checkpoint_freq is not None and t > learning_starts and
num_episodes > 100 and t % checkpoint_freq == 0):
if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
if print_freq is not None:
logger.log("Saving model due to mean reward increase: {} -> {}".format(
saved_mean_reward, mean_100ep_reward))
save_variables(model_file)
model_saved = True
saved_mean_reward = mean_100ep_reward
if model_saved:
if print_freq is not None:
logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
load_variables(model_file)
return act
| 13125 | 38.417417 | 145 |
py
|
baselines
|
baselines-master/baselines/deepq/utils.py
|
from baselines.common.input import observation_input
from baselines.common.tf_util import adjust_shape
# ================================================================
# Placeholders
# ================================================================
class TfInput(object):
def __init__(self, name="(unnamed)"):
"""Generalized Tensorflow placeholder. The main differences are:
- possibly uses multiple placeholders internally and returns multiple values
- can apply light postprocessing to the value feed to placeholder.
"""
self.name = name
def get(self):
"""Return the tf variable(s) representing the possibly postprocessed value
of placeholder(s).
"""
raise NotImplementedError
def make_feed_dict(self, data):
"""Given data input it to the placeholder(s)."""
raise NotImplementedError
class PlaceholderTfInput(TfInput):
def __init__(self, placeholder):
"""Wrapper for regular tensorflow placeholder."""
super().__init__(placeholder.name)
self._placeholder = placeholder
def get(self):
return self._placeholder
def make_feed_dict(self, data):
return {self._placeholder: adjust_shape(self._placeholder, data)}
class ObservationInput(PlaceholderTfInput):
def __init__(self, observation_space, name=None):
"""Creates an input placeholder tailored to a specific observation space
Parameters
----------
observation_space:
observation space of the environment. Should be one of the gym.spaces types
name: str
tensorflow name of the underlying placeholder
"""
inpt, self.processed_inpt = observation_input(observation_space, name=name)
super().__init__(inpt)
def get(self):
return self.processed_inpt
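# Hedged usage sketch (the observation space below is illustrative):
#
#   import gym
#   import numpy as np
#   space = gym.spaces.Box(low=0, high=255, shape=(84, 84, 4), dtype=np.uint8)
#   inpt = ObservationInput(space, name='observation')
#   feed = inpt.make_feed_dict(np.zeros((1, 84, 84, 4)))  # {placeholder: adjusted array}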
| 1885 | 30.433333 | 91 |
py
|
baselines
|
baselines-master/baselines/deepq/defaults.py
|
def atari():
return dict(
network='conv_only',
lr=1e-4,
buffer_size=10000,
exploration_fraction=0.1,
exploration_final_eps=0.01,
train_freq=4,
learning_starts=10000,
target_network_update_freq=1000,
gamma=0.99,
prioritized_replay=True,
prioritized_replay_alpha=0.6,
checkpoint_freq=10000,
checkpoint_path=None,
dueling=True
)
def retro():
return atari()
| 480 | 20.863636 | 40 |
py
|
baselines
|
baselines-master/baselines/deepq/models.py
|
import tensorflow as tf
import tensorflow.contrib.layers as layers
def build_q_func(network, hiddens=[256], dueling=True, layer_norm=False, **network_kwargs):
if isinstance(network, str):
from baselines.common.models import get_network_builder
network = get_network_builder(network)(**network_kwargs)
def q_func_builder(input_placeholder, num_actions, scope, reuse=False):
with tf.variable_scope(scope, reuse=reuse):
latent = network(input_placeholder)
if isinstance(latent, tuple):
if latent[1] is not None:
raise NotImplementedError("DQN is not compatible with recurrent policies yet")
latent = latent[0]
latent = layers.flatten(latent)
with tf.variable_scope("action_value"):
action_out = latent
for hidden in hiddens:
action_out = layers.fully_connected(action_out, num_outputs=hidden, activation_fn=None)
if layer_norm:
action_out = layers.layer_norm(action_out, center=True, scale=True)
action_out = tf.nn.relu(action_out)
action_scores = layers.fully_connected(action_out, num_outputs=num_actions, activation_fn=None)
if dueling:
with tf.variable_scope("state_value"):
state_out = latent
for hidden in hiddens:
state_out = layers.fully_connected(state_out, num_outputs=hidden, activation_fn=None)
if layer_norm:
state_out = layers.layer_norm(state_out, center=True, scale=True)
state_out = tf.nn.relu(state_out)
state_score = layers.fully_connected(state_out, num_outputs=1, activation_fn=None)
action_scores_mean = tf.reduce_mean(action_scores, 1)
action_scores_centered = action_scores - tf.expand_dims(action_scores_mean, 1)
q_out = state_score + action_scores_centered
else:
q_out = action_scores
return q_out
return q_func_builder
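# Hedged note on the dueling head above: the centered advantages combine with
# the state value as Q(s, a) = V(s) + (A(s, a) - mean_a' A(s, a')), which keeps
# the two streams identifiable. Illustrative use:
#
#   q_func = build_q_func('mlp', hiddens=[64], dueling=True)
#   q_out = q_func(obs_placeholder, num_actions=4, scope='q_func')  # (batch, 4)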
| 2194 | 46.717391 | 111 |
py
|
baselines
|
baselines-master/baselines/deepq/__init__.py
|
from baselines.deepq import models # noqa
from baselines.deepq.build_graph import build_act, build_train # noqa
from baselines.deepq.deepq import learn, load_act # noqa
from baselines.deepq.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer # noqa
def wrap_atari_dqn(env):
from baselines.common.atari_wrappers import wrap_deepmind
return wrap_deepmind(env, frame_stack=True, scale=False)
| 409 | 44.555556 | 87 |
py
|
baselines
|
baselines-master/baselines/deepq/replay_buffer.py
|
import numpy as np
import random
from baselines.common.segment_tree import SumSegmentTree, MinSegmentTree
class ReplayBuffer(object):
def __init__(self, size):
"""Create Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
"""
self._storage = []
self._maxsize = size
self._next_idx = 0
def __len__(self):
return len(self._storage)
def add(self, obs_t, action, reward, obs_tp1, done):
data = (obs_t, action, reward, obs_tp1, done)
if self._next_idx >= len(self._storage):
self._storage.append(data)
else:
self._storage[self._next_idx] = data
self._next_idx = (self._next_idx + 1) % self._maxsize
def _encode_sample(self, idxes):
obses_t, actions, rewards, obses_tp1, dones = [], [], [], [], []
for i in idxes:
data = self._storage[i]
obs_t, action, reward, obs_tp1, done = data
obses_t.append(np.array(obs_t, copy=False))
actions.append(np.array(action, copy=False))
rewards.append(reward)
obses_tp1.append(np.array(obs_tp1, copy=False))
dones.append(done)
return np.array(obses_t), np.array(actions), np.array(rewards), np.array(obses_tp1), np.array(dones)
def sample(self, batch_size):
"""Sample a batch of experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise.
"""
idxes = [random.randint(0, len(self._storage) - 1) for _ in range(batch_size)]
return self._encode_sample(idxes)
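# Hedged usage sketch (transition variables illustrative): the buffer stores
# tuples and sample() draws uniformly at random, with replacement:
#
#   buf = ReplayBuffer(size=1000)
#   buf.add(obs, action, reward, next_obs, float(done))
#   obses_t, actions, rewards, obses_tp1, dones = buf.sample(batch_size=32)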
class PrioritizedReplayBuffer(ReplayBuffer):
def __init__(self, size, alpha):
"""Create Prioritized Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
alpha: float
how much prioritization is used
(0 - no prioritization, 1 - full prioritization)
See Also
--------
ReplayBuffer.__init__
"""
super(PrioritizedReplayBuffer, self).__init__(size)
assert alpha >= 0
self._alpha = alpha
it_capacity = 1
while it_capacity < size:
it_capacity *= 2
self._it_sum = SumSegmentTree(it_capacity)
self._it_min = MinSegmentTree(it_capacity)
self._max_priority = 1.0
def add(self, *args, **kwargs):
"""See ReplayBuffer.store_effect"""
idx = self._next_idx
super().add(*args, **kwargs)
self._it_sum[idx] = self._max_priority ** self._alpha
self._it_min[idx] = self._max_priority ** self._alpha
def _sample_proportional(self, batch_size):
res = []
p_total = self._it_sum.sum(0, len(self._storage) - 1)
every_range_len = p_total / batch_size
for i in range(batch_size):
mass = random.random() * every_range_len + i * every_range_len
idx = self._it_sum.find_prefixsum_idx(mass)
res.append(idx)
return res
def sample(self, batch_size, beta):
"""Sample a batch of experiences.
compared to ReplayBuffer.sample
it also returns importance weights and idxes
of sampled experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
beta: float
To what degree to use importance weights
(0 - no corrections, 1 - full correction)
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise.
weights: np.array
Array of shape (batch_size,) and dtype np.float32
denoting importance weight of each sampled transition
idxes: np.array
Array of shape (batch_size,) and dtype np.int32
idexes in buffer of sampled experiences
"""
assert beta > 0
idxes = self._sample_proportional(batch_size)
weights = []
p_min = self._it_min.min() / self._it_sum.sum()
max_weight = (p_min * len(self._storage)) ** (-beta)
for idx in idxes:
p_sample = self._it_sum[idx] / self._it_sum.sum()
weight = (p_sample * len(self._storage)) ** (-beta)
weights.append(weight / max_weight)
weights = np.array(weights)
encoded_sample = self._encode_sample(idxes)
return tuple(list(encoded_sample) + [weights, idxes])
def update_priorities(self, idxes, priorities):
"""Update priorities of sampled transitions.
sets priority of transition at index idxes[i] in buffer
to priorities[i].
Parameters
----------
idxes: [int]
List of idxes of sampled transitions
priorities: [float]
List of updated priorities corresponding to
transitions at the sampled idxes denoted by
variable `idxes`.
"""
assert len(idxes) == len(priorities)
for idx, priority in zip(idxes, priorities):
assert priority > 0
assert 0 <= idx < len(self._storage)
self._it_sum[idx] = priority ** self._alpha
self._it_min[idx] = priority ** self._alpha
self._max_priority = max(self._max_priority, priority)
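# Hedged sketch of the prioritized machinery above (variables illustrative):
# a transition with priority p is drawn with probability proportional to
# p**alpha, and sample() returns importance weights (N * P(i))**(-beta),
# normalized by the maximum weight so they lie in (0, 1]:
#
#   pbuf = PrioritizedReplayBuffer(size=1000, alpha=0.6)
#   pbuf.add(obs, action, reward, next_obs, float(done))
#   *batch, weights, idxes = pbuf.sample(32, beta=0.4)
#   pbuf.update_priorities(idxes, np.abs(td_errors) + 1e-6)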
| 6475 | 32.729167 | 108 |
py
|
baselines
|
baselines-master/baselines/deepq/build_graph.py
|
"""Deep Q learning graph
The functions in this file are used to create the following functions:
======= act ========
Function to chose an action given an observation
Parameters
----------
observation: object
    Observation that can be fed into the output of make_obs_ph
stochastic: bool
    if set to False all the actions are always deterministic (default True)
update_eps_ph: float
    update epsilon to a new value, if negative no update happens
(default: no update)
Returns
-------
Tensor of dtype tf.int64 and shape (BATCH_SIZE,) with an action to be performed for
every element of the batch.
======= act (in case of parameter noise) ========
Function to chose an action given an observation
Parameters
----------
observation: object
    Observation that can be fed into the output of make_obs_ph
stochastic: bool
    if set to False all the actions are always deterministic (default True)
update_eps_ph: float
update epsilon to a new value, if negative no update happens
(default: no update)
reset_ph: bool
reset the perturbed policy by sampling a new perturbation
update_param_noise_threshold_ph: float
the desired threshold for the difference between non-perturbed and perturbed policy
update_param_noise_scale_ph: bool
whether or not to update the scale of the noise for the next time it is re-perturbed
Returns
-------
Tensor of dtype tf.int64 and shape (BATCH_SIZE,) with an action to be performed for
every element of the batch.
======= train =======
Function that takes a transition (s,a,r,s') and optimizes Bellman equation's error:
td_error = Q(s,a) - (r + gamma * max_a' Q(s', a'))
loss = huber_loss[td_error]
Parameters
----------
obs_t: object
a batch of observations
action: np.array
actions that were selected upon seeing obs_t.
dtype must be int32 and shape must be (batch_size,)
reward: np.array
immediate reward attained after executing those actions
dtype must be float32 and shape must be (batch_size,)
obs_tp1: object
observations that followed obs_t
done: np.array
1 if obs_t was the last observation in the episode and 0 otherwise
obs_tp1 gets ignored, but must be of the valid shape.
dtype must be float32 and shape must be (batch_size,)
weight: np.array
    importance weights for every element of the batch (gradient is multiplied
    by the importance weight); dtype must be float32 and shape must be (batch_size,)
Returns
-------
td_error: np.array
a list of differences between Q(s,a) and the target in Bellman's equation.
dtype is float32 and shape is (batch_size,)
======= update_target ========
copy the parameters from optimized Q function to the target Q function.
In Q learning we actually optimize the following error:
Q(s,a) - (r + gamma * max_a' Q'(s', a'))
Where Q' is lagging behind Q to stabilize the learning. For example, for Atari
Q' is set to Q once every 10000 training steps.
"""
import tensorflow as tf
import baselines.common.tf_util as U
def scope_vars(scope, trainable_only=False):
"""
Get variables inside a scope
The scope can be specified as a string
Parameters
----------
scope: str or VariableScope
scope in which the variables reside.
trainable_only: bool
whether or not to return only the variables that were marked as trainable.
Returns
-------
vars: [tf.Variable]
list of variables in `scope`.
"""
return tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES if trainable_only else tf.GraphKeys.GLOBAL_VARIABLES,
scope=scope if isinstance(scope, str) else scope.name
)
def scope_name():
"""Returns the name of current scope as a string, e.g. deepq/q_func"""
return tf.get_variable_scope().name
def absolute_scope_name(relative_scope_name):
"""Appends parent scope name to `relative_scope_name`"""
return scope_name() + "/" + relative_scope_name
def default_param_noise_filter(var):
if var not in tf.trainable_variables():
# We never perturb non-trainable vars.
return False
if "fully_connected" in var.name:
# We perturb fully-connected layers.
return True
# The remaining layers are likely conv or layer norm layers, which we do not wish to
# perturb (in the former case because they only extract features, in the latter case because
# we use them for normalization purposes). If you change your network, you will likely want
# to re-consider which layers to perturb and which to keep untouched.
return False
def build_act(make_obs_ph, q_func, num_actions, scope="deepq", reuse=None):
"""Creates the act function:
Parameters
----------
make_obs_ph: str -> tf.placeholder or TfInput
        a function that takes a name and creates a placeholder of input with that name
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse the scope must be given.
Returns
-------
act: (tf.Variable, bool, float) -> tf.Variable
        function to select an action given an observation.
        See the top of the file for details.
"""
with tf.variable_scope(scope, reuse=reuse):
observations_ph = make_obs_ph("observation")
stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic")
update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps")
eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0))
q_values = q_func(observations_ph.get(), num_actions, scope="q_func")
deterministic_actions = tf.argmax(q_values, axis=1)
batch_size = tf.shape(observations_ph.get())[0]
random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)
chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
_act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph],
outputs=output_actions,
givens={update_eps_ph: -1.0, stochastic_ph: True},
updates=[update_eps_expr])
def act(ob, stochastic=True, update_eps=-1):
return _act(ob, stochastic, update_eps)
return act
def build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope="deepq", reuse=None, param_noise_filter_func=None):
"""Creates the act function with support for parameter space noise exploration (https://arxiv.org/abs/1706.01905):
Parameters
----------
make_obs_ph: str -> tf.placeholder or TfInput
        a function that takes a name and creates a placeholder of input with that name
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse the scope must be given.
param_noise_filter_func: tf.Variable -> bool
function that decides whether or not a variable should be perturbed. Only applicable
        if param_noise is True. If set to None, default_param_noise_filter is used.
Returns
-------
act: (tf.Variable, bool, float, bool, float, bool) -> tf.Variable
        function to select an action given an observation.
        See the top of the file for details.
"""
if param_noise_filter_func is None:
param_noise_filter_func = default_param_noise_filter
with tf.variable_scope(scope, reuse=reuse):
observations_ph = make_obs_ph("observation")
stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic")
update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps")
update_param_noise_threshold_ph = tf.placeholder(tf.float32, (), name="update_param_noise_threshold")
update_param_noise_scale_ph = tf.placeholder(tf.bool, (), name="update_param_noise_scale")
reset_ph = tf.placeholder(tf.bool, (), name="reset")
eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0))
param_noise_scale = tf.get_variable("param_noise_scale", (), initializer=tf.constant_initializer(0.01), trainable=False)
param_noise_threshold = tf.get_variable("param_noise_threshold", (), initializer=tf.constant_initializer(0.05), trainable=False)
# Unmodified Q.
q_values = q_func(observations_ph.get(), num_actions, scope="q_func")
# Perturbable Q used for the actual rollout.
q_values_perturbed = q_func(observations_ph.get(), num_actions, scope="perturbed_q_func")
# We have to wrap this code into a function due to the way tf.cond() works. See
# https://stackoverflow.com/questions/37063952/confused-by-the-behavior-of-tf-cond for
# a more detailed discussion.
def perturb_vars(original_scope, perturbed_scope):
all_vars = scope_vars(absolute_scope_name(original_scope))
all_perturbed_vars = scope_vars(absolute_scope_name(perturbed_scope))
assert len(all_vars) == len(all_perturbed_vars)
perturb_ops = []
for var, perturbed_var in zip(all_vars, all_perturbed_vars):
if param_noise_filter_func(perturbed_var):
# Perturb this variable.
op = tf.assign(perturbed_var, var + tf.random_normal(shape=tf.shape(var), mean=0., stddev=param_noise_scale))
else:
# Do not perturb, just assign.
op = tf.assign(perturbed_var, var)
perturb_ops.append(op)
assert len(perturb_ops) == len(all_vars)
return tf.group(*perturb_ops)
# Set up functionality to re-compute `param_noise_scale`. This perturbs yet another copy
# of the network and measures the effect of that perturbation in action space. If the perturbation
# is too big, reduce scale of perturbation, otherwise increase.
q_values_adaptive = q_func(observations_ph.get(), num_actions, scope="adaptive_q_func")
perturb_for_adaption = perturb_vars(original_scope="q_func", perturbed_scope="adaptive_q_func")
kl = tf.reduce_sum(tf.nn.softmax(q_values) * (tf.log(tf.nn.softmax(q_values)) - tf.log(tf.nn.softmax(q_values_adaptive))), axis=-1)
mean_kl = tf.reduce_mean(kl)
def update_scale():
with tf.control_dependencies([perturb_for_adaption]):
update_scale_expr = tf.cond(mean_kl < param_noise_threshold,
lambda: param_noise_scale.assign(param_noise_scale * 1.01),
lambda: param_noise_scale.assign(param_noise_scale / 1.01),
)
return update_scale_expr
# Functionality to update the threshold for parameter space noise.
update_param_noise_threshold_expr = param_noise_threshold.assign(tf.cond(update_param_noise_threshold_ph >= 0,
lambda: update_param_noise_threshold_ph, lambda: param_noise_threshold))
# Put everything together.
deterministic_actions = tf.argmax(q_values_perturbed, axis=1)
batch_size = tf.shape(observations_ph.get())[0]
random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)
chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
updates = [
update_eps_expr,
tf.cond(reset_ph, lambda: perturb_vars(original_scope="q_func", perturbed_scope="perturbed_q_func"), lambda: tf.group(*[])),
tf.cond(update_param_noise_scale_ph, lambda: update_scale(), lambda: tf.Variable(0., trainable=False)),
update_param_noise_threshold_expr,
]
_act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph, reset_ph, update_param_noise_threshold_ph, update_param_noise_scale_ph],
outputs=output_actions,
givens={update_eps_ph: -1.0, stochastic_ph: True, reset_ph: False, update_param_noise_threshold_ph: False, update_param_noise_scale_ph: False},
updates=updates)
def act(ob, reset=False, update_param_noise_threshold=False, update_param_noise_scale=False, stochastic=True, update_eps=-1):
return _act(ob, stochastic, update_eps, reset, update_param_noise_threshold, update_param_noise_scale)
return act
def build_train(make_obs_ph, q_func, num_actions, optimizer, grad_norm_clipping=None, gamma=1.0,
double_q=True, scope="deepq", reuse=None, param_noise=False, param_noise_filter_func=None):
"""Creates the train function:
Parameters
----------
make_obs_ph: str -> tf.placeholder or TfInput
a function that takes a name and creates a placeholder of input with that name
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions
reuse: bool
whether or not to reuse the graph variables
optimizer: tf.train.Optimizer
optimizer to use for the Q-learning objective.
grad_norm_clipping: float or None
clip gradient norms to this value. If None no clipping is performed.
gamma: float
discount rate.
double_q: bool
if true will use Double Q Learning (https://arxiv.org/abs/1509.06461).
In general it is a good idea to keep it enabled.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse the scope must be given.
param_noise: bool
whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
param_noise_filter_func: tf.Variable -> bool
function that decides whether or not a variable should be perturbed. Only applicable
        if param_noise is True. If set to None, default_param_noise_filter is used.
Returns
-------
act: (tf.Variable, bool, float) -> tf.Variable
        function to select an action given an observation.
        See the top of the file for details.
    train: (object, np.array, np.array, object, np.array, np.array) -> np.array
        optimize the error in Bellman's equation.
        See the top of the file for details.
    update_target: () -> ()
        copy the parameters from optimized Q function to the target Q function.
        See the top of the file for details.
debug: {str: function}
a bunch of functions to print debug data like q_values.
"""
if param_noise:
act_f = build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse,
param_noise_filter_func=param_noise_filter_func)
else:
act_f = build_act(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse)
with tf.variable_scope(scope, reuse=reuse):
# set up placeholders
obs_t_input = make_obs_ph("obs_t")
act_t_ph = tf.placeholder(tf.int32, [None], name="action")
rew_t_ph = tf.placeholder(tf.float32, [None], name="reward")
obs_tp1_input = make_obs_ph("obs_tp1")
done_mask_ph = tf.placeholder(tf.float32, [None], name="done")
importance_weights_ph = tf.placeholder(tf.float32, [None], name="weight")
# q network evaluation
q_t = q_func(obs_t_input.get(), num_actions, scope="q_func", reuse=True) # reuse parameters from act
q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=tf.get_variable_scope().name + "/q_func")
        # target q network evaluation
q_tp1 = q_func(obs_tp1_input.get(), num_actions, scope="target_q_func")
target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=tf.get_variable_scope().name + "/target_q_func")
# q scores for actions which we know were selected in the given state.
q_t_selected = tf.reduce_sum(q_t * tf.one_hot(act_t_ph, num_actions), 1)
# compute estimate of best possible value starting from state at t + 1
if double_q:
q_tp1_using_online_net = q_func(obs_tp1_input.get(), num_actions, scope="q_func", reuse=True)
q_tp1_best_using_online_net = tf.argmax(q_tp1_using_online_net, 1)
q_tp1_best = tf.reduce_sum(q_tp1 * tf.one_hot(q_tp1_best_using_online_net, num_actions), 1)
else:
q_tp1_best = tf.reduce_max(q_tp1, 1)
q_tp1_best_masked = (1.0 - done_mask_ph) * q_tp1_best
# compute RHS of bellman equation
q_t_selected_target = rew_t_ph + gamma * q_tp1_best_masked
# compute the error (potentially clipped)
td_error = q_t_selected - tf.stop_gradient(q_t_selected_target)
errors = U.huber_loss(td_error)
weighted_error = tf.reduce_mean(importance_weights_ph * errors)
# compute optimization op (potentially with gradient clipping)
if grad_norm_clipping is not None:
gradients = optimizer.compute_gradients(weighted_error, var_list=q_func_vars)
for i, (grad, var) in enumerate(gradients):
if grad is not None:
gradients[i] = (tf.clip_by_norm(grad, grad_norm_clipping), var)
optimize_expr = optimizer.apply_gradients(gradients)
else:
optimize_expr = optimizer.minimize(weighted_error, var_list=q_func_vars)
# update_target_fn will be called periodically to copy Q network to target Q network
update_target_expr = []
for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name),
sorted(target_q_func_vars, key=lambda v: v.name)):
update_target_expr.append(var_target.assign(var))
update_target_expr = tf.group(*update_target_expr)
# Create callable functions
train = U.function(
inputs=[
obs_t_input,
act_t_ph,
rew_t_ph,
obs_tp1_input,
done_mask_ph,
importance_weights_ph
],
outputs=td_error,
updates=[optimize_expr]
)
update_target = U.function([], [], updates=[update_target_expr])
q_values = U.function([obs_t_input], q_t)
return act_f, train, update_target, {'q_values': q_values}
| 20635 | 44.857778 | 168 |
py
|
baselines
|
baselines-master/baselines/deepq/experiments/enjoy_pong.py
|
import gym
from baselines import deepq
def main():
env = gym.make("PongNoFrameskip-v4")
env = deepq.wrap_atari_dqn(env)
model = deepq.learn(
env,
"conv_only",
convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)],
hiddens=[256],
dueling=True,
total_timesteps=0
)
while True:
obs, done = env.reset(), False
episode_rew = 0
while not done:
env.render()
obs, rew, done, _ = env.step(model(obs[None])[0])
episode_rew += rew
print("Episode reward", episode_rew)
if __name__ == '__main__':
main()
| 625 | 20.586207 | 61 |
py
|
baselines
|
baselines-master/baselines/deepq/experiments/enjoy_mountaincar.py
|
import gym
from baselines import deepq
from baselines.common import models
def main():
env = gym.make("MountainCar-v0")
act = deepq.learn(
env,
network=models.mlp(num_layers=1, num_hidden=64),
total_timesteps=0,
load_path='mountaincar_model.pkl'
)
while True:
obs, done = env.reset(), False
episode_rew = 0
while not done:
env.render()
obs, rew, done, _ = env.step(act(obs[None])[0])
episode_rew += rew
print("Episode reward", episode_rew)
if __name__ == '__main__':
main()
| 600 | 20.464286 | 59 |
py
|
baselines
|
baselines-master/baselines/deepq/experiments/train_mountaincar.py
|
import gym
from baselines import deepq
from baselines.common import models
def main():
env = gym.make("MountainCar-v0")
    # Enabling layer_norm here is important for parameter space noise!
act = deepq.learn(
env,
network=models.mlp(num_hidden=64, num_layers=1),
lr=1e-3,
total_timesteps=100000,
buffer_size=50000,
exploration_fraction=0.1,
exploration_final_eps=0.1,
print_freq=10,
param_noise=True
)
print("Saving model to mountaincar_model.pkl")
act.save("mountaincar_model.pkl")
if __name__ == '__main__':
main()
| 616 | 21.851852 | 67 |
py
|
baselines
|
baselines-master/baselines/deepq/experiments/train_cartpole.py
|
import gym
from baselines import deepq
def callback(lcl, _glb):
# stop training if reward exceeds 199
is_solved = lcl['t'] > 100 and sum(lcl['episode_rewards'][-101:-1]) / 100 >= 199
return is_solved
def main():
env = gym.make("CartPole-v0")
act = deepq.learn(
env,
network='mlp',
lr=1e-3,
total_timesteps=100000,
buffer_size=50000,
exploration_fraction=0.1,
exploration_final_eps=0.02,
print_freq=10,
callback=callback
)
print("Saving model to cartpole_model.pkl")
act.save("cartpole_model.pkl")
if __name__ == '__main__':
main()
| 646 | 19.870968 | 84 |
py
|