code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---
# Imports used by this snippet (per yolo3/utils.py in the source repo)
import numpy as np
from PIL import Image
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb

def get_random_data(annotation_line, input_shape, random=True, max_boxes=20, jitter=.3, hue=.1, sat=1.5, val=1.5, proc_img=True):
    '''random preprocessing for real-time data augmentation'''
    line = annotation_line.split()
    image = Image.open(line[0])
    iw, ih = image.size
    h, w = input_shape
    box = np.array([np.array(list(map(int, box.split(',')))) for box in line[1:]])

    if not random:
        # resize image
        scale = min(w/iw, h/ih)
        nw = int(iw*scale)
        nh = int(ih*scale)
        dx = (w-nw)//2
        dy = (h-nh)//2
        image_data = 0
        if proc_img:
            image = image.resize((nw, nh), Image.BICUBIC)
            new_image = Image.new('RGB', (w, h), (128, 128, 128))
            new_image.paste(image, (dx, dy))
            image_data = np.array(new_image)/255.

        # correct boxes
        box_data = np.zeros((max_boxes, 5))
        if len(box) > 0:
            np.random.shuffle(box)
            if len(box) > max_boxes: box = box[:max_boxes]
            box[:, [0, 2]] = box[:, [0, 2]]*scale + dx
            box[:, [1, 3]] = box[:, [1, 3]]*scale + dy
            box_data[:len(box)] = box

        return image_data, box_data

    # resize image
    new_ar = w/h * rand(1-jitter, 1+jitter)/rand(1-jitter, 1+jitter)
    scale = rand(.25, 2)
    if new_ar < 1:
        nh = int(scale*h)
        nw = int(nh*new_ar)
    else:
        nw = int(scale*w)
        nh = int(nw/new_ar)
    image = image.resize((nw, nh), Image.BICUBIC)

    # place image
    dx = int(rand(0, w-nw))
    dy = int(rand(0, h-nh))
    new_image = Image.new('RGB', (w, h), (128, 128, 128))
    new_image.paste(image, (dx, dy))
    image = new_image

    # flip image or not
    flip = rand() < .5
    if flip: image = image.transpose(Image.FLIP_LEFT_RIGHT)

    # distort image
    hue = rand(-hue, hue)
    sat = rand(1, sat) if rand() < .5 else 1/rand(1, sat)
    val = rand(1, val) if rand() < .5 else 1/rand(1, val)
    x = rgb_to_hsv(np.array(image)/255.)
    x[..., 0] += hue
    x[..., 0][x[..., 0] > 1] -= 1
    x[..., 0][x[..., 0] < 0] += 1
    x[..., 1] *= sat
    x[..., 2] *= val
    x[x > 1] = 1
    x[x < 0] = 0
    image_data = hsv_to_rgb(x)  # numpy array, 0 to 1

    # correct boxes
    box_data = np.zeros((max_boxes, 5))
    if len(box) > 0:
        np.random.shuffle(box)
        box[:, [0, 2]] = box[:, [0, 2]]*nw/iw + dx
        box[:, [1, 3]] = box[:, [1, 3]]*nh/ih + dy
        if flip: box[:, [0, 2]] = w - box[:, [2, 0]]
        box[:, 0:2][box[:, 0:2] < 0] = 0
        box[:, 2][box[:, 2] > w] = w
        box[:, 3][box[:, 3] > h] = h
        box_w = box[:, 2] - box[:, 0]
        box_h = box[:, 3] - box[:, 1]
        box = box[np.logical_and(box_w > 1, box_h > 1)]  # discard invalid box
        if len(box) > max_boxes: box = box[:max_boxes]
        box_data[:len(box)] = box

    return image_data, box_data
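
`get_random_data` above also calls a small `rand` helper that is not part of this excerpt; in the keras-yolo3 repo it is defined alongside this function in yolo3/utils.py. A minimal sketch of what such a helper does, assuming a uniform draw over [a, b):

def rand(a=0, b=1):
    # Uniform random float in [a, b); rand() with no arguments draws from [0, 1)
    return np.random.rand() * (b - a) + a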
|
random preprocessing for real-time data augmentation
|
get_random_data
|
python
|
qqwweee/keras-yolo3
|
yolo3/utils.py
|
https://github.com/qqwweee/keras-yolo3/blob/master/yolo3/utils.py
|
MIT
|
def test_handles_bytes_subclasses(self) -> None:
    """
    Ensure the library can support being used in projects that might work with values that are
    subclasses of `bytes`. Let's embrace Python's duck-typing, not shy away from it
    """
    class CustomBytes(bytes):
        def __new__(cls, data: str):
            data_bytes = base64url_to_bytes(data)
            self = bytes.__new__(cls, memoryview(data_bytes).tobytes())
            return self

    verification = verify_authentication_response(
        credential=AuthenticationCredential(
            id="fq9Nj0nS24B5y6Pkw_h3-9GEAEA3-0LBPxE2zvTdLjDqtSeCSNYFe9VMRueSpAZxT3YDc6L1lWXdQNwI-sVNYrefEcRR1Nsb_0jpHE955WEtFud2xxZg3MvoLMxHLet63i5tajd1fHtP7I-00D6cehM8ZWlLp2T3s9lfZgVIFcA",
            raw_id=CustomBytes(
                "fq9Nj0nS24B5y6Pkw_h3-9GEAEA3-0LBPxE2zvTdLjDqtSeCSNYFe9VMRueSpAZxT3YDc6L1lWXdQNwI-sVNYrefEcRR1Nsb_0jpHE955WEtFud2xxZg3MvoLMxHLet63i5tajd1fHtP7I-00D6cehM8ZWlLp2T3s9lfZgVIFcA"
            ),
            response=AuthenticatorAssertionResponse(
                authenticator_data=CustomBytes(
                    "SZYN5YgOjGh0NBcPZHZgW4_krrmihjLHmVzzuoMdl2MBAAAABw"
                ),
                client_data_json=CustomBytes(
                    "eyJ0eXBlIjoid2ViYXV0aG4uZ2V0IiwiY2hhbGxlbmdlIjoiZVo0ZWVBM080ank1Rkl6cURhU0o2SkROR3UwYkJjNXpJMURqUV9rTHNvMVdOcWtHNms1bUNZZjFkdFFoVlVpQldaV2xaa3pSNU1GZWVXQ3BKUlVOWHciLCJvcmlnaW4iOiJodHRwOi8vbG9jYWxob3N0OjUwMDAiLCJjcm9zc09yaWdpbiI6ZmFsc2V9"
                ),
                signature=CustomBytes(
                    "RRWV8mYDRvK7YdQgdtZD4pJ2dh1D_IWZ_D6jsZo6FHJBoenbj0CVT5nA20vUzlRhN4R6dOEUHmUwP1F8eRBhBg"
                ),
            ),
        ),
        expected_challenge=CustomBytes(
            "eZ4eeA3O4jy5FIzqDaSJ6JDNGu0bBc5zI1DjQ_kLso1WNqkG6k5mCYf1dtQhVUiBWZWlZkzR5MFeeWCpJRUNXw"
        ),
        expected_rp_id="localhost",
        expected_origin="http://localhost:5000",
        credential_public_key=CustomBytes(
            "pAEBAycgBiFYIMz6_SUFLiDid2Yhlq0YboyJ-CDrIrNpkPUGmJp4D3Dp"
        ),
        credential_current_sign_count=3,
    )
    assert verification.new_sign_count == 7
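
Both this test and `test_handles_memoryviews` below lean on the library's `base64url_to_bytes` helper to decode the hard-coded values. A minimal sketch of such a helper, assuming it simply over-pads and decodes base64url (the real implementation lives under webauthn/helpers/):

from base64 import urlsafe_b64decode

def base64url_to_bytes(val: str) -> bytes:
    # Over-pad with "=" so the length is a multiple of four;
    # the decoder tolerates excess padding.
    return urlsafe_b64decode(f"{val}===")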
|
Ensure the library can support being used in projects that might work with values that are
subclasses of `bytes`. Let's embrace Python's duck-typing, not shy away from it
|
test_handles_bytes_subclasses
|
python
|
duo-labs/py_webauthn
|
tests/test_bytes_subclass_support.py
|
https://github.com/duo-labs/py_webauthn/blob/master/tests/test_bytes_subclass_support.py
|
BSD-3-Clause
|
def test_handles_memoryviews(self) -> None:
    """
    Ensure support for libraries that leverage memoryviews
    """
    def base64url_to_memoryview(data: str) -> memoryview:
        data_bytes = base64url_to_bytes(data)
        return memoryview(data_bytes)

    verification = verify_authentication_response(
        credential=AuthenticationCredential(
            id="fq9Nj0nS24B5y6Pkw_h3-9GEAEA3-0LBPxE2zvTdLjDqtSeCSNYFe9VMRueSpAZxT3YDc6L1lWXdQNwI-sVNYrefEcRR1Nsb_0jpHE955WEtFud2xxZg3MvoLMxHLet63i5tajd1fHtP7I-00D6cehM8ZWlLp2T3s9lfZgVIFcA",
            raw_id=base64url_to_memoryview(
                "fq9Nj0nS24B5y6Pkw_h3-9GEAEA3-0LBPxE2zvTdLjDqtSeCSNYFe9VMRueSpAZxT3YDc6L1lWXdQNwI-sVNYrefEcRR1Nsb_0jpHE955WEtFud2xxZg3MvoLMxHLet63i5tajd1fHtP7I-00D6cehM8ZWlLp2T3s9lfZgVIFcA"
            ),
            response=AuthenticatorAssertionResponse(
                authenticator_data=base64url_to_memoryview(
                    "SZYN5YgOjGh0NBcPZHZgW4_krrmihjLHmVzzuoMdl2MBAAAABw"
                ),
                client_data_json=base64url_to_memoryview(
                    "eyJ0eXBlIjoid2ViYXV0aG4uZ2V0IiwiY2hhbGxlbmdlIjoiZVo0ZWVBM080ank1Rkl6cURhU0o2SkROR3UwYkJjNXpJMURqUV9rTHNvMVdOcWtHNms1bUNZZjFkdFFoVlVpQldaV2xaa3pSNU1GZWVXQ3BKUlVOWHciLCJvcmlnaW4iOiJodHRwOi8vbG9jYWxob3N0OjUwMDAiLCJjcm9zc09yaWdpbiI6ZmFsc2V9"
                ),
                signature=base64url_to_memoryview(
                    "RRWV8mYDRvK7YdQgdtZD4pJ2dh1D_IWZ_D6jsZo6FHJBoenbj0CVT5nA20vUzlRhN4R6dOEUHmUwP1F8eRBhBg"
                ),
            ),
        ),
        expected_challenge=base64url_to_memoryview(
            "eZ4eeA3O4jy5FIzqDaSJ6JDNGu0bBc5zI1DjQ_kLso1WNqkG6k5mCYf1dtQhVUiBWZWlZkzR5MFeeWCpJRUNXw"
        ),
        expected_rp_id="localhost",
        expected_origin="http://localhost:5000",
        credential_public_key=base64url_to_memoryview(
            "pAEBAycgBiFYIMz6_SUFLiDid2Yhlq0YboyJ-CDrIrNpkPUGmJp4D3Dp"
        ),
        credential_current_sign_count=3,
    )
    assert verification.new_sign_count == 7
|
Ensure support for libraries that leverage memoryviews
|
test_handles_memoryviews
|
python
|
duo-labs/py_webauthn
|
tests/test_bytes_subclass_support.py
|
https://github.com/duo-labs/py_webauthn/blob/master/tests/test_bytes_subclass_support.py
|
BSD-3-Clause
|
def test_supports_options_to_json_output(self) -> None:
    """
    Test that output from `generate_authentication_options()` that's fed directly into
    `options_to_json()` gets parsed back into the original options without any changes along
    the way.
    """
    opts = generate_authentication_options(
        rp_id="example.com",
        challenge=b"1234567890",
        timeout=12000,
        allow_credentials=[
            PublicKeyCredentialDescriptor(
                id=b"1234567890",
                transports=[AuthenticatorTransport.INTERNAL, AuthenticatorTransport.HYBRID],
            )
        ],
        user_verification=UserVerificationRequirement.REQUIRED,
    )
    opts_json = options_to_json(opts)
    parsed_opts_json = parse_authentication_options_json(opts_json)

    self.assertEqual(parsed_opts_json.rp_id, opts.rp_id)
    self.assertEqual(parsed_opts_json.challenge, opts.challenge)
    self.assertEqual(parsed_opts_json.allow_credentials, opts.allow_credentials)
    self.assertEqual(parsed_opts_json.timeout, opts.timeout)
    self.assertEqual(parsed_opts_json.user_verification, opts.user_verification)
|
Test that output from `generate_authentication_options()` that's fed directly into
`options_to_json()` gets parsed back into the original options without any changes along
the way.
|
test_supports_options_to_json_output
|
python
|
duo-labs/py_webauthn
|
tests/test_parse_authentication_options.py
|
https://github.com/duo-labs/py_webauthn/blob/master/tests/test_parse_authentication_options.py
|
BSD-3-Clause
|
def _generate_auth_data(
    sign_count: int = 0,
    up: bool = True,
    uv: bool = False,
    be: bool = False,
    bs: bool = False,
    at: bool = False,
    ed: bool = False,
) -> Tuple[bytes, bytes, int, Optional[bytes], Optional[bytes], Optional[bytes]]:
    """A helper to generate auth_data

    Args:
        `sign_count`: How many times the authenticator has been used
        `up`: Whether user was present
        `uv`: Whether user was verified
        `be`: Whether credential can be backed up
        `bs`: Whether credential has been backed up
        `at`: Whether attested credential data is present
        `ed`: Whether extension data is present

    Returns:
        A `tuple` comprised of the following values:
            `bytes`: Authenticator data
            `bytes`: RP ID hash
            `int`: Sign count
            `Optional[bytes]`: AAGUID
            `Optional[bytes]`: Credential ID
            `Optional[bytes]`: Credential public key
    """
    rp_id_hash = secrets.token_bytes(32)

    flags = 0b00000000
    if up is True:
        flags = flags | 1 << 0
    if uv is True:
        flags = flags | 1 << 2
    if be is True:
        flags = flags | 1 << 3
    if bs is True:
        flags = flags | 1 << 4
    if at is True:
        flags = flags | 1 << 6
    if ed is True:
        flags = flags | 1 << 7

    bytes_to_join = [
        rp_id_hash,
        flags.to_bytes(1, byteorder="big"),
        sign_count.to_bytes(4, byteorder="big"),
    ]

    aaguid: Optional[bytes] = None
    credential_id: Optional[bytes] = None
    credential_public_key: Optional[bytes] = None
    if at is True:
        aaguid = secrets.token_bytes(16)
        credential_id = secrets.token_bytes(32)
        credential_public_key = secrets.token_bytes(32)
        attested_data = [
            aaguid,
            len(credential_id).to_bytes(2, byteorder="big"),
            credential_id,
            credential_public_key,
        ]
        bytes_to_join += attested_data

    auth_data = b"".join(bytes_to_join)

    return (
        auth_data,
        rp_id_hash,
        sign_count,
        aaguid,
        credential_id,
        credential_public_key,
    )
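
The flag bits set above follow the WebAuthn authenticator-data layout (UP = bit 0, UV = bit 2, BE = bit 3, BS = bit 4, AT = bit 6, ED = bit 7), and the flags byte sits immediately after the 32-byte RP ID hash. A hypothetical sanity check that reads the generated bytes back out:

auth_data, *_ = _generate_auth_data(sign_count=5, up=True, uv=True, at=True)

flags = auth_data[32]   # flags byte follows the 32-byte RP ID hash
assert flags & (1 << 0)  # UP was set
assert flags & (1 << 2)  # UV was set
assert flags & (1 << 6)  # AT was set
assert int.from_bytes(auth_data[33:37], "big") == 5  # big-endian uint32 sign count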
|
A helper to generate auth_data
Args:
`sign_count`: How many times the authenticator has been used
`up`: Whether user was present
`uv`: Whether user was verified
`be`: Whether credential can be backed up
`bs`: Whether credential has been backed up
`at`: Whether attested credential data is present
`ed`: Whether extension data is present
Returns:
A `tuple` comprised of the following values:
`bytes`: Authenticator data
`bytes`: RP ID hash
`int`: Sign count
`Optional[bytes]`: AAGUID
`Optional[bytes]`: Credential ID
`Optional[bytes]`: Credential public key
|
_generate_auth_data
|
python
|
duo-labs/py_webauthn
|
tests/test_parse_authenticator_data.py
|
https://github.com/duo-labs/py_webauthn/blob/master/tests/test_parse_authenticator_data.py
|
BSD-3-Clause
|
def test_parses_bad_eddsa_auth_data(self) -> None:
    """
    Help out particular YubiKeys that incorrectly CBOR-encode authData when they use Ed25519
    for their public key.
    See https://github.com/duo-labs/py_webauthn/issues/160
    """
    auth_data = bytearray.fromhex(
"16B02DFBC3D4CCA37EBC2F6516659B12210DB9E1018AB9F13A9690638EA6FDA845000000012FC0579F811347EAB116BB5A8DB9202A0080E82FE6BDE300E4ECC93E0016448AD00FA6F28A011A6F87FF7B0CFCA499BEAF83344C3660B5ECABF72A3B2838A0CC7D87D3FA58292B53449CFF13AD69732D7521649D365CCBC5D0A0FA4B4E09EAE99537261F2F44093F8F4FD4CF5796E0FE58FF0615FFC5882836BBE7B99B08BE2986721C1C5A6AC7F32D3220D9B34D8DEE2FC9A301634F4B5003272067456432353531392198201618F6185918FA182E141875183A18841718521874187A18C51318D918C51883182D18ED181818EA188F182E187407185E18F41518CC18C9186D"
    )
    output = parse_authenticator_data(auth_data)
    cred_data = output.attested_credential_data
    self.assertIsNotNone(cred_data)
    assert cred_data  # Make mypy happy
    self.assertEqual(
        cred_data.credential_id.hex(),
"e82fe6bde300e4ecc93e0016448ad00fa6f28a011a6f87ff7b0cfca499beaf83344c3660b5ecabf72a3b2838a0cc7d87d3fa58292b53449cff13ad69732d7521649d365ccbc5d0a0fa4b4e09eae99537261f2f44093f8f4fd4cf5796e0fe58ff0615ffc5882836bbe7b99b08be2986721c1c5a6ac7f32d3220d9b34d8dee2fc9",
    )
    self.assertEqual(
        cred_data.credential_public_key.hex(),
"a401634f4b5003272067456432353531392198201618f6185918fa182e141875183a18841718521874187a18c51318d918c51883182d18ed181818ea188f182e187407185e18f41518cc18c9186d",
    )
|
Help out particular YubiKeys that incorrectly CBOR-encode authData when they use Ed25519
for their public key.
See https://github.com/duo-labs/py_webauthn/issues/160
|
test_parses_bad_eddsa_auth_data
|
python
|
duo-labs/py_webauthn
|
tests/test_parse_authenticator_data.py
|
https://github.com/duo-labs/py_webauthn/blob/master/tests/test_parse_authenticator_data.py
|
BSD-3-Clause
|
def test_supports_options_to_json_output(self) -> None:
    """
    Test that output from `generate_registration_options()` that's fed directly into
    `options_to_json()` gets parsed back into the original options without any changes along
    the way.
    """
    opts = generate_registration_options(
        rp_id="example.com",
        rp_name="Example Co",
        user_id=bytes([1, 2, 3, 4]),
        user_name="lee",
        user_display_name="Lee",
        attestation=AttestationConveyancePreference.DIRECT,
        authenticator_selection=AuthenticatorSelectionCriteria(
            authenticator_attachment=AuthenticatorAttachment.PLATFORM,
            resident_key=ResidentKeyRequirement.REQUIRED,
            require_resident_key=True,
            user_verification=UserVerificationRequirement.DISCOURAGED,
        ),
        challenge=bytes([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]),
        exclude_credentials=[
            PublicKeyCredentialDescriptor(
                id=b"1234567890",
                transports=[AuthenticatorTransport.INTERNAL, AuthenticatorTransport.HYBRID],
            ),
        ],
        supported_pub_key_algs=[COSEAlgorithmIdentifier.ECDSA_SHA_512],
        timeout=12000,
        hints=[
            PublicKeyCredentialHint.CLIENT_DEVICE,
            PublicKeyCredentialHint.SECURITY_KEY,
            PublicKeyCredentialHint.HYBRID,
        ],
    )
    opts_json = options_to_json(opts)
    parsed_opts_json = parse_registration_options_json(opts_json)

    self.assertEqual(parsed_opts_json.rp, opts.rp)
    self.assertEqual(parsed_opts_json.user, opts.user)
    self.assertEqual(parsed_opts_json.attestation, opts.attestation)
    self.assertEqual(parsed_opts_json.authenticator_selection, opts.authenticator_selection)
    self.assertEqual(parsed_opts_json.challenge, opts.challenge)
    self.assertEqual(parsed_opts_json.exclude_credentials, opts.exclude_credentials)
    self.assertEqual(parsed_opts_json.pub_key_cred_params, opts.pub_key_cred_params)
    self.assertEqual(parsed_opts_json.timeout, opts.timeout)
    self.assertEqual(parsed_opts_json.hints, opts.hints)
|
Test that output from `generate_registration_options()` that's fed directly into
`options_to_json()` gets parsed back into the original options without any changes along
the way.
|
test_supports_options_to_json_output
|
python
|
duo-labs/py_webauthn
|
tests/test_parse_registration_options_json.py
|
https://github.com/duo-labs/py_webauthn/blob/master/tests/test_parse_registration_options_json.py
|
BSD-3-Clause
|
def test_parse_registration_credential_json(self):
    """
    Check that we can properly parse some values that aren't really here-or-there for response
    verification, but can still be useful to RPs to fine-tune the WebAuthn experience.
    """
    parsed = parse_registration_credential_json(
"""{
"id": "ZoIKP1JQvKdrYj1bTUPJ2eTUsbLeFkv-X5xJQNr4k6s",
"rawId": "ZoIKP1JQvKdrYj1bTUPJ2eTUsbLeFkv-X5xJQNr4k6s",
"response": {
"attestationObject": "o2NmbXRkbm9uZWdhdHRTdG10oGhhdXRoRGF0YVkBZ0mWDeWIDoxodDQXD2R2YFuP5K65ooYyx5lc87qDHZdjRQAAAAAAAAAAAAAAAAAAAAAAAAAAACBmggo_UlC8p2tiPVtNQ8nZ5NSxst4WS_5fnElA2viTq6QBAwM5AQAgWQEA31dtHqc70D_h7XHQ6V_nBs3Tscu91kBL7FOw56_VFiaKYRH6Z4KLr4J0S12hFJ_3fBxpKfxyMfK66ZMeAVbOl_wemY4S5Xs4yHSWy21Xm_dgWhLJjZ9R1tjfV49kDPHB_ssdvP7wo3_NmoUPYMgK-edgZ_ehttp_I6hUUCnVaTvn_m76b2j9yEPReSwl-wlGsabYG6INUhTuhSOqG-UpVVQdNJVV7GmIPHCA2cQpJBDZBohT4MBGme_feUgm4sgqVCWzKk6CzIKIz5AIVnspLbu05SulAVnSTB3NxTwCLNJR_9v9oSkvphiNbmQBVQH1tV_psyi9HM1Jtj9VJVKMeyFDAQAB",
"clientDataJSON": "eyJ0eXBlIjoid2ViYXV0aG4uY3JlYXRlIiwiY2hhbGxlbmdlIjoiQ2VUV29nbWcwY2NodWlZdUZydjhEWFhkTVpTSVFSVlpKT2dhX3hheVZWRWNCajBDdzN5NzN5aEQ0RmtHU2UtUnJQNmhQSkpBSW0zTFZpZW40aFhFTGciLCJvcmlnaW4iOiJodHRwOi8vbG9jYWxob3N0OjUwMDAiLCJjcm9zc09yaWdpbiI6ZmFsc2V9",
"transports": ["internal", "hybrid"]
},
"type": "public-key",
"clientExtensionResults": {},
"authenticatorAttachment": "platform"
}"""
    )
    self.assertEqual(
        parsed.response.transports,
        [
            AuthenticatorTransport.INTERNAL,
            AuthenticatorTransport.HYBRID,
        ],
    )
    self.assertEqual(parsed.authenticator_attachment, AuthenticatorAttachment.PLATFORM)
|
Check that we can properly parse some values that aren't really here-or-there for response
verification, but can still be useful to RPs to fine-tune the WebAuthn experience.
|
test_parse_registration_credential_json
|
python
|
duo-labs/py_webauthn
|
tests/test_structs.py
|
https://github.com/duo-labs/py_webauthn/blob/master/tests/test_structs.py
|
BSD-3-Clause
|
def test_verify_attestation_android_key_hardware_authority(
    self,
    patched_x509store: X509Store,
) -> None:
    """
    This android-key attestation was generated on a Pixel 8a in January 2025 via an origin
    trial. Google will be sunsetting android-safetynet attestation in favor of android-key
    attestations for device-bound passkeys (i.e. `"residentKey": "discouraged"`) in April 2025.
    See here for more info:
    https://android-developers.googleblog.com/2024/09/attestation-format-change-for-android-fido2-api.html
    """
    credential = """{
"id": "AYNe4CBKc8H30FuAb8uaht6JbEQfbSBnS0SX7B6MFg8ofI92oR5lheRDJCgwY-JqB_QSJtezdhMbf8Wzt_La5N0",
"rawId": "AYNe4CBKc8H30FuAb8uaht6JbEQfbSBnS0SX7B6MFg8ofI92oR5lheRDJCgwY-JqB_QSJtezdhMbf8Wzt_La5N0",
"response": {
"attestationObject": "o2NmbXRrYW5kcm9pZC1rZXlnYXR0U3RtdKNjYWxnJmNzaWdYSDBGAiEAs9Aufj5f5HyLKEFsgfmqyaXfAih-hGuTJqgmxZGijzYCIQDAMddAq1gwH3MtesYR6WE6IAockRz8ilR7CFw_kgdmv2N4NWOFWQLQMIICzDCCAnKgAwIBAgIBATAKBggqhkjOPQQDAjA5MSkwJwYDVQQDEyBkNjAyYTAzYTY3MmQ4NjViYTVhNDg1ZTMzYTIwN2M3MzEMMAoGA1UEChMDVEVFMB4XDTcwMDEwMTAwMDAwMFoXDTQ4MDEwMTAwMDAwMFowHzEdMBsGA1UEAxMUQW5kcm9pZCBLZXlzdG9yZSBLZXkwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAATXVi3-n-rBsrP3A4Pj9P8e6PNh3eNdC38PaFiCZyMWdUVA6PbE6985PSUDDcnk3Knnpyc66J_HFOu_geuqiWtAo4IBgzCCAX8wDgYDVR0PAQH_BAQDAgeAMIIBawYKKwYBBAHWeQIBEQSCAVswggFXAgIBLAoBAQICASwKAQEEIFZS4txFVJqW-Wr6IlUC-H-twIpgvAITksC-jFBi_V9eBAAwd7-FPQgCBgGUcHc4or-FRWcEZTBjMT0wGwQWY29tLmdvb2dsZS5hbmRyb2lkLmdzZgIBIzAeBBZjb20uZ29vZ2xlLmFuZHJvaWQuZ21zAgQO6jzjMSIEIPD9bFtBDyXLJcO1M0bIly-uMPjudBHfkQSArWstYNuDMIGpoQUxAwIBAqIDAgEDowQCAgEApQUxAwIBBKoDAgEBv4N4AwIBA7-DeQMCAQq_hT4DAgEAv4VATDBKBCCd4l-wK7VTDUQUnRSEN8guJn5VcyJTCqbwOwrC6Skx2gEB_woBAAQg6y0px0ZXc5v2bsVb45w-6IiMbXzp3gyHIWKS1mbz6gu_hUEFAgMCSfC_hUIFAgMDFwW_hU4GAgQBNP35v4VPBgIEATT9-TAKBggqhkjOPQQDAgNIADBFAiEAzNz6wyTo4t5ixo9G4zXPwh4zSB9F854sU_KDGTf0dxYCICaQVSWzWgTZLQYv13MXJJee8S8_luQB3W5lPPzP0exsWQHjMIIB3zCCAYWgAwIBAgIRANYCoDpnLYZbpaSF4zogfHMwCgYIKoZIzj0EAwIwKTETMBEGA1UEChMKR29vZ2xlIExMQzESMBAGA1UEAxMJRHJvaWQgQ0EzMB4XDTI1MDEwNzE3MDg0M1oXDTI1MDIwMjEwMzUyN1owOTEpMCcGA1UEAxMgZDYwMmEwM2E2NzJkODY1YmE1YTQ4NWUzM2EyMDdjNzMxDDAKBgNVBAoTA1RFRTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABFPbPYqm91rYvZVCBdFaHRMg0tw7U07JA1EcD9ZP4d0lK2NFM4A0wGKS4jbTR_bu7NTt_YyF388S0PWAJTluqnOjfjB8MB0GA1UdDgQWBBSXyrsZ_A1NnJGRq0sm2G9nm-NC5zAfBgNVHSMEGDAWgBTFUX4F2MtjWykYrAIa8sh9bBL-kjAPBgNVHRMBAf8EBTADAQH_MA4GA1UdDwEB_wQEAwICBDAZBgorBgEEAdZ5AgEeBAuiAQgDZkdvb2dsZTAKBggqhkjOPQQDAgNIADBFAiEAysd6JDoI8X4NEdrRwUwtIAy-hLxSEKUVS2XVWS2CP04CIFNQQzM4TkA_xaZj8KyiS61nb-aOBP35tlA34JCOlv9nWQHcMIIB2DCCAV2gAwIBAgIUAIUK9vrO5iIEbQx0izdwqlWwtk0wCgYIKoZIzj0EAwMwKTETMBEGA1UEChMKR29vZ2xlIExMQzESMBAGA1UEAxMJRHJvaWQgQ0EyMB4XDTI0MTIwOTA2Mjg1M1oXDTI1MDIxNzA2Mjg1MlowKTETMBEGA1UEChMKR29vZ2xlIExMQzESMBAGA1UEAxMJRHJvaWQgQ0EzMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEPjbr-yt9xhgcbKLXoN3RK-1FcCjwIpeMPJZjayW0dqNtFflHp2smO0DxN_6x7M7NAGbcC9lM1_E-N6z51ODv-6NjMGEwDgYDVR0PAQH_BAQDAgIEMA8GA1UdEwEB_wQFMAMBAf8wHQYDVR0OBBYEFMVRfgXYy2NbKRisAhryyH1sEv6SMB8GA1UdIwQYMBaAFKYLhqTwyH8ztWE5Ys0956c6QoNIMAoGCCqGSM49BAMDA2kAMGYCMQCuzU0wV_NkOQzgqzyqP66SJN6lilrU-NDVU6qNCnbFsUoZQOm4wBwUw7LqfoUhx7YCMQDFEvqHfc2hwN2J4I9Z4rTHiLlsy6gA33WvECzIZmVMpKcyEiHlm4c9XR0nVkAjQ_5ZA4QwggOAMIIBaKADAgECAgoDiCZnYGWJloYOMA0GCSqGSIb3DQEBCwUAMBsxGTAXBgNVBAUTEGY5MjAwOWU4NTNiNmIwNDUwHhcNMjIwMTI2MjI0OTQ1WhcNMzcwMTIyMjI0OTQ1WjApMRMwEQYDVQQKEwpHb29nbGUgTExDMRIwEAYDVQQDEwlEcm9pZCBDQTIwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAT72ZtYJ0I2etFhouvtVs0sBzvYsx8thNCZV1wsDPvsMDSTPij-M1wBFD00OUn2bfU5b7K2_t2NkXc2-_V9g--mdb6SoRGmJ_AG9ScY60LKSA7iPT7gZ_5-q0tnEPPZJCqjZjBkMB0GA1UdDgQWBBSmC4ak8Mh_M7VhOWLNPeenOkKDSDAfBgNVHSMEGDAWgBQ2YeEAfIgFCVGLRGxH_xpMyepPEjASBgNVHRMBAf8ECDAGAQH_AgECMA4GA1UdDwEB_wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEArpB2eLbKHNcS6Q3Td3N7ZCgVLN0qA7CboM-Ftu4YYAcHxh-e_sk7T7XOg5S4d9a_DD7mIXgENSBPB_fVqCnBaSDKNJ3nUuC1_9gcT95p4kKJo0tqcsWw8WgKVJhNuZCN7d_ziHLiRRcrKtaj944THzsy7vB-pSai7gTah_RJrDQI91bDUJgld8_p_QAbVnYA8o-msO0sRKxgF1V5QuBwBTfpdkqshqL3nwBm0sofqI_rM-JOQava3-IurHvfkzioiOJ0uFJnBGVjpZFwGwsmyKwzl-3qRKlkHggAOKt3lQQ4GiJnOCm10JrxPa2Za0K6_kyk6YyvvRcFNai5ej3nMKJPg-eeG2nST6N6ePFuaeoNQnD4XkagGFEQYzcqvsdFsmsbUFMghFl7zEVYdscuSgCG939wxW1JgKyG5ce7CI40328w9IuOf8mUS_W3i4jSfxqCJbegyo_SKDpDILnhJUBy0T3fN8mv9AyO0uoJBlvnogIVv2SdpYUt92vyOiGMy3Jx_ZRWjIRa7iIV3VnjLI__pgCrXQLMinZWEWsxVxg25nrk8u32nZd67DJN3k2FufRbsmHZly9CLo0P79lkIEC3rifLqqJeD
yHQNaBMUC6BSDZ5RJCtMjSZw2xL5z0X9_zBsKVPkMW61hMhKzVmYNLe1DJQANRP-enru5i1oXlZBSAwggUcMIIDBKADAgECAgkA1Q_yW6Py1rMwDQYJKoZIhvcNAQELBQAwGzEZMBcGA1UEBRMQZjkyMDA5ZTg1M2I2YjA0NTAeFw0xOTExMjIyMDM3NThaFw0zNDExMTgyMDM3NThaMBsxGTAXBgNVBAUTEGY5MjAwOWU4NTNiNmIwNDUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCvtseCK7GnAewrtC6LzFQWY6vvmC8yx391MQMMl1JLG1_oCfvHKqlFH3Q8vZpvEzV0SqVed_a2rDU17hfCXmOVF92ckuY3SlPL_iWPj_u2_RKTeKIqTKmcRS1HpZ8yAfRBl8oczX52L7L1MVG2_rL__Stv5P5bxr2ew0v-CCOdqvzrjrWo7Ss6zZxeOneQ4bUUQnkxWYWYEa2esqlrvdelfJOpHEH8zSfWf9b2caoLgVJhrThPo3lEhkYE3bPYxPkgoZsWVsLxStbQPFbsBgiZBBwe0aX-bTRAtVa60dChUlicU-VdNwdi8BIu75GGGxsObEyAknSZwOm-wLg-O8H5PHLASWBLvS8TReYsP44m2-wGyUdm88EoI51PQxL62BI4h-Br7PVnWDv4NVqB_uq6-ZqDyN8-KjIq_Gcr8SCxNRWLaCHOrzCbbu53-YgzsBjaoQ5FHwajdNUHgfNZCClmu3eLkwiUJpjnTgvNJGKKAcLMA-UfCz5bSsHk356vn_akkqd8FIOIKIUBW0Is5nuAuIybSOE7YHq1Rccj_4xE-PLTaLn2Ug0xFF6_noYq1x32o7_SRQlZ1lN0DZehLzaLE-9m1dClSm4vXZpv70RoMrxnhEclhh8JPdDm80BdqJZD7w9NabZCAFH9uTBJZz42lQWA0830-9CLxYSDlSYAYwIDAQABo2MwYTAdBgNVHQ4EFgQUNmHhAHyIBQlRi0RsR_8aTMnqTxIwHwYDVR0jBBgwFoAUNmHhAHyIBQlRi0RsR_8aTMnqTxIwDwYDVR0TAQH_BAUwAwEB_zAOBgNVHQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADggIBAE4xoFzyi6Zdva-hztcJae5cqEEErd7YowbPf23uUDdddF7ZkssCQsznLcnu1RGR_lrVK61907JcCZ4TpJGjzdSHpazOh2YyTErkYzgkaue3ikGKy7mKBcTJ1pbuqrYJ0LoM4aMb6YSQ3z9MDqndyegv-w_LPp692MuVJ4nysUEfrFbIhkJutylgQnNdpQ4RrHFfGBjPn9xOJUo3YzUbaiRAFQhhJjpuMQvhpQ3lx-juiA_dS-WISjcSjRiDC7NHa_QpHoLVxmpklJOeCEgL-8APfYp01D5zc36-XY5OxRUwLUaJaSeA3HU47X6Rdb5hOedNQ604izBQ_9Wp3lJiAAiYwB9jxT3-IiCRCPpPZboWxJzL3gg318WETVS3OYugEi5QWxVckxPP4m5y2H4iqhYW5r2_VH3f-T3ynjWmO0Vf4fwOyVWB8_T3u-O7goOWo3rjFXWCvDdkuXgKI578D3Wh4ubZQc6rrCfd6wHivYQhApvqNNUa7mxgJx1alevQBRWpwAE92Av4fuomC4HDT2iObrE0ivDY6hysMqy52T-iSv8DCoTI8rD1acyVCAsgrDWs4MbY29T2hHcZUZ0yRQFm60vxW4WQRFAa3q9DY4LDSxXjtUyS5htpwr_HJkWJFys8k9vjXOBtCP1cATIsoId7HRJ0OvH61ZQOobwC3YkcaGF1dGhEYXRhWMVJlg3liA6MaHQ0Fw9kdmBbj-SuuaKGMseZXPO6gx2XY0UAAAAAuT_ZYfLmRi-xIoIAIkfeeABBAYNe4CBKc8H30FuAb8uaht6JbEQfbSBnS0SX7B6MFg8ofI92oR5lheRDJCgwY-JqB_QSJtezdhMbf8Wzt_La5N2lAQIDJiABIVgg11Yt_p_qwbKz9wOD4_T_HujzYd3jXQt_D2hYgmcjFnUiWCBFQOj2xOvfOT0lAw3J5Nyp56cnOuifxxTrv4HrqolrQA",
"clientDataJSON": "eyJ0eXBlIjoid2ViYXV0aG4uY3JlYXRlIiwiY2hhbGxlbmdlIjoidDRMV0kwaVlKU1RXUGw5V1hVZE5oZEhBbnJQRExGOWVXQVA5bEhnbUhQOCIsIm9yaWdpbiI6Imh0dHA6Ly9sb2NhbGhvc3Q6ODAwMCIsImNyb3NzT3JpZ2luIjpmYWxzZX0"
},
"type": "public-key",
"clientExtensionResults": {
"credProps": { "rk": false }
},
"authenticatorAttachment": "platform"
}"""
    challenge = base64url_to_bytes("t4LWI0iYJSTWPl9WXUdNhdHAnrPDLF9eWAP9lHgmHP8")
    rp_id = "localhost"
    expected_origin = "http://localhost:8000"
    # Setting the time to something that satisfies all these:
    # (Leaf) 19700101000000Z <-> 20480101000000Z
    # (Int.) 20250107170843Z <-> 20250202103527Z <- Earliest expiration
    # (Int.) 20241209062853Z <-> 20250217062852Z
    # (Int.) 20220126224945Z <-> 20370122224945Z
    # (Root) 20191122203758Z <-> 20341118203758Z
    patched_x509store.set_time(datetime(2025, 1, 8, 0, 0, 0))
    verification = verify_registration_response(
        credential=credential,
        expected_challenge=challenge,
        expected_origin=expected_origin,
        expected_rp_id=rp_id,
    )
    assert verification.fmt == AttestationFormat.ANDROID_KEY
    assert verification.credential_id == base64url_to_bytes(
        "AYNe4CBKc8H30FuAb8uaht6JbEQfbSBnS0SX7B6MFg8ofI92oR5lheRDJCgwY-JqB_QSJtezdhMbf8Wzt_La5N0"
    )
|
This android-key attestation was generated on a Pixel 8a in January 2025 via an origin
trial. Google will be sunsetting android-safetynet attestation in favor of android-key
attestations for device-bound passkeys (i.e. `"residentKey": "discouraged"`) in April 2025.
See here for more info:
https://android-developers.googleblog.com/2024/09/attestation-format-change-for-android-fido2-api.html
|
test_verify_attestation_android_key_hardware_authority
|
python
|
duo-labs/py_webauthn
|
tests/test_verify_registration_response_android_key.py
|
https://github.com/duo-labs/py_webauthn/blob/master/tests/test_verify_registration_response_android_key.py
|
BSD-3-Clause
|
def test_verify_attestation_android_safetynet_basic_integrity_true_cts_profile_match_false(
    self,
    mock_cbor2_loads: MagicMock,
    mock_b64encode: MagicMock,
    mock_verify_certificate: MagicMock,
):
    """
    We're not working with a full WebAuthn response here so we have to mock out some values
    because all we really want to test is that such a response is allowed through
    """
    mock_cbor2_loads.return_value = {"authData": bytes()}
    mock_b64encode.return_value = "3N7YJmISsFM0cdvMAYcHcw==".encode("utf-8")
    # Cert chain validation is not important to this test
    mock_verify_certificate.return_value = True
    # basicIntegrity: True, ctsProfileMatch: False
    jws_result_only_fail_cts_check = (
"eyJ4NWMiOiBbIk1JSURXekNDQWtNQ0FRb3dEUVlKS29aSWh2Y05BUUVMQlFBd2NqRUxNQWtHQT"
"FVRUJoTUNWVk14Q3pBSkJnTlZCQWdNQWsxSk1Rc3dDUVlEVlFRSERBSkJRVEVVTUJJR0ExVUVD"
"Z3dMUkhWdlUyVmpkWEpwZEhreEdUQVhCZ05WQkFzTUVGTmhabVYwZVU1bGRGUmxjM1JwYm1jeE"
"dEQVdCZ05WQkFNTUQxTmhabVYwZVc1bGRGUmxjM1JEUVRBZUZ3MHhPVEV3TVRneU1ESTJOVEZh"
"RncwME5qQXpNakF5TURJMk5URmFNSFV4Q3pBSkJnTlZCQVlUQWxWVE1Rc3dDUVlEVlFRSURBSk"
"5TVEVMTUFrR0ExVUVCd3dDUVVFeEZEQVNCZ05WQkFvTUMwUjFiMU5sWTNWeWFYUjVNUmt3RndZ"
"RFZRUUxEQkJUWVdabGRIbE9aWFJVWlhOMGFXNW5NUnN3R1FZRFZRUUREQkpoZEhSbGMzUXVZVz"
"VrY205cFpDNWpiMjB3Z2dFaU1BMEdDU3FHU0liM0RRRUJBUVVBQTRJQkR3QXdnZ0VLQW9JQkFR"
"RGNvL0dIbDQzNU8yQkZlTlVhdDJtbi9ZNVdGMk1RQWZiUWxwcVVWc2xVTjlZTnJSV1FYVFlYN3"
"pLTjE3U2RRcmVaU05uZTN2dDVWY0o2ZjZNK1o3NGRTUHpnOVBlN3dSWEVxMVk2aDNEQWVEdGN6"
"VGZGdWdOM3ArRWJhK01LcWkxL29UOHpzUysyL3RzVnpDVTJjNDd5QlQrT1ZRYTBTaUJsRjJlej"
"F3QkQ1VFFJRCt4VjJwOWNmWW5sYzBYSmpnZzFyRGVuR05xUm9zdERqeDJqTWViWG5vK05RM2Zj"
"N21HTHFrb2R0QmZEbWNHcHhxM3c5alBQQy9jbTZTaHlNM2g5ZXR1bzdHbFZVelhuOXQ3djU4RX"
"ZKTWJySkc2MHorT0ZTYTlJbG93U3NYMDlPbzBlaHhYRlpLbklXdisyMGtVNE1IcVZKcDIzeFhi"
"SElXZS9uZndEQWdNQkFBRXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBRmpzVVJCRXJOMGUwNU"
"JZRlMyRUU3YkZ5VnNVMUhQT0NNZ21TN0s0Nm1OZFdGUXZNT3lzdEdrUkQ3S2YramlxdmF6eWVy"
"NUdVbllSOXZCZVJrVko3TkZmY2gvd05JKzBwaTdUNDk2WmNBM0JKemp4STVxcnROZFBIY1FsNk"
"dLQThHZmQ1ZzNFQ3JUNjhoN1paQ2hJUXlHVUxKTVdwVkljL3dPT1FqNTNieEZQRTJ0U0VWQlhp"
"SUx6Tnh4NkxuZUwxaWdaMEZzdVdoU3dnRVArVXA0WFBYN3ZQbXZoczBDb3pUOHNXbG9BOEJzbG"
"dDZGlpeVI5ZThGQTR5RG5VTTFRWnBxMkFNUlBMc3ZJcDVnQndXYVNnejQxaUo0Qk5pOE1rWkJP"
"cklBckV0UzVxYzFIamN4ZklXaURoUjR5MTJqcEhud1Y0ZXpVdHNtVCtwdjFpVUQwUWVzPSJdLC"
"AiYWxnIjogIlJTMjU2In0.eyJ0aW1lc3RhbXBNcyI6IDE1NDM4NDk4NjQ5NzAsICJhZHZpY2Ui"
"OiAiTE9DS19CT09UTE9BREVSIiwgIm5vbmNlIjogIjNON1lKbUlTc0ZNMGNkdk1BWWNIY3c9PS"
"IsICJhcGtQYWNrYWdlTmFtZSI6ICJjb20uZHVvc2VjdXJpdHkuZHVvbW9iaWxlLmRlYnVnIiwg"
"ImFwa0RpZ2VzdFNoYTI1NiI6ICJIVm94ZlBNM1BwYkJaQkRmcWxORGt0Lyt3aXNhTTlrY0Exb2"
"l1NEhabDZJPSIsICJjdHNQcm9maWxlTWF0Y2giOiBmYWxzZSwgImJhc2ljSW50ZWdyaXR5Ijog"
"dHJ1ZSwgImFwa0NlcnRpZmljYXRlRGlnZXN0U2hhMjU2IjogWyJweUVSMGp6cnJWcU1KdW1pUW"
"pGUjdSVS9SdEVLbGkxckxGUEVUMGpPZnlrPSJdfQ.WJhEXK1a2mNycdH_bYYkXhvkADLRsxLaX"
"RzglwYpQXKgHuJap6x1UmWkFiygrgbd6jFfRGqGhifjubgfjHMkrMOJhA723hJNKKvfp-voZYS"
"TILmFsb1LrXjYyaut8V1POWgt3cw4HKfWXgKE2hw-KGkaD9Mrq1vBfXn8LSEkJsv7TyGtkiIbW"
"cYw0wEym7H6CyVFygwyx2B7fVz02Y15IYjz7NuHj3f9OMCScO70mGrvw7BPwaVs4LSNv8zEFOg"
"S2W1MzvpXwq1KMFvrcka7C4t5vyOhMMYwY6BWEnAGcx5_tpJsqegXTgTHSrr4TFQJzsa-H8wb1"
"YaxlMcRVSqOew"
    )
    attestation_statement = AttestationStatement(
        ver="0",
        response=jws_result_only_fail_cts_check.encode("ascii"),
    )
    verified = verify_android_safetynet(
        attestation_statement=attestation_statement,
        attestation_object=bytes(),
        client_data_json=bytes(),
        pem_root_certs_bytes=[],
        verify_timestamp_ms=False,
    )
    assert verified is True
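
The `response` strings in these SafetyNet tests are JWS values: three base64url segments joined by dots, where the middle segment decodes to the JSON claims (`basicIntegrity`, `ctsProfileMatch`, `nonce`, ...) that this test and the next one exercise. A hypothetical helper for peeking at those claims:

import base64
import json

def jws_payload(jws: str) -> dict:
    # The claims live in the second dot-separated segment
    payload_b64 = jws.split(".")[1]
    padded = payload_b64 + "=" * (-len(payload_b64) % 4)
    return json.loads(base64.urlsafe_b64decode(padded))

# e.g. jws_payload(jws_result_only_fail_cts_check)["ctsProfileMatch"] -> False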
|
We're not working with a full WebAuthn response here so we have to mock out some values
because all we really want to test is that such a response is allowed through
|
test_verify_attestation_android_safetynet_basic_integrity_true_cts_profile_match_false
|
python
|
duo-labs/py_webauthn
|
tests/test_verify_registration_response_android_safetynet.py
|
https://github.com/duo-labs/py_webauthn/blob/master/tests/test_verify_registration_response_android_safetynet.py
|
BSD-3-Clause
|
def test_raise_attestation_android_safetynet_basic_integrity_false_cts_profile_match_false(
    self,
    mock_cbor2_loads: MagicMock,
    mock_b64encode: MagicMock,
    mock_verify_certificate: MagicMock,
):
    """
    We're not working with a full WebAuthn response here so we have to mock out some values
    because all we really want to test is that a response fails the basicIntegrity check
    """
    mock_cbor2_loads.return_value = {"authData": bytes()}
    mock_b64encode.return_value = "NumMA+QH27ik6Mu737RgWg==".encode("utf-8")
    # Cert chain validation is not important to this test
    mock_verify_certificate.return_value = True
    # basicIntegrity: False, ctsProfileMatch: False
    jws_result_fail = (
"eyJ4NWMiOiBbIk1JSURXekNDQWtNQ0FRb3dEUVlKS29aSWh2Y05BUUVMQlFBd2NqRUxNQWtHQT"
"FVRUJoTUNWVk14Q3pBSkJnTlZCQWdNQWsxSk1Rc3dDUVlEVlFRSERBSkJRVEVVTUJJR0ExVUVD"
"Z3dMUkhWdlUyVmpkWEpwZEhreEdUQVhCZ05WQkFzTUVGTmhabVYwZVU1bGRGUmxjM1JwYm1jeE"
"dEQVdCZ05WQkFNTUQxTmhabVYwZVc1bGRGUmxjM1JEUVRBZUZ3MHhPVEV3TVRneU1ESTJOVEZh"
"RncwME5qQXpNakF5TURJMk5URmFNSFV4Q3pBSkJnTlZCQVlUQWxWVE1Rc3dDUVlEVlFRSURBSk"
"5TVEVMTUFrR0ExVUVCd3dDUVVFeEZEQVNCZ05WQkFvTUMwUjFiMU5sWTNWeWFYUjVNUmt3RndZ"
"RFZRUUxEQkJUWVdabGRIbE9aWFJVWlhOMGFXNW5NUnN3R1FZRFZRUUREQkpoZEhSbGMzUXVZVz"
"VrY205cFpDNWpiMjB3Z2dFaU1BMEdDU3FHU0liM0RRRUJBUVVBQTRJQkR3QXdnZ0VLQW9JQkFR"
"RGNvL0dIbDQzNU8yQkZlTlVhdDJtbi9ZNVdGMk1RQWZiUWxwcVVWc2xVTjlZTnJSV1FYVFlYN3"
"pLTjE3U2RRcmVaU05uZTN2dDVWY0o2ZjZNK1o3NGRTUHpnOVBlN3dSWEVxMVk2aDNEQWVEdGN6"
"VGZGdWdOM3ArRWJhK01LcWkxL29UOHpzUysyL3RzVnpDVTJjNDd5QlQrT1ZRYTBTaUJsRjJlej"
"F3QkQ1VFFJRCt4VjJwOWNmWW5sYzBYSmpnZzFyRGVuR05xUm9zdERqeDJqTWViWG5vK05RM2Zj"
"N21HTHFrb2R0QmZEbWNHcHhxM3c5alBQQy9jbTZTaHlNM2g5ZXR1bzdHbFZVelhuOXQ3djU4RX"
"ZKTWJySkc2MHorT0ZTYTlJbG93U3NYMDlPbzBlaHhYRlpLbklXdisyMGtVNE1IcVZKcDIzeFhi"
"SElXZS9uZndEQWdNQkFBRXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBRmpzVVJCRXJOMGUwNU"
"JZRlMyRUU3YkZ5VnNVMUhQT0NNZ21TN0s0Nm1OZFdGUXZNT3lzdEdrUkQ3S2YramlxdmF6eWVy"
"NUdVbllSOXZCZVJrVko3TkZmY2gvd05JKzBwaTdUNDk2WmNBM0JKemp4STVxcnROZFBIY1FsNk"
"dLQThHZmQ1ZzNFQ3JUNjhoN1paQ2hJUXlHVUxKTVdwVkljL3dPT1FqNTNieEZQRTJ0U0VWQlhp"
"SUx6Tnh4NkxuZUwxaWdaMEZzdVdoU3dnRVArVXA0WFBYN3ZQbXZoczBDb3pUOHNXbG9BOEJzbG"
"dDZGlpeVI5ZThGQTR5RG5VTTFRWnBxMkFNUlBMc3ZJcDVnQndXYVNnejQxaUo0Qk5pOE1rWkJP"
"cklBckV0UzVxYzFIamN4ZklXaURoUjR5MTJqcEhud1Y0ZXpVdHNtVCtwdjFpVUQwUWVzPSJdLC"
"AiYWxnIjogIlJTMjU2In0.eyJ0aW1lc3RhbXBNcyI6IDE1NDM4NTAzNjAyMTQsICJhZHZpY2Ui"
"OiAiUkVTVE9SRV9UT19GQUNUT1JZX1JPTSIsICJub25jZSI6ICJOdW1NQStRSDI3aWs2TXU3Mz"
"dSZ1dnPT0iLCAiYXBrUGFja2FnZU5hbWUiOiAiY29tLmR1b3NlY3VyaXR5LmR1b21vYmlsZS5k"
"ZWJ1ZyIsICJhcGtEaWdlc3RTaGEyNTYiOiAiYzhFd2NMQUVRNHIycVlzanBDdE9NOUR1QjZyZ0"
"E3WWxjTXBOZm9kSHo0bz0iLCAiY3RzUHJvZmlsZU1hdGNoIjogZmFsc2UsICJiYXNpY0ludGVn"
"cml0eSI6IGZhbHNlLCAiYXBrQ2VydGlmaWNhdGVEaWdlc3RTaGEyNTYiOiBbInB5RVIwanpycl"
"ZxTUp1bWlRakZSN1JVL1J0RUtsaTFyTEZQRVQwak9meWs9Il19.UgwRHy2UMio8eN2Y994Kyzi"
"wqlpzDwRIybYiem4dj8BYWC3Ta48BAR0NN45TDdsGvDGUujVo0LSayfTcgo-vbilz5Y7LWCEgb"
"GoAFhoDDPAMPtthrYTnGDVfhjHTVo00AxsZVgL-HZOD0KecqWcOL8-DWARl3rTAjBWqfon7ZC2"
"IaxzJVrcWtyhPyKdzVB5hJ4NPKIAPlCUkMVUzPY9Xhg1DFLmvaIv8qcZo8xpY0JZDm9cxR1GwP"
"4OVdwMd5seh5483VEpqAmzX7NcZ0aoiMl5PhLGgzHZTrsd1Mc-RZqgc3hAYjnubxONN8vOWGzP"
"gI2Vzgr4VzLOZsWfYwKSR5g"
    )
    attestation_statement = AttestationStatement(
        ver="0",
        response=jws_result_fail.encode("ascii"),
    )
    with self.assertRaisesRegex(
        InvalidRegistrationResponse,
        "Could not verify device integrity",
    ):
        verify_android_safetynet(
            attestation_statement=attestation_statement,
            attestation_object=bytes(),
            client_data_json=bytes(),
            pem_root_certs_bytes=[],
            verify_timestamp_ms=False,
        )
|
We're not working with a full WebAuthn response here so we have to mock out some values
because all we really want to test is that a response fails the basicIntegrity check
|
test_raise_attestation_android_safetynet_basic_integrity_false_cts_profile_match_false
|
python
|
duo-labs/py_webauthn
|
tests/test_verify_registration_response_android_safetynet.py
|
https://github.com/duo-labs/py_webauthn/blob/master/tests/test_verify_registration_response_android_safetynet.py
|
BSD-3-Clause
|
def test_verify_attestation_surface_pro_4(self) -> None:
    """
    TPM Mfgr: INTC (Intel)
    Mfgr Version: 500.5.0.0
    TPM Version: 2.0
    """
    credential = """{
"id": "2O_TSbHXS3KJwx5uwajcqbKwWCBeHjOBCXXb7vrPfUU",
"rawId": "2O_TSbHXS3KJwx5uwajcqbKwWCBeHjOBCXXb7vrPfUU",
"response": {
"clientDataJSON": "eyJ0eXBlIjoid2ViYXV0aG4uY3JlYXRlIiwiY2hhbGxlbmdlIjoiWlhsS2FHSkhZMmxQYVVwSlZYcEpNVTVwU1hOSmJsSTFZME5KTmtscmNGaFdRMG81TG1WNVNuQlpXRkZwVDJwRk1rMXFUWGxPVkd0M1RXcE5jMGx0VmpSalEwazJUVlJaZVUxNlRUTlBWRUY1VFhsM2FXTXpWbWxKYW05cFpGaE9iR050TldoaVYxWnZXbGhLYkVsdU1DNTNhbVZJVWpSNFNuRkdVUzFTVTBabFgxZFVWVjlPUm5odk4zZHRRakJ5Y3pWSE1uRnBSRjluVkRObiIsIm9yaWdpbiI6Imh0dHBzOi8vd2ViYXV0aG50ZXN0LmF6dXJld2Vic2l0ZXMubmV0IiwiY3Jvc3NPcmlnaW4iOmZhbHNlfQ==",
"attestationObject": "o2NmbXRjdHBtZ2F0dFN0bXSmY2FsZzn//mNzaWdZAQAW8nyy4aArkiLbIKwObN2DgpGdJfU7klHgF1hAk2fJVzxZe5OOzhN1ZWuDZy+I0Q8vJzQfyO/xz6zZM0lcTIXL8bOO82Wvd6QwzB9HbzQZ8mjtRis4139S+OgF5UfReijMF1TMQCSzqo4K+1w2Bo0ppS1Tygr5P4iFV6qnQ9V3xr/1Afv4i2fpPeNtRT9REW599PNwMA2pCnBGC8tJRlbWXJURe5TGBtMc1k7Qg65H8uDcYJZt6TsiuFpkkMlXnbgma9ZffLqgEKjwEPF7W/SsILLDcFs8HcNI/mE2wJXSxI1bSipf7Hao7xV1w2a/etKd76HgUTVUqQy25Zk/BK4LY3ZlcmMyLjBjeDVjglkFvTCCBbkwggOhoAMCAQICEBVC9wOQ6UasrBmEcAVMotMwDQYJKoZIhvcNAQELBQAwQjFAMD4GA1UEAxM3V1VTLUlOVEMtS0VZSUQtRTcwODNGMjIxNTJBNzQ5MkVDNTlCMEM0MjQzNDM3NjQ4QjE1REJCNzAeFw0yMTA0MDEyMzExMjdaFw0yNTA1MjIyMDMyMjFaMAAwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC0xn6eF6c3LHsT+QPrZZc9zYxgd/0zJAQZfKeDP4V59kQZZWBRnjVzamUCd11tdbVWtBbydTotghZ+vofinvIrShv4Va/jUl+Fd2r/Iu868PejBmgtlzLG7BXht1ooDxlSpKB70k83PyAeOLoIzecWpjz4lgJvUV6SHIgj/HVZg0O4E5lXsF1ko6+YGMWo2t4l49ffuYBmM2PXg6Yk7YEpsS9vO+LVQMhRdxE0e9U0KdtiDsFjyRZyeiQOXWnzB2oMmueWpAzoFgpIZVlmeswsWF4mef70Ze/SEtddHyqnAZ56v97NNx3Eirint/KEyoN1gjvEjiapBv1x3prkYrjBAgMBAAGjggHrMIIB5zAOBgNVHQ8BAf8EBAMCB4AwDAYDVR0TAQH/BAIwADBtBgNVHSABAf8EYzBhMF8GCSsGAQQBgjcVHzBSMFAGCCsGAQUFBwICMEQeQgBUAEMAUABBACAAIABUAHIAdQBzAHQAZQBkACAAIABQAGwAYQB0AGYAbwByAG0AIAAgAEkAZABlAG4AdABpAHQAeTAQBgNVHSUECTAHBgVngQUIAzBQBgNVHREBAf8ERjBEpEIwQDEWMBQGBWeBBQIBDAtpZDo0OTRFNTQ0MzEOMAwGBWeBBQICDANJQ0wxFjAUBgVngQUCAwwLaWQ6MDAwMjAwMDAwHwYDVR0jBBgwFoAUJKtDKWNW/+lQbKrQmv+0KVRndZ0wHQYDVR0OBBYEFBoLz1WCz4WjgFvf4gdjDwposC2+MIGzBggrBgEFBQcBAQSBpjCBozCBoAYIKwYBBQUHMAKGgZNodHRwOi8vYXpjc3Byb2R3dXNhaWtwdWJsaXNoLmJsb2IuY29yZS53aW5kb3dzLm5ldC93dXMtaW50Yy1rZXlpZC1lNzA4M2YyMjE1MmE3NDkyZWM1OWIwYzQyNDM0Mzc2NDhiMTVkYmI3L2NjZjJmMTYzLTU1YzAtNGMyZC04Y2FkLWViZDMzM2EzMGEyZi5jZXIwDQYJKoZIhvcNAQELBQADggIBAI2YgavqGYCUe8crkcaOy20oWQN/3Ap/i0i78puCTz+c72X2R+nZMT1QUC+XwL6diX9SEKKWWJc17OdHcF5lCtQVzjbVUp7UXsX89NUIOtiyZtiGo7AM+VgrTDxylyVSkTgovktrCWl6ycwpiD7vEwtl0ShlSbvLgKZDXWJtRBFIv+NTNje8WHhclnovKWCZnISA2ExKshGIALeLundGcgITINr8uTcC8ZTFSwhQrwHf7pjhrYllfO+7Fh3Cb1kMIfYC+GfjtFjKUm2jLUsEXAYZA2KEk2QdNgxDmy/b0SN9MiVtm9Pn7cPpxkBJuIPunA+3WlsKor1o87U2//oOHssq2HqUm9Kji9wR5pG8V1rmezhtHN606FMAkwlPly3ihu40GXPEPV7na2dnPPv8kHyRPtSOhotpZtXHzWW6vw6TrqNxFL93gExdzzF7K1x96Wb3AHsuhM7+HiPweBw/+Xl+c3A6rz1VAH9/K3IjLLFpFoyLsiTiYLAc+q5QCLhRImSe5TIao3O7GPnBUHigzsuwpQydTsZfe5RFzxU1bEdroOOaDPCEtiXZnBcIPE/Ec9/Xg9DFAMxJ43z9KrHEmsoRdVfZiCy+3aVnDkSz63GUs+tpHcEi+CSTixgxejtGZMd6bA4a55axuamE5Yd+kb5glT4dJRxRuAioF0MVpRV6WQbwMIIG7DCCBNSgAwIBAgITMwAAAtJlMfxfe3kISgAAAAAC0jANBgkqhkiG9w0BAQsFADCBjDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCldhc2hpbmd0b24xEDAOBgNVBAcTB1JlZG1vbmQxHjAcBgNVBAoTFU1pY3Jvc29mdCBDb3Jwb3JhdGlvbjE2MDQGA1UEAxMtTWljcm9zb2Z0IFRQTSBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAyMDE0MB4XDTE5MDUyMjIwMzIyMVoXDTI1MDUyMjIwMzIyMVowQjFAMD4GA1UEAxM3V1VTLUlOVEMtS0VZSUQtRTcwODNGMjIxNTJBNzQ5MkVDNTlCMEM0MjQzNDM3NjQ4QjE1REJCNzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANsHFapiqDMZD3nY6Jevf1zAWMI+hV4w0CmSZEb+S73hBTplWkg6uv75G7P/x4AFle8/uOlLPOqLlKKKnNNVPAnbS+WfspyUFMSsCF/ZOEaP4YBtdQjuoQxrN7X5qmY6C/ZOgt8VmVgkza5PymaxZDPPDKEP9LatDVkUzXutiY1YsUGc6xMq/oa4I3JL7I6nXGWxVN7slSziYHAhBTpef5PK235k6AIE+oEbpdmlrEj5UT41SfFIyC8el+Vy2obmuulsziyzyUCbZqBQ9yHa3ACCUMqIaDvVin8cEMXA6jcxVI+oYug6Nx77735GuC2we2aQwlaRvOFvxZLphIb/3h17EqakM0NMxFgIVxvvmnmrNIBylN3Uhh6FbvCviWssrl0NR0NNFnV8KCsdIsy8w0ALl6wAh0UCitEKuG+fThczYQpMv4KmKPBF2Kq1dloXDK3f9bT5I2pGXpUQHmkAs8TSRNlTso6vfdZ5g5jTJvWNJGUA2H5IgAWs59+ZHZVMlzbGUBIMyo1Po+KClGhEXmBA5Y77qWob/ebAGLibMH2lq9I9eREa/WTpQxcT7uInO45XaU0cxcthNNKsPOyg5aX3HoClpzPdvizE9iC3y5ydjrvndcg4D/jLrUAZJLwmS+VP+qrDR4/AG/yiS38lPvAeeUQD80WX3oonZBYHHd53AgMBAAGjggGOMIIBijAOBgNVHQ8BAf8EBAMCAoQwGwYDVR0lB
BQwEgYJKwYBBAGCNxUkBgVngQUIAzAWBgNVHSAEDzANMAsGCSsGAQQBgjcVHzASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQWBBQkq0MpY1b/6VBsqtCa/7QpVGd1nTAfBgNVHSMEGDAWgBR6jArOL0hiF+KU0a5VwVLscXSkVjBwBgNVHR8EaTBnMGWgY6Bhhl9odHRwOi8vd3d3Lm1pY3Jvc29mdC5jb20vcGtpb3BzL2NybC9NaWNyb3NvZnQlMjBUUE0lMjBSb290JTIwQ2VydGlmaWNhdGUlMjBBdXRob3JpdHklMjAyMDE0LmNybDB9BggrBgEFBQcBAQRxMG8wbQYIKwYBBQUHMAKGYWh0dHA6Ly93d3cubWljcm9zb2Z0LmNvbS9wa2lvcHMvY2VydHMvTWljcm9zb2Z0JTIwVFBNJTIwUm9vdCUyMENlcnRpZmljYXRlJTIwQXV0aG9yaXR5JTIwMjAxNC5jcnQwDQYJKoZIhvcNAQELBQADggIBAJ24mLPy6pE9mVLnL+VQqlPNxohciCr7OeLFudlqpVQooX2Tv5TISYM+jXIckTgqUnLkTP330fdt38ov+K/ZCnw6OymkaqKgDXfojhcNqAmCN2fX4j8LhOPhidqxOy2WlNkdXs0SF8c/iR70jsJoXxluotuKbciZ2b8gZquyST0R5hn8k2ea59/XO0rzIeZ9fozCDmDuBZcejtFync48TkMUDlwjDLXkBtGBkmE9ZVLL3jr1Mu8Nt0gal7eHs3IxPO6n+kC33MF7PxgkWmzqOrs+nStyj2WLNqTkiCCFhEBaePZqptfMejk8k5HJGtqVg9OE0r2HFRQOxkES5BDXwG74L6nw9plEksjjz2tXEKDju9JrL1aNMLgy035exLjWgRa+xiJ9hTgnsAoM9zkJM21dHMnHwGL37YD9lEHyLX+IgO/r/WtKoiJScaDqmdow9EmGTqvUqBcE+z3wiT0WIcglea1JidVIWAnoeCQApybX17ihBUYgUycvIc6QpmHqrlkEutPc3pQx7ThbIkaq2Sx4VkDWGWw1H/TPnQ4hSEM6DlWJBdvdWWoH4yXpF3HZvCBtOyXabnfpIPPX4G+trrpch4xaLxwpDST1VkJ9xRSOqoQ2uoIrZWG1fcjEtSh3P+zxDJzFjl0GGJ2zHV9G/N7bvngbho/SV3cETzZoL8YiZ3B1YkFyZWFZATYAAQALAAYEcgAgnf/L82w4OuaZ+5ho3G3LidcVOIS+KAOSLBJBWL+tIq4AEAAQCAAAAAAAAQC00gwM+S0CrYvZMzdrGmNFkIUzADgIUzylOBVgLXvAYvVY3E+UhvYYFP/eAW4Vz4js6H6Bw9O/Z4KJ5rt7/1f/I0khA7GK9paagKVYavgrmgFyJrxcrh1VLIbDcSdVa3PlSy8UU3cB+kWdgfxKV2KAYxvE88MfZJ8i/c5bOHrg6usYgdOPY6v6hI2EMFyPUxs+I1KxkdCm9iZS7sU2GFQlIqWiM2mWsKZ7gshAFLBUPE6y0s5aMl5nBtI3WFzYQFkBGskBj69kmJYGbnRFx4mIpabrVlXfqePgnmAspsIDxcV3CHZafAhL2USit0CyXkayoigOruSmqdPgSTFSO0HXaGNlcnRJbmZvWKH/VENHgBcAIgALVyJme0o1XzkiFQlMAdVlvHLGyQO8I7Vt7rV5SStq5s4AFGALRChBmfPTEklbBB/05/spyAKPAAAAABowKVoWsH2xUidz+AGXZzFL+mZgVAAiAAvnHCKQB95B4Xfgs0bhBwKMFmLhDZ64ruepNaz2Gu14iQAiAAt/6ITaQ6fFP85wdCypCkGZk7wfFctzf+AalnXK5I+GgWhhdXRoRGF0YVkBZ+RTKdA6IGjRyvf3uwrpVOaw5iWXRfMvSCn3UPBQEfnCRQAAAAAImHBYytxLgbbhMN5Q3L6WACDY79NJsddLconDHm7BqNypsrBYIF4eM4EJddvu+s99RaQBAwM5AQAgWQEAtNIMDPktAq2L2TM3axpjRZCFMwA4CFM8pTgVYC17wGL1WNxPlIb2GBT/3gFuFc+I7Oh+gcPTv2eCiea7e/9X/yNJIQOxivaWmoClWGr4K5oBcia8XK4dVSyGw3EnVWtz5UsvFFN3AfpFnYH8SldigGMbxPPDH2SfIv3OWzh64OrrGIHTj2Or+oSNhDBcj1MbPiNSsZHQpvYmUu7FNhhUJSKlojNplrCme4LIQBSwVDxOstLOWjJeZwbSN1hc2EBZARrJAY+vZJiWBm50RceJiKWm61ZV36nj4J5gLKbCA8XFdwh2WnwIS9lEordAsl5GsqIoDq7kpqnT4EkxUjtB1yFDAQAB"
},
"type": "public-key"
}"""
    challenge = base64url_to_bytes(
"ZXlKaGJHY2lPaUpJVXpJMU5pSXNJblI1Y0NJNklrcFhWQ0o5LmV5SnBZWFFpT2pFMk1qTXlOVGt3TWpNc0ltVjRjQ0k2TVRZeU16TTNPVEF5TXl3aWMzVmlJam9pZFhObGNtNWhiV1ZvWlhKbEluMC53amVIUjR4SnFGUS1SU0ZlX1dUVV9ORnhvN3dtQjByczVHMnFpRF9nVDNn"
    )
    rp_id = "webauthntest.azurewebsites.net"
    expected_origin = "https://webauthntest.azurewebsites.net"
    verification = verify_registration_response(
        credential=credential,
        expected_challenge=challenge,
        expected_origin=expected_origin,
        expected_rp_id=rp_id,
    )
    assert verification.fmt == AttestationFormat.TPM
    assert verification.credential_id == base64url_to_bytes(
        "2O_TSbHXS3KJwx5uwajcqbKwWCBeHjOBCXXb7vrPfUU"
    )
|
TPM Mfgr: INTC (Intel)
Mfgr Version: 500.5.0.0
TPM Version: 2.0
|
test_verify_attestation_surface_pro_4
|
python
|
duo-labs/py_webauthn
|
tests/test_verify_registration_response_tpm.py
|
https://github.com/duo-labs/py_webauthn/blob/master/tests/test_verify_registration_response_tpm.py
|
BSD-3-Clause
|
def test_verify_attestation_dell_xps_13(self) -> None:
    """
    TPM Mfgr: NTC (Nuvoton Technology)
    Mfgr Version: 1.3.2.8
    TPM Version: 2.0
    """
    credential = """{
"id": "56iW7RC7YLiknnNU70kO5Bb-jip9-WTUbohh_Aqq1q4",
"rawId": "56iW7RC7YLiknnNU70kO5Bb-jip9-WTUbohh_Aqq1q4",
"response": {
"clientDataJSON": "eyJ0eXBlIjoid2ViYXV0aG4uY3JlYXRlIiwiY2hhbGxlbmdlIjoiWlhsS2FHSkhZMmxQYVVwSlZYcEpNVTVwU1hOSmJsSTFZME5KTmtscmNGaFdRMG81TG1WNVNuQlpXRkZwVDJwRk1rMXFVVEJPYWswMFRWUnJjMGx0VmpSalEwazJUVlJaZVU1RVZUUk5lbWQ0VDFOM2FXTXpWbWxKYW05cFdXMDVhVWx1TUM1UmFIVnJaMWsxZEV3eVpITTRkWGRCY25STFVHRnJTemRFYW5wa1ptMDRPWGx2VTNnMFpsOURWMDlGIiwib3JpZ2luIjoiaHR0cHM6Ly93ZWJhdXRobnRlc3QuYXp1cmV3ZWJzaXRlcy5uZXQiLCJjcm9zc09yaWdpbiI6ZmFsc2V9",
"attestationObject": "o2NmbXRjdHBtZ2F0dFN0bXSmY2FsZzn//mNzaWdZAQB3sjVkad71PLRQCe71PxT2tqD1bhmhLVenpDYCX9btDVE820cfPKEtblWLiD/T4qJuqCU5RvhYHvURF7w4xP6A29gyry0w+0Xr4hywvN2FjeJJRpFHmcGo+5YdyxNEKWSyyBm1eTosu8OMKbn3risVPa1q2t3OMIrRIfD1VX2rCcQ3E6j68AbQU0aLyKwwe44jyDZ4gwuXfuiDP7xnHLoXQTeBu88wPO0kJmcj5c8Yn0O53pKYdhEopIZ0595vuUxIC82TGm4nB96H9JqiE7BgPFODTLjCTWqSu0p3/x++Kk/ejPLawC0HEOcbkdeTt9avrYUtjLGJP/5SUfrU8n86Y3ZlcmMyLjBjeDVjglkFtTCCBbEwggOZoAMCAQICEDjcB1a+TEKnlCxgEo5FnxcwDQYJKoZIhvcNAQELBQAwQTE/MD0GA1UEAxM2RVVTLU5UQy1LRVlJRC05RkJCNzlBQTBGNTI2Mjc4QkVEMTUwOTI5QTcxNzFFOTZBMzVCRUY3MB4XDTIxMDYyMTIyNTM0MFoXDTI1MDMyMTIwMjk1OVowADCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANXcBvyBeHpXOet2tBLshN3WrJSMeVy2+iKLQFbhUUEvdByiTr2ak5vwzEoeH3NvMRlSER86DTGQ8lYnWnMlG3TQHqcvkDmqVfuX4sbgFzA7a5CpO8ECkW0FRT3qJIgyT8yZdxMBANnbz1VLWdLgsuoxSBCdv0lEpciOd0sqk3oj6la4cv6C93DJPvw13TQr7CfTGQ2eX+oSH+Jk3lGe1iYWcbYA6hpU9Fku44OhbSelHj1aiUH+s3bz95vYHDwjDoNZW8N8QKKKXPVOrCMteyCl8VBIk6PIRSjjJumUMFCfibDOasFg7i0HaI9LNEArqMNYrB+8ldIq/xpGwK23KAkCAwEAAaOCAeQwggHgMA4GA1UdDwEB/wQEAwIHgDAMBgNVHRMBAf8EAjAAMG0GA1UdIAEB/wRjMGEwXwYJKwYBBAGCNxUfMFIwUAYIKwYBBQUHAgIwRB5CAFQAQwBQAEEAIAAgAFQAcgB1AHMAdABlAGQAIAAgAFAAbABhAHQAZgBvAHIAbQAgACAASQBkAGUAbgB0AGkAdAB5MBAGA1UdJQQJMAcGBWeBBQgDMEoGA1UdEQEB/wRAMD6kPDA6MTgwDgYFZ4EFAgMMBWlkOjEzMBAGBWeBBQICDAdOUENUNnh4MBQGBWeBBQIBDAtpZDo0RTU0NDMwMDAfBgNVHSMEGDAWgBTi/ac6Qo2SkulUIB+srFhcNO6SPTAdBgNVHQ4EFgQUuiYcjdQypfz03n3Y6VMpu2DQ+twwgbIGCCsGAQUFBwEBBIGlMIGiMIGfBggrBgEFBQcwAoaBkmh0dHA6Ly9hemNzcHJvZGV1c2Fpa3B1Ymxpc2guYmxvYi5jb3JlLndpbmRvd3MubmV0L2V1cy1udGMta2V5aWQtOWZiYjc5YWEwZjUyNjI3OGJlZDE1MDkyOWE3MTcxZTk2YTM1YmVmNy9mYTYyNGYyMC0wZTRkLTQ4MzQtOGMxZS1iYjA5OWYwMTgxYzEuY2VyMA0GCSqGSIb3DQEBCwUAA4ICAQBxnJR8FvJeCjXFlSTjO5yTSJTVM8NOX0q2O5w0N65jAn0z5Atxr6NH01yBbTSjtCOhjIcuTwlR6u5LqQDE/uxmeXsN1xaERikD+x6dygjNj09ECmjhga2hjvYgEgw49P+RERJv9GK9gvfgIsgpbedieKKVwVv5y2qxsCZe4gdEMsa8meErPKuiTT3HhphBKf8D+6GA7mHrfSWD7HblUd3ymwAJUefpai/GXyxfx8myZR4jcqoUH3LyFNrrtUl4euw9IdT0KzDF1VfrWXeCNWeIuc3TcfwFhgQlCPn64cqmBBs676oPpp//Al0tfEfRGfTSH7cgJs1htlEdxmFi67BPp8bBOx8Wl6FltN/FHPkT+P4jIAIGwU2lg7/RZxUVNxMijXYDsvwiGEVwPPsfZ4ljoB+0knt8iMe0vhbv8TDop/vxSOR9w9dHg4kptwf+X5uI9+px9T4vMU3nhqYT1V8F0Bj6P9AkT86Y0bTh2V31emVe7MoPlDmIXMI+tIkfo5FrANxAI2aZ5MSAgof0QdWZI0LS8CuYN955gu/i+ZaF6DHMcJ4fzjfIfMEPCOTY/QNn091fcO9XVkbLFbrbWag11BXMVI0B9bXcFv8qT/t0/tXquMbjSKGr7PSVdPGtuhcmBA+sdcKE52viu1UX6iY6nTRoKlXne4b8DB8NKsavKFkG7zCCBuswggTToAMCAQICEzMAAAJuDyBFUK0XLToAAAAAAm4wDQYJKoZIhvcNAQELBQAwgYwxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdSZWRtb25kMR4wHAYDVQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xNjA0BgNVBAMTLU1pY3Jvc29mdCBUUE0gUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgMjAxNDAeFw0xOTAzMjEyMDI5NTlaFw0yNTAzMjEyMDI5NTlaMEExPzA9BgNVBAMTNkVVUy1OVEMtS0VZSUQtOUZCQjc5QUEwRjUyNjI3OEJFRDE1MDkyOUE3MTcxRTk2QTM1QkVGNzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK/3kJL1eroc6Y9m0gAbRpHLJuhOMu++qhq4gYLwBwnoyhB8JYEqw2/9D/baLUnLg4DQomwdAlARSxY0nVqa9rT2lKeO7+2C6foX3TkrMgp/7JyrXdv9Pz9LNvfKRmeuCCzoAF4k0pCXzeXdgCWVRaQR6MAhRc9nV6128GRsXpy6e8TL6B3B7qllURWTh3v2Lsr6cpEBAzhi3l0gVKh5YU8WktlNygLUfilrR60cUDlpE8WeP1kBTklKEySmNzVQz+O4ekwvgb9U2ZbNqvcM70PujHPTcLCMZwy7MVYSt1k7WauIDeHSqdryjFXSF/sWrzFTMkAWbDCDxLJ+RxNohF59tNBUleIiboxvYoga9TaWeE8b62a8sUAvxgTEQjs09C/DRsAOv9sI2IDQkLm5uiltoW4DDAC8sSjm9MtrkR/UUyQlFR7wGUaz9L8RlnwtEACP8O1Oo2vFhufpjSyjseRtVI9UfIY/SAukbUKyrKBnKoVogGh68GKCTfFWF6jEZOeU0v4WIW0l8mCTMK2h1iFv3iom4lLv037ESj4RJc0sX4VbFZe+TY/ylWmT9fjiumLx+YRdo/kd5N2QnyTWIVLrlvAzoJbRF3mk3Zcnm0fXBYw1p7ebA21VfTLX/X30M0LNzT/aIvWo7CQRDa821W58jbVYEcWWGvbANHETqt0rAgMBAAGjggGOMIIBijAOBgNVHQ8BAf8EBAMCAoQwGwYDVR0lBBQwEgYJKwYBB
AGCNxUkBgVngQUIAzAWBgNVHSAEDzANMAsGCSsGAQQBgjcVHzASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQWBBTi/ac6Qo2SkulUIB+srFhcNO6SPTAfBgNVHSMEGDAWgBR6jArOL0hiF+KU0a5VwVLscXSkVjBwBgNVHR8EaTBnMGWgY6Bhhl9odHRwOi8vd3d3Lm1pY3Jvc29mdC5jb20vcGtpb3BzL2NybC9NaWNyb3NvZnQlMjBUUE0lMjBSb290JTIwQ2VydGlmaWNhdGUlMjBBdXRob3JpdHklMjAyMDE0LmNybDB9BggrBgEFBQcBAQRxMG8wbQYIKwYBBQUHMAKGYWh0dHA6Ly93d3cubWljcm9zb2Z0LmNvbS9wa2lvcHMvY2VydHMvTWljcm9zb2Z0JTIwVFBNJTIwUm9vdCUyMENlcnRpZmljYXRlJTIwQXV0aG9yaXR5JTIwMjAxNC5jcnQwDQYJKoZIhvcNAQELBQADggIBAAtrPmUf7ZrsYrEDhR9ANAHVRkd+G06CWP6FgI8qxY7RWVfMABhaR/2NO5flJxcU2/YL6rg9RthnP3bQke4HD+JBZUc/9aOlr2d+cARq1e1lu5Ek0TjZekhWVrNn7x01+XmylauoQxr49Hcr7aYWX/ojqi6bCBAPkEprnGNhVg1qlqD+j+zjzn/2fFd794swOZuKDQc6hJ5yTNWpBf8XbaPqriLu9LnKpgIG85XDqzh1z4bWFzJOzsc37lRJr+aE69eLSnvKSg5LZ7HyJ6EQem9kJQR+tVQYQ23Vpru/v8FeTZTrOk/2wa+YBHmnzoymfE09HQe1Ou6Lj8AyO0y4EZcoZHeVqhqYVT2yz0Szscmgr0rE5jxF5jCZk8jlqQhhdBTCt6m5sXc04yRP3FPRQz1iGTDE6Lnf2QbgXN28lrv8PxY4OmD2v5izp3LY/T0+hdtw2YvPe6bB9WUbbqKFXKmqDMY/QrWUycMmCvM1APssLOnGY0Gvf7FmaFT0tJSMQ1SOx7PpB7H1PIFp1QYEaOOxbQnL5uAreXZb33HeKUFQcFkqz7Vxh5GP1RkXekt68FcGdxE3SylR+DKFq9cZKzbtHNhzUoTXvg3Pi2VN6ZnefWWZ0BgoHLTj5AalWx7DVCHntZUJlbVEMNc2pxgHPIerdOgRcm9gG0X6xkGYmUU4Z3B1YkFyZWFZATYAAQALAAYEcgAgnf/L82w4OuaZ+5ho3G3LidcVOIS+KAOSLBJBWL+tIq4AEAAQCAAAAAAAAQDRSxMlctSC4GKQCo/WMxIB0z9PGnTf9hqR6qwgxPX3gCeMyMO1lKSZSn9XFynOVbkL/0cli4o9PNYsbSmMvzRib2Q+CSW9ifGtUGO+F6wb4q6uGFM9mrHnsuP3EePI39i3v3wbQ/nI+EWRnZmvwhFl0jbI2t94/ZHh2EtUSldKa6qdf04ix65ZyamDIkDZUrHUE6yTQmwq2JdWBvNMHYtrkMTfcDY4F0fmBfi3r/XrwItsvcVmacMHEa/KhjxuqDrm40jU9WktmIB8PYq+0fQJP3Fa9MWVp9NvpP9WKJkkJbEhWAcf4QWxExmRbzLznAiumcx7+tcJAz8K/uKVdqj1aGNlcnRJbmZvWKH/VENHgBcAIgALB4pyA0jhOY+h3wBVxNfkTwhCQGZY9EekCR5errAEeZwAFCqZ9Vq/c49Bp8wP4cxMFC0fVa6NAAAABS+iWsK+VbntJXxXIQHn+f2DCwyHjgAiAAuZnP9vYa9pJD9Sn3Tksy9gpWbS3cZN6JpimSGuMbbqyAAiAAsg6l2XD6uaV+3Q3+QlOi0aHjwgdTzGWdDYGa4Y+2MENGhhdXRoRGF0YVkBZ+RTKdA6IGjRyvf3uwrpVOaw5iWXRfMvSCn3UPBQEfnCRQAAAAAImHBYytxLgbbhMN5Q3L6WACDnqJbtELtguKSec1TvSQ7kFv6OKn35ZNRuiGH8CqrWrqQBAwM5AQAgWQEA0UsTJXLUguBikAqP1jMSAdM/Txp03/YakeqsIMT194AnjMjDtZSkmUp/VxcpzlW5C/9HJYuKPTzWLG0pjL80Ym9kPgklvYnxrVBjvhesG+KurhhTPZqx57Lj9xHjyN/Yt798G0P5yPhFkZ2Zr8IRZdI2yNrfeP2R4dhLVEpXSmuqnX9OIseuWcmpgyJA2VKx1BOsk0JsKtiXVgbzTB2La5DE33A2OBdH5gX4t6/168CLbL3FZmnDBxGvyoY8bqg65uNI1PVpLZiAfD2KvtH0CT9xWvTFlafTb6T/ViiZJCWxIVgHH+EFsRMZkW8y85wIrpnMe/rXCQM/Cv7ilXao9SFDAQAB"
},
"type": "public-key"
}"""
    challenge = base64url_to_bytes(
"ZXlKaGJHY2lPaUpJVXpJMU5pSXNJblI1Y0NJNklrcFhWQ0o5LmV5SnBZWFFpT2pFMk1qUTBOak00TVRrc0ltVjRjQ0k2TVRZeU5EVTRNemd4T1N3aWMzVmlJam9pWW05aUluMC5RaHVrZ1k1dEwyZHM4dXdBcnRLUGFrSzdEanpkZm04OXlvU3g0Zl9DV09F"
    )
    rp_id = "webauthntest.azurewebsites.net"
    expected_origin = "https://webauthntest.azurewebsites.net"
    verification = verify_registration_response(
        credential=credential,
        expected_challenge=challenge,
        expected_origin=expected_origin,
        expected_rp_id=rp_id,
    )
    assert verification.fmt == AttestationFormat.TPM
    assert verification.credential_id == base64url_to_bytes(
        "56iW7RC7YLiknnNU70kO5Bb-jip9-WTUbohh_Aqq1q4"
    )
|
TPM Mfgr: NTC (Nuvoton Technology)
Mfgr Version: 1.3.2.8
TPM Version: 2.0
|
test_verify_attestation_dell_xps_13
|
python
|
duo-labs/py_webauthn
|
tests/test_verify_registration_response_tpm.py
|
https://github.com/duo-labs/py_webauthn/blob/master/tests/test_verify_registration_response_tpm.py
|
BSD-3-Clause
|
def test_verify_attestation_lenovo_carbon_x1(self) -> None:
    """
    TPM Mfgr: STM (ST Microelectronics)
    Mfgr Version: 73.8.17568.5511
    TPM Version: 2.0
    """
    credential = """{
"id": "kU6oEC95fTXAtpI6b2w69fQrKGntFFt1l_2ySjmndYM",
"rawId": "kU6oEC95fTXAtpI6b2w69fQrKGntFFt1l_2ySjmndYM",
"response": {
"clientDataJSON": "eyJ0eXBlIjoid2ViYXV0aG4uY3JlYXRlIiwiY2hhbGxlbmdlIjoiWlhsS2FHSkhZMmxQYVVwSlZYcEpNVTVwU1hOSmJsSTFZME5KTmtscmNGaFdRMG81TG1WNVNuQlpXRkZwVDJwRk1rMXFVVEJPZWxVd1RYcGpjMGx0VmpSalEwazJUVlJaZVU1RVZUVk9WRkY2VG5sM2FXTXpWbWxKYW05cFkyMDVjMkpIYkhWSmJqQXVjbkJaTTJwVVpVUjBjekE1VFhOSWJUSlZiWGx0WnpsWlpIWnZlbmMyY2xGUGVsRjZNelZpYkhsbU5BIiwib3JpZ2luIjoiaHR0cHM6Ly93ZWJhdXRobnRlc3QuYXp1cmV3ZWJzaXRlcy5uZXQiLCJjcm9zc09yaWdpbiI6ZmFsc2UsIm90aGVyX2tleXNfY2FuX2JlX2FkZGVkX2hlcmUiOiJkbyBub3QgY29tcGFyZSBjbGllbnREYXRhSlNPTiBhZ2FpbnN0IGEgdGVtcGxhdGUuIFNlZSBodHRwczovL2dvby5nbC95YWJQZXgifQ==",
"attestationObject": "o2NmbXRjdHBtZ2F0dFN0bXSmY2FsZzn//mNzaWdZAQBS7IZvydYyH/NN9PPmST/gE5sw4DV7WLKop7qSBd59uNSryIZSVgA4WjtzUVMD0ERl70gGruankY1iSswdB7HuHFxd37T9VEgyQCpRia0mdbeXmPKchaV1dMxQudgwHyMrvuDediSj2008LUZvb96ETgcDYrrwLyL4YJ0F3GOyVjq5IHlO76D7DK+lJtioPI6C8TfDFN4xBwvwRUX9xwlR0WsGs7cZ5BbT/A929YmuUUJl3bauS5/RnpwE2wOuW1ylk7ITyENtf201hRd0zk/G1aBL2HU7MzOgqmCizPxwnlUfCJBZ4lVig/MjRSyUCprg/8DlpHd3GfA9rJbFKazVY3ZlcmMyLjBjeDVjglkFxDCCBcAwggOooAMCAQICECpazRpKVEvzpzCY3XIaMBgwDQYJKoZIhvcNAQELBQAwQTE/MD0GA1UEAxM2TkNVLVNUTS1LRVlJRC0xQURCOTk0QUI1OEJFNTdBMENDOUI5MDBFNzg1MUUxQTQzQzA4NjYwMB4XDTIwMDMzMTE2MzE0MVoXDTI1MDMyMTIwMzAxNlowADCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAN3ldanDSXFPplfnem6/c7/F6a10Zq64YBwKzwrn5tof1SKFd/pKUfSmJpfy1Ic7CAvnGhLpk0pp5EYjPzfy5PNVGNDKwN0VVDIknryOyXKmsmxeRcZ1PX0L6ad/HNtCwXKRLHm+mL2tU6ZsKCqzLri8D5WxSU4UYBYUJ3OJJiVKz+NU5yhS22D4r/oLmGSelQjNGlqPkJA4wtvaMf34BOd8JhAe3M3+zD78c6VqJu2+30kDaGgY73zLJgsLom70T4y1irzncR6S9eNFplOgdNlqLMNlV5E9vPhozGryNF466CyQcbXNrYfvI3XXAznzjgf9KnicsE7xQAt1g0GOE50CAwEAAaOCAfMwggHvMA4GA1UdDwEB/wQEAwIHgDAMBgNVHRMBAf8EAjAAMG0GA1UdIAEB/wRjMGEwXwYJKwYBBAGCNxUfMFIwUAYIKwYBBQUHAgIwRB5CAFQAQwBQAEEAIAAgAFQAcgB1AHMAdABlAGQAIAAgAFAAbABhAHQAZgBvAHIAbQAgACAASQBkAGUAbgB0AGkAdAB5MBAGA1UdJQQJMAcGBWeBBQgDMFkGA1UdEQEB/wRPME2kSzBJMRYwFAYFZ4EFAgEMC2lkOjUzNTQ0RDIwMRcwFQYFZ4EFAgIMDFNUMzNIVFBIQUhDMDEWMBQGBWeBBQIDDAtpZDowMDQ5MDAwODAfBgNVHSMEGDAWgBRIdDampQdvRRJB3fKLJkLuw4CRNzAdBgNVHQ4EFgQUmfJ2PA1zXutWSVEKGT28u7RJU5AwgbIGCCsGAQUFBwEBBIGlMIGiMIGfBggrBgEFBQcwAoaBkmh0dHA6Ly9hemNzcHJvZG5jdWFpa3B1Ymxpc2guYmxvYi5jb3JlLndpbmRvd3MubmV0L25jdS1zdG0ta2V5aWQtMWFkYjk5NGFiNThiZTU3YTBjYzliOTAwZTc4NTFlMWE0M2MwODY2MC8wMzFmM2NhMS0zNDk0LTQzZjctOGFhOS02Mzc2ZGU2Y2Q1MDcuY2VyMA0GCSqGSIb3DQEBCwUAA4ICAQBL0UwTTUpq7kW04Y4AZ5k3ZJtkuMcjDCiCZumwA9UNyweszRyawhNHlcmgEp9r1xD9KBbMkgyIdTzzXkczF3XnnBMlqKMGeEvULaDgcvQ8FRQH8MPeolr+Rvh7OCX7MQyncQfaVUwXloi83FnifdF2Hblhp8ll2OYsz8UDTAV7R2RhF2jQJxe8SDzTetVaSM2SoVisTN3LU42VQqZ9UPI2PQVvipQcmV9TMpClJ+0jUWoa+KluPAnTP/zMPeK9/GTzFe4y5/AaoRg0GXJn5uWqGNWQvqhB22goAWMSz53S0esiKfJMRI7eFE1fKzpN7sPyc+alsiHAfpVLPMXYPW0C76uQz1wai9AkGqnCqQzflpjLdlEdeVyZoeE9YQTB8Nco1J5Dz7i5Sw6iIiHhTavIBY9crA4d95OW8RLyMvRs2KYZqNUiAeb+PxcqnA1Y+VC0MigzCAbHM+/ERRRVxPEJ+2sfG8VHCfkhGH7h5ZDYAVaX99Lp62YHWwT8yo6q54QftGJp/P5WybNxLcuze9w3raC4nRKr2DSyBqXaWelXP+0SxzXDqrzxG/BCQC2J4pmU8C+g5cI2sbLlyH5vwatrOdQLJDaOon+k3mLpXIZFKFmPpAjeKMEtSeLhhG/syshkZP3DYvBQ2ROiyXlrYqZGq42jTBaAN88TVXvxd67RB1kG7zCCBuswggTToAMCAQICEzMAAAKDoa4UZhh/t6YAAAAAAoMwDQYJKoZIhvcNAQELBQAwgYwxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdSZWRtb25kMR4wHAYDVQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xNjA0BgNVBAMTLU1pY3Jvc29mdCBUUE0gUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgMjAxNDAeFw0xOTAzMjEyMDMwMTZaFw0yNTAzMjEyMDMwMTZaMEExPzA9BgNVBAMTNk5DVS1TVE0tS0VZSUQtMUFEQjk5NEFCNThCRTU3QTBDQzlCOTAwRTc4NTFFMUE0M0MwODY2MDCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAM+oRDNS8oA9SXH/YRK8HAEMbT+Rq03eIYgtWS8qNdhBt144WxLCEY7koFhYTVK96nQJEwEVc0P131bKa4vPkFD1OpfMJlDUrBdNAvsEw3UHq3sBfuDIQ/vIOfEPgXtKXk2+lgyLhvVEwR0SspTKPpOofmYxjnnVlFoU0OAvXMqzvdNEoT/Bp06OpBMbqBAR27WBG2rn/ZPxh4Sg4lt+ehxgie7qtZoo46gYRFFSf6nrvbqhUHfHb99SaoD6F7XYvOyxePhU6xHBK34FtapqvjOLoxDSC7nDsw/Smm/ynlFzqBIyEgoTdqYbwQXMLtRMHn4Aya8zkq+cYGHBNOIyNC749G+F3mUCQpQfK1+nOaXk56Ty52VlPKSPVQHKMuVff5OPYaLyoIboabMnT7nZemlJ+kAjmNt/+VsW9invsNXyycuNwYRIkXEotJIfaLmKd3nEowntctVsUYLlliRaANLXx00N9mhte+6kBn5hD7VVvWjHUr4zdQCAjHMMd0mM90lZn4PfMmiz5L/PWc31UbMCfe/0TL96dh+s2PWAICGpo+W1euVPZJXe6DHRMM6aBHPpiyzLu8zySWxZsTeuEDxVJvYQYGrWRgD4cu+pku0d73LeiqUMiXdnyNqG2gDHURMSB4RhzNJeqUYkQyUlkyCxMvChj1akTF16GpxWp1yhAgMBAAGjggGOMIIBijAOBgNVHQ8BAf8EBAMCAoQwG
wYDVR0lBBQwEgYJKwYBBAGCNxUkBgVngQUIAzAWBgNVHSAEDzANMAsGCSsGAQQBgjcVHzASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQWBBRIdDampQdvRRJB3fKLJkLuw4CRNzAfBgNVHSMEGDAWgBR6jArOL0hiF+KU0a5VwVLscXSkVjBwBgNVHR8EaTBnMGWgY6Bhhl9odHRwOi8vd3d3Lm1pY3Jvc29mdC5jb20vcGtpb3BzL2NybC9NaWNyb3NvZnQlMjBUUE0lMjBSb290JTIwQ2VydGlmaWNhdGUlMjBBdXRob3JpdHklMjAyMDE0LmNybDB9BggrBgEFBQcBAQRxMG8wbQYIKwYBBQUHMAKGYWh0dHA6Ly93d3cubWljcm9zb2Z0LmNvbS9wa2lvcHMvY2VydHMvTWljcm9zb2Z0JTIwVFBNJTIwUm9vdCUyMENlcnRpZmljYXRlJTIwQXV0aG9yaXR5JTIwMjAxNC5jcnQwDQYJKoZIhvcNAQELBQADggIBADhBn0wB6bO8M8km/4ljkQZFq+uoi9cXByLKJcyaiX6SBES3Ngb2h4kUqxr+AUhuCY+1Q5e0g6VoG4cjooWqLvt256cUoQ4Y+3SuoRTdLkAbtCdF4+HaomY3CB37A7o78mmAqsEKwVWd5TqPBUgjVRJ3ouEfrw65gcJixoOTzaV1gxXRCXR+vv0b9NZUpv/lQ3hAA4Km84ixX6ZqEtmq6eqlCVqXcEC2KBZsngHdX1xaF/CBqftqgn3BP0fNTKNoJyCex0isCkOJVStSLAe0Cx3+m4G/QQxhO+K3HigdIeUx/jQNpEDciTDnS7/chRazDqKR/N7eQhZnLafu+ht1UIDGgAgzrdZjV2NEnBuh21iLYGTq0VvVw8t5x2FUpsixsEqJDtGXTngvw4a6QESmts5SZDo2rqYhh55brJCVVBCbKMhkTKv/8xLa4sDEIj3FwinYwa1N19CqI04P8wjCl2IcpvFge3Y90J97CQhf9c24zzBKINS++ECfkSMGjzdq58684f6nAb2ZNQHaaMP+10A8k7WVD7iLbXZC2IxvG3Uuwn4qZ3ZEU4lXsJkXF5VPRLSXv1X1EhkkHYkj6x6SibBD+ILKESKEo3xoV18//yWchxo8zvIOVwi9Qd26oEzlw8I0YVXFMS1M2SweBYdXVL4eNtnllCkkjPkruUV4EcgbZ3B1YkFyZWFZATYAAQALAAYEMgAgvp2ZrCI02tjJPUkn/x3nWGK3XUSNr2bb4HXRLCaaJ4IAEAAQCAAAAAAAAQDSx8GMC31CYGprKecBKWjvGW6VT1qPoLcmyLSyCt4CtkxiDLFrNEZIbAQX21vPEVN5FLkkHmDWIHHpYv0ntulRbs++mTC9AobVOZWLyE0dsa7O0XmvQ6dQJS73hTN1KwYN4ba4HSkS+oD+f6WYHg6U3mvSwjAen5VSTip3zMfJiKi+9MWhO23ie0FfiOy5wuQngiEwLl+1yZ/839D21YTNkzJZSlFjG97GKWxoNfnIt+JRWKAQNCdsjYBpIBocHcH1XA2P1Oc/1HkYvzW3mCbr8MlfOJ/MjlQyPMrevDQIavmH0JO1h9RafqhNif3yUqqqUFqDf7o/iCa09zDIJLdhaGNlcnRJbmZvWKH/VENHgBcAIgAL7yNBGBNscwdSxXqKvdiz7oDxaDVUGC0FKmasFYmSBKIAFPh8tTRReA9C0kfctSY0/2tFfcGAAAAAAA6jfNM7l08KqdFq+gF0z8k19eR9bgAiAAv8MZD4Gu2zZPB3bdwe8CfBmxgLOcXP4aYgnKf5zff0FgAiAAsdIkc4moJpmU1nycMFFeWU+UCQiX8d88BKFHk6dlDk5mhhdXRoRGF0YVkBZ+RTKdA6IGjRyvf3uwrpVOaw5iWXRfMvSCn3UPBQEfnCRQAAAACd3RgXr1pGcqK5Pj3ZUACpACCRTqgQL3l9NcC2kjpvbDr19Csoae0UW3WX/bJKOad1g6QBAwM5AQAgWQEA0sfBjAt9QmBqaynnASlo7xlulU9aj6C3Jsi0sgreArZMYgyxazRGSGwEF9tbzxFTeRS5JB5g1iBx6WL9J7bpUW7PvpkwvQKG1TmVi8hNHbGuztF5r0OnUCUu94UzdSsGDeG2uB0pEvqA/n+lmB4OlN5r0sIwHp+VUk4qd8zHyYiovvTFoTtt4ntBX4jsucLkJ4IhMC5ftcmf/N/Q9tWEzZMyWUpRYxvexilsaDX5yLfiUVigEDQnbI2AaSAaHB3B9VwNj9TnP9R5GL81t5gm6/DJXzifzI5UMjzK3rw0CGr5h9CTtYfUWn6oTYn98lKqqlBag3+6P4gmtPcwyCS3YSFDAQAB"
},
"type": "public-key"
}"""
    challenge = base64url_to_bytes(
"ZXlKaGJHY2lPaUpJVXpJMU5pSXNJblI1Y0NJNklrcFhWQ0o5LmV5SnBZWFFpT2pFMk1qUTBOelUwTXpjc0ltVjRjQ0k2TVRZeU5EVTVOVFF6Tnl3aWMzVmlJam9pY205c2JHbHVJbjAucnBZM2pUZUR0czA5TXNIbTJVbXltZzlZZHZvenc2clFPelF6MzVibHlmNA"
    )
    rp_id = "webauthntest.azurewebsites.net"
    expected_origin = "https://webauthntest.azurewebsites.net"
    verification = verify_registration_response(
        credential=credential,
        expected_challenge=challenge,
        expected_origin=expected_origin,
        expected_rp_id=rp_id,
    )
    assert verification.fmt == AttestationFormat.TPM
    assert verification.credential_id == base64url_to_bytes(
        "kU6oEC95fTXAtpI6b2w69fQrKGntFFt1l_2ySjmndYM"
    )
|
TPM Mfgr: STM (ST Microelectronics)
Mfgr Version: 73.8.17568.5511
TPM Version: 2.0
|
test_verify_attestation_lenovo_carbon_x1
|
python
|
duo-labs/py_webauthn
|
tests/test_verify_registration_response_tpm.py
|
https://github.com/duo-labs/py_webauthn/blob/master/tests/test_verify_registration_response_tpm.py
|
BSD-3-Clause
|
def patch_validate_certificate_chain_x509store_getter(func):
    """
    This is a very purpose-built decorator to help set a fixed time for X.509 certificate chain
    validation in unittests. It makes the following assumptions, all of which must be true for this
    decorator to remain useful:

    - X.509 certificate chain validation occurs in **webauthn/helpers/validate_certificate_chain.py::**`validate_certificate_chain`
    - `validate_certificate_chain(...)` uses `OpenSSL.crypto.X509Store` to verify certificate chains
    - **webauthn/helpers/__init__.py** continues to re-export `validate_certificate_chain`

    Usage:

    ```
    from unittest import TestCase
    from datetime import datetime

    from OpenSSL.crypto import X509Store

    class TestX509Validation(TestCase):
        @patch_validate_certificate_chain_x509store_getter
        def test_validate_x509_chain(self, patched_x509store: X509Store):
            patched_x509store.set_time(datetime(2021, 9, 1, 0, 0, 0))
            # ...
    ```
    """

    def wrapper(*args, **kwargs):
        """
        Using `inspect.getmodule(...)` below helps deal with the fact that, in Python 3.9 and
        Python 3.10, `@patch("webauthn.helpers.validate_certificate_chain._generate_new_cert_store")`
        errors out because `webauthn.helpers.validate_certificate_chain` is understood to be the
        method re-exported via `__all__` in **webauthn/helpers/__init__.py**, not the module of
        the same name.
        """
        with patch.object(
            inspect.getmodule(validate_certificate_chain),
            "_generate_new_cert_store",
        ) as mock_generate_new_cert_store:
            new_cert_store = X509Store()
            mock_generate_new_cert_store.return_value = new_cert_store
            return func(*args, new_cert_store, **kwargs)

    return wrapper
|
This is a very purpose-built decorator to help set a fixed time for X.509 certificate chain
validation in unittests. It makes the following assumptions, all of which must be true for this
decorator to remain useful:
- X.509 certificate chain validation occurs in **webauthn/helpers/validate_certificate_chain.py::**`validate_certificate_chain`
- `validate_certificate_chain(...)` uses `OpenSSL.crypto.X509Store` to verify certificate chains
- **webauthn/helpers/__init__.py** continues to re-export `validate_certificate_chain`
Usage:
```
from unittest import TestCase
from datetime import datetime
from OpenSSL.crypto import X509Store
class TestX509Validation(TestCase):
@patch_validate_certificate_chain_x509store_getter
def test_validate_x509_chain(self, patched_x509store: X509Store):
patched_x509store.set_time(datetime(2021, 9, 1, 0, 0, 0))
# ...
```
|
patch_validate_certificate_chain_x509store_getter
|
python
|
duo-labs/py_webauthn
|
tests/helpers/x509store.py
|
https://github.com/duo-labs/py_webauthn/blob/master/tests/helpers/x509store.py
|
BSD-3-Clause
|
def wrapper(*args, **kwargs):
"""
Using `inspect.getmodule(...)` below helps deal with the fact that, in Python 3.9 and
Python 3.10, `@patch("webauthn.helpers.validate_certificate_chain._generate_new_cert_store")`
errors out because `webauthn.helpers.validate_certificate_chain` is understood to be the method
re-exported via `__all__` in **webauthn/helpers/__init__.py**, not the module of the same name.
"""
with patch.object(
inspect.getmodule(validate_certificate_chain),
"_generate_new_cert_store",
) as mock_generate_new_cert_store:
new_cert_store = X509Store()
mock_generate_new_cert_store.return_value = new_cert_store
return func(*args, new_cert_store, **kwargs)
|
Using `inspect.getmodule(...)` below helps deal with the fact that, in Python 3.9 and
Python 3.10, `@patch("webauthn.helpers.validate_certificate_chain._generate_new_cert_store")`
errors out because `webauthn.helpers.validate_certificate_chain` is understood to be the method
re-exported via `__all__` in **webauthn/helpers/__init__.py**, not the module of the same name.
|
wrapper
|
python
|
duo-labs/py_webauthn
|
tests/helpers/x509store.py
|
https://github.com/duo-labs/py_webauthn/blob/master/tests/helpers/x509store.py
|
BSD-3-Clause
|
def generate_authentication_options(
*,
rp_id: str,
challenge: Optional[bytes] = None,
timeout: int = 60000,
allow_credentials: Optional[List[PublicKeyCredentialDescriptor]] = None,
user_verification: UserVerificationRequirement = UserVerificationRequirement.PREFERRED,
) -> PublicKeyCredentialRequestOptions:
"""Generate options for retrieving a credential via navigator.credentials.get()
Args:
`rp_id`: The Relying Party's unique identifier as specified in attestations.
(optional) `challenge`: A byte sequence for the authenticator to return back in its response. Defaults to 64 random bytes.
(optional) `timeout`: How long in milliseconds the browser should give the user to choose an authenticator. This value is a *hint* and may be ignored by the browser.
(optional) `allow_credentials`: A list of credentials registered to the user.
(optional) `user_verification`: The RP's preference for the authenticator's enforcement of the "user verified" flag.
Returns:
Authentication options ready for the browser. Consider using `helpers.options_to_json()` in this library to quickly convert the options to JSON.
"""
if not rp_id:
raise ValueError("rp_id cannot be an empty string")
########
# Set defaults for required values
########
if not challenge:
challenge = generate_challenge()
if not allow_credentials:
allow_credentials = []
return PublicKeyCredentialRequestOptions(
rp_id=rp_id,
challenge=challenge,
timeout=timeout,
allow_credentials=allow_credentials,
user_verification=user_verification,
)
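

# Illustrative usage sketch, not part of the library: `user_credential_ids` is a
# hypothetical list of credential IDs your app stored at registration time.
def _example_generate_authentication_options(
    user_credential_ids: List[bytes],
) -> PublicKeyCredentialRequestOptions:
    options = generate_authentication_options(
        rp_id="example.com",
        allow_credentials=[
            PublicKeyCredentialDescriptor(id=cred_id) for cred_id in user_credential_ids
        ],
        user_verification=UserVerificationRequirement.REQUIRED,
    )
    # Serialize for the browser with helpers.options_to_json(options) if desired
    return options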
|
Generate options for retrieving a credential via navigator.credentials.get()
Args:
`rp_id`: The Relying Party's unique identifier as specified in attestations.
(optional) `challenge`: A byte sequence for the authenticator to return back in its response. Defaults to 64 random bytes.
(optional) `timeout`: How long in milliseconds the browser should give the user to choose an authenticator. This value is a *hint* and may be ignored by the browser.
(optional) `allow_credentials`: A list of credentials registered to the user.
(optional) `user_verification`: The RP's preference for the authenticator's enforcement of the "user verified" flag.
Returns:
Authentication options ready for the browser. Consider using `helpers.options_to_json()` in this library to quickly convert the options to JSON.
|
generate_authentication_options
|
python
|
duo-labs/py_webauthn
|
webauthn/authentication/generate_authentication_options.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/authentication/generate_authentication_options.py
|
BSD-3-Clause
|
def verify_authentication_response(
*,
credential: Union[str, dict, AuthenticationCredential],
expected_challenge: bytes,
expected_rp_id: str,
expected_origin: Union[str, List[str]],
credential_public_key: bytes,
credential_current_sign_count: int,
require_user_verification: bool = False,
) -> VerifiedAuthentication:
"""Verify a response from navigator.credentials.get()
Args:
- `credential`: The value returned from `navigator.credentials.get()`. Can be either a
          stringified JSON object, a plain dict, or an instance of AuthenticationCredential
- `expected_challenge`: The challenge passed to the authenticator within the preceding
authentication options.
- `expected_rp_id`: The Relying Party's unique identifier as specified in the preceding
authentication options.
- `expected_origin`: The domain, with HTTP protocol (e.g. "https://domain.here"), on which
the authentication ceremony should have occurred.
- `credential_public_key`: The public key for the credential's ID as provided in a
preceding authenticator registration ceremony.
- `credential_current_sign_count`: The current known number of times the authenticator was
used.
- (optional) `require_user_verification`: Whether or not to require that the authenticator
verified the user.
Returns:
Information about the authenticator
Raises:
`helpers.exceptions.InvalidAuthenticationResponse` if the response cannot be verified
"""
if isinstance(credential, str) or isinstance(credential, dict):
credential = parse_authentication_credential_json(credential)
# FIDO-specific check
if bytes_to_base64url(credential.raw_id) != credential.id:
raise InvalidAuthenticationResponse("id and raw_id were not equivalent")
# FIDO-specific check
if credential.type != PublicKeyCredentialType.PUBLIC_KEY:
raise InvalidAuthenticationResponse(
f'Unexpected credential type "{credential.type}", expected "public-key"'
)
response = credential.response
client_data_bytes = byteslike_to_bytes(response.client_data_json)
authenticator_data_bytes = byteslike_to_bytes(response.authenticator_data)
signature_bytes = byteslike_to_bytes(response.signature)
client_data = parse_client_data_json(client_data_bytes)
if client_data.type != ClientDataType.WEBAUTHN_GET:
raise InvalidAuthenticationResponse(
f'Unexpected client data type "{client_data.type}", expected "{ClientDataType.WEBAUTHN_GET}"'
)
if expected_challenge != client_data.challenge:
raise InvalidAuthenticationResponse("Client data challenge was not expected challenge")
if isinstance(expected_origin, str):
if expected_origin != client_data.origin:
raise InvalidAuthenticationResponse(
f'Unexpected client data origin "{client_data.origin}", expected "{expected_origin}"'
)
else:
        if client_data.origin not in expected_origin:
            raise InvalidAuthenticationResponse(
                f'Unexpected client data origin "{client_data.origin}", expected one of {expected_origin}'
            )
if client_data.token_binding:
status = client_data.token_binding.status
if status not in expected_token_binding_statuses:
raise InvalidAuthenticationResponse(
f'Unexpected token_binding status of "{status}", expected one of "{",".join(expected_token_binding_statuses)}"'
)
auth_data = parse_authenticator_data(authenticator_data_bytes)
# Generate a hash of the expected RP ID for comparison
expected_rp_id_hash = hashlib.sha256()
expected_rp_id_hash.update(expected_rp_id.encode("utf-8"))
expected_rp_id_hash_bytes = expected_rp_id_hash.digest()
if auth_data.rp_id_hash != expected_rp_id_hash_bytes:
raise InvalidAuthenticationResponse("Unexpected RP ID hash")
if not auth_data.flags.up:
raise InvalidAuthenticationResponse("User was not present during authentication")
if require_user_verification and not auth_data.flags.uv:
raise InvalidAuthenticationResponse(
"User verification is required but user was not verified during authentication"
)
if (
auth_data.sign_count > 0 or credential_current_sign_count > 0
) and auth_data.sign_count <= credential_current_sign_count:
# Require the sign count to have been incremented over what was reported by the
# authenticator the last time this credential was used, otherwise this might be
# a replay attack
raise InvalidAuthenticationResponse(
f"Response sign count of {auth_data.sign_count} was not greater than current count of {credential_current_sign_count}"
)
client_data_hash = hashlib.sha256()
client_data_hash.update(client_data_bytes)
client_data_hash_bytes = client_data_hash.digest()
signature_base = authenticator_data_bytes + client_data_hash_bytes
try:
decoded_public_key = decode_credential_public_key(credential_public_key)
crypto_public_key = decoded_public_key_to_cryptography(decoded_public_key)
verify_signature(
public_key=crypto_public_key,
signature_alg=decoded_public_key.alg,
signature=signature_bytes,
data=signature_base,
)
except InvalidSignature:
raise InvalidAuthenticationResponse("Could not verify authentication signature")
parsed_backup_flags = parse_backup_flags(auth_data.flags)
return VerifiedAuthentication(
credential_id=credential.raw_id,
new_sign_count=auth_data.sign_count,
credential_device_type=parsed_backup_flags.credential_device_type,
credential_backed_up=parsed_backup_flags.credential_backed_up,
user_verified=auth_data.flags.uv,
)
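

# Illustrative usage sketch, not part of the library: all inputs are hypothetical
# values your app persisted during the preceding registration and options flows.
def _example_verify_assertion(
    credential_json: str,
    expected_challenge: bytes,
    stored_public_key: bytes,
    stored_sign_count: int,
) -> int:
    verification = verify_authentication_response(
        credential=credential_json,
        expected_challenge=expected_challenge,
        expected_rp_id="example.com",
        expected_origin="https://example.com",
        credential_public_key=stored_public_key,
        credential_current_sign_count=stored_sign_count,
        require_user_verification=True,
    )
    # Persist the new sign count to help detect cloned authenticators later
    return verification.new_sign_count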
|
Verify a response from navigator.credentials.get()
Args:
- `credential`: The value returned from `navigator.credentials.get()`. Can be either a
stringified JSON object, a plain dict, or an instance of AuthenticationCredential
- `expected_challenge`: The challenge passed to the authenticator within the preceding
authentication options.
- `expected_rp_id`: The Relying Party's unique identifier as specified in the preceding
authentication options.
- `expected_origin`: The domain, with HTTP protocol (e.g. "https://domain.here"), on which
the authentication ceremony should have occurred.
- `credential_public_key`: The public key for the credential's ID as provided in a
preceding authenticator registration ceremony.
- `credential_current_sign_count`: The current known number of times the authenticator was
used.
- (optional) `require_user_verification`: Whether or not to require that the authenticator
verified the user.
Returns:
Information about the authenticator
Raises:
`helpers.exceptions.InvalidAuthenticationResponse` if the response cannot be verified
|
verify_authentication_response
|
python
|
duo-labs/py_webauthn
|
webauthn/authentication/verify_authentication_response.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/authentication/verify_authentication_response.py
|
BSD-3-Clause
|
def aaguid_to_string(val: bytes) -> str:
"""
Take aaguid bytes and convert them to a GUID string
"""
if len(val) != 16:
raise ValueError(f"AAGUID was {len(val)} bytes, expected 16 bytes")
# Convert to a hexadecimal string representation
to_hex = codecs.encode(val, encoding="hex").decode("utf-8")
# Split up the hex string into segments
# 8 chars
seg_1 = to_hex[0:8]
# 4 chars
seg_2 = to_hex[8:12]
# 4 chars
seg_3 = to_hex[12:16]
# 4 chars
seg_4 = to_hex[16:20]
# 12 chars
seg_5 = to_hex[20:32]
# "00000000-0000-0000-0000-000000000000"
return f"{seg_1}-{seg_2}-{seg_3}-{seg_4}-{seg_5}"
|
Take aaguid bytes and convert them to a GUID string
|
aaguid_to_string
|
python
|
duo-labs/py_webauthn
|
webauthn/helpers/aaguid_to_string.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/helpers/aaguid_to_string.py
|
BSD-3-Clause
|
def is_rsa_pkcs(alg_id: COSEAlgorithmIdentifier) -> bool:
"""Determine if the specified COSE algorithm ID denotes an RSA PKCSv1 public key"""
return alg_id in (
COSEAlgorithmIdentifier.RSASSA_PKCS1_v1_5_SHA_1,
COSEAlgorithmIdentifier.RSASSA_PKCS1_v1_5_SHA_256,
COSEAlgorithmIdentifier.RSASSA_PKCS1_v1_5_SHA_384,
COSEAlgorithmIdentifier.RSASSA_PKCS1_v1_5_SHA_512,
)
|
Determine if the specified COSE algorithm ID denotes an RSA PKCSv1 public key
|
is_rsa_pkcs
|
python
|
duo-labs/py_webauthn
|
webauthn/helpers/algorithms.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/helpers/algorithms.py
|
BSD-3-Clause
|
def is_rsa_pss(alg_id: COSEAlgorithmIdentifier) -> bool:
"""Determine if the specified COSE algorithm ID denotes an RSA PSS public key"""
return alg_id in (
COSEAlgorithmIdentifier.RSASSA_PSS_SHA_256,
COSEAlgorithmIdentifier.RSASSA_PSS_SHA_384,
COSEAlgorithmIdentifier.RSASSA_PSS_SHA_512,
)
|
Determine if the specified COSE algorithm ID denotes an RSA PSS public key
|
is_rsa_pss
|
python
|
duo-labs/py_webauthn
|
webauthn/helpers/algorithms.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/helpers/algorithms.py
|
BSD-3-Clause
|
def get_ec2_sig_alg(alg_id: COSEAlgorithmIdentifier) -> EllipticCurveSignatureAlgorithm:
"""Turn an "ECDSA" COSE algorithm identifier into a corresponding signature
algorithm
"""
if alg_id == COSEAlgorithmIdentifier.ECDSA_SHA_256:
return ECDSA(SHA256())
if alg_id == COSEAlgorithmIdentifier.ECDSA_SHA_512:
return ECDSA(SHA512())
raise UnsupportedAlgorithm(f"Unrecognized EC2 signature alg {alg_id}")
|
Turn an "ECDSA" COSE algorithm identifier into a corresponding signature
algorithm
|
get_ec2_sig_alg
|
python
|
duo-labs/py_webauthn
|
webauthn/helpers/algorithms.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/helpers/algorithms.py
|
BSD-3-Clause
|
def get_ec2_curve(crv_id: COSECRV) -> EllipticCurve:
"""Turn an EC2 COSE crv identifier into a corresponding curve"""
if crv_id == COSECRV.P256:
return SECP256R1()
elif crv_id == COSECRV.P384:
return SECP384R1()
elif crv_id == COSECRV.P521:
return SECP521R1()
raise UnsupportedEC2Curve(f"Unrecognized EC2 curve {crv_id}")
|
Turn an EC2 COSE crv identifier into a corresponding curve
|
get_ec2_curve
|
python
|
duo-labs/py_webauthn
|
webauthn/helpers/algorithms.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/helpers/algorithms.py
|
BSD-3-Clause
|
def get_rsa_pkcs1_sig_alg(alg_id: COSEAlgorithmIdentifier) -> HashAlgorithm:
"""Turn an "RSASSA_PKCS1" COSE algorithm identifier into a corresponding signature
algorithm
"""
if alg_id == COSEAlgorithmIdentifier.RSASSA_PKCS1_v1_5_SHA_1:
return SHA1()
if alg_id == COSEAlgorithmIdentifier.RSASSA_PKCS1_v1_5_SHA_256:
return SHA256()
if alg_id == COSEAlgorithmIdentifier.RSASSA_PKCS1_v1_5_SHA_384:
return SHA384()
if alg_id == COSEAlgorithmIdentifier.RSASSA_PKCS1_v1_5_SHA_512:
return SHA512()
raise UnsupportedAlgorithm(f"Unrecognized RSA PKCS1 signature alg {alg_id}")
|
Turn an "RSASSA_PKCS1" COSE algorithm identifier into a corresponding signature
algorithm
|
get_rsa_pkcs1_sig_alg
|
python
|
duo-labs/py_webauthn
|
webauthn/helpers/algorithms.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/helpers/algorithms.py
|
BSD-3-Clause
|
def get_rsa_pss_sig_alg(alg_id: COSEAlgorithmIdentifier) -> HashAlgorithm:
"""Turn an "RSASSA_PSS" COSE algorithm identifier into a corresponding signature
algorithm
"""
if alg_id == COSEAlgorithmIdentifier.RSASSA_PSS_SHA_256:
return SHA256()
if alg_id == COSEAlgorithmIdentifier.RSASSA_PSS_SHA_384:
return SHA384()
if alg_id == COSEAlgorithmIdentifier.RSASSA_PSS_SHA_512:
return SHA512()
raise UnsupportedAlgorithm(f"Unrecognized RSA PSS signature alg {alg_id}")
|
Turn an "RSASSA_PSS" COSE algorithm identifier into a corresponding signature
algorithm
|
get_rsa_pss_sig_alg
|
python
|
duo-labs/py_webauthn
|
webauthn/helpers/algorithms.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/helpers/algorithms.py
|
BSD-3-Clause
|
def byteslike_to_bytes(val: Union[bytes, memoryview]) -> bytes:
"""
Massage bytes subclasses into bytes for ease of concatenation, comparison, etc...
"""
if isinstance(val, memoryview):
val = val.tobytes()
return bytes(val)
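

# Illustrative sketch, not part of the library: both memoryviews and plain
# bytes normalize to exactly `bytes`, so downstream concatenation and
# comparison are safe.
assert byteslike_to_bytes(memoryview(b"abc")) == b"abc"
assert type(byteslike_to_bytes(memoryview(b"abc"))) is bytes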
|
Massage bytes subclasses into bytes for ease of concatenation, comparison, etc...
|
byteslike_to_bytes
|
python
|
duo-labs/py_webauthn
|
webauthn/helpers/byteslike_to_bytes.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/helpers/byteslike_to_bytes.py
|
BSD-3-Clause
|
def decode_credential_public_key(
key: bytes,
) -> Union[DecodedOKPPublicKey, DecodedEC2PublicKey, DecodedRSAPublicKey]:
"""
Decode a CBOR-encoded public key and turn it into a data structure.
Supports OKP, EC2, and RSA public keys
"""
# Occasionally we might be given a public key in an "uncompressed" format,
# typically from older U2F security keys. As per the FIDO spec this is indicated by
# a leading 0x04 "uncompressed point compression method" format byte. In that case
# we need to fill in some blanks to turn it into a full EC2 key for signature
# verification
#
# See https://fidoalliance.org/specs/fido-v2.0-id-20180227/fido-registry-v2.0-id-20180227.html#public-key-representation-formats
if key[0] == 0x04:
return DecodedEC2PublicKey(
kty=COSEKTY.EC2,
alg=COSEAlgorithmIdentifier.ECDSA_SHA_256,
crv=COSECRV.P256,
x=key[1:33],
y=key[33:65],
)
decoded_key: dict = parse_cbor(key)
kty = decoded_key[COSEKey.KTY]
alg = decoded_key[COSEKey.ALG]
if not kty:
raise InvalidPublicKeyStructure("Credential public key missing kty")
if not alg:
raise InvalidPublicKeyStructure("Credential public key missing alg")
if kty == COSEKTY.OKP:
crv = decoded_key[COSEKey.CRV]
x = decoded_key[COSEKey.X]
if not crv:
raise InvalidPublicKeyStructure("OKP credential public key missing crv")
if not x:
raise InvalidPublicKeyStructure("OKP credential public key missing x")
return DecodedOKPPublicKey(
kty=kty,
alg=alg,
crv=crv,
x=x,
)
elif kty == COSEKTY.EC2:
crv = decoded_key[COSEKey.CRV]
x = decoded_key[COSEKey.X]
y = decoded_key[COSEKey.Y]
if not crv:
raise InvalidPublicKeyStructure("EC2 credential public key missing crv")
if not x:
raise InvalidPublicKeyStructure("EC2 credential public key missing x")
if not y:
raise InvalidPublicKeyStructure("EC2 credential public key missing y")
return DecodedEC2PublicKey(
kty=kty,
alg=alg,
crv=crv,
x=x,
y=y,
)
elif kty == COSEKTY.RSA:
n = decoded_key[COSEKey.N]
e = decoded_key[COSEKey.E]
if not n:
raise InvalidPublicKeyStructure("RSA credential public key missing n")
if not e:
raise InvalidPublicKeyStructure("RSA credential public key missing e")
return DecodedRSAPublicKey(
kty=kty,
alg=alg,
n=n,
e=e,
)
raise UnsupportedPublicKeyType(f'Unsupported credential public key type "{kty}"')
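

# Illustrative sketch, not part of the library: branch on the decoded key type
# of a stored credential public key (`credential_public_key` is hypothetical).
def _example_describe_key(credential_public_key: bytes) -> str:
    decoded = decode_credential_public_key(credential_public_key)
    if isinstance(decoded, DecodedEC2PublicKey):
        return f"EC2 key on curve {decoded.crv}, alg {decoded.alg}"
    if isinstance(decoded, DecodedRSAPublicKey):
        return f"RSA key with a {len(decoded.n) * 8}-bit modulus"
    return f"OKP key on curve {decoded.crv}"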
|
Decode a CBOR-encoded public key and turn it into a data structure.
Supports OKP, EC2, and RSA public keys
|
decode_credential_public_key
|
python
|
duo-labs/py_webauthn
|
webauthn/helpers/decode_credential_public_key.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/helpers/decode_credential_public_key.py
|
BSD-3-Clause
|
def encode_cbor(val: Any) -> bytes:
"""
Attempt to encode data into CBOR.
Raises:
        `helpers.exceptions.InvalidCBORData` if data cannot be encoded
"""
try:
to_return = cbor2.dumps(val)
except Exception as exc:
raise InvalidCBORData("Data could not be encoded to CBOR") from exc
return to_return
|
Attempt to encode data into CBOR.
Raises:
`helpers.exceptions.InvalidCBORData` if data cannot be encoded
|
encode_cbor
|
python
|
duo-labs/py_webauthn
|
webauthn/helpers/encode_cbor.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/helpers/encode_cbor.py
|
BSD-3-Clause
|
def hash_by_alg(to_hash: bytes, alg: Optional[COSEAlgorithmIdentifier] = None) -> bytes:
"""
Generate a hash of `to_hash` by the specified COSE algorithm ID. Defaults to hashing
with SHA256
"""
    # Default to SHA256 for hashing (avoid shadowing the `hash` builtin)
    hasher = hashlib.sha256()
    if alg in SHA_384:
        hasher = hashlib.sha384()
    elif alg in SHA_512:
        hasher = hashlib.sha512()
    elif alg in SHA_1:
        hasher = hashlib.sha1()
    hasher.update(to_hash)
    return hasher.digest()
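

# Illustrative sketch, not part of the library: with no algorithm specified the
# helper falls back to SHA256, so these digests match.
import hashlib as _hashlib
assert hash_by_alg(b"data") == _hashlib.sha256(b"data").digest()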
|
Generate a hash of `to_hash` by the specified COSE algorithm ID. Defaults to hashing
with SHA256
|
hash_by_alg
|
python
|
duo-labs/py_webauthn
|
webauthn/helpers/hash_by_alg.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/helpers/hash_by_alg.py
|
BSD-3-Clause
|
def options_to_json(
options: Union[
PublicKeyCredentialCreationOptions,
PublicKeyCredentialRequestOptions,
]
) -> str:
"""
Prepare options for transmission to the front end as JSON
"""
if isinstance(options, PublicKeyCredentialCreationOptions):
_rp = {"name": options.rp.name}
if options.rp.id:
_rp["id"] = options.rp.id
_user: Dict[str, Any] = {
"id": bytes_to_base64url(options.user.id),
"name": options.user.name,
"displayName": options.user.display_name,
}
reg_to_return: Dict[str, Any] = {
"rp": _rp,
"user": _user,
"challenge": bytes_to_base64url(options.challenge),
"pubKeyCredParams": [
{"type": param.type, "alg": param.alg} for param in options.pub_key_cred_params
],
}
# Begin handling optional values
if options.timeout is not None:
reg_to_return["timeout"] = options.timeout
if options.exclude_credentials is not None:
_excluded = options.exclude_credentials
json_excluded = []
for cred in _excluded:
json_excluded_cred: Dict[str, Any] = {
"id": bytes_to_base64url(cred.id),
"type": cred.type.value,
}
if cred.transports:
json_excluded_cred["transports"] = [
transport.value for transport in cred.transports
]
json_excluded.append(json_excluded_cred)
reg_to_return["excludeCredentials"] = json_excluded
if options.authenticator_selection is not None:
_selection = options.authenticator_selection
json_selection: Dict[str, Any] = {}
if _selection.authenticator_attachment is not None:
json_selection["authenticatorAttachment"] = (
_selection.authenticator_attachment.value
)
if _selection.resident_key is not None:
json_selection["residentKey"] = _selection.resident_key.value
if _selection.require_resident_key is not None:
json_selection["requireResidentKey"] = _selection.require_resident_key
if _selection.user_verification is not None:
json_selection["userVerification"] = _selection.user_verification.value
reg_to_return["authenticatorSelection"] = json_selection
if options.attestation is not None:
reg_to_return["attestation"] = options.attestation.value
if options.hints is not None:
reg_to_return["hints"] = [hint.value for hint in options.hints]
return json.dumps(reg_to_return)
if isinstance(options, PublicKeyCredentialRequestOptions):
auth_to_return: Dict[str, Any] = {"challenge": bytes_to_base64url(options.challenge)}
if options.timeout is not None:
auth_to_return["timeout"] = options.timeout
if options.rp_id is not None:
auth_to_return["rpId"] = options.rp_id
if options.allow_credentials is not None:
_allowed = options.allow_credentials
json_allowed = []
for cred in _allowed:
json_allowed_cred: Dict[str, Any] = {
"id": bytes_to_base64url(cred.id),
"type": cred.type.value,
}
if cred.transports:
json_allowed_cred["transports"] = [
transport.value for transport in cred.transports
]
json_allowed.append(json_allowed_cred)
auth_to_return["allowCredentials"] = json_allowed
if options.user_verification:
auth_to_return["userVerification"] = options.user_verification.value
return json.dumps(auth_to_return)
raise TypeError(
"Options was not instance of PublicKeyCredentialCreationOptions or PublicKeyCredentialRequestOptions"
)
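

# Illustrative usage sketch; assumes `generate_authentication_options` remains
# exported from the top-level `webauthn` package.
def _example_options_to_json() -> str:
    from webauthn import generate_authentication_options
    options = generate_authentication_options(rp_id="example.com")
    # e.g. '{"challenge": "...", "timeout": 60000, "rpId": "example.com", ...}'
    return options_to_json(options)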
|
Prepare options for transmission to the front end as JSON
|
options_to_json
|
python
|
duo-labs/py_webauthn
|
webauthn/helpers/options_to_json.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/helpers/options_to_json.py
|
BSD-3-Clause
|
def parse_attestation_object(val: bytes) -> AttestationObject:
"""
Decode and peel apart the CBOR-encoded blob `response.attestationObject` into
structured data.
"""
attestation_dict = parse_cbor(val)
decoded_attestation_object = AttestationObject(
fmt=attestation_dict["fmt"],
auth_data=parse_authenticator_data(attestation_dict["authData"]),
)
if "attStmt" in attestation_dict:
decoded_attestation_object.att_stmt = parse_attestation_statement(
attestation_dict["attStmt"]
)
return decoded_attestation_object
|
Decode and peel apart the CBOR-encoded blob `response.attestationObject` into
structured data.
|
parse_attestation_object
|
python
|
duo-labs/py_webauthn
|
webauthn/helpers/parse_attestation_object.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/helpers/parse_attestation_object.py
|
BSD-3-Clause
|
def parse_attestation_statement(val: dict) -> AttestationStatement:
"""
Turn `response.attestationObject.attStmt` into structured data
"""
attestation_statement = AttestationStatement()
# Populate optional fields that may exist in the attestation statement
if "sig" in val:
attestation_statement.sig = val["sig"]
if "x5c" in val:
attestation_statement.x5c = val["x5c"]
if "response" in val:
attestation_statement.response = val["response"]
if "alg" in val:
attestation_statement.alg = val["alg"]
if "ver" in val:
attestation_statement.ver = val["ver"]
if "certInfo" in val:
attestation_statement.cert_info = val["certInfo"]
if "pubArea" in val:
attestation_statement.pub_area = val["pubArea"]
return attestation_statement
|
Turn `response.attestationObject.attStmt` into structured data
|
parse_attestation_statement
|
python
|
duo-labs/py_webauthn
|
webauthn/helpers/parse_attestation_statement.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/helpers/parse_attestation_statement.py
|
BSD-3-Clause
|
def parse_authentication_credential_json(json_val: Union[str, dict]) -> AuthenticationCredential:
"""
Parse a JSON form of an authentication credential, as either a stringified JSON object or a
plain dict, into an instance of AuthenticationCredential
"""
if isinstance(json_val, str):
try:
json_val = json.loads(json_val)
except JSONDecodeError:
raise InvalidJSONStructure("Unable to decode credential as JSON")
if not isinstance(json_val, dict):
raise InvalidJSONStructure("Credential was not a JSON object")
cred_id = json_val.get("id")
if not isinstance(cred_id, str):
raise InvalidJSONStructure("Credential missing required id")
cred_raw_id = json_val.get("rawId")
if not isinstance(cred_raw_id, str):
raise InvalidJSONStructure("Credential missing required rawId")
cred_response = json_val.get("response")
if not isinstance(cred_response, dict):
raise InvalidJSONStructure("Credential missing required response")
response_client_data_json = cred_response.get("clientDataJSON")
if not isinstance(response_client_data_json, str):
raise InvalidJSONStructure("Credential response missing required clientDataJSON")
response_authenticator_data = cred_response.get("authenticatorData")
if not isinstance(response_authenticator_data, str):
raise InvalidJSONStructure("Credential response missing required authenticatorData")
response_signature = cred_response.get("signature")
if not isinstance(response_signature, str):
raise InvalidJSONStructure("Credential response missing required signature")
cred_type = json_val.get("type")
try:
# Simply try to get the single matching Enum. We'll set the literal value below assuming
# the code can get past here (this is basically a mypy optimization)
PublicKeyCredentialType(cred_type)
except ValueError as cred_type_exc:
raise InvalidJSONStructure("Credential had unexpected type") from cred_type_exc
response_user_handle = cred_response.get("userHandle")
if isinstance(response_user_handle, str):
# The `userHandle` string will most likely be base64url-encoded for ease of JSON
# transmission as per the L3 Draft spec:
# https://w3c.github.io/webauthn/#dictdef-authenticatorassertionresponsejson
response_user_handle = base64url_to_bytes(response_user_handle)
elif response_user_handle is not None:
# If it's not a string, and it's not None, then it's definitely not valid
raise InvalidJSONStructure("Credential response had unexpected userHandle")
cred_authenticator_attachment = json_val.get("authenticatorAttachment")
if isinstance(cred_authenticator_attachment, str):
try:
cred_authenticator_attachment = AuthenticatorAttachment(cred_authenticator_attachment)
except ValueError as cred_attachment_exc:
raise InvalidJSONStructure(
"Credential had unexpected authenticatorAttachment"
) from cred_attachment_exc
else:
cred_authenticator_attachment = None
try:
authentication_credential = AuthenticationCredential(
id=cred_id,
raw_id=base64url_to_bytes(cred_raw_id),
response=AuthenticatorAssertionResponse(
client_data_json=base64url_to_bytes(response_client_data_json),
authenticator_data=base64url_to_bytes(response_authenticator_data),
signature=base64url_to_bytes(response_signature),
user_handle=response_user_handle,
),
authenticator_attachment=cred_authenticator_attachment,
type=PublicKeyCredentialType.PUBLIC_KEY,
)
except Exception as exc:
raise InvalidAuthenticationResponse(
"Could not parse authentication credential from JSON data"
) from exc
return authentication_credential
|
Parse a JSON form of an authentication credential, as either a stringified JSON object or a
plain dict, into an instance of AuthenticationCredential
|
parse_authentication_credential_json
|
python
|
duo-labs/py_webauthn
|
webauthn/helpers/parse_authentication_credential_json.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/helpers/parse_authentication_credential_json.py
|
BSD-3-Clause
|
def parse_authentication_options_json(
json_val: Union[str, dict]
) -> PublicKeyCredentialRequestOptions:
"""
Parse a JSON form of authentication options, as either stringified JSON or a plain dict, into an
instance of `PublicKeyCredentialRequestOptions`. Typically useful in mapping output from
`generate_authentication_options()`, that's been persisted as JSON via Redis/etc... back into
structured data.
"""
if isinstance(json_val, str):
try:
json_val = json.loads(json_val)
except JSONDecodeError:
raise InvalidJSONStructure("Unable to decode options as JSON")
if not isinstance(json_val, dict):
raise InvalidJSONStructure("Options were not a JSON object")
"""
Check challenge
"""
options_challenge = json_val.get("challenge")
if not isinstance(options_challenge, str):
raise InvalidJSONStructure("Options missing required challenge")
"""
Check timeout
"""
options_timeout = json_val.get("timeout")
mapped_timeout = None
if isinstance(options_timeout, int):
mapped_timeout = options_timeout
"""
Check rpId
"""
options_rp_id = json_val.get("rpId")
mapped_rp_id = None
if isinstance(options_rp_id, str):
mapped_rp_id = options_rp_id
"""
Check userVerification
"""
options_user_verification = json_val.get("userVerification")
if not isinstance(options_user_verification, str):
raise InvalidJSONStructure("Options missing required userVerification")
try:
mapped_user_verification = UserVerificationRequirement(options_user_verification)
except ValueError as exc:
raise InvalidJSONStructure("Options userVerification was invalid value") from exc
"""
Check allowCredentials
"""
options_allow_credentials = json_val.get("allowCredentials")
mapped_allow_credentials: Optional[List[PublicKeyCredentialDescriptor]] = None
if isinstance(options_allow_credentials, list):
mapped_allow_credentials = []
for cred in options_allow_credentials:
_cred_id = cred.get("id")
if not isinstance(_cred_id, str):
raise InvalidJSONStructure("Options excludeCredentials entry missing required id")
_mapped = PublicKeyCredentialDescriptor(id=base64url_to_bytes(_cred_id))
_transports = cred.get("transports")
if _transports is not None:
if not isinstance(_transports, list):
raise InvalidJSONStructure(
"Options excludeCredentials entry transports was not list"
)
try:
_mapped.transports = [
AuthenticatorTransport(_transport) for _transport in _transports
]
except ValueError as exc:
raise InvalidJSONStructure(
"Options excludeCredentials entry transports had invalid value"
) from exc
mapped_allow_credentials.append(_mapped)
try:
authentication_options = PublicKeyCredentialRequestOptions(
challenge=base64url_to_bytes(options_challenge),
timeout=mapped_timeout,
rp_id=mapped_rp_id,
user_verification=mapped_user_verification,
allow_credentials=mapped_allow_credentials,
)
except Exception as exc:
raise InvalidAuthenticationOptions(
"Could not parse authentication options from JSON data"
) from exc
return authentication_options
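

# Illustrative round-trip sketch; `redis_client` is a hypothetical
# redis.Redis-style object, not part of this library.
def _example_persist_and_restore(redis_client) -> PublicKeyCredentialRequestOptions:
    from webauthn import generate_authentication_options
    from webauthn.helpers import options_to_json
    options = generate_authentication_options(rp_id="example.com")
    redis_client.set("current_authn_options", options_to_json(options))
    # ...later, in the request handler that verifies the assertion...
    stored = redis_client.get("current_authn_options")  # redis returns bytes
    return parse_authentication_options_json(stored.decode("utf-8"))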
|
Parse a JSON form of authentication options, as either stringified JSON or a plain dict, into an
instance of `PublicKeyCredentialRequestOptions`. Typically useful in mapping output from
`generate_authentication_options()`, that's been persisted as JSON via Redis/etc... back into
structured data.
|
parse_authentication_options_json
|
python
|
duo-labs/py_webauthn
|
webauthn/helpers/parse_authentication_options_json.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/helpers/parse_authentication_options_json.py
|
BSD-3-Clause
|
def parse_authenticator_data(val: bytes) -> AuthenticatorData:
"""
Turn `response.attestationObject.authData` into structured data
"""
val = byteslike_to_bytes(val)
# Don't bother parsing if there aren't enough bytes for at least:
# - rpIdHash (32 bytes)
# - flags (1 byte)
# - signCount (4 bytes)
if len(val) < 37:
raise InvalidAuthenticatorDataStructure(
f"Authenticator data was {len(val)} bytes, expected at least 37 bytes"
)
pointer = 0
    rp_id_hash = val[pointer : pointer + 32]
pointer += 32
# Cast byte to ordinal so we can use bitwise operators on it
flags_bytes = ord(val[pointer : pointer + 1])
pointer += 1
sign_count = val[pointer : pointer + 4]
pointer += 4
# Parse flags
flags = AuthenticatorDataFlags(
up=flags_bytes & (1 << 0) != 0,
uv=flags_bytes & (1 << 2) != 0,
be=flags_bytes & (1 << 3) != 0,
bs=flags_bytes & (1 << 4) != 0,
at=flags_bytes & (1 << 6) != 0,
ed=flags_bytes & (1 << 7) != 0,
)
# The value to return
authenticator_data = AuthenticatorData(
rp_id_hash=rp_id_hash,
flags=flags,
sign_count=int.from_bytes(sign_count, "big"),
)
# Parse AttestedCredentialData if present
if flags.at is True:
aaguid = val[pointer : pointer + 16]
pointer += 16
credential_id_len = int.from_bytes(val[pointer : pointer + 2], "big")
pointer += 2
credential_id = val[pointer : pointer + credential_id_len]
pointer += credential_id_len
"""
Some authenticators incorrectly compose authData when using EdDSA for their public keys.
A CBOR "Map of 3 items" (0xA3) should be "Map of 4 items" (0xA4), and if we manually adjust
the single byte there's a good chance the authData can be correctly parsed. Let's try to
detect when this happens and gracefully handle it.
"""
# Decodes to `{1: "OKP", 3: -8, -1: "Ed25519"}` (it's missing key -2 a.k.a. COSEKey.X)
bad_eddsa_cbor = bytearray.fromhex("a301634f4b500327206745643235353139")
# If we find the bytes here then let's fix the bad data
if val[pointer : pointer + len(bad_eddsa_cbor)] == bad_eddsa_cbor:
# Make a mutable copy of the bytes...
_val = bytearray(val)
# ...Fix the bad byte...
_val[pointer] = 0xA4
# ...Then replace `val` with the fixed bytes
val = bytes(_val)
# Load the next CBOR-encoded value
credential_public_key = parse_cbor(val[pointer:])
credential_public_key_bytes = encode_cbor(credential_public_key)
pointer += len(credential_public_key_bytes)
attested_cred_data = AttestedCredentialData(
aaguid=aaguid,
credential_id=credential_id,
credential_public_key=credential_public_key_bytes,
)
authenticator_data.attested_credential_data = attested_cred_data
if flags.ed is True:
extension_object = parse_cbor(val[pointer:])
extension_bytes = encode_cbor(extension_object)
pointer += len(extension_bytes)
authenticator_data.extensions = extension_bytes
# We should have parsed all authenticator data by this point
if len(val) > pointer:
raise InvalidAuthenticatorDataStructure(
"Leftover bytes detected while parsing authenticator data"
)
return authenticator_data
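

# Illustrative sketch, not part of the library: hand-build the minimal 37-byte
# blob (rpIdHash + flags + signCount) and confirm the UP (bit 0) and UV (bit 2)
# flags are unpacked from the single flags byte.
import hashlib as _hashlib
_minimal = _hashlib.sha256(b"example.com").digest()  # 32-byte rpIdHash
_minimal += bytes([0b00000101])  # flags byte with UP and UV set
_minimal += (0).to_bytes(4, "big")  # signCount
_parsed = parse_authenticator_data(_minimal)
assert _parsed.flags.up and _parsed.flags.uv and not _parsed.flags.at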
|
Turn `response.attestationObject.authData` into structured data
|
parse_authenticator_data
|
python
|
duo-labs/py_webauthn
|
webauthn/helpers/parse_authenticator_data.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/helpers/parse_authenticator_data.py
|
BSD-3-Clause
|
def parse_backup_flags(flags: AuthenticatorDataFlags) -> ParsedBackupFlags:
"""Convert backup eligibility and backup state flags into more useful representations
Raises:
`helpers.exceptions.InvalidBackupFlags` if an invalid backup state is detected
"""
credential_device_type = CredentialDeviceType.SINGLE_DEVICE
# A credential that can be backed up can typically be used on multiple devices
if flags.be:
credential_device_type = CredentialDeviceType.MULTI_DEVICE
if credential_device_type == CredentialDeviceType.SINGLE_DEVICE and flags.bs:
raise InvalidBackupFlags(
"Single-device credential indicated that it was backed up, which should be impossible."
)
return ParsedBackupFlags(
credential_device_type=credential_device_type,
credential_backed_up=flags.bs,
)
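

# Illustrative sketch, not part of the library: BE=1/BS=1 indicates a
# multi-device ("synced") credential that is currently backed up.
_flags = AuthenticatorDataFlags(up=True, uv=True, be=True, bs=True, at=False, ed=False)
_parsed = parse_backup_flags(_flags)
assert _parsed.credential_device_type == CredentialDeviceType.MULTI_DEVICE
assert _parsed.credential_backed_up is True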
|
Convert backup eligibility and backup state flags into more useful representations
Raises:
`helpers.exceptions.InvalidBackupFlags` if an invalid backup state is detected
|
parse_backup_flags
|
python
|
duo-labs/py_webauthn
|
webauthn/helpers/parse_backup_flags.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/helpers/parse_backup_flags.py
|
BSD-3-Clause
|
def parse_cbor(data: bytes) -> Any:
"""
Attempt to decode CBOR-encoded data.
Raises:
`helpers.exceptions.InvalidCBORData` if data cannot be decoded
"""
try:
to_return = cbor2.loads(data)
except Exception as exc:
raise InvalidCBORData("Could not decode CBOR data") from exc
return to_return
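

# Illustrative round trip with the sibling `encode_cbor` helper (its module
# path appears elsewhere in this repo): CBOR-encoded maps decode back to dicts.
from webauthn.helpers.encode_cbor import encode_cbor as _encode_cbor
assert parse_cbor(_encode_cbor({1: "OKP", 3: -8})) == {1: "OKP", 3: -8}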
|
Attempt to decode CBOR-encoded data.
Raises:
`helpers.exceptions.InvalidCBORData` if data cannot be decoded
|
parse_cbor
|
python
|
duo-labs/py_webauthn
|
webauthn/helpers/parse_cbor.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/helpers/parse_cbor.py
|
BSD-3-Clause
|
def parse_client_data_json(val: bytes) -> CollectedClientData:
"""
Break apart `response.clientDataJSON` buffer into structured data
"""
val = byteslike_to_bytes(val)
try:
json_dict = json.loads(val)
except JSONDecodeError:
raise InvalidJSONStructure("Unable to decode client_data_json bytes as JSON")
# Ensure required values are present in client data
if "type" not in json_dict:
raise InvalidJSONStructure('client_data_json missing required property "type"')
if "challenge" not in json_dict:
raise InvalidJSONStructure('client_data_json missing required property "challenge"')
if "origin" not in json_dict:
raise InvalidJSONStructure('client_data_json missing required property "origin"')
client_data = CollectedClientData(
type=json_dict["type"],
challenge=base64url_to_bytes(json_dict["challenge"]),
origin=json_dict["origin"],
)
# Populate optional values if set
if "crossOrigin" in json_dict:
cross_origin = bool(json_dict["crossOrigin"])
client_data.cross_origin = cross_origin
if "tokenBinding" in json_dict:
token_binding_dict = json_dict["tokenBinding"]
# Some U2F devices set a string to `token_binding`, in which case ignore it
if type(token_binding_dict) is dict:
if "status" not in token_binding_dict:
raise InvalidJSONStructure('token_binding missing required property "status"')
status = token_binding_dict["status"]
try:
# This will raise ValidationError on an unexpected status
token_binding = TokenBinding(status=status)
# Handle optional values
if "id" in token_binding_dict:
id = token_binding_dict["id"]
token_binding.id = f"{id}"
client_data.token_binding = token_binding
except Exception:
# If we encounter a status we don't expect then ignore token_binding
# completely
pass
return client_data
|
Break apart `response.clientDataJSON` buffer into structured data
|
parse_client_data_json
|
python
|
duo-labs/py_webauthn
|
webauthn/helpers/parse_client_data_json.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/helpers/parse_client_data_json.py
|
BSD-3-Clause
|
def parse_registration_credential_json(json_val: Union[str, dict]) -> RegistrationCredential:
"""
Parse a JSON form of a registration credential, as either a stringified JSON object or a
plain dict, into an instance of RegistrationCredential
"""
if isinstance(json_val, str):
try:
json_val = json.loads(json_val)
except JSONDecodeError:
raise InvalidJSONStructure("Unable to decode credential as JSON")
if not isinstance(json_val, dict):
raise InvalidJSONStructure("Credential was not a JSON object")
cred_id = json_val.get("id")
if not isinstance(cred_id, str):
raise InvalidJSONStructure("Credential missing required id")
cred_raw_id = json_val.get("rawId")
if not isinstance(cred_raw_id, str):
raise InvalidJSONStructure("Credential missing required rawId")
cred_response = json_val.get("response")
if not isinstance(cred_response, dict):
raise InvalidJSONStructure("Credential missing required response")
response_client_data_json = cred_response.get("clientDataJSON")
if not isinstance(response_client_data_json, str):
raise InvalidJSONStructure("Credential response missing required clientDataJSON")
response_attestation_object = cred_response.get("attestationObject")
if not isinstance(response_attestation_object, str):
raise InvalidJSONStructure("Credential response missing required attestationObject")
cred_type = json_val.get("type")
try:
# Simply try to get the single matching Enum. We'll set the literal value below assuming
# the code can get past here (this is basically a mypy optimization)
PublicKeyCredentialType(cred_type)
except ValueError as cred_type_exc:
raise InvalidJSONStructure("Credential had unexpected type") from cred_type_exc
transports: Optional[List[AuthenticatorTransport]] = None
response_transports = cred_response.get("transports")
if isinstance(response_transports, list):
transports = []
for val in response_transports:
try:
transport_enum = AuthenticatorTransport(val)
transports.append(transport_enum)
except ValueError:
pass
cred_authenticator_attachment = json_val.get("authenticatorAttachment")
if isinstance(cred_authenticator_attachment, str):
try:
cred_authenticator_attachment = AuthenticatorAttachment(cred_authenticator_attachment)
except ValueError as cred_attachment_exc:
raise InvalidJSONStructure(
"Credential had unexpected authenticatorAttachment"
) from cred_attachment_exc
else:
cred_authenticator_attachment = None
try:
registration_credential = RegistrationCredential(
id=cred_id,
raw_id=base64url_to_bytes(cred_raw_id),
response=AuthenticatorAttestationResponse(
client_data_json=base64url_to_bytes(response_client_data_json),
attestation_object=base64url_to_bytes(response_attestation_object),
transports=transports,
),
authenticator_attachment=cred_authenticator_attachment,
type=PublicKeyCredentialType.PUBLIC_KEY,
)
except Exception as exc:
raise InvalidRegistrationResponse(
"Could not parse registration credential from JSON data"
) from exc
return registration_credential
|
Parse a JSON form of a registration credential, as either a stringified JSON object or a
plain dict, into an instance of RegistrationCredential
|
parse_registration_credential_json
|
python
|
duo-labs/py_webauthn
|
webauthn/helpers/parse_registration_credential_json.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/helpers/parse_registration_credential_json.py
|
BSD-3-Clause
|
def parse_registration_options_json(
json_val: Union[str, dict]
) -> PublicKeyCredentialCreationOptions:
"""
Parse a JSON form of registration options, as either stringified JSON or a plain dict, into an
instance of `PublicKeyCredentialCreationOptions`. Typically useful in mapping output from
`generate_registration_options()`, that's been persisted as JSON via Redis/etc... back into
structured data.
"""
if isinstance(json_val, str):
try:
json_val = json.loads(json_val)
except JSONDecodeError:
raise InvalidJSONStructure("Unable to decode options as JSON")
if not isinstance(json_val, dict):
raise InvalidJSONStructure("Options were not a JSON object")
"""
Check rp
"""
options_rp = json_val.get("rp")
if not isinstance(options_rp, dict):
raise InvalidJSONStructure("Options missing required rp")
options_rp_id = options_rp.get("id")
if options_rp_id is not None and not isinstance(options_rp_id, str):
raise InvalidJSONStructure("Options rp.id present but not string")
options_rp_name = options_rp.get("name")
if not isinstance(options_rp_name, str):
raise InvalidJSONStructure("Options rp missing required name")
"""
Check user
"""
options_user = json_val.get("user")
if not isinstance(options_user, dict):
raise InvalidJSONStructure("Options missing required user")
options_user_id = options_user.get("id")
if not isinstance(options_user_id, str):
raise InvalidJSONStructure("Options user missing required id")
options_user_name = options_user.get("name")
if not isinstance(options_user_name, str):
raise InvalidJSONStructure("Options user missing required name")
options_user_display_name = options_user.get("displayName")
if not isinstance(options_user_display_name, str):
raise InvalidJSONStructure("Options user missing required displayName")
"""
Check attestation
"""
options_attestation = json_val.get("attestation")
if not isinstance(options_attestation, str):
raise InvalidJSONStructure("Options missing required attestation")
try:
mapped_attestation = AttestationConveyancePreference(options_attestation)
except ValueError as exc:
raise InvalidJSONStructure("Options attestation was invalid value") from exc
"""
Check authenticatorSelection
"""
options_authr_selection = json_val.get("authenticatorSelection")
mapped_authenticator_selection: Optional[AuthenticatorSelectionCriteria] = None
if isinstance(options_authr_selection, dict):
options_authr_selection_attachment = options_authr_selection.get("authenticatorAttachment")
mapped_attachment = None
if options_authr_selection_attachment is not None:
try:
mapped_attachment = AuthenticatorAttachment(options_authr_selection_attachment)
except ValueError as exc:
raise InvalidJSONStructure(
"Options authenticatorSelection attachment was invalid value"
) from exc
options_authr_selection_rkey = options_authr_selection.get("residentKey")
mapped_rkey = None
if options_authr_selection_rkey is not None:
try:
mapped_rkey = ResidentKeyRequirement(options_authr_selection_rkey)
except ValueError as exc:
raise InvalidJSONStructure(
"Options authenticatorSelection residentKey was invalid value"
) from exc
options_authr_selection_require_rkey = options_authr_selection.get("requireResidentKey")
mapped_require_rkey = False
if options_authr_selection_require_rkey is not None:
if not isinstance(options_authr_selection_require_rkey, bool):
raise InvalidJSONStructure(
"Options authenticatorSelection requireResidentKey was invalid boolean"
)
mapped_require_rkey = options_authr_selection_require_rkey
options_authr_selection_uv = options_authr_selection.get("userVerification")
mapped_user_verification = UserVerificationRequirement.PREFERRED
if options_authr_selection_uv is not None:
try:
mapped_user_verification = UserVerificationRequirement(options_authr_selection_uv)
except ValueError as exc:
raise InvalidJSONStructure(
"Options authenticatorSelection userVerification was invalid value"
) from exc
mapped_authenticator_selection = AuthenticatorSelectionCriteria(
authenticator_attachment=mapped_attachment,
resident_key=mapped_rkey,
require_resident_key=mapped_require_rkey,
user_verification=mapped_user_verification,
)
"""
Check challenge is present
"""
options_challenge = json_val.get("challenge")
if not isinstance(options_challenge, str):
raise InvalidJSONStructure("Options missing required challenge")
"""
Check pubKeyCredParams
"""
options_pub_key_cred_params = json_val.get("pubKeyCredParams")
if not isinstance(options_pub_key_cred_params, list):
raise InvalidJSONStructure("Options pubKeyCredParams was invalid value")
try:
mapped_pub_key_cred_params = [
PublicKeyCredentialParameters(
alg=COSEAlgorithmIdentifier(param["alg"]), type="public-key"
)
for param in options_pub_key_cred_params
]
except ValueError as exc:
raise InvalidJSONStructure("Options pubKeyCredParams entry had invalid alg") from exc
"""
Check excludeCredentials
"""
options_exclude_credentials = json_val.get("excludeCredentials")
mapped_exclude_credentials: Optional[List[PublicKeyCredentialDescriptor]] = None
if isinstance(options_exclude_credentials, list):
mapped_exclude_credentials = []
for cred in options_exclude_credentials:
_cred_id = cred.get("id")
if not isinstance(_cred_id, str):
raise InvalidJSONStructure("Options excludeCredentials entry missing required id")
_mapped = PublicKeyCredentialDescriptor(id=base64url_to_bytes(_cred_id))
_transports = cred.get("transports")
if _transports is not None:
if not isinstance(_transports, list):
raise InvalidJSONStructure(
"Options excludeCredentials entry transports was not list"
)
try:
_mapped.transports = [
AuthenticatorTransport(_transport) for _transport in _transports
]
except ValueError as exc:
raise InvalidJSONStructure(
"Options excludeCredentials entry transports had invalid value"
) from exc
mapped_exclude_credentials.append(_mapped)
"""
Check timeout
"""
options_timeout = json_val.get("timeout")
mapped_timeout = None
if isinstance(options_timeout, int):
mapped_timeout = options_timeout
"""
Check hints
"""
options_hints = json_val.get("hints")
mapped_hints = None
if options_hints is not None:
if not isinstance(options_hints, list):
raise InvalidJSONStructure("Options hints was invalid value")
try:
mapped_hints = [PublicKeyCredentialHint(hint) for hint in options_hints]
except ValueError as exc:
raise InvalidJSONStructure("Options hints had invalid value") from exc
try:
registration_options = PublicKeyCredentialCreationOptions(
rp=PublicKeyCredentialRpEntity(
id=options_rp_id,
name=options_rp_name,
),
user=PublicKeyCredentialUserEntity(
id=base64url_to_bytes(options_user_id),
name=options_user_name,
display_name=options_user_display_name,
),
attestation=mapped_attestation,
authenticator_selection=mapped_authenticator_selection,
challenge=base64url_to_bytes(options_challenge),
pub_key_cred_params=mapped_pub_key_cred_params,
exclude_credentials=mapped_exclude_credentials,
timeout=mapped_timeout,
hints=mapped_hints,
)
except Exception as exc:
raise InvalidRegistrationOptions(
"Could not parse registration options from JSON data"
) from exc
return registration_options
|
Parse a JSON form of registration options, as either stringified JSON or a plain dict, into an
instance of `PublicKeyCredentialCreationOptions`. Typically useful in mapping output from
`generate_registration_options()`, that's been persisted as JSON via Redis/etc... back into
structured data.
|
parse_registration_options_json
|
python
|
duo-labs/py_webauthn
|
webauthn/helpers/parse_registration_options_json.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/helpers/parse_registration_options_json.py
|
BSD-3-Clause
|
def pem_cert_bytes_to_open_ssl_x509(cert: bytes) -> X509:
"""Convert PEM-formatted certificate bytes into an X509 instance usable for cert
chain validation
"""
cert_crypto = load_pem_x509_certificate(cert)
cert_openssl = X509().from_cryptography(cert_crypto)
return cert_openssl
|
Convert PEM-formatted certificate bytes into an X509 instance usable for cert
chain validation
|
pem_cert_bytes_to_open_ssl_x509
|
python
|
duo-labs/py_webauthn
|
webauthn/helpers/pem_cert_bytes_to_open_ssl_x509.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/helpers/pem_cert_bytes_to_open_ssl_x509.py
|
BSD-3-Clause
|
def snake_case_to_camel_case(snake_case: str) -> str:
"""
Helper method for converting a snake_case'd value to camelCase
input: pub_key_cred_params
output: pubKeyCredParams
"""
parts = snake_case.split("_")
converted = parts[0].lower() + "".join(part.title() for part in parts[1:])
# Massage "clientDataJson" to "clientDataJSON"
converted = converted.replace("Json", "JSON")
return converted
|
Helper method for converting a snake_case'd value to camelCase
input: pub_key_cred_params
output: pubKeyCredParams
|
snake_case_to_camel_case
|
python
|
duo-labs/py_webauthn
|
webauthn/helpers/snake_case_to_camel_case.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/helpers/snake_case_to_camel_case.py
|
BSD-3-Clause
|
def validate_certificate_chain(
*,
x5c: List[bytes],
pem_root_certs_bytes: Optional[List[bytes]] = None,
) -> bool:
"""Validate that the certificates in x5c chain back to a known root certificate
Args:
`x5c`: X5C certificates from a registration response's attestation statement
(optional) `pem_root_certs_bytes`: Any additional (PEM-formatted)
root certificates that may complete the certificate chain
Raises:
`helpers.exceptions.InvalidCertificateChain` if chain cannot be validated
"""
if pem_root_certs_bytes is None or len(pem_root_certs_bytes) < 1:
# We have no root certs to chain back to, so just pass on validation
return True
# Make sure we have at least one certificate to try and link back to a root cert
if len(x5c) < 1:
raise InvalidCertificateChain("x5c was empty")
# Prepare leaf cert
try:
leaf_cert_bytes = x5c[0]
leaf_cert_crypto = load_der_x509_certificate(leaf_cert_bytes)
leaf_cert = X509().from_cryptography(leaf_cert_crypto)
except Exception as err:
raise InvalidCertificateChain(f"Could not prepare leaf cert: {err}")
# Prepare any intermediate certs
try:
# May be an empty array, that's fine
intermediate_certs_bytes = x5c[1:]
intermediate_certs_crypto = [
load_der_x509_certificate(cert) for cert in intermediate_certs_bytes
]
intermediate_certs = [X509().from_cryptography(cert) for cert in intermediate_certs_crypto]
except Exception as err:
raise InvalidCertificateChain(f"Could not prepare intermediate certs: {err}")
# Prepare a collection of possible root certificates
cert_store = _generate_new_cert_store()
try:
for cert in pem_root_certs_bytes:
cert_store.add_cert(pem_cert_bytes_to_open_ssl_x509(cert))
except Exception as err:
raise InvalidCertificateChain(f"Could not prepare root certs: {err}")
# Load certs into a "context" for validation
context = X509StoreContext(
store=cert_store,
certificate=leaf_cert,
chain=intermediate_certs,
)
# Validate the chain (will raise if it can't)
try:
context.verify_certificate()
except X509StoreContextError:
raise InvalidCertificateChain("Certificate chain could not be validated")
return True
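

# Illustrative usage sketch, not part of the library: validate an attestation
# statement's x5c chain against a single trusted PEM root (hypothetical inputs).
def _example_chain_is_trusted(x5c: List[bytes], root_cert_pem: bytes) -> bool:
    try:
        return validate_certificate_chain(x5c=x5c, pem_root_certs_bytes=[root_cert_pem])
    except InvalidCertificateChain:
        # Treat an unverifiable chain as a failed registration
        return False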
|
Validate that the certificates in x5c chain back to a known root certificate
Args:
`x5c`: X5C certificates from a registration response's attestation statement
(optional) `pem_root_certs_bytes`: Any additional (PEM-formatted)
root certificates that may complete the certificate chain
Raises:
`helpers.exceptions.InvalidCertificateChain` if chain cannot be validated
|
validate_certificate_chain
|
python
|
duo-labs/py_webauthn
|
webauthn/helpers/validate_certificate_chain.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/helpers/validate_certificate_chain.py
|
BSD-3-Clause
|
def verify_safetynet_timestamp(timestamp_ms: int) -> None:
"""Handle time drift between an RP and the Google SafetyNet API servers with a window of
time within which the response is valid
"""
# Buffer period in ms
grace_ms = 10 * 1000
# Get "now" in ms
now = int(time.time()) * 1000
# Make sure the response was generated in the past
if timestamp_ms > (now + grace_ms):
raise ValueError(f"Payload timestamp {timestamp_ms} was later than {now} + {grace_ms}")
# Make sure the response arrived within the grace period
if timestamp_ms < (now - grace_ms):
raise ValueError("Payload has expired")
|
Handle time drift between an RP and the Google SafetyNet API servers with a window of
time within which the response is valid
|
verify_safetynet_timestamp
|
python
|
duo-labs/py_webauthn
|
webauthn/helpers/verify_safetynet_timestamp.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/helpers/verify_safetynet_timestamp.py
|
BSD-3-Clause
|
def verify_signature(
*,
public_key: Union[
EllipticCurvePublicKey,
RSAPublicKey,
Ed25519PublicKey,
DSAPublicKey,
Ed448PublicKey,
X25519PublicKey,
X448PublicKey,
],
signature_alg: COSEAlgorithmIdentifier,
signature: bytes,
data: bytes,
) -> None:
"""Verify a signature was signed with the private key corresponding to the provided
public key.
Args:
`public_key`: A public key loaded via cryptography's `load_der_public_key`, `load_der_x509_certificate`, etc...
`signature_alg`: Algorithm ID used to sign the signature
`signature`: Signature to verify
`data`: Data signed by private key
Raises:
`webauthn.helpers.exceptions.UnsupportedAlgorithm` when the algorithm is not a recognized COSE algorithm ID
`webauthn.helpers.exceptions.UnsupportedPublicKey` when the public key is not a valid EC2, RSA, or OKP certificate
`cryptography.exceptions.InvalidSignature` when the signature cannot be verified
"""
if isinstance(public_key, EllipticCurvePublicKey):
public_key.verify(signature, data, get_ec2_sig_alg(signature_alg))
elif isinstance(public_key, RSAPublicKey):
if is_rsa_pkcs(signature_alg):
public_key.verify(signature, data, PKCS1v15(), get_rsa_pkcs1_sig_alg(signature_alg))
elif is_rsa_pss(signature_alg):
rsa_alg = get_rsa_pss_sig_alg(signature_alg)
public_key.verify(
signature,
data,
PSS(mgf=MGF1(rsa_alg), salt_length=PSS.MAX_LENGTH),
rsa_alg,
)
else:
raise UnsupportedAlgorithm(f"Unrecognized RSA signature alg {signature_alg}")
elif isinstance(public_key, Ed25519PublicKey):
public_key.verify(signature, data)
else:
raise UnsupportedPublicKey(
f"Unsupported public key for signature verification: {public_key}"
)
|
Verify a signature was signed with the private key corresponding to the provided
public key.
Args:
`public_key`: A public key loaded via cryptography's `load_der_public_key`, `load_der_x509_certificate`, etc...
`signature_alg`: Algorithm ID used to sign the signature
`signature`: Signature to verify
`data`: Data signed by private key
Raises:
`webauthn.helpers.exceptions.UnsupportedAlgorithm` when the algorithm is not a recognized COSE algorithm ID
`webauthn.helpers.exceptions.UnsupportedPublicKey` when the public key is not a valid EC2, RSA, or OKP certificate
`cryptography.exceptions.InvalidSignature` when the signature cannot be verified
|
verify_signature
|
python
|
duo-labs/py_webauthn
|
webauthn/helpers/verify_signature.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/helpers/verify_signature.py
|
BSD-3-Clause
|
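A round-trip sketch for verify_signature using a freshly generated P-256 key; the COSEAlgorithmIdentifier import from webauthn.helpers.cose is an assumption about this repo's layout:
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec
from webauthn.helpers.cose import COSEAlgorithmIdentifier
from webauthn.helpers.verify_signature import verify_signature

private_key = ec.generate_private_key(ec.SECP256R1())
data = b"authenticator data || client data hash"
signature = private_key.sign(data, ec.ECDSA(hashes.SHA256()))

# Raises cryptography.exceptions.InvalidSignature if verification fails
verify_signature(
    public_key=private_key.public_key(),
    signature_alg=COSEAlgorithmIdentifier.ECDSA_SHA_256,
    signature=signature,
    data=data,
)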
def parse_cert_info(val: bytes) -> TPMCertInfo:
"""
Turn `response.attestationObject.attStmt.certInfo` into structured data
"""
pointer = 0
# The constant "TPM_GENERATED_VALUE" indicating a structure generated by TPM
magic_bytes = val[pointer : pointer + 4]
pointer += 4
# Type of the cert info structure
type_bytes = val[pointer : pointer + 2]
pointer += 2
mapped_type = TPM_ST_MAP[type_bytes]
# Name of parent entity
qualified_signer_length = int.from_bytes(val[pointer : pointer + 2], "big")
pointer += 2
qualified_signer = val[pointer : pointer + qualified_signer_length]
pointer += qualified_signer_length
# Expected hash value of `attsToBeSigned`
extra_data_length = int.from_bytes(val[pointer : pointer + 2], "big")
pointer += 2
extra_data_bytes = val[pointer : pointer + extra_data_length]
pointer += extra_data_length
# Info about the TPM's internal clock
clock_info_bytes = val[pointer : pointer + 17]
pointer += 17
# Device firmware version
firmware_version_bytes = val[pointer : pointer + 8]
pointer += 8
# Verify that type is set to TPM_ST_ATTEST_CERTIFY.
if mapped_type != TPM_ST.ATTEST_CERTIFY:
raise InvalidTPMCertInfoStructure(
f'Cert Info type "{mapped_type}" was not "{TPM_ST.ATTEST_CERTIFY}"'
)
# Attested name
attested_name_length = int.from_bytes(val[pointer : pointer + 2], "big")
pointer += 2
attested_name_bytes = val[pointer : pointer + attested_name_length]
pointer += attested_name_length
qualified_name_length = int.from_bytes(val[pointer : pointer + 2], "big")
pointer += 2
qualified_name_bytes = val[pointer : pointer + qualified_name_length]
pointer += qualified_name_length
return TPMCertInfo(
magic=magic_bytes,
type=mapped_type,
extra_data=extra_data_bytes,
attested=TPMCertInfoAttested(attested_name_bytes, qualified_name_bytes),
# Note that the remaining fields in the "Standard Attestation Structure"
# [TPMv2-Part1] section 31.2, i.e., qualifiedSigner, clockInfo and
# firmwareVersion are ignored. These fields MAY be used as an input to risk
# engines.
qualified_signer=qualified_signer,
clock_info=TPMCertInfoClockInfo(clock_info_bytes),
firmware_version=firmware_version_bytes,
)
|
Turn `response.attestationObject.attStmt.certInfo` into structured data
|
parse_cert_info
|
python
|
duo-labs/py_webauthn
|
webauthn/helpers/tpm/parse_cert_info.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/helpers/tpm/parse_cert_info.py
|
BSD-3-Clause
|
def parse_pub_area(val: bytes) -> TPMPubArea:
"""
Turn `response.attestationObject.attStmt.pubArea` into structured data
"""
pointer = 0
type_bytes = val[pointer : pointer + 2]
pointer += 2
mapped_type = TPM_ALG_MAP[type_bytes]
name_alg_bytes = val[pointer : pointer + 2]
pointer += 2
mapped_name_alg = TPM_ALG_MAP[name_alg_bytes]
object_attributes_bytes = val[pointer : pointer + 4]
pointer += 4
# Parse attributes from right to left by zero-index bit position
object_attributes = TPMPubAreaObjectAttributes(object_attributes_bytes)
auth_policy_length = int.from_bytes(val[pointer : pointer + 2], "big")
pointer += 2
auth_policy_bytes = val[pointer : pointer + auth_policy_length]
pointer += auth_policy_length
# Decode the rest of the bytes to public key parameters
if mapped_type == TPM_ALG.RSA:
rsa_bytes = val[pointer : pointer + 10]
pointer += 10
parameters = TPMPubAreaParametersRSA(rsa_bytes)
elif mapped_type == TPM_ALG.ECC:
ecc_bytes = val[pointer : pointer + 8]
pointer += 8
# mypy will error here because of the incompatible "reassignment", but
# `parameters` in `TPMPubArea` is a Union of either type so ignore the error
parameters = TPMPubAreaParametersECC(ecc_bytes) # type: ignore
else:
raise InvalidTPMPubAreaStructure(f'Type "{mapped_type}" is unsupported')
unique_length_bytes = val[pointer:]
return TPMPubArea(
type=mapped_type,
name_alg=mapped_name_alg,
object_attributes=object_attributes,
auth_policy=auth_policy_bytes,
parameters=parameters,
unique=TPMPubAreaUnique(unique_length_bytes, mapped_type),
)
|
Turn `response.attestationObject.attStmt.pubArea` into structured data
|
parse_pub_area
|
python
|
duo-labs/py_webauthn
|
webauthn/helpers/tpm/parse_pub_area.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/helpers/tpm/parse_pub_area.py
|
BSD-3-Clause
|
def _generate_pub_key_cred_params(
supported_algs: List[COSEAlgorithmIdentifier],
) -> List[PublicKeyCredentialParameters]:
"""
Take an array of algorithm ID ints and return an array of PublicKeyCredentialParameters
"""
return [PublicKeyCredentialParameters(type="public-key", alg=alg) for alg in supported_algs]
|
Take an array of algorithm ID ints and return an array of PublicKeyCredentialParameters
|
_generate_pub_key_cred_params
|
python
|
duo-labs/py_webauthn
|
webauthn/registration/generate_registration_options.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/registration/generate_registration_options.py
|
BSD-3-Clause
|
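Illustrative input/output for the helper above (COSE IDs -7 = ES256, -257 = RS256):
params = _generate_pub_key_cred_params([-7, -257])
# [PublicKeyCredentialParameters(type="public-key", alg=-7),
#  PublicKeyCredentialParameters(type="public-key", alg=-257)]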
def generate_registration_options(
*,
rp_id: str,
rp_name: str,
user_name: str,
user_id: Optional[bytes] = None,
user_display_name: Optional[str] = None,
challenge: Optional[bytes] = None,
timeout: int = 60000,
attestation: AttestationConveyancePreference = AttestationConveyancePreference.NONE,
authenticator_selection: Optional[AuthenticatorSelectionCriteria] = None,
exclude_credentials: Optional[List[PublicKeyCredentialDescriptor]] = None,
supported_pub_key_algs: Optional[List[COSEAlgorithmIdentifier]] = None,
hints: Optional[List[PublicKeyCredentialHint]] = None,
) -> PublicKeyCredentialCreationOptions:
"""Generate options for registering a credential via navigator.credentials.create()
Args:
`rp_id`: A unique, constant identifier for this Relying Party.
`rp_name`: A user-friendly, readable name for the Relying Party.
`user_name`: A value that will help the user identify which account this credential is associated with. Can be an email address, etc...
(optional) `user_id`: A collection of random bytes that identify a user account. For privacy reasons it should NOT be something like an email address. Defaults to 64 random bytes.
(optional) `user_display_name`: A user-friendly representation of their account. Can be a full name, etc... Defaults to the value of `user_name`.
(optional) `challenge`: A byte sequence for the authenticator to return back in its response. Defaults to 64 random bytes.
(optional) `timeout`: How long in milliseconds the browser should give the user to choose an authenticator. This value is a *hint* and may be ignored by the browser.
(optional) `attestation`: The level of attestation to be provided by the authenticator.
(optional) `authenticator_selection`: Require certain characteristics about an authenticator, like attachment, support for resident keys, user verification, etc...
(optional) `exclude_credentials`: A list of credentials the user has previously registered so that they cannot re-register them.
(optional) `supported_pub_key_algs`: A list of public key algorithm IDs the RP chooses to restrict support to. Defaults to all supported algorithm IDs.
(optional) `hints`: Hints to the browser about which kinds of authenticators to prioritize when prompting the user, e.g. security keys vs. platform authenticators.
Returns:
Registration options ready for the browser. Consider using `helpers.options_to_json()` in this library to quickly convert the options to JSON.
"""
if not rp_id:
raise ValueError("rp_id cannot be an empty string")
if not rp_name:
raise ValueError("rp_name cannot be an empty string")
if not user_name:
raise ValueError("user_name cannot be an empty string")
if user_id:
if not isinstance(user_id, bytes):
raise ValueError("user_id must be bytes")
else:
user_id = generate_user_handle()
########
# Set defaults for required values
########
if not user_display_name:
user_display_name = user_name
pub_key_cred_params = default_supported_pub_key_params
if supported_pub_key_algs:
pub_key_cred_params = _generate_pub_key_cred_params(supported_pub_key_algs)
if not challenge:
challenge = generate_challenge()
if not exclude_credentials:
exclude_credentials = []
########
# Generate the actual options
########
options = PublicKeyCredentialCreationOptions(
rp=PublicKeyCredentialRpEntity(
name=rp_name,
id=rp_id,
),
user=PublicKeyCredentialUserEntity(
id=user_id,
name=user_name,
display_name=user_display_name,
),
challenge=challenge,
pub_key_cred_params=pub_key_cred_params,
timeout=timeout,
exclude_credentials=exclude_credentials,
attestation=attestation,
hints=hints,
)
########
# Set optional values if specified
########
if authenticator_selection is not None:
# "Relying Parties SHOULD set [requireResidentKey] to true if, and only if,
# residentKey is set to "required""
#
# See https://www.w3.org/TR/webauthn-2/#dom-authenticatorselectioncriteria-requireresidentkey
if authenticator_selection.resident_key == ResidentKeyRequirement.REQUIRED:
authenticator_selection.require_resident_key = True
options.authenticator_selection = authenticator_selection
return options
|
Generate options for registering a credential via navigator.credentials.create()
Args:
`rp_id`: A unique, constant identifier for this Relying Party.
`rp_name`: A user-friendly, readable name for the Relying Party.
`user_name`: A value that will help the user identify which account this credential is associated with. Can be an email address, etc...
(optional) `user_id`: A collection of random bytes that identify a user account. For privacy reasons it should NOT be something like an email address. Defaults to 64 random bytes.
(optional) `user_display_name`: A user-friendly representation of their account. Can be a full name, etc... Defaults to the value of `user_name`.
(optional) `challenge`: A byte sequence for the authenticator to return back in its response. Defaults to 64 random bytes.
(optional) `timeout`: How long in milliseconds the browser should give the user to choose an authenticator. This value is a *hint* and may be ignored by the browser.
(optional) `attestation`: The level of attestation to be provided by the authenticator.
(optional) `authenticator_selection`: Require certain characteristics about an authenticator, like attachment, support for resident keys, user verification, etc...
(optional) `exclude_credentials`: A list of credentials the user has previously registered so that they cannot re-register them.
(optional) `supported_pub_key_algs`: A list of public key algorithm IDs the RP chooses to restrict support to. Defaults to all supported algorithm IDs.
(optional) `hints`: Hints to the browser about which kinds of authenticators to prioritize when prompting the user, e.g. security keys vs. platform authenticators.
Returns:
Registration options ready for the browser. Consider using `helpers.options_to_json()` in this library to quickly convert the options to JSON.
|
generate_registration_options
|
python
|
duo-labs/py_webauthn
|
webauthn/registration/generate_registration_options.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/registration/generate_registration_options.py
|
BSD-3-Clause
|
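A usage sketch with illustrative RP values, serialized with the options_to_json helper the docstring recommends:
from webauthn import generate_registration_options
from webauthn.helpers import options_to_json
from webauthn.helpers.structs import (
    AuthenticatorSelectionCriteria,
    ResidentKeyRequirement,
)

options = generate_registration_options(
    rp_id="example.com",
    rp_name="Example RP",
    user_name="alice@example.com",
    authenticator_selection=AuthenticatorSelectionCriteria(
        # Per the logic above, this also forces require_resident_key=True
        resident_key=ResidentKeyRequirement.REQUIRED,
    ),
)

# challenge and user_id defaulted to 64 random bytes each; send this to the browser
print(options_to_json(options))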
def verify_registration_response(
*,
credential: Union[str, dict, RegistrationCredential],
expected_challenge: bytes,
expected_rp_id: str,
expected_origin: Union[str, List[str]],
require_user_presence: bool = True,
require_user_verification: bool = False,
supported_pub_key_algs: List[COSEAlgorithmIdentifier] = default_supported_pub_key_algs,
pem_root_certs_bytes_by_fmt: Optional[Mapping[AttestationFormat, List[bytes]]] = None,
) -> VerifiedRegistration:
"""Verify an authenticator's response to navigator.credentials.create()
Args:
- `credential`: The value returned from `navigator.credentials.create()`. Can be either a
stringified JSON object, a plain dict, or an instance of RegistrationCredential
- `expected_challenge`: The challenge passed to the authenticator within the preceding
registration options.
- `expected_rp_id`: The Relying Party's unique identifier as specified in the preceding
registration options.
- `expected_origin`: The domain, with HTTP protocol (e.g. "https://domain.here"), on which
the registration should have occurred. Can also be a list of expected origins.
- (optional) `require_user_presence`: Whether or not to require that the user was present
during the registration. Should be False during auto registration.
- (optional) `require_user_verification`: Whether or not to require that the authenticator
verified the user.
- (optional) `supported_pub_key_algs`: A list of public key algorithm IDs the RP chooses to
restrict support to. Defaults to all supported algorithm IDs.
- (optional) `pem_root_certs_bytes_by_fmt`: A mapping of attestation statement formats to
lists of root certificates, in PEM format, used to validate the certificate chains for
those formats.
Returns:
Information about the authenticator and registration
Raises:
`helpers.exceptions.InvalidRegistrationResponse` if the response cannot be verified
"""
if isinstance(credential, str) or isinstance(credential, dict):
credential = parse_registration_credential_json(credential)
verified = False
# FIDO-specific check
if bytes_to_base64url(credential.raw_id) != credential.id:
raise InvalidRegistrationResponse("id and raw_id were not equivalent")
# FIDO-specific check
if credential.type != PublicKeyCredentialType.PUBLIC_KEY:
raise InvalidRegistrationResponse(
f'Unexpected credential type "{credential.type}", expected "public-key"'
)
response = credential.response
client_data_bytes = byteslike_to_bytes(response.client_data_json)
attestation_object_bytes = byteslike_to_bytes(response.attestation_object)
client_data = parse_client_data_json(client_data_bytes)
if client_data.type != ClientDataType.WEBAUTHN_CREATE:
raise InvalidRegistrationResponse(
f'Unexpected client data type "{client_data.type}", expected "{ClientDataType.WEBAUTHN_CREATE}"'
)
if expected_challenge != client_data.challenge:
raise InvalidRegistrationResponse("Client data challenge was not expected challenge")
if isinstance(expected_origin, str):
if expected_origin != client_data.origin:
raise InvalidRegistrationResponse(
f'Unexpected client data origin "{client_data.origin}", expected "{expected_origin}"'
)
else:
try:
expected_origin.index(client_data.origin)
except ValueError:
raise InvalidRegistrationResponse(
f'Unexpected client data origin "{client_data.origin}", expected one of {expected_origin}'
)
if client_data.token_binding:
status = client_data.token_binding.status
if status not in expected_token_binding_statuses:
raise InvalidRegistrationResponse(
f'Unexpected token_binding status of "{status}", expected one of "{",".join(expected_token_binding_statuses)}"'
)
attestation_object = parse_attestation_object(attestation_object_bytes)
auth_data = attestation_object.auth_data
# Generate a hash of the expected RP ID for comparison
expected_rp_id_hash = hashlib.sha256()
expected_rp_id_hash.update(expected_rp_id.encode("utf-8"))
expected_rp_id_hash_bytes = expected_rp_id_hash.digest()
if auth_data.rp_id_hash != expected_rp_id_hash_bytes:
raise InvalidRegistrationResponse("Unexpected RP ID hash")
if require_user_presence and not auth_data.flags.up:
raise InvalidRegistrationResponse("User presence was required, but was not present during attestation")
if require_user_verification and not auth_data.flags.uv:
raise InvalidRegistrationResponse(
"User verification is required but user was not verified during attestation"
)
if not auth_data.attested_credential_data:
raise InvalidRegistrationResponse("Authenticator did not provide attested credential data")
attested_credential_data = auth_data.attested_credential_data
if not attested_credential_data.credential_id:
raise InvalidRegistrationResponse("Authenticator did not provide a credential ID")
if not attested_credential_data.credential_public_key:
raise InvalidRegistrationResponse("Authenticator did not provide a credential public key")
if not attested_credential_data.aaguid:
raise InvalidRegistrationResponse("Authenticator did not provide an AAGUID")
decoded_credential_public_key = decode_credential_public_key(
attested_credential_data.credential_public_key
)
if decoded_credential_public_key.alg not in supported_pub_key_algs:
raise InvalidRegistrationResponse(
f'Unsupported credential public key alg "{decoded_credential_public_key.alg}", expected one of: {supported_pub_key_algs}'
)
# Prepare a list of possible root certificates for certificate chain validation
pem_root_certs_bytes: List[bytes] = []
if pem_root_certs_bytes_by_fmt:
custom_certs = pem_root_certs_bytes_by_fmt.get(attestation_object.fmt)
if custom_certs:
# Load any provided custom root certs
pem_root_certs_bytes.extend(custom_certs)
if attestation_object.fmt == AttestationFormat.NONE:
# A "none" attestation should not contain _anything_ in its attestation statement
any_att_stmt_fields_set = any(
[field is not None for field in asdict(attestation_object.att_stmt).values()]
)
if any_att_stmt_fields_set:
raise InvalidRegistrationResponse(
"None attestation had unexpected attestation statement"
)
# There's nothing else to verify, so mark the verification successful
verified = True
elif attestation_object.fmt == AttestationFormat.FIDO_U2F:
verified = verify_fido_u2f(
attestation_statement=attestation_object.att_stmt,
client_data_json=client_data_bytes,
rp_id_hash=auth_data.rp_id_hash,
credential_id=attested_credential_data.credential_id,
credential_public_key=attested_credential_data.credential_public_key,
aaguid=attested_credential_data.aaguid,
pem_root_certs_bytes=pem_root_certs_bytes,
)
elif attestation_object.fmt == AttestationFormat.PACKED:
verified = verify_packed(
attestation_statement=attestation_object.att_stmt,
attestation_object=attestation_object_bytes,
client_data_json=client_data_bytes,
credential_public_key=attested_credential_data.credential_public_key,
pem_root_certs_bytes=pem_root_certs_bytes,
)
elif attestation_object.fmt == AttestationFormat.TPM:
verified = verify_tpm(
attestation_statement=attestation_object.att_stmt,
attestation_object=attestation_object_bytes,
client_data_json=client_data_bytes,
credential_public_key=attested_credential_data.credential_public_key,
pem_root_certs_bytes=pem_root_certs_bytes,
)
elif attestation_object.fmt == AttestationFormat.APPLE:
verified = verify_apple(
attestation_statement=attestation_object.att_stmt,
attestation_object=attestation_object_bytes,
client_data_json=client_data_bytes,
credential_public_key=attested_credential_data.credential_public_key,
pem_root_certs_bytes=pem_root_certs_bytes,
)
elif attestation_object.fmt == AttestationFormat.ANDROID_SAFETYNET:
verified = verify_android_safetynet(
attestation_statement=attestation_object.att_stmt,
attestation_object=attestation_object_bytes,
client_data_json=client_data_bytes,
pem_root_certs_bytes=pem_root_certs_bytes,
)
elif attestation_object.fmt == AttestationFormat.ANDROID_KEY:
verified = verify_android_key(
attestation_statement=attestation_object.att_stmt,
attestation_object=attestation_object_bytes,
client_data_json=client_data_bytes,
credential_public_key=attested_credential_data.credential_public_key,
pem_root_certs_bytes=pem_root_certs_bytes,
)
else:
# Raise exception on an attestation format we're not prepared to verify
raise InvalidRegistrationResponse(
f'Unsupported attestation type "{attestation_object.fmt}"'
)
# If we got this far and still couldn't verify things then raise an error instead
# of simply returning False
if not verified:
raise InvalidRegistrationResponse("Attestation statement could not be verified")
parsed_backup_flags = parse_backup_flags(auth_data.flags)
return VerifiedRegistration(
credential_id=attested_credential_data.credential_id,
credential_public_key=attested_credential_data.credential_public_key,
sign_count=auth_data.sign_count,
aaguid=aaguid_to_string(attested_credential_data.aaguid),
fmt=attestation_object.fmt,
credential_type=credential.type,
user_verified=auth_data.flags.uv,
attestation_object=attestation_object_bytes,
credential_device_type=parsed_backup_flags.credential_device_type,
credential_backed_up=parsed_backup_flags.credential_backed_up,
)
|
Verify an authenticator's response to navigator.credentials.create()
Args:
- `credential`: The value returned from `navigator.credentials.create()`. Can be either a
stringified JSON object, a plain dict, or an instance of RegistrationCredential
- `expected_challenge`: The challenge passed to the authenticator within the preceding
registration options.
- `expected_rp_id`: The Relying Party's unique identifier as specified in the preceding
registration options.
- `expected_origin`: The domain, with HTTP protocol (e.g. "https://domain.here"), on which
the registration should have occurred. Can also be a list of expected origins.
- (optional) `require_user_presence`: Whether or not to require that the user was present
during the registration. Should be False during auto registration.
- (optional) `require_user_verification`: Whether or not to require that the authenticator
verified the user.
- (optional) `supported_pub_key_algs`: A list of public key algorithm IDs the RP chooses to
restrict support to. Defaults to all supported algorithm IDs.
- (optional) `pem_root_certs_bytes_by_fmt`: A mapping of attestation statement formats to
lists of root certificates, in PEM format, used to validate the certificate chains for
those formats.
Returns:
Information about the authenticator and registration
Raises:
`helpers.exceptions.InvalidRegistrationResponse` if the response cannot be verified
|
verify_registration_response
|
python
|
duo-labs/py_webauthn
|
webauthn/registration/verify_registration_response.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/registration/verify_registration_response.py
|
BSD-3-Clause
|
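A server-side sketch; credential_json, stored_challenge, and save_credential are hypothetical placeholders for values and hooks your application manages:
from webauthn import verify_registration_response
from webauthn.helpers.exceptions import InvalidRegistrationResponse

try:
    verification = verify_registration_response(
        credential=credential_json,           # hypothetical: raw JSON string from the browser
        expected_challenge=stored_challenge,  # hypothetical: bytes issued with the options
        expected_rp_id="example.com",
        expected_origin=["https://example.com", "https://app.example.com"],
        require_user_verification=True,
    )
    # Hypothetical persistence hook; these values are needed for later authentications
    save_credential(verification.credential_id, verification.credential_public_key)
except InvalidRegistrationResponse:
    pass  # reject the registration attempt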
def verify_android_key(
*,
attestation_statement: AttestationStatement,
attestation_object: bytes,
client_data_json: bytes,
credential_public_key: bytes,
pem_root_certs_bytes: List[bytes],
) -> bool:
"""Verify an "android-key" attestation statement
See https://www.w3.org/TR/webauthn-2/#sctn-android-key-attestation
Also referenced: https://source.android.com/docs/security/features/keystore/attestation
"""
if not attestation_statement.sig:
raise InvalidRegistrationResponse(
"Attestation statement was missing signature (Android Key)"
)
if not attestation_statement.alg:
raise InvalidRegistrationResponse(
"Attestation statement was missing algorithm (Android Key)"
)
if not attestation_statement.x5c:
raise InvalidRegistrationResponse("Attestation statement was missing x5c (Android Key)")
# x5c includes a root certificate, so break it up accordingly
x5c_no_root = attestation_statement.x5c[:-1]
x5c_root_cert = attestation_statement.x5c[-1]
x5c_root_cert_x509 = x509.load_der_x509_certificate(x5c_root_cert)
x5c_root_cert_pem = x5c_root_cert_x509.public_bytes(Encoding.PEM)
# Make sure x509 forms a complete, valid cert chain
try:
validate_certificate_chain(
x5c=x5c_no_root,
pem_root_certs_bytes=[x5c_root_cert_pem],
)
except InvalidCertificateChain as err:
raise InvalidRegistrationResponse(f"{err} (Android Key)")
# Make sure the root cert is one of these
pem_root_certs_bytes.append(google_hardware_attestation_root_1)
pem_root_certs_bytes.append(google_hardware_attestation_root_2)
pem_root_certs_bytes.append(google_hardware_attestation_root_3)
pem_root_certs_bytes.append(google_hardware_attestation_root_4)
if x5c_root_cert_pem not in pem_root_certs_bytes:
raise InvalidRegistrationResponse(
"x5c root certificate was not a known root certificate (Android Key)"
)
# Extract authenticator data bytes from attestation_object
attestation_dict = parse_cbor(attestation_object)
authenticator_data_bytes = attestation_dict["authData"]
# Generate a hash of client_data_json
client_data_hash = hashlib.sha256()
client_data_hash.update(client_data_json)
client_data_hash_bytes = client_data_hash.digest()
verification_data = b"".join(
[
authenticator_data_bytes,
client_data_hash_bytes,
]
)
# Verify that sig is a valid signature over the concatenation of authenticatorData
# and clientDataHash using the public key in the first certificate in x5c with the
# algorithm specified in alg.
attestation_cert_bytes = attestation_statement.x5c[0]
attestation_cert = x509.load_der_x509_certificate(attestation_cert_bytes)
attestation_cert_pub_key = attestation_cert.public_key()
try:
verify_signature(
public_key=attestation_cert_pub_key,
signature_alg=attestation_statement.alg,
signature=attestation_statement.sig,
data=verification_data,
)
except InvalidSignature:
raise InvalidRegistrationResponse(
"Could not verify attestation statement signature (Android Key)"
)
# Verify that the public key in the first certificate in x5c matches the
# credentialPublicKey in the attestedCredentialData in authenticatorData.
attestation_cert_pub_key_bytes = attestation_cert_pub_key.public_bytes(
Encoding.DER,
PublicFormat.SubjectPublicKeyInfo,
)
# Convert our raw public key bytes into the same format cryptography generates for
# the cert subject key
decoded_pub_key = decode_credential_public_key(credential_public_key)
pub_key_crypto = decoded_public_key_to_cryptography(decoded_pub_key)
pub_key_crypto_bytes = pub_key_crypto.public_bytes(
Encoding.DER,
PublicFormat.SubjectPublicKeyInfo,
)
if attestation_cert_pub_key_bytes != pub_key_crypto_bytes:
raise InvalidRegistrationResponse(
"Certificate public key did not match credential public key (Android Key)"
)
# Verify that the attestationChallenge field in the attestation certificate
# extension data is identical to clientDataHash.
ext_key_description_oid = "1.3.6.1.4.1.11129.2.1.17"
try:
cert_extensions = attestation_cert.extensions
ext_key_description: Extension = cert_extensions.get_extension_for_oid(
ObjectIdentifier(ext_key_description_oid)
)
except ExtensionNotFound:
raise InvalidRegistrationResponse(
f"Certificate missing extension {ext_key_description_oid} (Android Key)"
)
# Peel apart the Extension into an UnrecognizedExtension, then the bytes we actually
# want
ext_value_wrapper: UnrecognizedExtension = ext_key_description.value
ext_value: bytes = ext_value_wrapper.value
parsed_ext = KeyDescription.load(ext_value)
# Verify that the attestationChallenge field in the attestation certificate extension data
# is identical to clientDataHash.
attestation_challenge: OctetString = parsed_ext["attestationChallenge"]
if bytes(attestation_challenge) != client_data_hash_bytes:
raise InvalidRegistrationResponse(
"attestationChallenge field was not the same as the hash of clientDataJSON (Android Key)"
)
# Verify the following using the appropriate authorization list from the attestation
# certificate extension data:
software_enforced: AuthorizationList = parsed_ext["softwareEnforced"]
tee_enforced: AuthorizationList = parsed_ext["teeEnforced"]
# The AuthorizationList.allApplications field is not present on either authorization
# list (softwareEnforced nor teeEnforced), since PublicKeyCredential MUST be scoped
# to the RP ID.
if software_enforced["allApplications"].native is not None:
raise InvalidRegistrationResponse(
"allApplications field was present in softwareEnforced (Android Key)"
)
if tee_enforced["allApplications"].native is not None:
raise InvalidRegistrationResponse(
"allApplications field was present in teeEnforced (Android Key)"
)
# The value in the AuthorizationList.origin field is equal to KM_ORIGIN_GENERATED.
origin = tee_enforced["origin"].native
if origin != KeyOrigin.GENERATED:
raise InvalidRegistrationResponse(
f"teeEnforced.origin {origin} was not {KeyOrigin.GENERATED}"
)
# The value in the AuthorizationList.purpose field is equal to KM_PURPOSE_SIGN.
purpose = tee_enforced["purpose"].native
if purpose != [KeyPurpose.SIGN]:
raise InvalidRegistrationResponse(
f"teeEnforced.purpose {purpose} was not [{KeyPurpose.SIGN}]"
)
return True
|
Verify an "android-key" attestation statement
See https://www.w3.org/TR/webauthn-2/#sctn-android-key-attestation
Also referenced: https://source.android.com/docs/security/features/keystore/attestation
|
verify_android_key
|
python
|
duo-labs/py_webauthn
|
webauthn/registration/formats/android_key.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/registration/formats/android_key.py
|
BSD-3-Clause
|
def verify_android_safetynet(
*,
attestation_statement: AttestationStatement,
attestation_object: bytes,
client_data_json: bytes,
pem_root_certs_bytes: List[bytes],
verify_timestamp_ms: bool = True,
) -> bool:
"""Verify an "android-safetynet" attestation statement
See https://www.w3.org/TR/webauthn-2/#sctn-android-safetynet-attestation
Notes:
- `verify_timestamp_ms` is a kind of escape hatch specifically for enabling
testing of this method. Without this we can't use static responses in unit
tests because they'll always evaluate as expired. This flag can be removed
from this method if we ever figure out how to dynamically create
safetynet-formatted responses that can be immediately tested.
"""
if not attestation_statement.ver:
# As of this writing, there is only one format of the SafetyNet response and
# ver is reserved for future use (so for now just make sure it's present)
raise InvalidRegistrationResponse("Attestation statement was missing version (SafetyNet)")
if not attestation_statement.response:
raise InvalidRegistrationResponse("Attestation statement was missing response (SafetyNet)")
# Begin peeling apart the JWS in the attestation statement response
jws = attestation_statement.response.decode("ascii")
jws_parts = jws.split(".")
if len(jws_parts) != 3:
raise InvalidRegistrationResponse("Response JWS did not have three parts (SafetyNet)")
header_json = json.loads(base64url_to_bytes(jws_parts[0]))
payload_json = json.loads(base64url_to_bytes(jws_parts[1]))
header = SafetyNetJWSHeader(
alg=header_json.get("alg", ""),
x5c=header_json.get("x5c", []),
)
payload = SafetyNetJWSPayload(
nonce=payload_json.get("nonce", ""),
timestamp_ms=payload_json.get("timestampMs", 0),
apk_package_name=payload_json.get("apkPackageName", ""),
apk_digest_sha256=payload_json.get("apkDigestSha256", ""),
cts_profile_match=payload_json.get("ctsProfileMatch", False),
apk_certificate_digest_sha256=payload_json.get("apkCertificateDigestSha256", []),
basic_integrity=payload_json.get("basicIntegrity", False),
)
signature_bytes_str: str = jws_parts[2]
# Verify that the nonce attribute in the payload of response is identical to the
# Base64 encoding of the SHA-256 hash of the concatenation of authenticatorData and
# clientDataHash.
# Extract authenticator data bytes from attestation_object
attestation_dict = parse_cbor(attestation_object)
authenticator_data_bytes = attestation_dict["authData"]
# Generate a hash of client_data_json
client_data_hash = hashlib.sha256()
client_data_hash.update(client_data_json)
client_data_hash_bytes = client_data_hash.digest()
nonce_data = b"".join(
[
authenticator_data_bytes,
client_data_hash_bytes,
]
)
# Start with a sha256 hash
nonce_data_hash = hashlib.sha256()
nonce_data_hash.update(nonce_data)
nonce_data_hash_bytes = nonce_data_hash.digest()
# Encode to base64
nonce_data_hash_bytes = base64.b64encode(nonce_data_hash_bytes)
# Finish by decoding to string
nonce_data_str = nonce_data_hash_bytes.decode("utf-8")
if payload.nonce != nonce_data_str:
raise InvalidRegistrationResponse("Payload nonce was not expected value (SafetyNet)")
# Verify that the SafetyNet response actually came from the SafetyNet service
# by following the steps in the SafetyNet online documentation.
x5c = [base64url_to_bytes(cert) for cert in header.x5c]
if not payload.basic_integrity:
raise InvalidRegistrationResponse("Could not verify device integrity (SafetyNet)")
if verify_timestamp_ms:
try:
verify_safetynet_timestamp(payload.timestamp_ms)
except ValueError as err:
raise InvalidRegistrationResponse(f"{err} (SafetyNet)")
# Verify that the leaf certificate was issued to the hostname attest.android.com
attestation_cert = x509.load_der_x509_certificate(x5c[0])
cert_common_name = attestation_cert.subject.get_attributes_for_oid(
NameOID.COMMON_NAME,
)[0]
if cert_common_name.value != "attest.android.com":
raise InvalidRegistrationResponse(
'Certificate common name was not "attest.android.com" (SafetyNet)'
)
# Validate certificate chain
try:
# Include known root certificates for this attestation format with whatever
# other certs were provided
pem_root_certs_bytes.append(globalsign_r2)
pem_root_certs_bytes.append(globalsign_root_ca)
validate_certificate_chain(
x5c=x5c,
pem_root_certs_bytes=pem_root_certs_bytes,
)
except InvalidCertificateChain as err:
raise InvalidRegistrationResponse(f"{err} (SafetyNet)")
# Verify signature
verification_data = f"{jws_parts[0]}.{jws_parts[1]}".encode("utf-8")
signature_bytes = base64url_to_bytes(signature_bytes_str)
if header.alg != "RS256":
raise InvalidRegistrationResponse(f"JWS header alg was not RS256: {header.alg} (SafetyNet")
# Get cert public key bytes
attestation_cert_pub_key = attestation_cert.public_key()
try:
verify_signature(
public_key=attestation_cert_pub_key,
signature_alg=COSEAlgorithmIdentifier.RSASSA_PKCS1_v1_5_SHA_256,
signature=signature_bytes,
data=verification_data,
)
except InvalidSignature:
raise InvalidRegistrationResponse(
"Could not verify attestation statement signature (Packed)"
)
return True
|
Verify an "android-safetynet" attestation statement
See https://www.w3.org/TR/webauthn-2/#sctn-android-safetynet-attestation
Notes:
- `verify_timestamp_ms` is a kind of escape hatch specifically for enabling
testing of this method. Without this we can't use static responses in unit
tests because they'll always evaluate as expired. This flag can be removed
from this method if we ever figure out how to dynamically create
safetynet-formatted responses that can be immediately tested.
|
verify_android_safetynet
|
python
|
duo-labs/py_webauthn
|
webauthn/registration/formats/android_safetynet.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/registration/formats/android_safetynet.py
|
BSD-3-Clause
|
def verify_apple(
*,
attestation_statement: AttestationStatement,
attestation_object: bytes,
client_data_json: bytes,
credential_public_key: bytes,
pem_root_certs_bytes: List[bytes],
) -> bool:
"""
https://www.w3.org/TR/webauthn-2/#sctn-apple-anonymous-attestation
"""
if not attestation_statement.x5c:
raise InvalidRegistrationResponse("Attestation statement was missing x5c (Apple)")
# Validate the certificate chain
try:
# Include known root certificates for this attestation format
pem_root_certs_bytes.append(apple_webauthn_root_ca)
validate_certificate_chain(
x5c=attestation_statement.x5c,
pem_root_certs_bytes=pem_root_certs_bytes,
)
except InvalidCertificateChain as err:
raise InvalidRegistrationResponse(f"{err} (Apple)")
# Concatenate authenticatorData and clientDataHash to form nonceToHash.
attestation_dict = parse_cbor(attestation_object)
authenticator_data_bytes = attestation_dict["authData"]
client_data_hash = hashlib.sha256()
client_data_hash.update(client_data_json)
client_data_hash_bytes = client_data_hash.digest()
nonce_to_hash = b"".join(
[
authenticator_data_bytes,
client_data_hash_bytes,
]
)
# Perform SHA-256 hash of nonceToHash to produce nonce.
nonce = hashlib.sha256()
nonce.update(nonce_to_hash)
nonce_bytes = nonce.digest()
# Verify that nonce equals the value of the extension with
# OID 1.2.840.113635.100.8.2 in credCert.
attestation_cert_bytes = attestation_statement.x5c[0]
attestation_cert = x509.load_der_x509_certificate(attestation_cert_bytes)
cert_extensions = attestation_cert.extensions
# Still no documented name for this OID...
ext_1_2_840_113635_100_8_2_oid = "1.2.840.113635.100.8.2"
try:
ext_1_2_840_113635_100_8_2: Extension = cert_extensions.get_extension_for_oid(
ObjectIdentifier(ext_1_2_840_113635_100_8_2_oid)
)
except ExtensionNotFound:
raise InvalidRegistrationResponse(
f"Certificate missing extension {ext_1_2_840_113635_100_8_2_oid} (Apple)"
)
# Peel apart the Extension into an UnrecognizedExtension, then the bytes we actually
# want
ext_value_wrapper: UnrecognizedExtension = ext_1_2_840_113635_100_8_2.value
# Ignore the first six ASN.1 structure bytes that define the nonce as an
# OCTET STRING. Should trim off '0$\xa1"\x04'
ext_value: bytes = ext_value_wrapper.value[6:]
if ext_value != nonce_bytes:
raise InvalidRegistrationResponse("Certificate nonce was not expected value (Apple)")
# Verify that the credential public key equals the Subject Public Key of credCert.
attestation_cert_pub_key = attestation_cert.public_key()
attestation_cert_pub_key_bytes = attestation_cert_pub_key.public_bytes(
Encoding.DER,
PublicFormat.SubjectPublicKeyInfo,
)
# Convert our raw public key bytes into the same format cryptography generates for
# the cert subject key
decoded_pub_key = decode_credential_public_key(credential_public_key)
pub_key_crypto = decoded_public_key_to_cryptography(decoded_pub_key)
pub_key_crypto_bytes = pub_key_crypto.public_bytes(
Encoding.DER,
PublicFormat.SubjectPublicKeyInfo,
)
if attestation_cert_pub_key_bytes != pub_key_crypto_bytes:
raise InvalidRegistrationResponse(
"Certificate public key did not match credential public key (Apple)"
)
return True
| ERROR: type should be string, got "\n https://www.w3.org/TR/webauthn-2/#sctn-apple-anonymous-attestation\n " |
verify_apple
|
python
|
duo-labs/py_webauthn
|
webauthn/registration/formats/apple.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/registration/formats/apple.py
|
BSD-3-Clause
|
def verify_fido_u2f(
*,
attestation_statement: AttestationStatement,
client_data_json: bytes,
rp_id_hash: bytes,
credential_id: bytes,
credential_public_key: bytes,
aaguid: bytes,
pem_root_certs_bytes: List[bytes],
) -> bool:
"""Verify a "fido-u2f" attestation statement
See https://www.w3.org/TR/webauthn-2/#sctn-fido-u2f-attestation
"""
if not attestation_statement.sig:
raise InvalidRegistrationResponse("Attestation statement was missing signature (FIDO-U2F)")
if not attestation_statement.x5c:
raise InvalidRegistrationResponse(
"Attestation statement was missing certificate (FIDO-U2F)"
)
if len(attestation_statement.x5c) > 1:
raise InvalidRegistrationResponse(
"Attestation statement contained too many certificates (FIDO-U2F)"
)
# Validate the certificate chain
try:
validate_certificate_chain(
x5c=attestation_statement.x5c,
pem_root_certs_bytes=pem_root_certs_bytes,
)
except InvalidCertificateChain as err:
raise InvalidRegistrationResponse(f"{err} (FIDO-U2F)")
# FIDO spec requires AAGUID in U2F attestations to be all zeroes
# See https://fidoalliance.org/specs/fido-v2.1-rd-20191217/fido-client-to-authenticator-protocol-v2.1-rd-20191217.html#u2f-authenticatorMakeCredential-interoperability
actual_aaguid = aaguid_to_string(aaguid)
expected_aaguid = "00000000-0000-0000-0000-000000000000"
if actual_aaguid != expected_aaguid:
raise InvalidRegistrationResponse(
f"AAGUID {actual_aaguid} was not expected {expected_aaguid} (FIDO-U2F)"
)
# Get the public key from the leaf certificate
leaf_cert_bytes = attestation_statement.x5c[0]
leaf_cert = x509.load_der_x509_certificate(leaf_cert_bytes)
leaf_cert_pub_key = leaf_cert.public_key()
# We need the cert's x and y points so make sure they exist
if not isinstance(leaf_cert_pub_key, EllipticCurvePublicKey):
raise InvalidRegistrationResponse("Leaf cert was not an EC2 certificate (FIDO-U2F)")
if not isinstance(leaf_cert_pub_key.curve, SECP256R1):
raise InvalidRegistrationResponse("Leaf cert did not use P-256 curve (FIDO-U2F)")
decoded_public_key = decode_credential_public_key(credential_public_key)
if not isinstance(decoded_public_key, DecodedEC2PublicKey):
raise InvalidRegistrationResponse("Credential public key was not EC2 (FIDO-U2F)")
# Convert the public key to "Raw ANSI X9.62 public key format"
public_key_u2f = b"".join(
[
bytes([0x04]),
decoded_public_key.x,
decoded_public_key.y,
]
)
# Generate a hash of client_data_json
client_data_hash = hashlib.sha256()
client_data_hash.update(client_data_json)
client_data_hash_bytes = client_data_hash.digest()
# Prepare the signature base (called "verificationData" in the WebAuthn spec)
verification_data = b"".join(
[
bytes([0x00]),
rp_id_hash,
client_data_hash_bytes,
credential_id,
public_key_u2f,
]
)
try:
verify_signature(
public_key=leaf_cert_pub_key,
signature_alg=COSEAlgorithmIdentifier.ECDSA_SHA_256,
signature=attestation_statement.sig,
data=verification_data,
)
except InvalidSignature:
raise InvalidRegistrationResponse(
"Could not verify attestation statement signature (FIDO-U2F)"
)
# If we make it to here we're all good
return True
|
Verify a "fido-u2f" attestation statement
See https://www.w3.org/TR/webauthn-2/#sctn-fido-u2f-attestation
|
verify_fido_u2f
|
python
|
duo-labs/py_webauthn
|
webauthn/registration/formats/fido_u2f.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/registration/formats/fido_u2f.py
|
BSD-3-Clause
|
def verify_packed(
*,
attestation_statement: AttestationStatement,
attestation_object: bytes,
client_data_json: bytes,
credential_public_key: bytes,
pem_root_certs_bytes: List[bytes],
) -> bool:
"""Verify a "packed" attestation statement
See https://www.w3.org/TR/webauthn-2/#sctn-packed-attestation
"""
if not attestation_statement.sig:
raise InvalidRegistrationResponse("Attestation statement was missing signature (Packed)")
if not attestation_statement.alg:
raise InvalidRegistrationResponse("Attestation statement was missing algorithm (Packed)")
# Extract authenticator data bytes from attestation_object
attestation_dict = parse_cbor(attestation_object)
authenticator_data_bytes = attestation_dict["authData"]
# Generate a hash of client_data_json
client_data_hash = hashlib.sha256()
client_data_hash.update(client_data_json)
client_data_hash_bytes = client_data_hash.digest()
verification_data = b"".join(
[
authenticator_data_bytes,
client_data_hash_bytes,
]
)
if attestation_statement.x5c:
# Validate the certificate chain
try:
validate_certificate_chain(
x5c=attestation_statement.x5c,
pem_root_certs_bytes=pem_root_certs_bytes,
)
except InvalidCertificateChain as err:
raise InvalidRegistrationResponse(f"{err} (Packed)")
attestation_cert_bytes = attestation_statement.x5c[0]
attestation_cert = x509.load_der_x509_certificate(attestation_cert_bytes)
attestation_cert_pub_key = attestation_cert.public_key()
try:
verify_signature(
public_key=attestation_cert_pub_key,
signature_alg=attestation_statement.alg,
signature=attestation_statement.sig,
data=verification_data,
)
except InvalidSignature:
raise InvalidRegistrationResponse(
"Could not verify attestation statement signature (Packed)"
)
else:
# Self Attestation
decoded_pub_key = decode_credential_public_key(credential_public_key)
if decoded_pub_key.alg != attestation_statement.alg:
raise InvalidRegistrationResponse(
f"Credential public key alg {decoded_pub_key.alg} did not equal attestation statement alg {attestation_statement.alg}"
)
public_key = decoded_public_key_to_cryptography(decoded_pub_key)
try:
verify_signature(
public_key=public_key,
signature_alg=attestation_statement.alg,
signature=attestation_statement.sig,
data=verification_data,
)
except InvalidSignature:
raise InvalidRegistrationResponse(
"Could not verify attestation statement signature (Packed|Self)"
)
return True
|
Verify a "packed" attestation statement
See https://www.w3.org/TR/webauthn-2/#sctn-packed-attestation
|
verify_packed
|
python
|
duo-labs/py_webauthn
|
webauthn/registration/formats/packed.py
|
https://github.com/duo-labs/py_webauthn/blob/master/webauthn/registration/formats/packed.py
|
BSD-3-Clause
|
def fixed_get_imports(filename: str | os.PathLike) -> list[str]:
"""Work around for https://huggingface.co/microsoft/phi-1_5/discussions/72."""
if not str(filename).endswith("/modeling_deepseek.py"):
return get_imports(filename)
imports = get_imports(filename)
imports.remove("flash_attn")
return imports
|
Workaround for https://huggingface.co/microsoft/phi-1_5/discussions/72.
|
fixed_get_imports
|
python
|
xjdr-alt/entropix
|
download_weights.py
|
https://github.com/xjdr-alt/entropix/blob/master/download_weights.py
|
Apache-2.0
|
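A minimal sketch of how such a workaround is typically applied: temporarily patch transformers' import scanner while loading a remote-code model (the model id is illustrative):
from unittest.mock import patch

from transformers import AutoModelForCausalLM

with patch("transformers.dynamic_module_utils.get_imports", fixed_get_imports):
    model = AutoModelForCausalLM.from_pretrained(
        "deepseek-ai/deepseek-llm-7b-base",  # illustrative model id
        trust_remote_code=True,
    )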
def kl_divergence(logp: jnp.ndarray, logq: jnp.ndarray) -> jnp.ndarray:
"""Compute KL divergence between two log probability distributions."""
p = jnp.exp(logp)
return jnp.sum(jnp.where(p > 0, p * (logp - logq), 0.0), axis=-1)
|
Compute KL divergence between two log probability distributions.
|
kl_divergence
|
python
|
xjdr-alt/entropix
|
entropix/dslider.py
|
https://github.com/xjdr-alt/entropix/blob/master/entropix/dslider.py
|
Apache-2.0
|
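A quick numeric check of the helper above:
import jax.numpy as jnp

logp = jnp.log(jnp.array([0.5, 0.5]))
logq = jnp.log(jnp.array([0.9, 0.1]))
# 0.5*ln(0.5/0.9) + 0.5*ln(0.5/0.1) ~= 0.511 nats
print(kl_divergence(logp, logq))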
def ent_varent(logp: jnp.ndarray) -> Tuple[jnp.ndarray, jnp.ndarray]:
"""Compute entropy and varentropy from log probabilities."""
p = jnp.exp(logp)
ent = -jnp.sum(p * logp, axis=-1)
diff = logp + ent[..., None]
varent = jnp.sum(p * diff**2, axis=-1)
return ent, varent
|
Compute entropy and varentropy from log probabilities.
|
ent_varent
|
python
|
xjdr-alt/entropix
|
entropix/dslider.py
|
https://github.com/xjdr-alt/entropix/blob/master/entropix/dslider.py
|
Apache-2.0
|
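Sanity check: a uniform distribution has entropy ln(n) and zero varentropy, since every token's surprisal equals the entropy:
import jax.numpy as jnp

uniform_logp = jnp.log(jnp.full(4, 0.25))
ent, varent = ent_varent(uniform_logp)
print(ent, varent)  # ~1.386 (= ln 4), ~0.0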
def normalize_logits(logits: jnp.ndarray, noise_floor: float) -> jnp.ndarray:
"""Normalize logits to log probabilities with noise floor truncation."""
shifted = logits - jnp.max(logits, axis=-1, keepdims=True)
normalized = shifted - jax.nn.logsumexp(shifted + EPS, axis=-1, keepdims=True)
# noise floor calculated for bfloat16
return jnp.where(normalized < noise_floor, jnp.log(EPS), normalized)
|
Normalize logits to log probabilities with noise floor truncation.
|
normalize_logits
|
python
|
xjdr-alt/entropix
|
entropix/dslider.py
|
https://github.com/xjdr-alt/entropix/blob/master/entropix/dslider.py
|
Apache-2.0
|
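Illustrative call (assumes the module-level EPS constant from dslider.py): any normalized logprob below the noise floor is clamped to log(EPS), effectively truncating that token downstream:
import jax.numpy as jnp

logits = jnp.array([5.0, 4.0, -10.0])
logp = normalize_logits(logits, noise_floor=-10.0)
# logp ~= [-0.31, -1.31, log(EPS)]; only the last entry fell below the floor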
def __hash__(self):
"""Static hash implementation that avoids hashing array values"""
hashable_items = []
for field in self.__dataclass_fields__.values():
value = getattr(self, field.name)
if isinstance(value, (jnp.ndarray, jax.Array)):
hashable_items.append(hash((str(field.name), value.shape, str(value.dtype))))
elif isinstance(
value, (OutlierThreshold, ArgmaxThreshold, DirichletThreshold, TargetEntropy)
):
hashable_items.append(hash(value))
else:
hashable_items.append(hash((str(field.name), value)))
return hash(tuple(hashable_items))
|
Static hash implementation that avoids hashing array values
|
__hash__
|
python
|
xjdr-alt/entropix
|
entropix/dslider_config.py
|
https://github.com/xjdr-alt/entropix/blob/master/entropix/dslider_config.py
|
Apache-2.0
|
def halley_update(alpha, target_values):
"""
Compute the Halley's method update direction for the digamma fixed-point residual used in `fit_dirichlet`
"""
p1 = jsp.polygamma(1, alpha)
p2 = jsp.polygamma(2, alpha)
S = jnp.sum(alpha, axis=-1, keepdims=True)
s1 = jsp.polygamma(1, S)
s2 = jsp.polygamma(2, S)
p1_inv = 1.0 / p1
sum_p1_inv = jnp.sum(p1_inv, axis=-1, keepdims=True)
denom = 1.0 - s1 * sum_p1_inv
denom = jnp.where(jnp.abs(denom) < 1e-12, 1e-12, denom)
coeff = s1 / denom
error = jsp.digamma(alpha) - jsp.digamma(S) - target_values
temp = p1_inv * error
sum_temp = jnp.sum(temp, axis=-1, keepdims=True)
J_inv_error = temp + coeff * sum_temp * p1_inv
sum_J_inv_error = jnp.sum(J_inv_error, axis=-1, keepdims=True)
H_J_inv_error = p2 * J_inv_error - s2 * sum_J_inv_error
temp2 = p1_inv * H_J_inv_error
sum_temp2 = jnp.sum(temp2, axis=-1, keepdims=True)
J_inv_H_J_inv_error = temp2 + coeff * sum_temp2 * p1_inv
return -J_inv_error + 0.5 * J_inv_H_J_inv_error
|
Compute the Halley's method update direction for the digamma fixed-point residual used in `fit_dirichlet`
|
halley_update
|
python
|
xjdr-alt/entropix
|
entropix/dslider_utils.py
|
https://github.com/xjdr-alt/entropix/blob/master/entropix/dslider_utils.py
|
Apache-2.0
|
def fit_dirichlet(
target_values,
init_alpha=None,
initial_lr=1.2,
decay_alpha=0.1,
decay_beta=2.0,
decay_gamma=0.25,
decay_nu=0.75,
max_iters=140,
tol=1e-4,
dtype: jnp.dtype = jnp.bfloat16,
):
"""
Estimates Dirichlet parameters (alpha) from target logprobs.
"""
batch_shape = target_values.shape[:-1]
n = target_values.shape[-1]
min_lr = 1e-8
target_values = target_values.astype(
jnp.float32
) # for large vocab size needs float64
if init_alpha is None:
init_alpha = jnp.ones((*batch_shape, n), dtype=jnp.float32)
def scan_body(carry, _):
alpha, converged, error_norm, step = carry
S = jnp.sum(alpha, axis=-1, keepdims=True)
digamma_alpha = jsp.digamma(alpha)
psi_S = jsp.digamma(S)
error = digamma_alpha - psi_S - target_values
error_norm = jnp.linalg.norm(error, axis=-1)
new_converged = converged | (error_norm < tol)
exp_factor = jnp.exp(-decay_alpha * (step**decay_nu))
cos_factor = jnp.abs(jnp.cos(decay_beta / (step**decay_gamma)))
lr = initial_lr * exp_factor * cos_factor
lr = jnp.maximum(lr, min_lr)
delta_alpha = halley_update(alpha, target_values)
scaled_delta_alpha = lr[..., None] * delta_alpha
max_delta = 0.5 * alpha
scaled_delta_alpha = jnp.clip(scaled_delta_alpha, -max_delta, max_delta)
new_alpha = jnp.where(
new_converged[..., None],
alpha,
jnp.maximum(alpha + scaled_delta_alpha, alpha / 2),
)
return (new_alpha, new_converged, error_norm, step + 1), None
init_state = (
init_alpha,
jnp.zeros(batch_shape, dtype=jnp.bool_),
jnp.full(batch_shape, jnp.inf),
jnp.ones(batch_shape, dtype=jnp.int32),
)
(final_alpha, final_converged, _, final_step), _ = jax.lax.scan(
scan_body, init_state, None, length=max_iters
)
return final_alpha.astype(dtype), final_step - 1, final_converged
|
Estimates Dirichlet parameters (alpha) from target logprobs.
|
fit_dirichlet
|
python
|
xjdr-alt/entropix
|
entropix/dslider_utils.py
|
https://github.com/xjdr-alt/entropix/blob/master/entropix/dslider_utils.py
|
Apache-2.0
|
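A usage sketch: for Dirichlet(alpha), E[log x_i] = digamma(alpha_i) - digamma(sum(alpha)), so feeding those expectations as targets should recover alpha:
import jax.numpy as jnp
import jax.scipy.special as jsp

true_alpha = jnp.array([[2.0, 3.0, 5.0]])
target = jsp.digamma(true_alpha) - jsp.digamma(jnp.sum(true_alpha, axis=-1, keepdims=True))
alpha_hat, steps, converged = fit_dirichlet(target)
# alpha_hat should approach [2, 3, 5] once `converged` is True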
def get_result_at_slot(self, slot: int) -> SlotData:
"""Returns the token at a given slot.
Args:
slot: An integer from [0, n) representing an index into the batch.
Note: implementations of this method must correctly handle
microbatches, if microbatches are used.
"""
# Potentially get multiple beams for the given slot.
start_idx = slot * self.samples_per_slot
end_idx = (slot + 1) * self.samples_per_slot
# Mask out any non valid tokens.
return SlotData(
tokens=self.data[start_idx:end_idx, self.tokens_idx[0] : self.tokens_idx[1]],
valid=self.data[start_idx:end_idx, self.valid_idx[0] : self.valid_idx[1]],
lengths=self.data[start_idx:end_idx, self.length_idx[0] : self.length_idx[1]][
:, 0
],
)
|
Returns the token at a given slot.
Args:
slot: An integer from [0, n) representing an index into the batch.
Note: implementations of this method must correctly handle
microbatches, if microbatches are used.
|
get_result_at_slot
|
python
|
xjdr-alt/entropix
|
entropix/engine.py
|
https://github.com/xjdr-alt/entropix/blob/master/entropix/engine.py
|
Apache-2.0
|
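Illustrative data layout, assuming ResultTokens is a simple container over these fields: with samples_per_slot=1 and the index ranges used by prefill/generate below, row i of data is [token_i, valid_i, length_i]:
import jax.numpy as jnp

result = ResultTokens(
    data=jnp.array([[42, 1, 7], [13, 1, 3]], dtype=jnp.int32),
    tokens_idx=(0, 1),
    valid_idx=(1, 2),
    length_idx=(2, 3),
    samples_per_slot=1,
)
slot_data = result.get_result_at_slot(1)  # tokens=[[13]], valid=[[1]], lengths=[3]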
def __init__(
self,
params: ModelParams,
xfmr_weights: XfmrWeights,
mesh: jax.sharding.Mesh,
tokenizer: Tokenizer,
xfmr_fn: Callable,
sample_fn: Callable,
):
"""Initialize engine with model parameters and functions.
Args:
params: Model architecture parameters
xfmr_weights: Model weights
mesh: Device mesh for parallel execution
tokenizer: Tokenizer instance
xfmr_fn: Transformer forward function
sample_fn: Token sampling function
"""
self.params = params
self.xfmr_weights = xfmr_weights
self.mesh = mesh
self.replicated = jax.NamedSharding(mesh, jax.sharding.PartitionSpec())
self.tokenizer = tokenizer
self.freqs_cis = jax.device_put(
self.precompute_freqs_cis(
params.head_dim, params.max_seq_len, params.rope_theta, params.use_scaled_rope
),
self.replicated,
)
self.xfmr_fn = xfmr_fn
self.sample_fn = sample_fn
|
Initialize engine with model parameters and functions.
Args:
params: Model architecture parameters
xfmr_weights: Model weights
mesh: Device mesh for parallel execution
tokenizer: Tokenizer instance
xfmr_fn: Transformer forward function
sample_fn: Token sampling function
|
__init__
|
python
|
xjdr-alt/entropix
|
entropix/engine.py
|
https://github.com/xjdr-alt/entropix/blob/master/entropix/engine.py
|
Apache-2.0
|
def get_tokenizer(
self,
) -> Dict[str, Any]:
"""Returns the info to construct a tokenizer in py/c++."""
return {}
|
Returns the info to construct a tokenizer in py/c++.
|
get_tokenizer
|
python
|
xjdr-alt/entropix
|
entropix/engine.py
|
https://github.com/xjdr-alt/entropix/blob/master/entropix/engine.py
|
Apache-2.0
|
def build_tokenizer(
self,
metadata: Dict[str, Any],
) -> Tokenizer:
"""Builds a new tokenizer object and returns it."""
return self.tokenizer
|
Builds a new tokenizer object and returns it.
|
build_tokenizer
|
python
|
xjdr-alt/entropix
|
entropix/engine.py
|
https://github.com/xjdr-alt/entropix/blob/master/entropix/engine.py
|
Apache-2.0
|
def free_resource(
self,
slot: int, # pylint: disable=unused-argument
) -> Any:
"""Free cache and other decode resource for the slot.
This function is needed for advanced attetnion kenel like PageAttetion.
After finishing one request, the engine need to free all used page block
resource and reuse for coming requests.
"""
return None
|
Free cache and other decode resources for the slot.
This function is needed for advanced attention kernels like PagedAttention.
After finishing one request, the engine needs to free all used page block
resources and reuse them for incoming requests.
|
free_resource
|
python
|
xjdr-alt/entropix
|
entropix/engine.py
|
https://github.com/xjdr-alt/entropix/blob/master/entropix/engine.py
|
Apache-2.0
|
def prefill(
self,
*,
params: Params,
existing_prefix: Optional[jax.Array] = None,
padded_tokens: jax.Array,
true_length: int,
sampler: Optional[Callable[[Any], Any]] = None, # pylint: disable=unused-argument
rng: Optional[jax.random.PRNGKey] = None,
top_k: int = 6,
) -> Tuple[Prefix, ResultTokens]:
"""Computes a kv-cache for a set of tokens conditional on existing cache.
existing_prefix (if provided) represents a prefix that has already been
processed by the underlying model. tokens is logically appended
to the text represented by `existing_prefix`. This method returns a new
kv_cache (typically) for the resulting text.
If sampler is passed, then the engine should use it to sample the next token.
"""
cur_pos = 0
bsz, seqlen = padded_tokens.shape
attn_mask = self.build_attn_mask(seqlen, cur_pos)
kvcache = KVCache.new(
params.n_layers, bsz, params.max_seq_len, params.n_local_kv_heads, params.head_dim
)
with self.mesh:
logits, kvcache, _ = self.xfmr_fn(
self.xfmr_weights,
params,
padded_tokens,
cur_pos,
self.freqs_cis[:seqlen],
kvcache,
attn_mask=attn_mask,
)
# next_token = jnp.argmax(logits[:, -1], axis=-1, keepdims=True).astype(jnp.int32)
_, next_token = jax.lax.top_k(logits[:, true_length], k=top_k)
next_token = jnp.array(next_token, dtype=jnp.int32).reshape((top_k, 1))
# Create arrays for tokens, validity, and lengths
tokens = next_token
validity = jnp.ones_like(next_token, dtype=jnp.bool_)
lengths = jnp.broadcast_to(
jnp.array([[true_length + 1]], dtype=jnp.int32), (tokens.shape[0], 1)
)
data = jnp.concatenate([tokens, validity, lengths], axis=1)
result = ResultTokens(
data=data,
# Tokens are shape [batch, speculations], so when we concatenate
# tokens, validity and length along their index 1 dimension then they
# occupy 0:speculations.
tokens_idx=(0, 1),
# Validity occupies the same amount of space, but next in line.
valid_idx=(1, 2),
# And lengths is rank 1.
length_idx=(2, 3),
samples_per_slot=bsz,
)
return {
"logits": logits,
"cache": kvcache,
"next_pos": true_length + 1,
"generated_tokens": jnp.zeros((bsz, 1), dtype=jnp.int32),
"tokens": next_token,
}, result
|
Computes a kv-cache for a set of tokens conditional on an existing cache.
existing_prefix (if provided) represents a prefix that has already been
processed by the underlying model. The tokens are logically appended
to the text represented by `existing_prefix`. This method (typically)
returns a new kv_cache for the resulting text.
If sampler is passed, then the engine should use it to sample the next token.
|
prefill
|
python
|
xjdr-alt/entropix
|
entropix/engine.py
|
https://github.com/xjdr-alt/entropix/blob/master/entropix/engine.py
|
Apache-2.0
|
def generate(
self,
params: Params,
decode_state: DecodeState,
sampler: Optional[Callable[[Any], Any]] = None, # pylint: disable=unused-argument
rng: Optional[jax.random.PRNGKey] = jax.random.PRNGKey(1337),
) -> Tuple[DecodeState, ResultTokens]:
"""Generates tokens for each sequence being decoded in parallel.
Generate takes a batch of pre-computed kv-caches, and computes:
- the predicted next token for each of the sequences
- an updated set of kv-caches
In the case of pipelining, this will handle N cycles (where each cycle
consists of each microbatch progressing through every stage); in
non-pipelined code this is a full forward pass. In both cases, this accounts
for a full embed-layerstack-unembed-sample operation.
If sampler is passed, then the engine should use it to sample the next token.
"""
cur_pos = decode_state["next_pos"]
bsz = decode_state["tokens"].shape[0]
freqs_cis_slice = jax.lax.dynamic_slice(
self.freqs_cis, (cur_pos, 0), (1, self.freqs_cis.shape[1])
)
with self.mesh:
logits, kvcache, _ = self.xfmr_fn(
self.xfmr_weights,
params,
decode_state["tokens"],
cur_pos,
freqs_cis_slice,
decode_state["cache"],
)
# TODO(xjdr): reduce slop tokens by penalizing slop weights
# logits = logits.at[:, -1, self.slop_tokens].multiply(self.slop_weights[None, :, None])
new_token, new_state = self.sample_fn(
decode_state["dslider_state"], logits[:, -1, :], DEFAULT_DS_CONFIG, key=rng
)
new_token = new_token.reshape((bsz, 1))
result = ResultTokens(
data=jnp.concatenate(
(
new_token,
jnp.ones_like(new_token, dtype=jnp.bool_),
jnp.full(
(bsz, 1), decode_state["generated_tokens"][:, -1] + 1, dtype=jnp.int32
),
),
axis=1,
),
# Tokens are shape [batch, speculations], so when we concatenate
# tokens, validity and length along their index 1 dimension then they
# occupy 0:speculations.
tokens_idx=(0, 1),
# Validity occupies the same amount of space, but next in line.
valid_idx=(1, 2),
# And lengths is rank 1.
length_idx=(2, 3),
samples_per_slot=bsz,
)
return {
"logits": logits,
"cache": kvcache,
"next_pos": decode_state["next_pos"] + 1,
"generated_tokens": decode_state["generated_tokens"] + 1,
"tokens": new_token,
"dslider_state": new_state,
}, result
|
Generates tokens for each sequence being decoded in parallel.
Generate takes a batch of pre-computed kv-caches, and computes:
- the predicted next token for each of the sequences
- an updated set of kv-caches
In the case of pipelining, this will handle N cycles (where each cycle
consists of each microbatch progressing through every stage); in
non-pipelined code this is a full forward pass. In both cases, this accounts
for a full embed-layerstack-unembed-sample operation.
If sampler is passed, then the engine should use it to sample the next token.
|
generate
|
python
|
xjdr-alt/entropix
|
entropix/engine.py
|
https://github.com/xjdr-alt/entropix/blob/master/entropix/engine.py
|
Apache-2.0
|
def insert(
self,
prefix: Prefix,
decode_state: DecodeState,
slot: int,
) -> DecodeState:
"""Adds `new_request` into `caches` at 'slot'.
When decoding multiple requests in parallel, when one request finishes, a
new request must be slotted into the recently vacated spot: `insert`!
This can occur in between and async to generate calls, and takes a lock over
that row of the cache.
The slot may represent a tuple of positions (e.g. microbatch, pipeline stage
and batch), but at the engine interface level all of these are exposed as
a [0, n) range of slots and converted internally.
"""
bsz = prefix["tokens"].shape[0]
layers, _, max_seq_len, kv_heads, head_dim = prefix["cache"].k.shape
new_k = jnp.broadcast_to(
prefix["cache"].k, (layers, bsz, max_seq_len, kv_heads, head_dim)
)
new_v = jnp.broadcast_to(
prefix["cache"].v, (layers, bsz, max_seq_len, kv_heads, head_dim)
)
new_cache = KVCache(k=new_k, v=new_v)
return {
"logits": prefix["logits"],
"cache": new_cache,
"next_pos": prefix["next_pos"],
"generated_tokens": prefix["generated_tokens"],
"tokens": prefix["tokens"],
"dslider_state": initialize_state(prefix["logits"], bsz, DEFAULT_DS_CONFIG),
}
|
Adds `new_request` into `caches` at `slot`.
When decoding multiple requests in parallel, when one request finishes, a
new request must be slotted into the recently vacated spot: `insert`!
This can occur in between, and asynchronously to, generate calls, and takes
a lock over that row of the cache.
The slot may represent a tuple of positions (e.g. microbatch, pipeline stage
and batch), but at the engine interface level all of these are exposed as
a [0, n) range of slots and converted internally.
|
insert
|
python
|
xjdr-alt/entropix
|
entropix/engine.py
|
https://github.com/xjdr-alt/entropix/blob/master/entropix/engine.py
|
Apache-2.0
|
def stop(self):
"""Stops the driver and all background threads."""
# Signal to all threads that they should stop.
self.live = False
all_backlogs = list(
itertools.chain(
[self._prefill_backlog],
self._transfer_backlogs,
self._generate_backlogs.values(),
self._detokenize_backlogs,
)
)
while any(t.is_alive() for t in self._all_threads):
# Empty all backlogs and mark any remaining requests as cancelled.
for q in all_backlogs:
while True:
try:
r = q.get_nowait()
if r is None:
continue
elif isinstance(r, ActiveRequest):
r.return_channel = None
else: # detokenize backlog
_, r = r
if isinstance(r, ActiveRequest):
r.return_channel = None
except queue.Empty:
break
# Put sentinels to unblock threads.
for q in all_backlogs:
try:
q.put_nowait(None)
except queue.Full:
pass
# Wait for all threads to stop.
for t in self._all_threads:
t.join()
|
Stops the driver and all background threads.
|
stop
|
python
|
xjdr-alt/entropix
|
entropix/orchestrator.py
|
https://github.com/xjdr-alt/entropix/blob/master/entropix/orchestrator.py
|
Apache-2.0
|
def get_total_concurrent_requests(self) -> int:
"""Gets the total number of concurrent requests the driver can handle."""
# We don't support filling all backlogs at once because it can cause GIL
# contention.
total_max_concurrent_decodes = sum(
[e.max_concurrent_decodes for e in self._generate_engines]
)
return total_max_concurrent_decodes
|
Gets the total number of concurrent requests the driver can handle.
|
get_total_concurrent_requests
|
python
|
xjdr-alt/entropix
|
entropix/orchestrator.py
|
https://github.com/xjdr-alt/entropix/blob/master/entropix/orchestrator.py
|
Apache-2.0
|
def _prefill_thread(self, idx: int):
"""Thread which runs in the background performing prefills."""
logging.info("---------Spinning up prefill thread %d.---------", idx)
prefill_engine = self._prefill_engines[idx]
prefill_params = self._prefill_params[idx]
metadata = prefill_engine.get_tokenizer()
tokenizer = prefill_engine.build_tokenizer(metadata)
logging.info("---------Prefill params %d loaded.---------", idx)
while self.live:
my_transfer_backlog = self._transfer_backlogs[idx]
# The prefill thread can just sleep until it has work to do.
request = self._prefill_backlog.get(block=True)
if request is None:
break
request.metadata.prefill_dequeue_time = time.perf_counter()
is_bos = True
logging.info(
"Prefilling on prefill engine %d : prefill queue size, %d," " is_bos: %s",
idx,
self._prefill_backlog.qsize(),
is_bos,
)
# Tokenize and pad the text or token input.
padded_tokens, true_length = self._process_prefill_content(
request, tokenizer, is_bos, prefill_engine.max_prefill_length
)
# Compute new kv cache for the prefill_content.
prefill_result, first_token = prefill_engine.prefill(
params=prefill_params,
padded_tokens=padded_tokens,
true_length=true_length,
)
request.prefill_result = prefill_result
# put first token to detokenize queue
request.complete = np.zeros((prefill_engine.samples_per_slot,), np.bool_)
my_detokenize_backlog = self._detokenize_backlogs[idx]
request.metadata.transfer_enqueue_time = time.perf_counter()
my_detokenize_backlog.put(
(first_token, request, request.metadata.prefill_dequeue_time),
block=True,
)
# Once prefill is complete, place it on the generation queue and block if
# full.
my_transfer_backlog.put(request, block=True)
logging.info(
"Placed request on transfer queue %d, %d queued requests.",
idx,
my_transfer_backlog.qsize(),
)
del prefill_result
del request
|
Thread which runs in the background performing prefills.
|
_prefill_thread
|
python
|
xjdr-alt/entropix
|
entropix/orchestrator.py
|
https://github.com/xjdr-alt/entropix/blob/master/entropix/orchestrator.py
|
Apache-2.0
|
def _transfer_thread(self, idx: int):
"""Transfers the kv cache on an active request to the least full
generate backlog."""
transfer_backlog = self._transfer_backlogs[idx]
while self.live:
# The transfer thread can just sleep until it has work to do.
new_request = transfer_backlog.get(block=True)
if new_request is None:
break
new_request.metadata.transfer_dequeue_time = time.perf_counter()
target_idx = min(self._generate_backlogs.items(), key=lambda q: q[1].qsize())[0]
# Only transfer the KVCache for disaggregated serving.
# TODO: Remove the conditional after fixing the compatibility.
if not self._interleaved_mode:
logging.info(
"Transferring prefill from prefill engine %d " "to generate engine %d.",
idx,
target_idx,
)
# Transfer the info to the relevant generate slice.
self._transfer_prefill_result(new_request, target_idx)
# Place the request on the correct generate backlog and block if full.
new_request.metadata.generate_enqueue_time = time.perf_counter()
self._generate_backlogs[target_idx].put(new_request, block=True)
logging.info(
"Successfully transferred prefill "
"from prefill engine %d to generate engine %d "
"(%d requests now in backlog).",
idx,
target_idx,
self._generate_backlogs[target_idx].qsize(),
)
|
Transfers the kv cache on an active request to the least full
generate backlog.
|
_transfer_thread
|
python
|
xjdr-alt/entropix
|
entropix/orchestrator.py
|
https://github.com/xjdr-alt/entropix/blob/master/entropix/orchestrator.py
|
Apache-2.0
|
def _generate_thread(self, idx: int):
"""Step token generation and insert prefills from backlog."""
logging.info("---------Spinning up generate thread %d.---------", idx)
generate_engine = self._generate_engines[idx]
my_slots = self._generate_slots[idx]
my_generate_backlog = self._generate_backlogs[idx]
my_detokenize_backlog = self._detokenize_backlogs[idx]
# Keep track of what step tokens were generated at.
generate_timestep = 0
# State to store things like running kv cache in.
decode_state = generate_engine.init_decode_state()
generate_params = self._generate_params[idx]
logging.info("---------Generate params %d loaded.---------", idx)
time_of_last_generate = time.time()
time_of_last_print = time.time()
while self.live:
if (time.time() - time_of_last_print) > 1:
logging.info(
"Generate thread making a decision with:"
" prefill_backlog=%d"
" generate_free_slots=%d",
self._prefill_backlog.qsize(),
my_slots.qsize(),
)
time_of_last_print = time.time()
max_concurrent_decodes = generate_engine.max_concurrent_decodes
# Check if there are any free my_slots. We don't want to block here since
# we can still generate if we can't insert. We do this in a while loop to
# insert as many sequences as possible.
while True:
my_slots_size = my_slots.qsize()
try:
slot = my_slots.get(block=False)
# Found a slot, now see if we can fill it.
except queue.Empty:
# Exit this while loop as we have no free slots to insert into.
break
# We block when the decode slots are all free since we need to get a
# prefilled request to insert. We add a timeout for the block to handle
# the case when the prefill backlog is cancelled and we end up with no
# more useful prefill work to do.
block = my_slots_size == max_concurrent_decodes
if self._interleaved_mode:
# For interleaved mode, we also block when the prefill backlog
# is not empty or there is transfer work to do.
block |= not self._prefill_backlog.empty()
block |= not self._transfer_backlogs[idx].empty()
try:
new_request = my_generate_backlog.get(block=block, timeout=1.0)
if new_request is None:
break
new_request.metadata.generate_dequeue_time = time.perf_counter()
# Got free slot and new request, use them.
except queue.Empty:
# No new requests, we can't insert, so put back slot.
my_slots.put(slot, block=False)
# If we were blocking and hit the timeout, then retry the loop.
# Otherwise, we can exit and proceed to generation.
if block:
continue
else:
break
# Signal to kill the thread.
if new_request is None:
return
logging.info(
"Generate slice %d filling slot %d at step %d.",
idx,
slot,
generate_timestep,
)
decode_state = generate_engine.insert(
new_request.prefill_result, decode_state, slot=slot
)
del new_request.prefill_result
new_request.generate_timestep_added = generate_timestep
new_request.complete = np.zeros(
(generate_engine.samples_per_slot,), dtype=np.bool_
)
# Respond to detokenization backpressure.
my_detokenize_backlog.put((slot, new_request), block=True)
# At this point, we know that we have at least some slots filled.
assert (
my_slots.qsize() < max_concurrent_decodes
), "At this point we must have some requests inserted into the slots."
# Now we actually take a generate step on requests in the slots.
decode_state, sampled_tokens = generate_engine.generate(
generate_params, decode_state
)
sampled_tokens.copy_to_host_async()
# Respond to detokenization backpressure.
my_detokenize_backlog.put((generate_timestep, sampled_tokens), block=True)
generate_timestep += 1
logging.info(
"Generate engine %d step %d - slots free : %d / %d, took %.2fms",
idx,
generate_timestep,
my_slots_size,
max_concurrent_decodes,
(time.time() - time_of_last_generate) * 10**3,
)
time_of_last_generate = time.time()
|
Step token generation and insert prefills from backlog.
|
_generate_thread
|
python
|
xjdr-alt/entropix
|
entropix/orchestrator.py
|
https://github.com/xjdr-alt/entropix/blob/master/entropix/orchestrator.py
|
Apache-2.0
|
def _detokenize_thread(self, idx: int):
"""Detokenize sampled tokens and returns them to the user."""
# One of these per generate engine.
# For all filled my_slots, pop the sampled token onto the relevant
# request's return channel. If it is done, place the slot back onto the free slots.
my_detokenize_backlog = self._detokenize_backlogs[idx]
my_generate_engine = self._generate_engines[idx]
my_slots = self._generate_slots[idx]
metadata = my_generate_engine.get_tokenizer()
tokenizer = my_generate_engine.build_tokenizer(metadata)
my_live_requests = {
i: None for i in range(my_generate_engine.max_concurrent_decodes)
}
while self.live:
data = my_detokenize_backlog.get(block=True)
if data is None:
break
start_detokenize_time = time.time()
# prefill first token
if isinstance(data[0], ResultTokens):
request_first_token, request, _ = data
request_first_token = request_first_token.convert_to_numpy()
results, complete = token_utils.process_result_tokens(
tokenizer=tokenizer,
slot=0, # always 0, as prefill only runs 1 sample
slot_max_length=request.max_tokens,
result_tokens=request_first_token,
is_client_side_tokenization=request.is_client_side_tokenization,
complete=request.complete,
)
request.complete = complete
# Return some output samples.
request.enqueue_samples(results)
first_token_return_time = time.perf_counter()
logging.info(
"TTFT duration: %fms",
(first_token_return_time - request.metadata.prefill_dequeue_time) * 1000,
)
# generate step tokens
elif isinstance(data[1], ResultTokens):
# We want to detokenize them.
generate_timestep_added, result_tokens = data
# Disable attribute error because pytype doesn't know this
# is a result tokens, and we can't annotate the tuple.
result_tokens = result_tokens.convert_to_numpy()
for slot, request in my_live_requests.items():
if request is not None:
results, complete = token_utils.process_result_tokens(
tokenizer=tokenizer,
slot=slot,
slot_max_length=request.max_tokens,
result_tokens=result_tokens,
is_client_side_tokenization=request.is_client_side_tokenization,
complete=request.complete,
)
request.complete = complete
# Return some output samples.
request.enqueue_samples(results)
if request.complete.all():
request.metadata.complete_time = time.perf_counter()
request.return_channel.close()
# Place the slot back on the free queue.
my_live_requests[slot] = None
my_slots.put(slot, block=False) # This should always have space.
my_generate_engine.free_resource(slot)
logging.info(
"Detokenizing generate step %d took %.2fms",
generate_timestep_added,
(time.time() - start_detokenize_time) * 10**3,
)
else:
# We want to update a slot with the new channel.
slot, active_request = data
my_live_requests[slot] = active_request
|
Detokenizes sampled tokens and returns them to the user.
|
_detokenize_thread
|
python
|
xjdr-alt/entropix
|
entropix/orchestrator.py
|
https://github.com/xjdr-alt/entropix/blob/master/entropix/orchestrator.py
|
Apache-2.0
|
def __init__(self, model_path: str):
"""
Initializes the Tokenizer with a Tiktoken model.
Args:
model_path (str): The path to the Tiktoken model file.
"""
assert os.path.isfile(model_path), model_path
mergeable_ranks = load_tiktoken_bpe(model_path)
num_base_tokens = len(mergeable_ranks)
special_tokens = [
'<|begin_of_text|>',
'<|end_of_text|>',
'<|reserved_special_token_0|>',
'<|reserved_special_token_1|>',
'<|finetune_right_pad_id|>',
'<|step_id|>',
'<|start_header_id|>',
'<|end_header_id|>',
'<|eom_id|>', # end of message
'<|eot_id|>', # end of turn
'<|python_tag|>',
]
reserved_tokens = [
f'<|reserved_special_token_{2 + i}|>'
for i in range(self.num_reserved_special_tokens - len(special_tokens))
]
special_tokens = special_tokens + reserved_tokens
self.special_tokens = {token: num_base_tokens + i for i, token in enumerate(special_tokens)}
self.model = tiktoken.Encoding(
name=Path(model_path).name,
pat_str=self.pat_str,
mergeable_ranks=mergeable_ranks,
special_tokens=self.special_tokens,
)
self.n_words: int = num_base_tokens + len(special_tokens)
# BOS / EOS token IDs
self.bos_id: int = self.special_tokens['<|begin_of_text|>']
self.eos_id: int = self.special_tokens['<|end_of_text|>']
self.eot_id: int = self.special_tokens['<|eot_id|>']
self.eom_id: int = self.special_tokens['<|eom_id|>']
self.python_tag_id = self.special_tokens['<|python_tag|>']
self.pad_id: int = self.special_tokens['<|finetune_right_pad_id|>']
self.stop_tokens = [
self.special_tokens['<|eom_id|>'],
self.special_tokens['<|eot_id|>'],
]
|
Initializes the Tokenizer with a Tiktoken model.
Args:
model_path (str): The path to the Tiktoken model file.
|
__init__
|
python
|
xjdr-alt/entropix
|
entropix/tokenizer.py
|
https://github.com/xjdr-alt/entropix/blob/master/entropix/tokenizer.py
|
Apache-2.0
|
def encode(
self,
s: str,
*,
bos: bool,
eos: bool,
allowed_special: Optional[Union[Literal['all'], AbstractSet[str]]] = None,
disallowed_special: Union[Literal['all'], Collection[str]] = (),
) -> List[int]:
"""
Encodes a string into a list of token IDs.
Args:
s (str): The input string to be encoded.
bos (bool): Whether to prepend the beginning-of-sequence token.
eos (bool): Whether to append the end-of-sequence token.
allowed_special ("all"|set[str]): allowed special tokens in string
disallowed_special ("all"|set[str]): special tokens that raise an error when in string
Returns:
list[int]: A list of token IDs.
By default, setting disallowed_special=() encodes a string by ignoring
special tokens. Specifically:
- Setting `disallowed_special` to () will cause all text corresponding
to special tokens to be encoded as natural text (instead of raising
an error).
- Setting `allowed_special` to "all" will cause all text corresponding
to special tokens to be encoded as special tokens.
"""
if allowed_special is None:
allowed_special = set()
assert isinstance(s, str)
substrs = (
substr
for i in range(0, len(s), TIKTOKEN_MAX_ENCODE_CHARS)
for substr in self._split_whitespaces_or_nonwhitespaces(
s[i : i + TIKTOKEN_MAX_ENCODE_CHARS], MAX_NO_WHITESPACES_CHARS
)
)
t: List[int] = []
for substr in substrs:
t.extend(
self.model.encode(
substr,
allowed_special=allowed_special,
disallowed_special=disallowed_special,
)
)
if bos:
t.insert(0, self.bos_id)
if eos:
t.append(self.eos_id)
return t
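# Usage sketch (illustrative; not part of the original source; the model
# path is hypothetical). encode prepends bos_id when bos=True, and decode
# round-trips the remaining content tokens:
#   tok = Tokenizer("path/to/tokenizer.model")
#   ids = tok.encode("hello world", bos=True, eos=False)
#   assert ids[0] == tok.bos_id
#   assert tok.decode(ids[1:]) == "hello world"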
|
Encodes a string into a list of token IDs.
Args:
s (str): The input string to be encoded.
bos (bool): Whether to prepend the beginning-of-sequence token.
eos (bool): Whether to append the end-of-sequence token.
allowed_special ("all"|set[str]): allowed special tokens in string
disallowed_special ("all"|set[str]): special tokens that raise an error when in string
Returns:
list[int]: A list of token IDs.
By default, setting disallowed_special=() encodes a string by ignoring
special tokens. Specifically:
- Setting `disallowed_special` to () will cause all text corresponding
to special tokens to be encoded as natural text (instead of raising
an error).
- Setting `allowed_special` to "all" will cause all text corresponding
to special tokens to be encoded as special tokens.
|
encode
|
python
|
xjdr-alt/entropix
|
entropix/tokenizer.py
|
https://github.com/xjdr-alt/entropix/blob/master/entropix/tokenizer.py
|
Apache-2.0
|
def decode(self, t: Sequence[int]) -> str:
"""
Decodes a list of token IDs into a string.
Args:
t (List[int]): The list of token IDs to be decoded.
Returns:
str: The decoded string.
"""
# Typecast is safe here. Tiktoken doesn't do anything list-related with the sequence.
return self.model.decode(cast(List[int], t))
|
Decodes a list of token IDs into a string.
Args:
t (List[int]): The list of token IDs to be decoded.
Returns:
str: The decoded string.
|
decode
|
python
|
xjdr-alt/entropix
|
entropix/tokenizer.py
|
https://github.com/xjdr-alt/entropix/blob/master/entropix/tokenizer.py
|
Apache-2.0
|
def _split_whitespaces_or_nonwhitespaces(s: str, max_consecutive_slice_len: int) -> Iterator[str]:
"""
Splits the string `s` so that each substring contains no more than `max_consecutive_slice_len`
consecutive whitespaces or consecutive non-whitespaces.
"""
current_slice_len = 0
current_slice_is_space = s[0].isspace() if len(s) > 0 else False
slice_start = 0
for i in range(len(s)):
is_now_space = s[i].isspace()
if current_slice_is_space ^ is_now_space:
current_slice_len = 1
current_slice_is_space = is_now_space
else:
current_slice_len += 1
if current_slice_len > max_consecutive_slice_len:
yield s[slice_start:i]
slice_start = i
current_slice_len = 1
yield s[slice_start:]
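# Usage sketch (illustrative; not part of the original source): each yielded
# chunk contains at most `max_consecutive_slice_len` consecutive characters
# of the same class (whitespace vs. non-whitespace):
#   list(_split_whitespaces_or_nonwhitespaces("aaaaa", 2))
#   # -> ['aa', 'aa', 'a']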
|
Splits the string `s` so that each substring contains no more than `max_consecutive_slice_len`
consecutive whitespaces or consecutive non-whitespaces.
|
_split_whitespaces_or_nonwhitespaces
|
python
|
xjdr-alt/entropix
|
entropix/tokenizer.py
|
https://github.com/xjdr-alt/entropix/blob/master/entropix/tokenizer.py
|
Apache-2.0
|
def take_nearest_length(lengths: list[int], length: int) -> int:
"""Gets the nearest length to the right in a set of lengths."""
pos = bisect_left(lengths, length)
if pos == len(lengths):
return lengths[-1]
return lengths[pos]
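# Usage sketch (illustrative; not part of the original source): `lengths`
# must be sorted ascending; a length above the largest bucket falls back to
# that largest bucket:
#   take_nearest_length([16, 32, 64], 20)   # -> 32
#   take_nearest_length([16, 32, 64], 100)  # -> 64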
|
Gets the nearest length to the right in a set of lengths.
|
take_nearest_length
|
python
|
xjdr-alt/entropix
|
entropix/token_utils.py
|
https://github.com/xjdr-alt/entropix/blob/master/entropix/token_utils.py
|
Apache-2.0
|
def tokenize_and_pad(
s: str,
vocab,
is_bos: bool = True,
prefill_lengths: Optional[List[int]] = None,
max_prefill_length: Optional[int] = None,
jax_padding: bool = True,
) -> Tuple[Union[jax.Array, np.ndarray], int]:
"""Tokenize and pads a string.
Args:
s: String to tokenize.
vocab: Vocabulary to tokenize with.
is_bos: Whether or not this is the beginning of a sequence. Defaults to yes,
as prefill is typically used when beginning sequences.
prefill_lengths: Buckets to pad the sequence to for static compilation.
max_prefill_length: Maximum bucket to use.
jax_padding: convert to JAX padded tokens if True.
Returns:
tokens: Tokenized into integers.
true_length: Actual length of the non-padded sequence.
"""
tokens = np.array(vocab.encode_tf(s)) # [Length]
bos_id = vocab.bos_id
pad_id = vocab.pad_id
assert pad_id == 0, "Further logic required if pad_id not 0."
padded_tokens, true_length = pad_tokens(
tokens=tokens,
bos_id=bos_id,
pad_id=pad_id,
is_bos=is_bos,
prefill_lengths=prefill_lengths,
max_prefill_length=max_prefill_length,
jax_padding=jax_padding,
)
return padded_tokens, true_length
|
Tokenizes and pads a string.
Args:
s: String to tokenize.
vocab: Vocabulary to tokenize with.
is_bos: Whether or not this is the beginning of a sequence. Defaults to yes,
as prefill is typically used when beginning sequences.
prefill_lengths: Buckets to pad the sequence to for static compilation.
max_prefill_length: Maximum bucket to use.
jax_padding: convert to JAX padded tokens if True.
Returns:
tokens: Tokenized into integers.
true_length: Actual length of the non-padded sequence.
|
tokenize_and_pad
|
python
|
xjdr-alt/entropix
|
entropix/token_utils.py
|
https://github.com/xjdr-alt/entropix/blob/master/entropix/token_utils.py
|
Apache-2.0
|
def pad_tokens(
tokens: np.ndarray,
bos_id: int,
pad_id: int,
is_bos: bool = True,
prefill_lengths: Optional[List[int]] = None,
max_prefill_length: Optional[int] = None,
jax_padding: bool = True,
) -> Tuple[Union[jax.Array, np.ndarray], int]:
"""Pads tokens to the nearest prefill length that is equal to or greater
than the token length.
Args:
tokens: Tokens.
bos_id: Bos ID.
pad_id: Pad ID.
is_bos: Add a beginning-of-sequence token if this is true.
prefill_lengths: Buckets to pad the sequence to for static compilation.
max_prefill_length: Maximum bucket to use.
jax_padding: convert to JAX padded tokens if True.
Returns:
tokens: Tokenized into integers.
true_length: Actual length of the non-padded sequence.
"""
if prefill_lengths is None:
prefill_lengths = DEFAULT_PREFILL_BUCKETS
if max_prefill_length is not None:
prefill_lengths = prefill_lengths[: prefill_lengths.index(max_prefill_length)] + [
max_prefill_length
]
# Add a beginning of sequence token if this is the beginning.
if is_bos:
tokens = np.concatenate([np.array([bos_id]), tokens], axis=-1)
true_length = tokens.shape[-1]
padded_length = take_nearest_length(prefill_lengths, true_length)
padding = padded_length - true_length
if padding < 0:
logging.warning("Provided sequence longer than available.")
# Take the last N tokens if we have too many.
padded_tokens = tokens[-padded_length:]
else:
padded_tokens = np.pad(tokens, (0, padding), constant_values=(pad_id,))
if jax_padding:
padded_tokens = jnp.array([padded_tokens])
return padded_tokens, true_length
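# Usage sketch (illustrative; not part of the original source; the bucket
# list is hypothetical). The BOS token counts toward true_length, and the
# sequence is right-padded up to the nearest bucket:
#   padded, true_length = pad_tokens(
#       tokens=np.array([5, 6, 7]), bos_id=1, pad_id=0, is_bos=True,
#       prefill_lengths=[4, 8], jax_padding=False)
#   # padded -> array([1, 5, 6, 7]), true_length -> 4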
|
Pads tokens to the nearest prefill length that is equal to or greater
than the token length.
Args:
tokens: Tokens.
bos_id: Bos ID.
pad_id: Pad ID.
is_bos: Add a beginning-of-sequence token if this is true.
prefill_lengths: Buckets to pad the sequence to for static compilation.
max_prefill_length: Maximum bucket to use.
jax_padding: convert to JAX padded tokens if True.
Returns:
tokens: Tokenized into integers.
true_length: Actual length of the non-padded sequence.
|
pad_tokens
|
python
|
xjdr-alt/entropix
|
entropix/token_utils.py
|
https://github.com/xjdr-alt/entropix/blob/master/entropix/token_utils.py
|
Apache-2.0
|
def process_result_tokens(
tokenizer: Tokenizer,
slot: int,
slot_max_length: int,
result_tokens: ResultTokens,
complete: np.ndarray,
is_client_side_tokenization: bool = False,
debug: bool = False,
) -> Tuple[List[ReturnSample], np.ndarray]:
"""Processes a result tokens into a list of strings, handling multiple
samples.
Args:
slot: The slot at which to draw tokens from.
slot_max_length: Max length for a sample in the slot.
result_tokens: The tokens to access by slot.
complete: Array representing the completion status of each sample in the
slot.
is_client_side_tokenization: Whether to detokenize on client side.
debug: Whether to log step by step detokenisation.
Returns:
return_samples: List of ReturnSample.
complete: Updated complete.
"""
# tokens: [samples, speculations]
slot_data = result_tokens.get_result_at_slot(slot)
slot_tokens = slot_data.tokens
slot_valid = slot_data.valid
slot_lengths = slot_data.lengths
samples, speculations = slot_tokens.shape
stop_tokens = tokenizer.stop_tokens
# Stop anything which has reached its max length.
complete = complete | (slot_lengths > slot_max_length)
if debug:
logging.info(
"Complete %s, slot_tokens: %s, slot_lengths: %s",
str(complete),
str(slot_tokens),
str(slot_lengths),
)
return_samples = []
for idx in range(samples):
text_so_far = []
tok_id_so_far = []
if not complete[idx].item():
for spec_idx in range(speculations):
tok_id = slot_tokens[idx, spec_idx].item()
valid = slot_valid[idx, spec_idx].item()
if debug:
logging.info(
"Sample idx: %d Speculation idx: %d Token: %d",
idx,
spec_idx,
tok_id,
)
if tok_id in stop_tokens or not valid:
complete[idx] = True
tok_id_so_far.append(tok_id)
break
else:
if not is_client_side_tokenization:
text_so_far.append(tokenizer.decode([tok_id]))
tok_id_so_far.append(tok_id)
return_samples.append(ReturnSample(text=text_so_far, token_ids=tok_id_so_far))
if debug:
logging.info("Return samples %s", str(return_samples))
return return_samples, complete
|
Processes result tokens into a list of strings, handling multiple
samples.
Args:
slot: The slot at which to draw tokens from.
slot_max_length: Max length for a sample in the slot.
result_tokens: The tokens to access by slot.
complete: Array representing the completion status of each sample in the
slot.
is_client_side_tokenization: Whether to detokenize on client side.
debug: Whether to log step by step detokenisation.
Returns:
return_samples: List of ReturnSample.
complete: Updated complete.
|
process_result_tokens
|
python
|
xjdr-alt/entropix
|
entropix/token_utils.py
|
https://github.com/xjdr-alt/entropix/blob/master/entropix/token_utils.py
|
Apache-2.0
|
def is_byte_token(s: str) -> bool:
"""Returns True if s is a byte string like "<0xAB>"."""
# Bytes look like "<0xAB>".
if len(s) != 6 or s[0:3] != "<0x" or s[-1] != ">":
return False
return True
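# Usage sketch (illustrative; not part of the original source):
#   is_byte_token("<0xAB>")   # -> True
#   is_byte_token("<0xAB >")  # -> False (7 chars, not 6)
#   is_byte_token("hello!")   # -> False (wrong prefix/suffix)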
|
Returns True if s is a byte string like "<0xAB>".
|
is_byte_token
|
python
|
xjdr-alt/entropix
|
entropix/token_utils.py
|
https://github.com/xjdr-alt/entropix/blob/master/entropix/token_utils.py
|
Apache-2.0
|
def create_mesh(device_count: int) -> jax.sharding.Mesh:
"""Creates device mesh for distributed execution."""
devices = jax.devices()
mesh_shape = (device_count, 1)
device_mesh = jax.experimental.mesh_utils.create_device_mesh(mesh_shape)
return jax.sharding.Mesh(device_mesh, ("mp", "fsdp"))
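# Usage sketch (illustrative; not part of the original source): shard across
# all local devices on the "mp" axis, with a trivial "fsdp" axis of size 1.
# device_count must match the number of available devices:
#   mesh = create_mesh(jax.device_count())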
|
Creates device mesh for distributed execution.
|
create_mesh
|
python
|
xjdr-alt/entropix
|
entropix/weights.py
|
https://github.com/xjdr-alt/entropix/blob/master/entropix/weights.py
|
Apache-2.0
|
def load_weights(
ckpt_dir: Path, model_params, weight_config: Optional[WeightConfig] = None
) -> Tuple[XfmrWeights, jax.sharding.Mesh]:
"""Load and shard model weights across devices."""
weight_config = weight_config or WeightConfig()
mesh = create_mesh(jax.device_count())
w = {}
layer_weights = []
for file in ckpt_dir.glob("*.npy"):
name = ".".join(str(file).split("/")[-1].split(".")[:-1])
weight = jnp.load(file=file, mmap_mode="r", allow_pickle=True)
partition_spec = create_partition_spec(name)
sharding = NamedSharding(mesh, partition_spec)
if any(lyr in name for lyr in ["wq", "wk", "wv", "wo", "w1", "w2", "w3"]):
weight = weight.T
if "wq" in name or "wk" in name or "wv" in name:
weight = weight.reshape(
-1,
model_params.n_local_heads if "wq" in name else model_params.n_local_kv_heads,
model_params.head_dim,
)
# print(name, weight.shape, sharding._to_xla_hlo_sharding(weight.ndim))
w[name] = jax.device_put(weight, sharding)
for i in range(model_params.n_layers):
layer_weights.append(
LayerWeights(
wq=w[f"layers.{i}.attention.wq.weight"],
wk=w[f"layers.{i}.attention.wk.weight"],
wv=w[f"layers.{i}.attention.wv.weight"],
wo=w[f"layers.{i}.attention.wo.weight"],
w1=w[f"layers.{i}.feed_forward.w1.weight"],
w2=w[f"layers.{i}.feed_forward.w2.weight"],
w3=w[f"layers.{i}.feed_forward.w3.weight"],
ffn_norm=w[f"layers.{i}.ffn_norm.weight"],
attention_norm=w[f"layers.{i}.attention_norm.weight"],
)
)
xfmr_weights = XfmrWeights(
tok_embeddings=w["tok_embeddings.weight"],
norm=w["norm.weight"],
output=w["output.weight"],
layer_weights=layer_weights,
)
return xfmr_weights, mesh
|
Load and shard model weights across devices.
|
load_weights
|
python
|
xjdr-alt/entropix
|
entropix/weights.py
|
https://github.com/xjdr-alt/entropix/blob/master/entropix/weights.py
|
Apache-2.0
|
def aggregate_results(
single_eval_results: list[SingleEvalResult],
default_stats: tuple[str] = ("mean", "std"),
name2stats: dict[str, tuple[str]] | None = None,
) -> EvalResult:
"""
Aggregate results from multiple evaluations into a single EvalResult.
"""
name2stats = name2stats or {}
name2values = defaultdict(list)
htmls = []
convos = []
for single_eval_result in single_eval_results:
for name, value in single_eval_result.metrics.items():
name2values[name].append(value)
if single_eval_result.score is not None:
name2values["score"].append(single_eval_result.score)
htmls.append(single_eval_result.html)
convos.append(single_eval_result.convo)
final_metrics = {}
for name, values in name2values.items():
stats = name2stats.get(name, default_stats)
for stat in stats:
key = name if stat == "mean" else f"{name}:{stat}"
final_metrics[key] = _compute_stat(values, stat)
return EvalResult(
score=final_metrics.pop("score", None),
metrics=final_metrics,
htmls=htmls,
convos=convos,
)
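# Aggregation sketch (illustrative; not part of the original source): two
# results with metrics {"acc": 1.0} and {"acc": 0.0} aggregate, under the
# default ("mean", "std") stats, into {"acc": 0.5, "acc:std": <std of
# [1.0, 0.0]>}; the pooled "score" is likewise the mean of the individual
# scores when they are set.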
|
Aggregate results from multiple evaluations into a single EvalResult.
|
aggregate_results
|
python
|
xjdr-alt/entropix
|
evals/common.py
|
https://github.com/xjdr-alt/entropix/blob/master/evals/common.py
|
Apache-2.0
|
def map_with_progress(f: callable, xs: list[Any], num_threads: int = 50):
"""
Apply f to each element of xs, using a ThreadPool, and show progress.
"""
if os.getenv("debug"):
return list(map(f, tqdm(xs, total=len(xs))))
else:
with ThreadPool(min(num_threads, len(xs))) as pool:
return list(tqdm(pool.imap(f, xs), total=len(xs)))
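# Usage sketch (illustrative; not part of the original source): result order
# matches input order because pool.imap yields results in input order:
#   map_with_progress(lambda x: x * x, [1, 2, 3], num_threads=2)
#   # -> [1, 4, 9]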
|
Apply f to each element of xs, using a ThreadPool, and show progress.
|
map_with_progress
|
python
|
xjdr-alt/entropix
|
evals/common.py
|
https://github.com/xjdr-alt/entropix/blob/master/evals/common.py
|
Apache-2.0
|
def message_to_html(message: Message) -> str:
"""
Generate HTML snippet (inside a <div>) for a message.
"""
return jinja_env.from_string(_message_template).render(
role=message["role"],
content=message["content"],
variant=message.get("variant", None),
)
|
Generate HTML snippet (inside a <div>) for a message.
|
message_to_html
|
python
|
xjdr-alt/entropix
|
evals/common.py
|
https://github.com/xjdr-alt/entropix/blob/master/evals/common.py
|
Apache-2.0
|
def make_report(eval_result: EvalResult) -> str:
"""
Create a standalone HTML report from an EvalResult.
"""
return jinja_env.from_string(_report_template).render(
score=eval_result.score,
metrics=eval_result.metrics,
htmls=eval_result.htmls,
)
|
Create a standalone HTML report from an EvalResult.
|
make_report
|
python
|
xjdr-alt/entropix
|
evals/common.py
|
https://github.com/xjdr-alt/entropix/blob/master/evals/common.py
|
Apache-2.0
|
def make_report_from_example_htmls(htmls: list[str]):
"""
Create a standalone HTML report from a list of example HTMLs.
"""
return jinja_env.from_string(_report_template).render(
score=None, metrics={}, htmls=htmls
)
|
Create a standalone HTML report from a list of example HTMLs.
|
make_report_from_example_htmls
|
python
|
xjdr-alt/entropix
|
evals/common.py
|
https://github.com/xjdr-alt/entropix/blob/master/evals/common.py
|
Apache-2.0
|
def normalize_response(response: str) -> str:
"""
Normalize the response by removing markdown and LaTeX formatting that may prevent a match.
"""
return (
response.replace("**", "")
.replace("$\\boxed{", "")
.replace("}$", "")
.replace("\\$", "")
.replace("$\\text{", "")
.replace("$", "")
.replace("\\mathrm{", "")
.replace("\\{", "")
.replace("\\text", "")
.replace("\\(", "")
.replace("\\mathbf{", "")
.replace("{", "")
.replace("\\boxed", "")
)
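# Usage sketch (illustrative; not part of the original source): the chained
# replacements strip LaTeX wrappers so the bare answer can be matched:
#   normalize_response("The answer is $\\boxed{42}$.")
#   # -> "The answer is 42."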
|
Normalize the response by removing markdown and LaTeX formatting that may prevent a match.
|
normalize_response
|
python
|
xjdr-alt/entropix
|
evals/common.py
|
https://github.com/xjdr-alt/entropix/blob/master/evals/common.py
|
Apache-2.0
|
def _normalize_answer(text: str) -> str:
"""Lower text and remove punctuation, articles and extra whitespace."""
parts = [
_white_space_fix(_remove_articles(_normalize_number(_remove_punc(_lower(token)))))
for token in _tokenize(text)
]
parts = [part for part in parts if part.strip()]
normalized = " ".join(parts).strip()
return normalized
|
Lower text and remove punctuation, articles and extra whitespace.
|
_normalize_answer
|
python
|
xjdr-alt/entropix
|
evals/drop_eval.py
|
https://github.com/xjdr-alt/entropix/blob/master/evals/drop_eval.py
|
Apache-2.0
|
def _align_bags(predicted: List[Set[str]], gold: List[Set[str]]) -> List[float]:
"""
Takes gold and predicted answer sets and first finds the optimal 1-1 alignment
between them and gets maximum metric values over all the answers.
"""
scores = np.zeros([len(gold), len(predicted)])
for gold_index, gold_item in enumerate(gold):
for pred_index, pred_item in enumerate(predicted):
if _match_numbers_if_present(gold_item, pred_item):
scores[gold_index, pred_index] = _compute_f1(pred_item, gold_item)
row_ind, col_ind = linear_sum_assignment(-scores)
max_scores = np.zeros([max(len(gold), len(predicted))])
for row, column in zip(row_ind, col_ind):
max_scores[row] = max(max_scores[row], scores[row, column])
return max_scores
|
Takes gold and predicted answer sets and first finds the optimal 1-1 alignment
between them and gets maximum metric values over all the answers.
|
_align_bags
|
python
|
xjdr-alt/entropix
|
evals/drop_eval.py
|
https://github.com/xjdr-alt/entropix/blob/master/evals/drop_eval.py
|
Apache-2.0
|
def get_drop_metrics(
predicted: Union[str, List[str], Tuple[str, ...]],
gold: Union[str, List[str], Tuple[str, ...]],
) -> Tuple[float, float]:
"""
Takes a predicted answer and a gold answer (that are both either a string or a list of
strings), and returns exact match and the DROP F1 metric for the prediction. If you are
writing a script for evaluating objects in memory (say, the output of predictions during
validation, or while training), this is the function you want to call, after using
:func:`answer_json_to_strings` when reading the gold answer from the released data file.
"""
predicted_bags = _answer_to_bags(predicted)
gold_bags = _answer_to_bags(gold)
if set(predicted_bags[0]) == set(gold_bags[0]) and len(predicted_bags[0]) == len(
gold_bags[0]
):
exact_match = 1.0
else:
exact_match = 0.0
f1_per_bag = _align_bags(predicted_bags[1], gold_bags[1])
f1 = np.mean(f1_per_bag)
f1 = round(f1, 2)
return exact_match, f1
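# Usage sketch (illustrative; not part of the original source): identical
# answers score perfectly; answers with no token overlap score zero:
#   get_drop_metrics("12 dogs", "12 dogs")  # -> (1.0, 1.0)
#   get_drop_metrics("cat", "12 dogs")      # -> (0.0, 0.0)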
|
Takes a predicted answer and a gold answer (that are both either a string or a list of
strings), and returns exact match and the DROP F1 metric for the prediction. If you are
writing a script for evaluating objects in memory (say, the output of predictions during
validation, or while training), this is the function you want to call, after using
:func:`answer_json_to_strings` when reading the gold answer from the released data file.
|
get_drop_metrics
|
python
|
xjdr-alt/entropix
|
evals/drop_eval.py
|
https://github.com/xjdr-alt/entropix/blob/master/evals/drop_eval.py
|
Apache-2.0
|
def answer_json_to_strings(answer: Dict[str, Any]) -> Tuple[Tuple[str, ...], str]:
"""
Takes an answer JSON blob from the DROP data release and converts it into strings used for
evaluation.
"""
if "number" in answer and answer["number"]:
return tuple([str(answer["number"])]), "number"
elif "spans" in answer and answer["spans"]:
return tuple(answer["spans"]), "span" if len(answer["spans"]) == 1 else "spans"
elif "date" in answer:
return (
tuple(
[
"{0} {1} {2}".format(
answer["date"]["day"], answer["date"]["month"], answer["date"]["year"]
).strip()
]
),
"date",
)
else:
raise ValueError(
f"Answer type not found, should be one of number, spans or date at: {json.dumps(answer)}"
)
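# Usage sketch (illustrative; not part of the original source):
#   answer_json_to_strings({"number": 12})
#   # -> (("12",), "number")
#   answer_json_to_strings({"spans": ["a", "b"]})
#   # -> (("a", "b"), "spans")
#   answer_json_to_strings({"date": {"day": "1", "month": "May", "year": "1990"}})
#   # -> (("1 May 1990",), "date")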
|
Takes an answer JSON blob from the DROP data release and converts it into strings used for
evaluation.
|
answer_json_to_strings
|
python
|
xjdr-alt/entropix
|
evals/drop_eval.py
|
https://github.com/xjdr-alt/entropix/blob/master/evals/drop_eval.py
|
Apache-2.0
|
def normalize(s: str) -> str:
"""Lower text and remove punctuation, articles and extra whitespace."""
s = s.lower()
exclude = set(string.punctuation)
s = "".join(char for char in s if char not in exclude)
s = re.sub(r"\b(a|an|the)\b", " ", s)
s = " ".join(s.split())
return s
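# Usage sketch (illustrative; not part of the original source): lowercasing,
# punctuation removal, article removal, and whitespace collapse compose to:
#   normalize("The Quick, Brown Fox!")  # -> "quick brown fox"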
|
Lower text and remove punctuation, articles and extra whitespace.
|
normalize
|
python
|
xjdr-alt/entropix
|
evals/drop_eval.py
|
https://github.com/xjdr-alt/entropix/blob/master/evals/drop_eval.py
|
Apache-2.0
|
def evaluate_functional_correctness(
sample: dict[str, str],
completions: list[str],
n_workers: int = 4,
timeout: float = 3.0,
):
"""
Evaluates the functional correctness of generated completions against the
sample's test suite and returns a list of per-completion pass flags.
"""
# Check the generated samples against test suites.
with ThreadPoolExecutor(max_workers=n_workers) as executor:
futures = []
for i, completion in enumerate(completions):
args = (sample, completion, timeout, i)
future = executor.submit(check_correctness, *args)
futures.append(future)
results = []
for future in as_completed(futures):
result = future.result()
results.append(result)
passed = [int(r["passed"]) for r in results]
return passed
|
Evaluates the functional correctness of generated completions against the
sample's test suite and returns a list of per-completion pass flags.
|
evaluate_functional_correctness
|
python
|
xjdr-alt/entropix
|
evals/humaneval_eval.py
|
https://github.com/xjdr-alt/entropix/blob/master/evals/humaneval_eval.py
|
Apache-2.0
|
def stream_jsonl(filename: str) -> Iterable[Dict]:
"""
Parses each jsonl line and yields it as a dictionary
"""
if filename.endswith(".gz"):
with open(filename, "rb") as gzfp:
with gzip.open(gzfp, 'rt') as fp:
for line in fp:
if any(not x.isspace() for x in line):
yield json.loads(line)
else:
with open(filename, "r") as fp:
for line in fp:
if any(not x.isspace() for x in line):
yield json.loads(line)
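# Usage sketch (illustrative; not part of the original source; the file name
# and record key are hypothetical). Gzipped and plain jsonl are handled
# transparently:
#   for record in stream_jsonl("samples.jsonl.gz"):
#       print(record["task_id"])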
|
Parses each jsonl line and yields it as a dictionary
|
stream_jsonl
|
python
|
xjdr-alt/entropix
|
evals/human-eval/human_eval/data.py
|
https://github.com/xjdr-alt/entropix/blob/master/evals/human-eval/human_eval/data.py
|
Apache-2.0
|