Unnamed: 0
int64
0
0
repo_id
stringlengths
5
186
file_path
stringlengths
15
223
content
stringlengths
1
32.8M
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/inline-table/empty.toml
empty1 = {} empty2 = { } empty_in_array = [ { not_empty = 1 }, {} ] empty_in_array2 = [{},{not_empty=1}] many_empty = [{},{},{}] nested_empty = {"empty"={}}
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/inline-table/inline-table.toml
name = { first = "Tom", last = "Preston-Werner" } point = { x = 1, y = 2 } simple = { a = 1 } str-key = { "a" = 1 } table-array = [{ "a" = 1 }, { "b" = 2 }]
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/inline-table/inline-table.json
{ "name": { "first": { "type": "string", "value": "Tom" }, "last": { "type": "string", "value": "Preston-Werner" } }, "point": { "x": { "type": "integer", "value": "1" }, "y": { "type": "integer", "value": "2" } }, "simple": { "a": { "type": "integer", "value": "1" } }, "str-key": { "a": { "type": "integer", "value": "1" } }, "table-array": [ { "a": { "type": "integer", "value": "1" } }, { "b": { "type": "integer", "value": "2" } } ] }
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/inline-table/key-dotted.json
{ "a": { "a": { "b": { "type": "integer", "value": "1" } } }, "arr": [ { "T": { "a": { "b": { "type": "integer", "value": "1" } } }, "t": { "a": { "b": { "type": "integer", "value": "1" } } } }, { "T": { "a": { "b": { "type": "integer", "value": "2" } } }, "t": { "a": { "b": { "type": "integer", "value": "2" } } } } ], "b": { "a": { "b": { "type": "integer", "value": "1" } } }, "c": { "a": { "b": { "type": "integer", "value": "1" } } }, "d": { "a": { "b": { "type": "integer", "value": "1" } } }, "e": { "a": { "b": { "type": "integer", "value": "1" } } }, "inline": { "a": { "b": { "type": "integer", "value": "42" } } }, "many": { "dots": { "here": { "dot": { "dot": { "dot": { "a": { "b": { "c": { "type": "integer", "value": "1" }, "d": { "type": "integer", "value": "2" } } } } } } } } }, "tbl": { "a": { "b": { "c": { "d": { "e": { "type": "integer", "value": "1" } } } } }, "x": { "a": { "b": { "c": { "d": { "e": { "type": "integer", "value": "1" } } } } } } } }
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/float/float.toml
pi = 3.14 pospi = +3.14 negpi = -3.14 zero-intpart = 0.123
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/float/zero.toml
zero = 0.0 signed-pos = +0.0 signed-neg = -0.0 exponent = 0e0 exponent-two-0 = 0e00 exponent-signed-pos = +0e0 exponent-signed-neg = -0e0
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/float/zero.json
{ "zero": { "type": "float", "value": "0" }, "signed-pos": { "type": "float", "value": "0" }, "signed-neg": { "type": "float", "value": "0" }, "exponent": { "type": "float", "value": "0" }, "exponent-two-0": { "type": "float", "value": "0" }, "exponent-signed-pos": { "type": "float", "value": "0" }, "exponent-signed-neg": { "type": "float", "value": "0" } }
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/float/long.json
{ "longpi": { "type": "float", "value": "3.141592653589793" }, "neglongpi": { "type": "float", "value": "-3.141592653589793" } }
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/float/underscore.json
{ "after": { "type": "float", "value": "3141.5927" }, "before": { "type": "float", "value": "3141.5927" }, "exponent": { "type": "float", "value": "3.0e14" } }
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/float/inf-and-nan.json
{ "infinity": { "type": "float", "value": "inf" }, "infinity_neg": { "type": "float", "value": "-inf" }, "infinity_plus": { "type": "float", "value": "+inf" }, "nan": { "type": "float", "value": "nan" }, "nan_neg": { "type": "float", "value": "nan" }, "nan_plus": { "type": "float", "value": "nan" } }
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/float/inf-and-nan.toml
# We don't encode +nan and -nan back with the signs; many languages don't # support a sign on NaN (it doesn't really make much sense). nan = nan nan_neg = -nan nan_plus = +nan infinity = inf infinity_neg = -inf infinity_plus = +inf
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/float/long.toml
longpi = 3.141592653589793 neglongpi = -3.141592653589793
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/float/float.json
{ "negpi": { "type": "float", "value": "-3.14" }, "pi": { "type": "float", "value": "3.14" }, "pospi": { "type": "float", "value": "3.14" }, "zero-intpart": { "type": "float", "value": "0.123" } }
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/float/exponent.json
{ "lower": { "type": "float", "value": "300.0" }, "minustenth": { "type": "float", "value": "-0.1" }, "neg": { "type": "float", "value": "0.03" }, "pointlower": { "type": "float", "value": "310.0" }, "pointupper": { "type": "float", "value": "310.0" }, "pos": { "type": "float", "value": "300.0" }, "upper": { "type": "float", "value": "300.0" }, "zero": { "type": "float", "value": "3.0" } }
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/float/exponent.toml
lower = 3e2 upper = 3E2 neg = 3e-2 pos = 3E+2 zero = 3e0 pointlower = 3.1e2 pointupper = 3.1E2 minustenth = -1E-1
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/float/underscore.toml
before = 3_141.5927 after = 3141.592_7 exponent = 3e1_4
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/integer/integer.toml
answer = 42 posanswer = +42 neganswer = -42 zero = 0
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/integer/zero.toml
d1 = 0 d2 = +0 d3 = -0 h1 = 0x0 h2 = 0x00 h3 = 0x00000 o1 = 0o0 a2 = 0o00 a3 = 0o00000 b1 = 0b0 b2 = 0b00 b3 = 0b00000
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/integer/zero.json
{ "a2": { "type": "integer", "value": "0" }, "a3": { "type": "integer", "value": "0" }, "b1": { "type": "integer", "value": "0" }, "b2": { "type": "integer", "value": "0" }, "b3": { "type": "integer", "value": "0" }, "d1": { "type": "integer", "value": "0" }, "d2": { "type": "integer", "value": "0" }, "d3": { "type": "integer", "value": "0" }, "h1": { "type": "integer", "value": "0" }, "h2": { "type": "integer", "value": "0" }, "h3": { "type": "integer", "value": "0" }, "o1": { "type": "integer", "value": "0" } }
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/integer/long.json
{ "int64-max": { "type": "integer", "value": "9223372036854775807" }, "int64-max-neg": { "type": "integer", "value": "-9223372036854775808" } }
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/integer/underscore.json
{ "kilo": { "type": "integer", "value": "1000" }, "x": { "type": "integer", "value": "1111" } }
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/integer/literals.toml
bin1 = 0b11010110 bin2 = 0b1_0_1 oct1 = 0o01234567 oct2 = 0o755 oct3 = 0o7_6_5 hex1 = 0xDEADBEEF hex2 = 0xdeadbeef hex3 = 0xdead_beef hex4 = 0x00987
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/integer/long.toml
int64-max = 9223372036854775807 int64-max-neg = -9223372036854775808
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/integer/integer.json
{ "answer": { "type": "integer", "value": "42" }, "neganswer": { "type": "integer", "value": "-42" }, "posanswer": { "type": "integer", "value": "42" }, "zero": { "type": "integer", "value": "0" } }
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/integer/underscore.toml
kilo = 1_000 x = 1_1_1_1
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/integer/literals.json
{ "bin1": { "type": "integer", "value": "214" }, "bin2": { "type": "integer", "value": "5" }, "hex1": { "type": "integer", "value": "3735928559" }, "hex2": { "type": "integer", "value": "3735928559" }, "hex3": { "type": "integer", "value": "3735928559" }, "hex4": { "type": "integer", "value": "2439" }, "oct1": { "type": "integer", "value": "342391" }, "oct2": { "type": "integer", "value": "493" }, "oct3": { "type": "integer", "value": "501" } }
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/string/multiline.json
{ "equivalent_one": { "type": "string", "value": "The quick brown fox jumps over the lazy dog." }, "equivalent_three": { "type": "string", "value": "The quick brown fox jumps over the lazy dog." }, "equivalent_two": { "type": "string", "value": "The quick brown fox jumps over the lazy dog." }, "escape-bs-1": { "type": "string", "value": "a \\\nb" }, "escape-bs-2": { "type": "string", "value": "a \\b" }, "escape-bs-3": { "type": "string", "value": "a \\\\\n b" }, "keep-ws-before": { "type": "string", "value": "a \tb" }, "multiline_empty_four": { "type": "string", "value": "" }, "multiline_empty_one": { "type": "string", "value": "" }, "multiline_empty_three": { "type": "string", "value": "" }, "multiline_empty_two": { "type": "string", "value": "" }, "no-space": { "type": "string", "value": "ab" }, "whitespace-after-bs": { "type": "string", "value": "The quick brown fox jumps over the lazy dog." } }
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/string/simple.json
{ "answer": { "type": "string", "value": "You are not drinking enough whisky." } }
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/string/double-quote-escape.json
{ "test": { "type": "string", "value": "\"one\"" } }
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/string/escaped-escape.toml
answer = "\\x64"
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/string/empty.json
{ "answer": { "type": "string", "value": "" } }
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/string/multiline-escaped-crlf.toml
# The following line should be an unescaped backslash followed by a Windows # newline sequence ("\r\n") 0="""\ """
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/string/simple.toml
answer = "You are not drinking enough whisky."
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/string/escapes.toml
backspace = "This string has a \b backspace character." tab = "This string has a \t tab character." newline = "This string has a \n new line character." formfeed = "This string has a \f form feed character." carriage = "This string has a \r carriage return character." quote = "This string has a \" quote character." backslash = "This string has a \\ backslash character." notunicode1 = "This string does not have a unicode \\u escape." notunicode2 = "This string does not have a unicode \u005Cu escape." notunicode3 = "This string does not have a unicode \\u0075 escape." notunicode4 = "This string does not have a unicode \\\u0075 escape." delete = "This string has a \u007F delete control code." unitseparator = "This string has a \u001F unit separator control code."
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/string/multiline.toml
# NOTE: this file includes some literal tab characters. multiline_empty_one = """""" # A newline immediately following the opening delimiter will be trimmed. multiline_empty_two = """ """ # \ at the end of line trims newlines as well; note that last \ is followed by # two spaces, which are ignored. multiline_empty_three = """\ """ multiline_empty_four = """\ \ \ """ equivalent_one = "The quick brown fox jumps over the lazy dog." equivalent_two = """ The quick brown \ fox jumps over \ the lazy dog.""" equivalent_three = """\ The quick brown \ fox jumps over \ the lazy dog.\ """ whitespace-after-bs = """\ The quick brown \ fox jumps over \ the lazy dog.\ """ no-space = """a\ b""" # Has tab character. keep-ws-before = """a \ b""" escape-bs-1 = """a \\ b""" escape-bs-2 = """a \\\ b""" escape-bs-3 = """a \\\\ b"""
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/string/unicode-literal.toml
answer = "δ"
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/string/escaped-escape.json
{ "answer": { "type": "string", "value": "\\x64" } }
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/string/raw-multiline.json
{ "firstnl": { "type": "string", "value": "This string has a ' quote character." }, "multiline": { "type": "string", "value": "This string\nhas ' a quote character\nand more than\none newline\nin it." }, "oneline": { "type": "string", "value": "This string has a ' quote character." } }
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/string/nl.json
{ "lit_nl_end": { "type": "string", "value": "value\\n" }, "lit_nl_mid": { "type": "string", "value": "val\\nue" }, "lit_nl_uni": { "type": "string", "value": "val\\ue" }, "nl_end": { "type": "string", "value": "value\n" }, "nl_mid": { "type": "string", "value": "val\nue" } }
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/string/unicode-escape.toml
answer4 = "\u03B4" answer8 = "\U000003B4"
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/string/multiline-quotes.toml
# Make sure that quotes inside multiline strings are allowed, including right # after the opening '''/""" and before the closing '''/""" lit_one = ''''one quote'''' lit_two = '''''two quotes''''' lit_one_space = ''' 'one quote' ''' lit_two_space = ''' ''two quotes'' ''' one = """"one quote"""" two = """""two quotes""""" one_space = """ "one quote" """ two_space = """ ""two quotes"" """ mismatch1 = """aaa'''bbb""" mismatch2 = '''aaa"""bbb''' # Three opening """, then one escaped ", then two "" (allowed), and then three # closing """ escaped = """lol\""""""
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/string/escapes.json
{ "backslash": { "type": "string", "value": "This string has a \\ backslash character." }, "backspace": { "type": "string", "value": "This string has a \u0008 backspace character." }, "carriage": { "type": "string", "value": "This string has a \r carriage return character." }, "delete": { "type": "string", "value": "This string has a  delete control code." }, "formfeed": { "type": "string", "value": "This string has a \u000c form feed character." }, "newline": { "type": "string", "value": "This string has a \n new line character." }, "notunicode1": { "type": "string", "value": "This string does not have a unicode \\u escape." }, "notunicode2": { "type": "string", "value": "This string does not have a unicode \\u escape." }, "notunicode3": { "type": "string", "value": "This string does not have a unicode \\u0075 escape." }, "notunicode4": { "type": "string", "value": "This string does not have a unicode \\u escape." }, "quote": { "type": "string", "value": "This string has a \" quote character." }, "tab": { "type": "string", "value": "This string has a \t tab character." }, "unitseparator": { "type": "string", "value": "This string has a \u001f unit separator control code." } }
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/string/raw-multiline.toml
# Single ' should be allowed. oneline = '''This string has a ' quote character.''' # A newline immediately following the opening delimiter will be trimmed. firstnl = ''' This string has a ' quote character.''' # All other whitespace and newline characters remain intact. multiline = ''' This string has ' a quote character and more than one newline in it.'''
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/string/empty.toml
answer = ""
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/string/multiline-escaped-crlf.json
{ "0": { "type": "string", "value": "" } }
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/string/unicode-literal.json
{ "answer": { "type": "string", "value": "δ" } }
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/string/escape-tricky.json
{ "end_esc": { "type": "string", "value": "String does not end here\" but ends here\\" }, "lit_end_esc": { "type": "string", "value": "String ends here\\" }, "lit_multiline_end": { "type": "string", "value": "There is no escape\\" }, "lit_multiline_not_unicode": { "type": "string", "value": "\\u007f" }, "multiline_end_esc": { "type": "string", "value": "When will it end? \"\"\"...\"\"\" should be here\"" }, "multiline_not_unicode": { "type": "string", "value": "\\u0041" }, "multiline_unicode": { "type": "string", "value": " " } }
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/string/raw.toml
backspace = 'This string has a \b backspace character.' tab = 'This string has a \t tab character.' newline = 'This string has a \n new line character.' formfeed = 'This string has a \f form feed character.' carriage = 'This string has a \r carriage return character.' slash = 'This string has a \/ slash character.' backslash = 'This string has a \\ backslash character.'
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/string/unicode-escape.json
{ "answer4": { "type": "string", "value": "δ" }, "answer8": { "type": "string", "value": "δ" } }
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/string/with-pound.json
{ "pound": { "type": "string", "value": "We see no # comments here." }, "poundcomment": { "type": "string", "value": "But there are # some comments here." } }
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/string/double-quote-escape.toml
test = "\"one\""
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/string/multiline-quotes.json
{ "escaped": { "type": "string", "value": "lol\"\"\"" }, "lit_one": { "type": "string", "value": "'one quote'" }, "lit_one_space": { "type": "string", "value": " 'one quote' " }, "lit_two": { "type": "string", "value": "''two quotes''" }, "lit_two_space": { "type": "string", "value": " ''two quotes'' " }, "mismatch1": { "type": "string", "value": "aaa'''bbb" }, "mismatch2": { "type": "string", "value": "aaa\"\"\"bbb" }, "one": { "type": "string", "value": "\"one quote\"" }, "one_space": { "type": "string", "value": " \"one quote\" " }, "two": { "type": "string", "value": "\"\"two quotes\"\"" }, "two_space": { "type": "string", "value": " \"\"two quotes\"\" " } }
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/string/raw.json
{ "backslash": { "type": "string", "value": "This string has a \\\\ backslash character." }, "backspace": { "type": "string", "value": "This string has a \\b backspace character." }, "carriage": { "type": "string", "value": "This string has a \\r carriage return character." }, "formfeed": { "type": "string", "value": "This string has a \\f form feed character." }, "newline": { "type": "string", "value": "This string has a \\n new line character." }, "slash": { "type": "string", "value": "This string has a \\/ slash character." }, "tab": { "type": "string", "value": "This string has a \\t tab character." } }
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/string/escape-esc.toml
esc = "\e There is no escape! \e"
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/string/with-pound.toml
pound = "We see no # comments here." poundcomment = "But there are # some comments here." # Did I # mess you up?
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/string/nl.toml
nl_mid = "val\nue" nl_end = """value\n""" lit_nl_end = '''value\n''' lit_nl_mid = 'val\nue' lit_nl_uni = 'val\ue'
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/string/escape-tricky.toml
end_esc = "String does not end here\" but ends here\\" lit_end_esc = 'String ends here\' multiline_unicode = """ \u00a0""" multiline_not_unicode = """ \\u0041""" multiline_end_esc = """When will it end? \"""...""\" should be here\"""" lit_multiline_not_unicode = ''' \u007f''' lit_multiline_end = '''There is no escape\'''
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/string/escape-esc.json
{ "esc": { "type": "string", "value": "\u001b There is no escape! \u001b" } }
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/bool/bool.toml
t = true f = false
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/bool/bool.json
{ "f": { "type": "bool", "value": "false" }, "t": { "type": "bool", "value": "true" } }
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/datetime/datetime.json
{ "lower": { "type": "datetime", "value": "1987-07-05T17:45:00Z" }, "space": { "type": "datetime", "value": "1987-07-05T17:45:00Z" } }
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/datetime/local-time.toml
besttimeever = 17:45:00 milliseconds = 10:32:00.555
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/datetime/local.toml
local = 1987-07-05T17:45:00 milli = 1977-12-21T10:32:00.555 space = 1987-07-05 17:45:00
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/datetime/datetime.toml
space = 1987-07-05 17:45:00Z lower = 1987-07-05t17:45:00z
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/datetime/milliseconds.json
{ "utc1": { "type": "datetime", "value": "1987-07-05T17:45:56.1234Z" }, "utc2": { "type": "datetime", "value": "1987-07-05T17:45:56.6000Z" }, "wita1": { "type": "datetime", "value": "1987-07-05T17:45:56.1234+08:00" }, "wita2": { "type": "datetime", "value": "1987-07-05T17:45:56.6000+08:00" } }
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/datetime/local-date.json
{ "bestdayever": { "type": "date-local", "value": "1987-07-05" } }
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/datetime/timezone.json
{ "nzdt": { "type": "datetime", "value": "1987-07-05T17:45:56+13:00" }, "nzst": { "type": "datetime", "value": "1987-07-05T17:45:56+12:00" }, "pdt": { "type": "datetime", "value": "1987-07-05T17:45:56-05:00" }, "utc": { "type": "datetime", "value": "1987-07-05T17:45:56Z" } }
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/datetime/timezone.toml
utc = 1987-07-05T17:45:56Z pdt = 1987-07-05T17:45:56-05:00 nzst = 1987-07-05T17:45:56+12:00 nzdt = 1987-07-05T17:45:56+13:00 # DST
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/datetime/local-time.json
{ "besttimeever": { "type": "time-local", "value": "17:45:00" }, "milliseconds": { "type": "time-local", "value": "10:32:00.555" } }
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/datetime/local-date.toml
bestdayever = 1987-07-05
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/datetime/local.json
{ "local": { "type": "datetime-local", "value": "1987-07-05T17:45:00" }, "milli": { "type": "datetime-local", "value": "1977-12-21T10:32:00.555" }, "space": { "type": "datetime-local", "value": "1987-07-05T17:45:00" } }
0
repos/tomlz/tests/valid
repos/tomlz/tests/valid/datetime/milliseconds.toml
utc1 = 1987-07-05T17:45:56.1234Z utc2 = 1987-07-05T17:45:56.6Z wita1 = 1987-07-05T17:45:56.1234+08:00 wita2 = 1987-07-05T17:45:56.6+08:00
0
repos/tomlz
repos/tomlz/src/main.zig
const std = @import("std"); const testing = std.testing; pub const lex = @import("lexer.zig"); pub const parser = @import("parser.zig"); pub const serializer = @import("serializer.zig"); pub const parse = parser.parse; pub const decode = parser.decode; pub const Value = parser.Value; pub const Array = parser.Array; pub const Table = parser.Table; /// Serialize a value to the given out_stream. /// /// Use this when you want to write a struct as the root table. When serializing /// e.g. a number use `serializeKeyValue` instead, because in that case a key is required. /// /// For a fixed-depth version that doesn't require an allocator, see `serializer.serializeFixedDepth`. /// /// # Example /// ``` /// const std = @import("std") /// const tomlz = @import("tomlz"); /// /// var gpa_instance = std.heap.GeneralPurposeAllocator(.{}){}; /// defer _ = gpa_instance.deinit(); /// /// const point = .{ /// .x=4, /// .y=2, /// }; /// /// try tomlz.serialize( /// gpa_instance.allocator(), /// std.io.getStdOut().writer(), /// point, /// ); /// /// // Output: /// // x = 4 /// // y = 2 /// ```` pub const serialize = serializer.serialize; /// Serialize a key-value pair to the given out_stream. /// /// If you want to write a struct as the root table, see `serialize`. /// /// For a fixed-depth version that doesn't require an allocator, see `serializer.serializeKeyValueFixedDepth`. 
/// # Example /// ``` /// const std = @import("std") /// const tomlz = @import("tomlz"); /// /// var gpa_instance = std.heap.GeneralPurposeAllocator(.{}){}; /// defer _ = gpa_instance.deinit(); /// /// const mynumber = 42; /// /// try tomlz.serializeKeyValue( /// gpa_instance.allocator(), /// std.io.getStdOut().writer(), /// "some_key", /// mynumber /// ); /// /// // Output: /// // mynumber = 42 /// ```` pub const serializeKeyValue = serializer.serializeKeyValue; test "refAllDecls" { const integration_tests = @import("integration_tests.zig"); std.testing.refAllDecls(lex); std.testing.refAllDecls(parser); std.testing.refAllDecls(integration_tests); std.testing.refAllDecls(serializer); }
0
repos/tomlz
repos/tomlz/src/parser.zig
const std = @import("std");
const lex = @import("lexer.zig");
const testing = std.testing;

const LexError = lex.Lexer.Error;
const AllocError = std.mem.Allocator.Error;

/// Errors that can occur while parsing a TOML document, including any error
/// the lexer or the allocator can produce.
pub const ParseError = error{
    UnexpectedToken,
    UnexpectedChar,
    NotUTF8,
    EOF,
    KeyAlreadyExists,
    NotTableOrArray,
    NotTable,
    NotArray,
    StringNotEnded,
    InlineTablesAndArraysAreImmutable,
    InvalidCodepoint,
} || LexError || AllocError;

/// Errors that can occur while decoding a TOML document directly into a struct.
pub const DecodingError = error{ MismatchedType, MissingField } || ParseError;

/// locToIndex converts a 1-indexed (line, col) location into a byte index into src.
/// CRLF is treated as a single line break. If the location is never reached the
/// returned index is src.len.
fn locToIndex(src: []const u8, loc: lex.Loc) usize {
    var line: usize = 1;
    var col: usize = 1;
    var i: usize = 0;
    while (i < src.len) : (i += 1) {
        const c = src[i];
        if (c == '\r' and i + 1 < src.len and src[i + 1] == '\n') {
            col = 1;
            line += 1;
            i += 1;
            continue;
        }
        if (c == '\n') {
            col = 1;
            line += 1;
            continue;
        }
        if (line == loc.line and col == loc.col) break;
        col += 1;
    }
    return i;
}

/// Lexer is a wrapper enum so we can do static dispatch on real/fake lexers for testing purposes
pub const Lexer = union(enum) {
    real: lex.Lexer,
    fake: lex.Fake,

    fn next(self: *Lexer, force_key: bool) LexError!?lex.TokLoc {
        return switch (self.*) {
            .real => |*r| r.next(force_key),
            .fake => |*f| f.next(),
        };
    }

    /// source returns the original document text, or a placeholder for fakes.
    fn source(self: *const Lexer) []const u8 {
        return switch (self.*) {
            .real => |*r| r.source,
            .fake => "<empty source>",
        };
    }

    fn deinit(self: *Lexer) void {
        switch (self.*) {
            .real => |*r| r.deinit(),
            else => {},
        }
    }
};

/// Table represents a TOML table (i.e. dicitonary/hashmap). It assumes that the key and value were allocated with the
/// same allocator
pub const Table = struct {
    table: TableBase = .{},
    /// How this table came into existence; governs whether it may be extended later.
    source: Source,

    /// Whether or not we can assign to values inside of this table.
    ///
    /// NOTE: does not cover nesting. If we are table 'a' then you may be able to assign to 'a.b.c' even if we have
    /// closed set to true.
    closed: bool,

    const Source = enum { @"inline", header, top_level, assignment };
    const TableBase = std.StringHashMapUnmanaged(Value);

    /// close marks this table and every nested table as immutable.
    fn close(self: *Table) void {
        self.closed = true;

        var it = self.table.iterator();
        while (it.next()) |entry| {
            if (entry.value_ptr.* != .table) continue;

            entry.value_ptr.table.close();
        }
    }

    pub fn contains(self: *const Table, key: []const u8) bool {
        return self.table.get(key) != null;
    }

    /// getInteger returns the integer stored at key, or null if it is absent or not an integer.
    pub fn getInteger(self: *const Table, key: []const u8) ?i64 {
        const val = self.table.get(key) orelse return null;
        switch (val) {
            .integer => |i| return i,
            else => return null,
        }
    }

    /// getFloat returns the float stored at key, or null if it is absent or not a float.
    pub fn getFloat(self: *const Table, key: []const u8) ?f64 {
        const val = self.table.get(key) orelse return null;
        switch (val) {
            .float => |f| return f,
            else => return null,
        }
    }

    /// getBool returns the boolean stored at key, or null if it is absent or not a boolean.
    pub fn getBool(self: *const Table, key: []const u8) ?bool {
        const val = self.table.get(key) orelse return null;
        switch (val) {
            .boolean => |b| return b,
            else => return null,
        }
    }

    /// getString returns the string stored at key, or null if it is absent or not a string.
    pub fn getString(self: *const Table, key: []const u8) ?[]const u8 {
        const val = self.table.get(key) orelse return null;
        switch (val) {
            .string => |s| return s,
            else => return null,
        }
    }

    /// getArray returns the array stored at key, or null if it is absent or not an array.
    pub fn getArray(self: *const Table, key: []const u8) ?Array {
        const val = self.table.get(key) orelse return null;
        switch (val) {
            .array => |a| return a,
            else => return null,
        }
    }

    /// getTable returns the table stored at key, or null if it is absent or not a table.
    pub fn getTable(self: *const Table, key: []const u8) ?Table {
        const val = self.table.get(key) orelse return null;
        switch (val) {
            .table => |t| return t,
            else => return null,
        }
    }

    fn insert(self: *Table, allocator: std.mem.Allocator, key: []const u8, value: Value) AllocError!void {
        return try self.table.put(allocator, key, value);
    }

    /// getOrPutTable returns a pointer to the table at key, inserting `value` if the key is
    /// absent. Fails with error.NotTable if the key holds a non-table value.
    fn getOrPutTable(self: *Table, allocator: std.mem.Allocator, key: []const u8, value: Table) !*Table {
        var v = try self.table.getOrPutValue(allocator, key, .{ .table = value });
        if (v.value_ptr.* != .table) return error.NotTable;

        return &v.value_ptr.table;
    }

    /// getOrPutArray returns a pointer to the array at key, inserting an empty array with the
    /// given source if the key is absent. Fails with error.NotArray if the key holds a
    /// non-array value.
    fn getOrPutArray(self: *Table, allocator: std.mem.Allocator, key: []const u8, source: Array.Source) !*Array {
        var v = try self.table.getOrPutValue(allocator, key, .{ .array = .{ .source = source } });
        if (v.value_ptr.* != .array) return error.NotArray;

        return &v.value_ptr.array;
    }

    /// deinit frees all keys and values. Assumes they were allocated with `allocator`.
    pub fn deinit(self: *Table, allocator: std.mem.Allocator) void {
        var it = self.table.iterator();
        while (it.next()) |entry| {
            allocator.free(entry.key_ptr.*);
            entry.value_ptr.deinit(allocator);
        }
        self.table.deinit(allocator);
    }

    pub fn tomlzSerialize(self: *const Table, writer: anytype) !void {
        try writer.beginTable();
        var it = self.table.iterator();
        while (it.next()) |entry| {
            try writer.writeKeyValue(entry.key_ptr.*, entry.value_ptr.*);
        }
    }
};

/// Array represents a TOML array of values.
pub const Array = struct {
    array: Base = .{},
    /// Whether the array came from an inline value (`a = [...]`) or `[[header]]` syntax;
    /// inline arrays may not be extended afterwards.
    source: Source,

    const Base = std.ArrayListUnmanaged(Value);
    const Source = enum { @"inline", header };

    pub fn items(self: *const Array) []const Value {
        return self.array.items;
    }

    /// getInteger returns the integer at index, or null if out of bounds or not an integer.
    pub fn getInteger(self: *const Array, index: usize) ?i64 {
        if (index >= self.array.items.len) return null;

        switch (self.array.items[index]) {
            .integer => |i| return i,
            else => return null,
        }
    }

    /// getFloat returns the float at index, or null if out of bounds or not a float.
    pub fn getFloat(self: *const Array, index: usize) ?f64 {
        if (index >= self.array.items.len) return null;

        switch (self.array.items[index]) {
            .float => |f| return f,
            else => return null,
        }
    }

    /// getBoolean returns the boolean at index, or null if out of bounds or not a boolean.
    pub fn getBoolean(self: *const Array, index: usize) ?bool {
        if (index >= self.array.items.len) return null;

        switch (self.array.items[index]) {
            .boolean => |b| return b,
            else => return null,
        }
    }

    /// getString returns the string at index, or null if out of bounds or not a string.
    pub fn getString(self: *const Array, index: usize) ?[]const u8 {
        if (index >= self.array.items.len) return null;

        switch (self.array.items[index]) {
            .string => |s| return s,
            else => return null,
        }
    }

    /// getTable returns the table at index, or null if out of bounds or not a table.
    pub fn getTable(self: *const Array, index: usize) ?Table {
        if (index >= self.array.items.len) return null;

        switch (self.array.items[index]) {
            .table => |t| return t,
            else => return null,
        }
    }

    /// getArray returns the array at index, or null if out of bounds or not an array.
    pub fn getArray(self: *const Array, index: usize) ?Array {
        if (index >= self.array.items.len) return null;

        switch (self.array.items[index]) {
            .array => |a| return a,
            else => return null,
        }
    }

    pub fn tomlzSerialize(self: *const Array, writer: anytype) !void {
        return writer.write(self.items());
    }
};

/// Value represents a TOML value: i.e. an integer, string, float, boolean, table, array
pub const Value = union(enum) {
    integer: i64,
    float: f64,
    string: []const u8,
    array: Array,
    table: Table,
    boolean: bool,

    /// dupe makes a copy of this value using `allocator`.
    ///
    /// NOTE(review): for arrays the item slice is copied shallowly
    /// (appendSliceAssumeCapacity) - nested strings/tables in the items are NOT duped, so
    /// deinit-ing both the original and the copy would double free. Confirm callers never
    /// deinit both.
    fn dupe(self: *const Value, allocator: std.mem.Allocator) AllocError!Value {
        return switch (self.*) {
            .string => |s| Value{ .string = try allocator.dupe(u8, s) },
            .array => |a| b: {
                var new = try std.ArrayListUnmanaged(Value).initCapacity(allocator, a.array.items.len);
                new.appendSliceAssumeCapacity(a.array.items);
                break :b Value{ .array = .{ .array = new, .source = a.source } };
            },
            .table => |t| b: {
                var new_table = Table{ .source = t.source, .closed = t.closed };
                var it = t.table.iterator();
                while (it.next()) |entry| {
                    try new_table.insert(
                        allocator,
                        try allocator.dupe(u8, entry.key_ptr.*),
                        try entry.value_ptr.dupe(allocator),
                    );
                }
                break :b Value{ .table = new_table };
            },
            // integer/float/boolean are plain values - returning a copy is enough
            else => return self.*,
        };
    }

    fn deinit(self: *Value, allocator: std.mem.Allocator) void {
        switch (self.*) {
            Value.table => |*t| t.deinit(allocator),
            Value.array => |*a| {
                for (a.array.items) |*item| {
                    item.deinit(allocator);
                }
                a.array.deinit(allocator);
            },
            Value.string => |*s| allocator.free(s.*),
            else => {},
        }
    }
};

/// unwrapOptionals peels off any number of optional layers from T and returns the type info
/// of the innermost non-optional type (e.g. ??u8 -> the Int info of u8).
fn unwrapOptionals(comptime T: anytype) std.builtin.Type {
    var ti = @typeInfo(T);
    inline while (true) {
        switch (ti) {
            .Optional => |o| ti = @typeInfo(o.child),
            else => return ti,
        }
    }
}

/// decodeValue converts a parsed TOML Value into a value of type T, allocating copies of
/// strings and slices with gpa. Fails with MismatchedType when the TOML type and T disagree.
fn decodeValue(comptime T: type, gpa: std.mem.Allocator, v: Value) DecodingError!T {
    const ti = @typeInfo(T);
    // Allow optional target types (e.g. ?i64) for scalar values.
    const opts_unwrapped_ti = unwrapOptionals(T);
    switch (v) {
        .integer => |i| {
            if (opts_unwrapped_ti != .Int) return DecodingError.MismatchedType;

            return @intCast(i);
        },
        .float => |fl| {
            if (opts_unwrapped_ti != .Float) return DecodingError.MismatchedType;

            return @floatCast(fl);
        },
        .boolean => |b| {
            if (opts_unwrapped_ti != .Bool) return DecodingError.MismatchedType;

            return b;
        },
        .string => |s| {
            // FIX: was `ti.Pointer.Size != .Many`, which accesses the declaration `Size`
            // through a value instead of the field `size` and does not compile when analyzed.
            if (ti != .Pointer or ti.Pointer.child != u8 or
                (ti.Pointer.size != .Slice and ti.Pointer.size != .Many))
            {
                return DecodingError.MismatchedType;
            }

            return try gpa.dupe(u8, s);
        },
        .array => |a| {
            if (ti != .Pointer or (ti.Pointer.size != .Slice and ti.Pointer.size != .Many))
                return DecodingError.MismatchedType;

            // NOTE(review): if decodeValue fails mid-loop only the list itself is freed;
            // already-decoded elements (e.g. duped strings) leak. Confirm acceptable.
            var al = try std.ArrayList(ti.Pointer.child).initCapacity(gpa, a.items().len);
            errdefer al.deinit();

            for (a.items()) |array_val| {
                al.appendAssumeCapacity(try decodeValue(ti.Pointer.child, gpa, array_val));
            }

            return al.toOwnedSlice();
        },
        .table => |t| {
            return try decodeTable(T, gpa, t);
        },
    }
}

/// decodeTable fills a struct of type T from a parsed TOML table.
///
/// Missing keys become null for optional fields, take the field's default value if one is
/// declared, and otherwise fail with MissingField.
fn decodeTable(comptime T: anytype, gpa: std.mem.Allocator, table: Table) DecodingError!T {
    const ti = @typeInfo(T);
    if (ti != .Struct) return DecodingError.MismatchedType;

    var strct: T = undefined;
    inline for (ti.Struct.fields) |f| {
        const f_ti = @typeInfo(f.type);
        if (!table.contains(f.name)) {
            if (f_ti == .Optional)
                @field(strct, f.name) = null
            else if (f.default_value) |default_ptr| {
                // default_value is a type-erased pointer; cast it back to the field's type.
                const default = @as(*align(1) const f.type, @ptrCast(default_ptr)).*;
                @field(strct, f.name) = default;
            } else return DecodingError.MissingField;
        } else {
            const v = table.table.get(f.name).?;
            @field(strct, f.name) = try decodeValue(f.type, gpa, v);
        }
    }

    return strct;
}

/// decode parses src and decodes the resulting top-level table into a struct of type T.
/// The table is freed before returning; decoded strings/slices are owned by the caller.
pub fn decode(comptime T: anytype, gpa: std.mem.Allocator, src: []const u8) DecodingError!T {
    const ti = @typeInfo(T);
    if (ti != .Struct) @compileError("argument T of decode must be a struct");

    var tbl = try parse(gpa, src);
    defer tbl.deinit(gpa);

    return try decodeTable(T, gpa, tbl);
}

/// Parser takes a Lexer and parses the tokens into a table.
pub const Parser = struct { allocator: std.mem.Allocator, lexer: Lexer, peeked: ?lex.TokLoc = null, diag: ?lex.Diagnostic = null, top_level_table: *Table, current_table: *Table, pub fn init(allocator: std.mem.Allocator, lexer: Lexer) AllocError!Parser { const table = try allocator.create(Table); table.* = .{ .source = .top_level, .closed = false }; return .{ .allocator = allocator, .lexer = lexer, .top_level_table = table, .current_table = table }; } fn peek(self: *Parser, force_key: bool) LexError!lex.TokLoc { if (self.peeked) |tokloc| return tokloc; self.peeked = try self.lexer.next(force_key); return self.peeked orelse error.EOF; } fn pop(self: *Parser, force_key: bool) LexError!lex.TokLoc { if (self.peeked) |tokloc| { self.peeked = null; return tokloc; } const next = try self.lexer.next(force_key); return next orelse error.EOF; } /// expect asserts that the current token has the same tag as expected. /// /// NOTE: If the token stores a value (e.g. boolean, string) then it does not check that the value is the same fn expect(self: *Parser, comptime expected: lex.Tok, comptime str: []const u8) !void { const actual = try self.pop(false); if (std.meta.activeTag(actual.tok) != std.meta.activeTag(expected)) { self.diag = .{ .msg = "expected '" ++ str ++ "'", .loc = actual.loc, }; return error.UnexpectedToken; } } /// parseKey parses a potentially nested (i.e. with dots in) key and inserts each key segment into al. 
It returns /// the location of the final key fn parseKey(self: *Parser, key: []const u8, loc: lex.Loc, al: *std.ArrayList([]const u8)) !lex.Loc { const dup: []const u8 = try self.allocator.dupe(u8, key); { errdefer self.allocator.free(dup); try al.append(dup); } var prev_loc = loc; while (true) { const next = try self.peek(false); switch (next.tok) { .equals, .close_square_bracket => return prev_loc, .dot => {}, else => { self.diag = .{ .msg = "expected '.', ']' or '=' after key", .loc = next.loc, }; return error.UnexpectedToken; }, } _ = self.pop(false) catch unreachable; const key_tok = try self.pop(true); prev_loc = key_tok.loc; const key_s = switch (key_tok.tok) { .key => |k| k, .string => |s| s, else => { self.diag = .{ .msg = "expected key after '.'", .loc = key_tok.loc, }; return error.UnexpectedToken; }, }; const new_dup = try self.allocator.dupe(u8, key_s); { errdefer self.allocator.free(new_dup); try al.append(new_dup); } } } fn parseValue(self: *Parser) ParseError!Value { const tokloc = try self.pop(false); const val = switch (tokloc.tok) { .string => |s| Value{ .string = try self.allocator.dupe(u8, s) }, .integer => |i| Value{ .integer = i }, .float => |f| Value{ .float = f }, .boolean => |b| Value{ .boolean = b }, .open_square_bracket => try self.parseInlineArray(), .open_curly_brace => try self.parseInlineTable(), else => { self.diag = .{ .msg = "expected value type", .loc = tokloc.loc }; return error.UnexpectedToken; }, }; return val; } fn skipNewlines(self: *Parser, force_key: bool) LexError!bool { var had_newline = false; while ((try self.peek(force_key)).tok == .newline) { had_newline = true; _ = self.pop(force_key) catch unreachable; } return had_newline; } /// parseInlineArray parses a value of the form "[ <value-1>, <value-2>, ...]" fn parseInlineArray(self: *Parser) !Value { var al = std.ArrayListUnmanaged(Value){}; errdefer { for (al.items) |*item| { item.deinit(self.allocator); } al.deinit(self.allocator); } var had_newline = false; var 
first = true; while (true) { had_newline = try self.skipNewlines(false) or had_newline; if (!first) { const tokloc = try self.peek(false); switch (tokloc.tok) { .close_square_bracket => _ = { _ = self.pop(false) catch unreachable; return Value{ .array = .{ .array = al, .source = .@"inline" } }; }, else => {}, } } if (first and (try self.peek(false)).tok == .close_square_bracket) { _ = self.pop(false) catch unreachable; return Value{ .array = .{ .array = al, .source = .@"inline" } }; } first = false; var v = try self.parseValue(); { errdefer v.deinit(self.allocator); try al.append(self.allocator, v); } had_newline = try self.skipNewlines(false) or had_newline; const tokloc = try self.pop(false); switch (tokloc.tok) { .comma => {}, .newline => {}, .close_square_bracket => return Value{ .array = .{ .array = al, .source = .@"inline" } }, else => { self.diag = .{ .msg = "expected one of '\n', ',' or ']' after list entry", .loc = tokloc.loc, }; return error.UnexpectedChar; }, } } } fn parseInlineTable(self: *Parser) !Value { const curr = self.current_table; var tbl = Table{ .source = .@"inline", .closed = false }; errdefer tbl.deinit(self.allocator); self.current_table = &tbl; defer { // Don't allow adding keys to an inline table self.current_table.close(); self.current_table = curr; } var first = true; while (true) { const tokloc = try self.pop(true); switch (tokloc.tok) { .close_curly_brace => { if (first) return .{ .table = tbl }; self.diag = .{ .msg = "trailing comma not allowed in inline table", .loc = tokloc.loc }; return error.UnexpectedToken; }, .key => |k| try self.parseAssignment(tokloc.loc, k), .string => |s| try self.parseAssignment(tokloc.loc, s), else => { self.diag = .{ .msg = "expected a key in inline table", .loc = tokloc.loc }; return error.UnexpectedToken; }, } first = false; const next = try self.peek(true); switch (next.tok) { .close_curly_brace => { _ = self.pop(true) catch unreachable; return .{ .table = tbl }; }, .comma => _ = self.pop(true) catch 
unreachable, else => { self.diag = .{ .msg = "expected a comma after assignment in inline table", .loc = next.loc }; return error.UnexpectedToken; }, } } } const AllowOpts = struct { exists: bool, closed: bool }; /// createPath takes a key_path and ensures that it exists. If any key at any point in the path does not exist then /// it will be created. All but the final key are created as tables - e.g. "foo.bar.baz" will ensure "foo" is a /// table in self.current_table, "bar" is a table in that. "baz" will be a value with an undefined type which the /// caller should fill in fn createPath( self: *Parser, key_path: []const []const u8, loc: lex.Loc, allow_opts: AllowOpts, source: Table.Source, ) !struct { table: *Table, value: *Value, existed: bool = false } { std.debug.assert(key_path.len > 0); var tbl = self.current_table; for (key_path, 0..) |k, i| { if (i == key_path.len - 1) { if (tbl.table.getPtr(k)) |val| { if (val.* == .table and !val.table.closed) return .{ .table = tbl, .value = val, .existed = true }; if (allow_opts.exists) { if (val.* != .array) return .{ .table = tbl, .value = val, .existed = true }; if (val.array.source == .@"inline") { self.diag = .{ .msg = "cannot extend inline arrays", .loc = loc }; return error.InlineTablesAndArraysAreImmutable; } var tbl_in_array = &val.array.array.items[val.array.array.items.len - 1]; return .{ .table = &tbl_in_array.table, .value = tbl_in_array, .existed = true }; } self.diag = .{ .msg = "key already exists", .loc = loc }; return error.KeyAlreadyExists; } const val = tbl.table.getPtr(k) orelse b: { const dup = try self.allocator.dupe(u8, k); errdefer self.allocator.free(dup); try tbl.table.put(self.allocator, dup, .{ .integer = 0xaa }); break :b tbl.table.getPtr(k) orelse unreachable; }; return .{ .table = tbl, .value = val }; } if (tbl.table.getPtr(k)) |val| { switch (val.*) { .table => |*t| { if (t.source == .@"inline") { self.diag = .{ .msg = "inline tables are immutable", .loc = loc }; return 
error.InlineTablesAndArraysAreImmutable; } if (t.closed and !allow_opts.closed) { self.diag = .{ .msg = "table already exists", .loc = loc }; return error.KeyAlreadyExists; } tbl = t; }, .array => |*a| { if (a.source == .@"inline") { self.diag = .{ .msg = "cannot extend inline arrays", .loc = loc }; return error.InlineTablesAndArraysAreImmutable; } tbl = &a.array.items[a.array.items.len - 1].table; }, else => { self.diag = .{ .msg = "key already exists and is not a table or array", .loc = loc }; return error.NotTableOrArray; }, } } else { const dup = try self.allocator.dupe(u8, k); errdefer self.allocator.free(dup); tbl = try tbl.getOrPutTable(self.allocator, dup, .{ .source = source, .closed = false }); } } unreachable; } /// parseAssignment parses a key/value assignment to `key`, followed by either a newline or EOF fn parseAssignment(self: *Parser, loc: lex.Loc, key: []const u8) !void { var al = std.ArrayList([]const u8).init(self.allocator); defer { for (al.items) |s| self.allocator.free(s); al.deinit(); } const new_loc = try self.parseKey(key, loc, &al); const res = try self.createPath(al.items, new_loc, AllowOpts{ .exists = false, .closed = false }, .assignment); if (res.table.closed) { self.diag = .{ .msg = "table already exists", .loc = loc }; return error.KeyAlreadyExists; } if (res.value.* == .table and res.existed) { self.diag = .{ .msg = "key already exists", .loc = loc }; return error.KeyAlreadyExists; } const val = res.value; try self.expect(.equals, "="); val.* = try self.parseValue(); if (val.* == .array and val.*.array.array.items.len == 0) { self.diag = .{ .msg = "inline arrays cannot be empty", .loc = loc }; return error.UnexpectedToken; } const next = self.peek(false) catch |err| switch (err) { error.EOF => return, else => return err, }; switch (next.tok) { .newline, .comma, .close_curly_brace => return, else => { self.diag = .{ .msg = "expected comma, newline or EOF after assignment", .loc = next.loc }; return error.UnexpectedToken; }, } } /// 
parseAssignmentEatNewline runs parseAssignment and then expects a newline. It is used for assignments on their /// own line fn parseAssignmentEatNewline(self: *Parser, loc: lex.Loc, key: []const u8) !void { try self.parseAssignment(loc, key); self.expect(.newline, "\n") catch |err| switch (err) { error.EOF => return, else => return err, }; } /// parseTableHeader parses "[<key>]\n" which specifies the next assignments should be in the table defined by <key> /// /// NOTE: Assumes "[" has already been parsed fn parseTableHeader(self: *Parser) !void { std.debug.assert(self.current_table == self.top_level_table); const tokloc = try self.pop(true); const key = switch (tokloc.tok) { .key => |k| k, .string => |s| s, else => { self.diag = .{ .msg = "expected key inside of square brackets", .loc = tokloc.loc }; return error.UnexpectedToken; }, }; var al = std.ArrayList([]const u8).init(self.allocator); defer { for (al.items) |s| self.allocator.free(s); al.deinit(); } const new_loc = try self.parseKey(key, tokloc.loc, &al); var res = try self.createPath(al.items, new_loc, AllowOpts{ .exists = false, .closed = true }, .header); if (!res.existed) res.value.* = .{ .table = .{ .source = .header, .closed = false } }; self.current_table = &res.value.table; try self.expect(.close_square_bracket, "]"); try self.expect(.newline, "\n"); } /// parseArrayHeaderKey parses the key inside an array header. It will ensure that all but the last key in the path /// exists as a table. It then returns the final table it created (or just self.current_table if the key inside the /// header is not a path) and the final key. 
/// /// NOTE: The caller is responsible for freeing the key in the result pub fn parseArrayHeaderKey(self: *Parser, key: []const u8, loc: lex.Loc) ParseError!struct { table: *Table, key: []const u8, } { var al = std.ArrayList([]const u8).init(self.allocator); defer { for (al.items) |s| self.allocator.free(s); al.deinit(); } const new_loc = try self.parseKey(key, loc, &al); std.debug.assert(al.items.len > 0); if (al.items.len == 1) return .{ .table = self.current_table, .key = try self.allocator.dupe(u8, al.items[0]) }; const all_but_last_key = al.items[0 .. al.items.len - 1]; var res = try self.createPath(all_but_last_key, new_loc, AllowOpts{ .exists = true, .closed = true }, .header); if (res.existed and res.value.* != .table) { self.diag = .{ .msg = "key already exists and is not a table", .loc = new_loc, }; return error.NotTable; } if (!res.existed) res.value.* = .{ .table = .{ .source = .header, .closed = false } }; return .{ .table = &res.value.table, .key = try self.allocator.dupe(u8, al.items[al.items.len - 1]) }; } /// ensureNoWhitespace returns an error if the current token was preceeded by some whitespace. fn ensureNoWhitespace(self: *Parser, loc: lex.Loc) !void { const src = self.lexer.source(); const index = locToIndex(src, loc); if (index == 0) return; if (std.ascii.isWhitespace(src[index - 1]) and src[index - 1] != '\n') { self.diag = .{ .msg = "unexpected whitespace", .loc = loc, }; return error.UnexpectedChar; } } /// parseArrayHeader parses "[[<key>]]\n" which specifies the next assignments should be in a table in the array /// <key>. loc is the location of the second '['. 
///
/// NOTE: Assumes "[[" has already been parsed
fn parseArrayHeader(self: *Parser, loc: lex.Loc) !void {
    // Headers are only legal at the top level; parse() resets current_table
    // before dispatching here.
    std.debug.assert(self.current_table == self.top_level_table);

    // "[ [" is not a valid array-of-tables opener.
    try self.ensureNoWhitespace(loc);

    const tokloc = try self.pop(true);
    const key = switch (tokloc.tok) {
        .key => |k| k,
        .string => |s| s,
        else => {
            self.diag = .{ .msg = "expected key inside of square brackets", .loc = tokloc.loc };
            return error.UnexpectedToken;
        },
    };

    var res = try self.parseArrayHeaderKey(key, tokloc.loc);

    // getOrPutArray takes ownership of res.key only when the key is new; if it
    // already existed we must free our duped copy ourselves (defer below).
    const existed = res.table.contains(res.key);
    var arr = b: {
        errdefer self.allocator.free(res.key);
        break :b try res.table.getOrPutArray(self.allocator, res.key, .header);
    };
    defer if (existed) self.allocator.free(res.key);

    // Each "[[...]]" header appends a fresh table; assignments that follow go
    // into that newest element.
    try arr.array.append(self.allocator, .{ .table = .{ .source = .header, .closed = false } });
    self.current_table = &arr.array.items[arr.array.items.len - 1].table;

    try self.expect(.close_square_bracket, "]");

    // "] ]" is likewise rejected before consuming the second bracket.
    const closing_bracket = try self.peek(false);
    if (closing_bracket.tok == .close_square_bracket) {
        try self.ensureNoWhitespace(closing_bracket.loc);
    }
    try self.expect(.close_square_bracket, "]");
    try self.expect(.newline, "\n");
}

/// parse consumes tokens until EOF, building up the top-level table. On EOF the
/// finished table is returned by value and the parser is reset with a fresh,
/// empty top-level table so it could parse again.
pub fn parse(self: *Parser) ParseError!Table {
    while (true) {
        const tokloc = self.pop(true) catch |err| switch (err) {
            error.EOF => {
                // Hand the finished table to the caller by value and re-arm
                // self with a new empty top-level table.
                const table = self.top_level_table.*;
                self.allocator.destroy(self.top_level_table);
                self.top_level_table = try self.allocator.create(Table);
                self.top_level_table.* = .{ .source = .top_level, .closed = false };
                self.current_table = self.top_level_table;
                return table;
            },
            else => return err,
        };

        switch (tokloc.tok) {
            .key => |k| try self.parseAssignmentEatNewline(tokloc.loc, k),
            .string => |s| try self.parseAssignmentEatNewline(tokloc.loc, s),
            .open_square_bracket => {
                // A new header closes the table the previous header opened and
                // resets parsing to the top level.
                if (self.current_table != self.top_level_table) self.current_table.close();
                self.current_table = self.top_level_table;

                // "[[" means array-of-tables, a single "[" a plain table header.
                const next = try self.peek(true);
                switch (next.tok) {
                    .open_square_bracket => {
                        _ = self.pop(true) catch unreachable;
                        try self.parseArrayHeader(next.loc);
                    },
                    else => try self.parseTableHeader(),
                }
            },
            .newline => {},
            else => {
                self.diag = .{
                    .msg = "expected key",
                    .loc = tokloc.loc,
                };
                return error.UnexpectedToken;
            },
        }
    }
}

pub fn deinit(self: *Parser) void {
    self.top_level_table.deinit(self.allocator);
    self.allocator.destroy(self.top_level_table);
    self.lexer.deinit();
}
};

/// parse takes a given TOML source and returns a Table which has been allocated with the given allocator.
pub fn parse(allocator: std.mem.Allocator, src: []const u8) ParseError!Table {
    var parser = try Parser.init(allocator, .{ .real = try lex.Lexer.init(allocator, src) });
    defer parser.deinit();

    return try parser.parse();
}

// A single expected key/value assignment used by the tests below.
const KV = struct { k: []const u8, v: Value };

/// kvsToTable takes a slice of KVs (i.e. assignments) and returns the table they would make
fn kvsToTable(kvs: []const KV) AllocError!Table {
    var table = Table{ .source = .assignment, .closed = false };
    for (kvs) |entry| {
        // Dupe both key and value so the resulting table owns its memory.
        const v = try entry.v.dupe(testing.allocator);
        try table.insert(testing.allocator, try testing.allocator.dupe(u8, entry.k), v);
    }
    return table;
}

/// toksToLocToks takes a slice of toks and gives them a dummy location
fn toksToLocToks(toks: []const lex.Tok) AllocError![]lex.TokLoc {
    const loc = lex.Loc{ .line = 1, .col = 1 };
    var al = std.ArrayListUnmanaged(lex.TokLoc){};
    for (toks) |tok| {
        try al.append(testing.allocator, .{ .tok = tok, .loc = loc });
    }
    return al.toOwnedSlice(testing.allocator);
}

/// testParse takes the given toks and parses them into a table
fn testParse(toks: []const lex.Tok) ParseError!Table {
    const toklocs = try toksToLocToks(toks);
    defer testing.allocator.free(toklocs);

    // Use the fake lexer so tests can feed tokens directly, bypassing lexing.
    const lexer = Lexer{ .fake = .{ .toklocs = toklocs } };
    var parser = try Parser.init(testing.allocator, lexer);
    defer parser.deinit();

    return try parser.parse();
}

// Asserts two tables have the same keys with equal values (strings compared by
// content, everything else by shallow equality).
fn expectEqualTables(expected: Table, actual: Table) !void {
    try testing.expectEqual(expected.table.count(), actual.table.count());

    var it = expected.table.iterator();
    while (it.next()) |entry| {
        const value = actual.table.get(entry.key_ptr.*);
        try testing.expect(value != null);

        try testing.expectEqual(std.meta.activeTag(entry.value_ptr.*), std.meta.activeTag(value.?));
        switch (entry.value_ptr.*) {
            .string => |s| try testing.expectEqualStrings(s, value.?.string),
            else => try testing.expectEqual(entry.value_ptr.*, value.?),
        }
    }
}

/// expectTableEqualTo asserts that an actual table is equal to the KV assignments expected
fn expectTableEqualTo(expected: []const KV, actual: Table) !void {
    var expected_table = try kvsToTable(expected);
    defer expected_table.deinit(testing.allocator);

    try expectEqualTables(expected_table, actual);
}

/// expectEqualParses parses toks and asserts that it gives the same value as expected, once expected is turned into a
/// table
fn expectEqualParses(toks: []const lex.Tok, expected: []const KV) !void {
    var parsed_table = try testParse(toks);
    defer parsed_table.deinit(testing.allocator);

    try expectTableEqualTo(expected, parsed_table);
}

/// expectErrorParse asserts that trying to parse toks gives err
fn expectErrorParse(err: anyerror, toks: []const lex.Tok) !void {
    const toklocs = try toksToLocToks(toks);
    defer testing.allocator.free(toklocs);

    const lexer = Lexer{ .fake = .{ .toklocs = toklocs } };
    var parser = try Parser.init(testing.allocator, lexer);
    defer parser.deinit();

    try testing.expectError(err, parser.parse());
}

test "default table assignment" {
    try expectEqualParses(
        &.{ .{ .key = "foo" }, .equals, .{ .string = "a" } },
        &.{.{ .k = "foo", .v = .{ .string = "a" } }},
    );
    try expectEqualParses(
        &.{ .{ .key = "foo" }, .equals, .{ .integer = 147 } },
        &.{.{ .k = "foo", .v = .{ .integer = 147 } }},
    );
    try expectEqualParses(
        &.{ .{ .key = "foo" }, .equals, .{ .boolean = true } },
        &.{.{ .k = "foo", .v = .{ .boolean = true } }},
    );
    try expectEqualParses(
        &.{ .{ .string = "foo" }, .equals, .{ .string = "a" }, .newline, .{ .key = "bar" }, .equals, .{ .string = "b" } },
        &.{
            .{ .k = "foo", .v = .{ .string = "a" } },
            .{ .k = "bar", .v = .{ .string = "b" } },
        },
    );
}

test "fail: default table assignment" {
    try expectErrorParse(error.UnexpectedToken, &.{.equals});
    try expectErrorParse(error.UnexpectedToken, &.{ .{ .key = "foo" }, .newline });
    // Missing newline between two assignments.
    try expectErrorParse(error.UnexpectedToken, &.{ .{ .string = "foo" }, .equals, .{ .string = "a" }, .{ .key = "bar" }, .equals, .{ .string = "b" } });
    try expectErrorParse(error.UnexpectedToken, &.{ .{ .key = "foo" }, .equals, .{ .key = "a" } });
    try expectErrorParse(error.UnexpectedToken, &.{ .{ .integer = 147 }, .equals, .{ .string = "a" } });
    try expectErrorParse(error.UnexpectedToken, &.{ .{ .boolean = true }, .equals, .{ .string = "a" } });
}

test "dotted assignment" {
    // zig fmt: off
    {
        var table = try testParse(&.{ .{ .key = "foo"}, .dot, .{ .key = "bar" }, .equals, .{ .string = "a" }});
        defer table.deinit(testing.allocator);
        try testing.expectEqualStrings("a", table.getTable("foo").?.getString("bar").?);
    }
    {
        var table = try testParse(&.{
            .{ .key = "foo"}, .dot, .{ .key = "bar" }, .equals, .{ .string = "a" }, .newline,
            .{ .key = "foo"}, .dot, .{ .key = "baz" }, .equals, .{ .string = "b" }, .newline,
        });
        defer table.deinit(testing.allocator);
        try testing.expectEqualStrings("a", table.getTable("foo").?.getString("bar").?);
        try testing.expectEqualStrings("b", table.getTable("foo").?.getString("baz").?);
    }
    {
        var table = try testParse(&.{
            .{ .key = "foo"}, .dot, .{ .key = "bar" }, .equals, .{ .string = "a" }, .newline,
            .{ .key = "foo"}, .dot, .{ .string = "baz baz" }, .equals, .{ .string = "b" }, .newline,
        });
        defer table.deinit(testing.allocator);
        try testing.expectEqualStrings("a", table.getTable("foo").?.getString("bar").?);
        try testing.expectEqualStrings("b", table.getTable("foo").?.getString("baz baz").?);
    }
    // zig fmt: on
}

test "fail: dotted assignment" {
    // Assigning the same dotted key twice is a duplicate-key error.
    try expectErrorParse(error.KeyAlreadyExists, &.{
        .{ .key = "foo" }, .dot, .{ .key = "bar" }, .equals, .{ .string = "a" }, .newline,
        .{ .key = "foo" }, .dot, .{ .key = "bar" }, .equals, .{ .string = "b" }, .newline,
    });
}

test "table header" {
    // zig fmt: off
    {
        var table = try testParse(&.{
            .open_square_bracket, .{ .key="foo" }, .close_square_bracket, .newline,
            .{ .key = "bar"}, .equals, .{ .string = "a" }, .newline,
            .{ .key = "baz" }, .equals, .{ .string = "b" }, .newline,
        });
        defer table.deinit(testing.allocator);
        try testing.expectEqualStrings("a", table.getTable("foo").?.getString("bar").?);
        try testing.expectEqualStrings("b", table.getTable("foo").?.getString("baz").?);
    }
    {
        var table = try testParse(&.{
            .open_square_bracket, .{ .key="foo" }, .close_square_bracket, .newline,
            .{ .key = "bar"}, .equals, .{ .string = "a" }, .newline,
            .newline, .newline, .newline,
            .{ .key = "baz" }, .equals, .{ .string = "b" }, .newline,
        });
        defer table.deinit(testing.allocator);
        try testing.expectEqualStrings("a", table.getTable("foo").?.getString("bar").?);
        try testing.expectEqualStrings("b", table.getTable("foo").?.getString("baz").?);
    }
    {
        var table = try testParse(&.{
            .open_square_bracket, .{ .key="foo" }, .dot, .{.key = "bar"}, .close_square_bracket, .newline,
            .{ .key = "baz" }, .equals, .{ .string = "a" }, .newline,
            .{ .key = "bat" }, .equals, .{ .string = "b" }, .newline,
        });
        defer table.deinit(testing.allocator);
        try testing.expectEqualStrings(
            "a",
            table.getTable("foo").?.getTable("bar").?.getString("baz").?);
        try testing.expectEqualStrings(
            "b",
            table.getTable("foo").?.getTable("bar").?.getString("bat").?);
    }
    {
        var table = try testParse(&.{
            .open_square_bracket, .{ .key="foo" }, .close_square_bracket, .newline,
            .{ .key = "a"}, .equals, .{ .integer = 1 }, .newline,
            .open_square_bracket, .{ .key="bar" }, .close_square_bracket, .newline,
            .{ .key = "b" }, .equals, .{ .integer = 2 }, .newline,
        });
        defer table.deinit(testing.allocator);
        try testing.expectEqual(@as(i64, 1), table.getTable("foo").?.getInteger("a").?);
        try testing.expectEqual(@as(i64, 2), table.getTable("bar").?.getInteger("b").?);
    }
    // zig fmt: on
}

test "fail: table header" {
    try expectErrorParse(error.EOF, &.{ .open_square_bracket, .{ .key = "foo" } });
    try expectErrorParse(error.UnexpectedToken, &.{ .open_square_bracket, .{ .key = "foo" }, .equals });
}

test "inline array" {
    // zig fmt: off
    {
        var table = try testParse(&.{.{ .key = "foo" }, .equals,
            .open_square_bracket, .{ .integer = 1 }, .comma, .{ .integer = 2 }, .close_square_bracket });
        defer table.deinit(testing.allocator);
        try testing.expectEqualSlices(Value, &.{ .{ .integer = 1 }, .{ .integer = 2 }}, table.getArray("foo").?.items());
    }
    {
        var table = try testParse(&.{.{ .key = "foo" }, .equals,
            .open_square_bracket, .{ .integer = 1 }, .newline, .newline, .comma, .newline, .{ .integer = 2 }, .close_square_bracket });
        defer table.deinit(testing.allocator);
        try testing.expectEqualSlices(Value, &.{ .{ .integer = 1 }, .{ .integer = 2 }}, table.getArray("foo").?.items());
    }
    {
        var table = try testParse(&.{.{ .key = "foo" }, .equals,
            .open_square_bracket, .{ .integer = 1 }, .comma, .{ .string = "bar" }, .close_square_bracket });
        defer table.deinit(testing.allocator);
        try testing.expectEqual(@as(usize, 2), table.getArray("foo").?.items().len);
        try testing.expectEqual(Value{ .integer = 1}, table.getArray("foo").?.items()[0]);
        try testing.expectEqualStrings("bar", table.getArray("foo").?.getString(1).?);
    }
    {
        var table = try testParse(&.{.{ .key = "foo" }, .equals,
            .open_square_bracket, .{ .integer = 1 }, .comma,
            .open_square_bracket, .{ .integer = 2 }, .comma, .{ .integer = 3 }, .close_square_bracket,
            .close_square_bracket });
        defer table.deinit(testing.allocator);
        try testing.expectEqual(@as(usize, 2), table.getArray("foo").?.items().len);
        try testing.expectEqual(Value{ .integer = 1}, table.getArray("foo").?.items()[0]);
        try testing.expectEqualSlices(Value, &.{ .{ .integer = 2 }, .{ .integer = 3 }}, table.getArray("foo").?.items()[1].array.items());
    }
    // zig fmt: on
}

test "fail: inline array" {
    try expectErrorParse(error.EOF, &.{ .{ .key = "foo" }, .equals, .open_square_bracket });
    try expectErrorParse(error.EOF, &.{ .{ .key = "foo" }, .equals, .open_square_bracket, .{ .integer = 1 } });
    // NOTE(review): this assertion duplicates the previous one exactly; it was
    // probably meant to end with a trailing .comma — TODO confirm intent.
    try expectErrorParse(error.EOF, &.{ .{ .key = "foo" }, .equals, .open_square_bracket, .{ .integer = 1 } });
}

test "arrays" {
    // zig fmt: off
    {
        var table = try testParse(&.{
            .open_square_bracket, .open_square_bracket, .{ .key="foo" }, .close_square_bracket, .close_square_bracket, .newline,
            .{ .key = "bar"}, .equals, .{ .string = "a" }, .newline,
            .open_square_bracket, .open_square_bracket, .{ .key="foo" }, .close_square_bracket, .close_square_bracket, .newline,
            .{ .key = "baz" }, .equals, .{ .string = "b" }, .newline,
        });
        defer table.deinit(testing.allocator);
        try testing.expectEqual(@as(usize, 1), table.table.count());

        const arr = table.getArray("foo").?;
        try testing.expectEqual(@as(usize, 2), arr.items().len);
        try testing.expectEqualStrings("a", arr.items()[0].table.getString("bar").?);
        try testing.expectEqualStrings("b", arr.items()[1].table.getString("baz").?);
    }
    {
        var table = try testParse(&.{
            .open_square_bracket, .open_square_bracket, .{ .key="foo" }, .dot, .{ .key = "bar" }, .close_square_bracket, .close_square_bracket, .newline,
            .{ .key = "a"}, .equals, .{ .integer = 1 }, .newline,
            .open_square_bracket, .open_square_bracket, .{ .key="foo" }, .dot, .{ .key = "bar" }, .close_square_bracket, .close_square_bracket, .newline,
            .{ .key = "b" }, .equals, .{ .integer = 2 }, .newline,
        });
        defer table.deinit(testing.allocator);
        try testing.expectEqual(@as(usize, 1), table.table.count());

        const arr = table.getTable("foo").?.getArray("bar").?;
        try testing.expectEqual(@as(usize, 2), arr.items().len);
        try testing.expectEqual(@as(i64, 1), arr.items()[0].table.getInteger("a").?);
        try testing.expectEqual(@as(i64, 2), arr.items()[1].table.getInteger("b").?);
    }
    // zig fmt: on
}

test "fail: arrays" {
    try expectErrorParse(error.EOF, &.{ .open_square_bracket, .open_square_bracket });
    try expectErrorParse(error.EOF, &.{ .open_square_bracket, .open_square_bracket, .{ .key = "foo" } });
    try expectErrorParse(
        error.EOF,
        &.{ .open_square_bracket, .open_square_bracket, .{ .key = "foo" }, .close_square_bracket },
    );
    // zig fmt: off
    // A "[foo]" header may not reuse a key already defined as an array of tables.
    try expectErrorParse(error.KeyAlreadyExists, &.{
        .open_square_bracket, .open_square_bracket, .{ .key="foo" }, .close_square_bracket, .close_square_bracket, .newline,
        .{ .key = "bar"}, .equals, .{ .string = "a" }, .newline,
        .open_square_bracket, .{ .key="foo" }, .close_square_bracket, .newline,
        .{ .key = "baz" }, .equals, .{ .string = "b" }, .newline,
    });
    // And vice versa: "[[foo]]" may not reuse a key already defined as a table.
    try expectErrorParse(error.NotArray, &.{
        .open_square_bracket, .{ .key="foo" }, .close_square_bracket, .newline,
        .{ .key = "baz" }, .equals, .{ .string = "b" }, .newline,
        .open_square_bracket, .open_square_bracket, .{ .key="foo" }, .close_square_bracket, .close_square_bracket, .newline,
        .{ .key = "bar"}, .equals, .{ .string = "a" }, .newline,
    });
    // zig fmt: on
}

test "array of tables" {
    // zig fmt: off
    {
        var table = try testParse(&.{
            .open_square_bracket, .open_square_bracket, .{ .key = "foo" }, .close_square_bracket, .close_square_bracket, .newline,
            .{ .key = "bar"}, .equals, .{ .string = "a" }, .newline,
            .open_square_bracket, .{ .key = "foo" }, .dot, .{ .key = "baz" }, .close_square_bracket, .newline,
            .{ .key = "bat" }, .equals, .{ .string = "b" }, .newline,
        });
        defer table.deinit(testing.allocator);
        try testing.expectEqual(@as(usize, 1), table.table.count());

        const arr = table.getArray("foo").?;
        try testing.expectEqual(@as(usize, 1), arr.items().len);
        try testing.expectEqualStrings("a", arr.items()[0].table.getString("bar").?);

        const inner_table = arr.items()[0].table.getTable("baz").?;
        try testing.expectEqualStrings("b", inner_table.getString("bat").?);
    }
    // zig fmt: on
}

test "fail: array of tables" {
    // zig fmt: off
    try expectErrorParse(error.NotArray, &.{
        .open_square_bracket, .{ .key="foo" }, .dot, .{ .key = "bar"}, .close_square_bracket, .newline,
        .{ .key = "baz" }, .equals, .{ .string = "b" }, .newline,
        .open_square_bracket, .open_square_bracket, .{ .key="foo" }, .close_square_bracket, .close_square_bracket, .newline,
        .{ .key = "bat"}, .equals, .{ .string = "a" }, .newline,
    });
    // zig fmt: on
}

test "inline tables" {
    // zig fmt: off
    {
        var table = try testParse(&.{
            .{ .key = "foo" }, .equals, .open_curly_brace,
            .{ .key = "bar"}, .equals, .{ .string = "a" }, .comma,
            .{ .key = "baz" }, .equals, .{ .string = "b" },
            .close_curly_brace,
        });
        defer table.deinit(testing.allocator);
        try testing.expectEqualStrings("a", table.getTable("foo").?.getString("bar").?);
        try testing.expectEqualStrings("b", table.getTable("foo").?.getString("baz").?);
    }
    {
        var table = try testParse(&.{
            .{ .key = "foo" }, .equals, .open_curly_brace,
            .{ .key = "bar"}, .dot, .{ .key = "baz"}, .equals, .{ .string = "a" },
            .close_curly_brace,
        });
        defer table.deinit(testing.allocator);
        try testing.expectEqualStrings(
            "a",
            table.getTable("foo").?.getTable("bar").?.getString("baz").?);
    }
    // zig fmt: on
}

test "fail: inline tables" {
    // zig fmt: off
    // Trailing commas and newlines are not allowed inside inline tables.
    try expectErrorParse(error.UnexpectedToken, &.{
        .{ .key = "foo" }, .equals, .open_curly_brace,
        .{ .key = "bar"}, .equals, .{ .string = "a" }, .comma,
        .close_curly_brace,
    });
    try expectErrorParse(error.UnexpectedToken, &.{
        .{ .key = "foo" }, .equals, .open_curly_brace, .newline,
        .{ .key = "bar"}, .equals, .{ .string = "a" },
        .close_curly_brace,
    });
    try expectErrorParse(error.UnexpectedToken, &.{
        .{ .key = "foo" }, .equals, .open_curly_brace,
        .{ .key = "bar"}, .equals, .{ .string = "a" }, .comma, .newline,
        .close_curly_brace,
    });
    // zig fmt: on
}
0
repos/tomlz
repos/tomlz/src/fuzz.zig
const std = @import("std");
const testing = std.testing;
const lex = @import("lexer.zig");
const parser = @import("parser.zig");

// C-callable entry point for the fuzzing harness; any Zig error aborts the run
// so the fuzzer registers a crash.
export fn cmain() void {
    main() catch unreachable;
}

/// Reads a TOML document from stdin and parses it, printing (not failing on)
/// parse errors. Memory errors are the only failures: the GPA assert at exit
/// turns any leak into a crash the fuzzer can catch.
pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer std.debug.assert(gpa.deinit() == .ok);
    var allocator = gpa.allocator();

    const stdin = std.io.getStdIn();
    // Unbounded read: the fuzzer controls input size.
    const data = try stdin.readToEndAlloc(allocator, std.math.maxInt(usize));
    defer allocator.free(data);

    const lexer = parser.Lexer{ .real = try lex.Lexer.init(allocator, data) };
    var p = try parser.Parser.init(allocator, lexer);
    defer p.deinit();

    // Parse failures are expected on fuzzed input — log the error plus the
    // parser diagnostic and exit cleanly.
    var table = p.parse() catch |err| {
        std.debug.print("error parsing {}\n", .{err});
        std.debug.print("{?}\n", .{p.diag});
        return;
    };
    defer table.deinit(allocator);
}
0
repos/tomlz
repos/tomlz/src/print_failing_tests.zig
const std = @import("std");
const testing = std.testing;
const lexer = @import("lexer.zig");
const parser = @import("parser.zig");
const e2e = @import("end_to_end.zig");
const standard_tests = @import("standard_tests.zig");

// Parses the TOML file at dir/basename and returns the resulting table.
fn testFile(gpa: std.mem.Allocator, dir: *const std.fs.Dir, basename: []const u8) !parser.Table {
    var f = try dir.openFile(basename, .{});
    defer f.close();

    // 5 MiB cap — test fixtures are tiny, so this is just a safety limit.
    var contents = try f.reader().readAllAlloc(gpa, 5 * 1024 * 1024);
    defer gpa.free(contents);

    return try parser.parse(gpa, contents);
}

// Returns true (and prints the path) when an *invalid* fixture unexpectedly
// parses successfully, i.e. when the test fails.
fn parseInvalid(gpa: std.mem.Allocator, dir: *const std.fs.Dir, basename: []const u8) !bool {
    var full_path = try dir.realpathAlloc(gpa, basename);
    defer gpa.free(full_path);

    // A parse error is the expected outcome for an invalid fixture.
    var tbl = testFile(gpa, dir, basename) catch return false;
    defer tbl.deinit(gpa);

    std.debug.print("{s}\n", .{full_path});
    return true;
}

// Returns true (and prints the path) when a *valid* fixture either fails to
// parse or does not match its companion JSON expectation file.
fn parseValid(gpa: std.mem.Allocator, dir: *const std.fs.Dir, basename: []const u8) !bool {
    var full_path = try dir.realpathAlloc(gpa, basename);
    defer gpa.free(full_path);

    var tbl = testFile(gpa, dir, basename) catch {
        std.debug.print("{s}\n", .{full_path});
        return true;
    };
    defer tbl.deinit(gpa);

    var value = .{ .table = tbl };
    var actual = try e2e.tomlValueToJson(gpa, &value);

    // Derive "<name>.json" from "<name>.toml" by overwriting the last four
    // bytes — assumes basename ends in ".toml" (guaranteed by the caller's
    // endsWith filter).
    var json_path = try gpa.dupe(u8, basename);
    defer gpa.free(json_path);
    std.mem.copy(u8, json_path[basename.len - 4 ..], "json");

    var f = try dir.openFile(json_path, .{});
    defer f.close();

    var contents = try f.reader().readAllAlloc(gpa, 5 * 1024 * 1024);
    defer gpa.free(contents);

    var json_parser = std.json.Parser.init(gpa, false);
    defer json_parser.deinit();

    var expected = try json_parser.parse(contents);
    defer expected.deinit();

    if (!try standard_tests.jsonEquality(gpa, &actual, &expected.root)) {
        std.debug.print("{s}\n", .{full_path});
        return true;
    }

    return false;
}

/// Walks tests/invalid and tests/valid, printing the path of every fixture the
/// parser currently gets wrong. Intended as a dev utility, not a test runner.
pub fn main() !void {
    var gpa = std.heap.page_allocator;

    {
        std.debug.print("Invalid\n", .{});

        var dir = try std.fs.cwd().makeOpenPathIterable("tests/invalid", .{});
        defer dir.close();

        var walker = try dir.walk(gpa);
        defer walker.deinit();

        while (try walker.next()) |entry| {
            if (entry.kind != .File) continue;

            _ = try parseInvalid(gpa, &entry.dir, entry.basename);
        }
    }

    {
        std.debug.print("Valid\n", .{});

        var dir = try std.fs.cwd().makeOpenPathIterable("tests/valid", .{});
        defer dir.close();

        var walker = try dir.walk(gpa);
        defer walker.deinit();

        while (try walker.next()) |entry| {
            if (entry.kind != .File) continue;
            // Skip the companion .json expectation files.
            if (!std.mem.endsWith(u8, entry.basename, "toml")) continue;

            _ = try parseValid(gpa, &entry.dir, entry.basename);
        }
    }
}
0
repos/tomlz
repos/tomlz/src/serializer.zig
const std = @import("std");
// NOTE(review): "ascci" is a typo of "ascii"; it is referenced later in this
// file, so renaming it would require updating those uses too.
const ascci = std.ascii;
const Allocator = std.mem.Allocator;

/// Serialize a value to the given out_stream.
///
/// Use this when you want to write a struct as the root table. When serializing
/// e.g. a number use `serializeKeyValue` instead, because in that case a key is required.
///
/// For a fixed-depth version that doesn't require an allocator, see `serializeFixedDepth`.
///
/// # Example
/// ```
/// const std = @import("std");
/// const tomlz = @import("tomlz");
///
/// var gpa_instance = std.heap.GeneralPurposeAllocator(.{}){};
/// defer _ = gpa_instance.deinit();
///
/// const point = .{
///     .x=4,
///     .y=2,
/// };
///
/// try tomlz.serialize(
///     gpa_instance.allocator(),
///     std.io.getStdOut().writer(),
///     point,
/// );
///
/// // Output:
/// // x = 4
/// // y = 2
/// ```
pub fn serialize(
    allocator: Allocator,
    out_stream: anytype,
    value: anytype,
) (@TypeOf(out_stream).Error || SerializeError || Allocator.Error)!void {
    var toml_writer = writeStream(allocator, out_stream);
    defer toml_writer.deinit();
    try toml_writer.write(value);
}

/// Same as `serialize`, except does not require an allocator.
///
/// The `depth` is the maximum amount of nested tables the writer can handle.
/// A struct with just numbers as fields has depth 1. As soon as one of those fields
/// is a struct itself, the depth is 2 and so on.
/// For most use cases a max-depth like 64 or even 32 should be enough, but as it isn't completely universal
/// it's not the default.
pub fn serializeFixedDepth(
    comptime depth: usize,
    out_stream: anytype,
    value: anytype,
) (@TypeOf(out_stream).Error || SerializeError)!void {
    var toml_writer = writeStreamFixedDepth(depth, out_stream);
    defer toml_writer.deinit();
    try toml_writer.write(value);
}

/// Serialize a key-value pair to the given out_stream.
///
/// If you want to write a struct as the root table, see `serialize`.
///
/// For a fixed-depth version that doesn't require an allocator, see `serializeKeyValueFixedDepth`.
///
/// # Example
/// ```
/// const std = @import("std");
/// const tomlz = @import("tomlz");
///
/// var gpa_instance = std.heap.GeneralPurposeAllocator(.{}){};
/// defer _ = gpa_instance.deinit();
///
/// const mynumber = 42;
///
/// try tomlz.serializeKeyValue(
///     gpa_instance.allocator(),
///     std.io.getStdOut().writer(),
///     "some_key",
///     mynumber
/// );
///
/// // Output:
/// // some_key = 42
/// ```
pub fn serializeKeyValue(
    allocator: Allocator,
    out_stream: anytype,
    key: []const u8,
    value: anytype,
) (@TypeOf(out_stream).Error || SerializeError || Allocator.Error)!void {
    var toml_writer = writeStream(allocator, out_stream);
    defer toml_writer.deinit();
    try toml_writer.writeKeyValue(key, value);
}

/// Same as `serializeKeyValue`, except does not require an allocator.
///
/// The `depth` is the maximum amount of nested tables the writer can handle.
/// A struct with just numbers as fields has depth 1. As soon as one of those fields
/// is a struct itself, the depth is 2 and so on.
/// For most use cases a max-depth like 64 or even 32 should be enough, but as it isn't completely universal
/// it's not the default.
pub fn serializeKeyValueFixedDepth( comptime depth: usize, out_stream: anytype, key: []const u8, value: anytype, ) (@TypeOf(out_stream).Error || SerializeError)!void { var toml_writer = writeStreamFixedDepth(depth, out_stream); defer toml_writer.deinit(); try toml_writer.writeKeyValue(key, value); } pub fn writeStream( allocator: Allocator, out_stream: anytype, ) WriteStream(@TypeOf(out_stream), .arbitrary) { return WriteStream(@TypeOf(out_stream), .arbitrary).init( allocator, out_stream, ); } pub fn writeStreamFixedDepth( comptime depth: usize, out_stream: anytype, ) WriteStream(@TypeOf(out_stream), .{ .fixed = depth }) { return WriteStream( @TypeOf(out_stream), .{ .fixed = depth }, ).init( undefined, out_stream, ); } pub const SerializeError = error{ NoKey, /// Can only occur if the WriteStream in use is not arbitrary depth /// (This is basically OutOfMemory in that case) MaxDepthReached, }; pub fn WriteStream( comptime OutStream: type, comptime max_depth: union(enum) { arbitrary, fixed: usize, }, ) type { return struct { const Self = @This(); const Error = switch (max_depth) { .arbitrary => OutStream.Error || SerializeError || Allocator.Error, .fixed => OutStream.Error || SerializeError, }; out_stream: OutStream, /// Keeps track of all the sub-keys making up the current key. /// /// To properly write a tables key, we also need to know all previous keys /// that have led us to the current one(e.g. "tomlz.is.awesome", "awesome" being the key of the current table). /// Every time we write a value to a key, we also push that key onto the key-stack so /// we can later still find it. If we're done writing the value we pop the key again. /// /// Technically we'd only need to do this when actually writing a table or an array of tables /// but for simplicity's sake we always do it, even if just writing a number. /// /// If this is a fixed-depth Writer we also use an array instead of an arraylist to /// remove the need for an allocator. 
This comes with the disadvantage of a writer of depth 4 /// not being able to handle a key like "one.two.three.four.five". /// /// Does NOT own the actual sub-keys, only holds pointers to them! key_stack: switch (max_depth) { .arbitrary => std.ArrayList([]const u8), .fixed => |depth| [depth]?[]const u8, }, /// Points to the top of the key-stack plus 1(the next free slot) /// /// This is required in case of a fixed-depth writer, but is also updated /// for arbitrary-depth writers for more readable code. stack_pointer: usize = 0, /// Counts the number of tables we have descended into within an array. /// /// If a table is inside an array, its key needs to be in double parentheses. /// The array needs to relay this information to its children, and also needs to "turn it off" /// when done. A simple flag would not be able to effectively represent this, because in the case /// that one of those children has another array of tables as a field, that array might deactivate the /// flag too early. Recursion could have handled this with a flag, but custom serialize functions /// would have had to pass that along and that would have been ugly. array_depth: usize = 0, /// Create a new WriteStream. If this is fixed-depth, key_allocator can be undefined. pub fn init(key_allocator: Allocator, out_stream: OutStream) Self { return .{ .out_stream = out_stream, .key_stack = switch (max_depth) { .arbitrary => std.ArrayList([]const u8).init(key_allocator), .fixed => |depth| [_]?[]const u8{null} ** depth, }, }; } /// Write a value to a key. If you want to write a struct as the root table, /// see `write` instead. /// /// Does NOT take ownership of the key. /// /// Convenience wrapper around /// ``` /// pushKey() /// write() /// popKey() /// ``` pub fn writeKeyValue(self: *Self, key: []const u8, value: anytype) Error!void { try self.pushKey(key); defer self.popKey(); try self.write(value); } /// Writes a value without a key, only works for tables. See `writeKeyValue` otherwise. 
pub fn write(self: *Self, value: anytype) Error!void { const T = @TypeOf(value); return switch (@typeInfo(T)) { .Int, .ComptimeInt, .Float, .ComptimeFloat, .Bool, .Enum, .EnumLiteral, .ErrorSet, => { try self.beginAssignment(); try self.writeInline(value); try self.out_stream.writeByte('\n'); }, .Optional => { if (value) |payload| { return self.write(payload); } }, .Struct => { if (std.meta.hasFn(T, "tomlzSerialize")) { return value.tomlzSerialize(self); } return self.writeTable(value); }, .Union => { if (std.meta.hasFn(T, "tomlzSerialize")) { return value.tomlzSerialize(self); } const info = @typeInfo(T).Union; if (info.tag_type) |UnionTagType| { inline for (info.fields) |u_field| { if (value == @field(UnionTagType, u_field.name)) { try self.write(@field(value, u_field.name)); break; } } else { unreachable; // No active tag? } return; } else { @compileError("Unable to serialize untagged union '" ++ @typeName(T) ++ "'"); } }, .Pointer => |ptr_info| switch (ptr_info.size) { .One => switch (@typeInfo(ptr_info.child)) { .Array => { // Coerce `*[N]T` to `[]const T`. const Slice = []const std.meta.Elem(ptr_info.child); return self.write(@as(Slice, value)); }, else => { return self.write(value.*); }, }, .Many, .Slice => { if (ptr_info.size == .Many and ptr_info.sentinel == null) @compileError("Unable to serialize type '" ++ @typeName(T) ++ "' without sentinel"); const slice = if (ptr_info.size == .Many) std.mem.span(value) else value; if (comptime canInline(T)) { try self.beginAssignment(); try self.writeInline(slice); try self.out_stream.writeByte('\n'); return; } self.array_depth += 1; for (slice) |elem| { try self.write(elem); } self.array_depth -= 1; }, else => @compileError("Unable to serialize type '" ++ @typeName(T) ++ "'."), }, .Array => { // Coerce `[N]T` to `*const [N]T` (and then to `[]const T`). 
return self.write(&value); }, .Vector => |info| { const array: [info.len]info.child = value; return self.write(&array); }, .Void => {}, else => @compileError("Unable to serialize type '" ++ @typeName(T) ++ "'."), }; } /// Writes "raw" values, e.g. "5" instead of "value = 5" fn writeInline(self: *Self, value: anytype) Error!void { const T = @TypeOf(value); return switch (@typeInfo(T)) { .Int => |info| { if (info.bits > 64) { @compileError("Unable to serialize type '" ++ @typeName(T) ++ "'."); } return self.out_stream.print("{}", .{value}); }, .Float => |info| { if (info.bits > 64) { @compileError("Unable to serialize type '" ++ @typeName(T) ++ "'."); } return self.out_stream.print("{}", .{value}); }, .ComptimeInt => return self.writeInline(@as(std.math.IntFittingRange(value, value), value)), .ComptimeFloat => return self.out_stream.print("{}", .{value}), .Bool => return self.out_stream.print("{}", .{value}), .Enum, .EnumLiteral => { return self.out_stream.print("\"{s}\"", .{@tagName(value)}); }, .ErrorSet => return self.out_stream.print("\"{s}\"", .{@errorName(value)}), .Array => { // Coerce `[N]T` to `*const [N]T` (and then to `[]const T`). return self.writeInline(&value); }, .Vector => |info| { const array: [info.len]info.child = value; return self.writeInline(&array); }, .Pointer => |ptr_info| switch (ptr_info.size) { .One => switch (@typeInfo(ptr_info.child)) { .Array => { // Coerce `*[N]T` to `[]const T`. const Slice = []const std.meta.Elem(ptr_info.child); return self.writeInline(@as(Slice, value)); }, else => { return self.writeInline(value.*); }, }, .Many, .Slice => { if (ptr_info.size == .Many and ptr_info.sentinel == null) @compileError("Unable to serialize type '" ++ @typeName(T) ++ "' without sentinel"); const slice = if (ptr_info.size == .Many) std.mem.span(value) else value; // This is a []const u8, or some similar Zig string. 
if (ptr_info.child == u8 and std.unicode.utf8ValidateSlice(slice)) { return self.out_stream.print("\"{s}\"", .{value}); } try self.out_stream.writeByte('['); var i: usize = 0; while (i < slice.len - 1) : (i += 1) { try self.writeInline(slice[i]); try self.out_stream.writeAll(", "); } try self.writeInline(slice[i]); try self.out_stream.writeByte(']'); }, else => @compileError("Inlining value of type '" ++ @typeName(T) ++ "' is not supported"), }, else => @compileError("Inlining value of type '" ++ @typeName(T) ++ "' is not supported"), }; } fn writeTable(self: *Self, value: anytype) Error!void { const T = @TypeOf(value); const S = @typeInfo(T).Struct; if (S.fields.len == 1 and @typeInfo(S.fields[0].type) == .Struct) { const field = S.fields[0]; try self.pushKey(field.name); try self.writeTable(@field(value, field.name)); self.popKey(); return; } try self.beginTable(); inline for (S.fields) |Field| { if (comptime !canInline(Field.type)) continue; try self.pushKey(Field.name); try self.beginAssignment(); try self.writeInline(@field(value, Field.name)); try self.out_stream.writeByte('\n'); self.popKey(); } inline for (S.fields) |Field| { if (comptime canInline(Field.type)) continue; try self.pushKey(Field.name); try self.write(@field(value, Field.name)); self.popKey(); } } pub fn pushKey(self: *Self, key: []const u8) Error!void { switch (max_depth) { .arbitrary => try self.key_stack.append(key), .fixed => { if (self.stack_pointer == self.key_stack.len) return error.MaxDepthReached; self.key_stack[self.stack_pointer] = key; }, } self.stack_pointer += 1; } pub fn popKey(self: *Self) void { switch (max_depth) { .arbitrary => _ = self.key_stack.pop(), .fixed => {}, } self.stack_pointer -= 1; } /// Returns a reference to the sub-key at the given position. /// /// Prefer this over accessing the key-stack directly, as this abstracts over /// the comptime distinction between fixed and arbitrary depth. 
/// /// NOTE: This assumes the index to be valid and might /// return a null pointer if this is fixed-depth(and safety checks are off) fn getSubKey(self: *const Self, index: usize) []const u8 { return switch (max_depth) { .arbitrary => self.key_stack.items[index], .fixed => self.key_stack[index].?, }; } /// Writes a single sub-key, correctly escaping it if it is non-bare fn writeSubKey(self: *Self, sub_key: []const u8) Error!void { var is_bare = true; for (sub_key) |char| { if (ascci.isAlphanumeric(char)) continue; if (char != '_' and char != '-') { is_bare = false; break; } } if (!is_bare) { try self.out_stream.writeByte('"'); } try self.out_stream.writeAll(sub_key); if (!is_bare) { try self.out_stream.writeByte('"'); } } /// Writes the beginning of an assignment, e.g. "mykey = ". /// /// There are only two cases where you want to use this /// 1. You have multiple values you want to write as a single one, e.g. concatenating /// two strings. Call this, then use the underlying `out_stream` aferwards. /// /// 2. Implement a date-time serializer, because in that case you dont want /// to surround your value with quotation marks. /// /// Everything else should be handled by `write` and `writeKeyValue`. pub fn beginAssignment(self: *Self) Error!void { if (self.stack_pointer == 0) return error.NoKey; try self.writeSubKey(self.getSubKey(self.stack_pointer - 1)); try self.out_stream.writeAll(" = "); } /// Write a table header. You only need to bother with this when implementing a custom /// serialize function. /// /// Automatically handles enclosing the key in double parentheses if inside /// an array. 
pub fn beginTable(self: *Self) Error!void { // this is the root table if (self.stack_pointer == 0) return; if (self.array_depth > 0) { try self.out_stream.writeAll("[["); } else { try self.out_stream.writeByte('['); } var i: usize = 0; while (i < self.stack_pointer - 1) : (i += 1) { try self.writeSubKey(self.getSubKey(i)); try self.out_stream.writeByte('.'); } try self.writeSubKey(self.getSubKey(self.stack_pointer - 1)); if (self.array_depth > 0) { try self.out_stream.writeAll("]]"); } else { try self.out_stream.writeByte(']'); } try self.out_stream.writeByte('\n'); } // If this is an arbitrary-depth writer, frees the key-stack. // // Does NOT free the keys themselves. pub fn deinit(self: *Self) void { if (max_depth == .arbitrary) self.key_stack.deinit(); self.* = undefined; } }; } fn canInline(comptime T: type) bool { return switch (@typeInfo(T)) { .Int, .ComptimeInt, .Float, .ComptimeFloat, .Bool, .Enum, .ErrorSet, => true, .Pointer => |info| canInline(info.child), .Array => |info| canInline(info.child), .Vector => |info| canInline(info.child), else => false, }; } const testing = std.testing; fn testWriteStream(value: anytype, key: ?[]const u8, expected: []const u8) !void { var buffer = std.ArrayList(u8).init(testing.allocator); defer buffer.deinit(); const writer = buffer.writer(); if (key) |payload| { try serializeKeyValue(testing.allocator, writer, payload, value); } else { try serialize(testing.allocator, writer, value); } try testing.expectEqualStrings(expected, buffer.items); } fn testWriteStreamFailure(value: anytype, key: ?[]const u8, err: anyerror) !void { var buffer = std.ArrayList(u8).init(testing.allocator); defer buffer.deinit(); const writer = buffer.writer(); var stream = writeStream(testing.allocator, writer); defer stream.deinit(); if (key) |payload| { try stream.pushKey(payload); } try stream.write(value); try testing.expectError(err, stream.write(value)); } test "encode basic types" { // integers try testWriteStream(42, "truth", "truth = 
42\n"); // this tests comptime_int try testWriteStream(@as(u16, 42), "truth", "truth = 42\n"); // unrepresentable integers fail at compile time // floats try testWriteStream(@as(f64, 13.37), "value", "value = 1.337e1\n"); try testWriteStream(13.37, "value", "value = 1.337e1\n"); // unrepresentable floats fail at compile time // bools try testWriteStream(false, "truth", "truth = false\n"); // enums const SomeEnum = enum { SomeState }; try testWriteStream(SomeEnum.SomeState, "value", "value = \"SomeState\"\n"); try testWriteStream(.SomeState, "value", "value = \"SomeState\"\n"); // error sets const SomeError = error{TerriblyWrong}; try testWriteStream(SomeError.TerriblyWrong, "value", "value = \"TerriblyWrong\"\n"); // pointers try testWriteStream(&&5, "value", "value = 5\n"); // optionals try testWriteStream(@as(?u16, 42), "value", "value = 42\n"); // strings try testWriteStream("test", "value", "value = \"test\"\n"); } test "encode arrays" { const test_array = [_]u16{ 1, 2, 3 }; // arrays try testWriteStream(test_array, "value", "value = [1, 2, 3]\n"); // slices try testWriteStream(test_array[0..], "value", "value = [1, 2, 3]\n"); // vectors try testWriteStream(@Vector(3, u16){ 1, 2, 3 }, "value", "value = [1, 2, 3]\n"); // array of arrays const array_of_arrays = [_][3]u16{ test_array, test_array }; try testWriteStream(array_of_arrays, "value", "value = [[1, 2, 3], [1, 2, 3]]\n"); } test "encode union" { const MyUnion = union(enum) { one: u16, two: u16, }; try testWriteStream(MyUnion{ .two = 2 }, "value", "value = 2\n"); } test "encode table" { // empty table try testWriteStream(.{}, "empty", "[empty]\n"); // "root" table try testWriteStream(.{ .field1 = 1, .field2 = 2, }, null, \\field1 = 1 \\field2 = 2 \\ ); // nested table try testWriteStream(.{ .field1 = 1, .child = .{ .field3 = 3, }, .field2 = 2, }, null, \\field1 = 1 \\field2 = 2 \\[child] \\field3 = 3 \\ ); // nested table with transparent ancestor try testWriteStream(.{ .parent = .{ .child = .{} } }, null, 
\\[parent.child] \\ ); } test "encode array of tables" { const MyStruct = struct { field1: u16 = 1, field2: u16 = 2, }; const array_of_tables = [_]MyStruct{ .{}, .{} }; try testWriteStream(array_of_tables, "arr", \\[[arr]] \\field1 = 1 \\field2 = 2 \\[[arr]] \\field1 = 1 \\field2 = 2 \\ ); } test "encode array of nested tables" { const A = struct { content: []const u8 }; const B = struct { a: A, }; // array of tables with tables as fields const array_of_tables = [_]B{ .{ .a = .{ .content = "never" } }, .{ .a = .{ .content = "gonna" } }, }; try testWriteStream(array_of_tables, "arr", \\[[arr.a]] \\content = "never" \\[[arr.a]] \\content = "gonna" \\ ); // array of tables with tables as fields and arrays of tables as fields(ultimate stress test) const C = struct { b: B, as: [3]A, }; const stress_test = [_]C{ .{ .b = .{ .a = .{ .content = "give" } }, .as = [_]A{ .{ .content = "you" }, .{ .content = "up" }, .{ .content = "never" }, }, }, .{ .b = .{ .a = .{ .content = "gonna" } }, .as = [_]A{ .{ .content = "let" }, .{ .content = "you" }, .{ .content = "down" }, }, }, }; try testWriteStream(stress_test, "arr", \\[[arr]] \\[[arr.b.a]] \\content = "give" \\[[arr.as]] \\content = "you" \\[[arr.as]] \\content = "up" \\[[arr.as]] \\content = "never" \\[[arr]] \\[[arr.b.a]] \\content = "gonna" \\[[arr.as]] \\content = "let" \\[[arr.as]] \\content = "you" \\[[arr.as]] \\content = "down" \\ ); } test "encode with custom function" { const A = struct { pub fn tomlzSerialize(self: *const @This(), stream: anytype) !void { _ = self; try stream.beginTable(); try stream.writeKeyValue("i_dont", "exist"); } }; try testWriteStream(A{}, null, \\i_dont = "exist" \\ ); } test "encode tomlz table" { const tomlz = @import("main.zig"); const Table = tomlz.Table; const Value = tomlz.Value; var table = Table{ .source = .top_level, .closed = false, }; defer table.deinit(testing.allocator); try table.table.put( testing.allocator, try testing.allocator.dupe(u8, "somevalue"), Value{ .integer = 42 }, 
); try table.table.put( testing.allocator, try testing.allocator.dupe(u8, "somestring"), Value{ .string = try testing.allocator.dupe(u8, "notastring") }, ); try testWriteStream(table, null, \\somestring = "notastring" \\somevalue = 42 \\ ); } test "encode correctly quote keys" { try testWriteStream(42, "ASCII_encoded-key42", "ASCII_encoded-key42 = 42\n"); try testWriteStream(42, "mr🐢turtle", "\"mr🐢turtle\" = 42\n"); } test "test write stream fixed depth" { { var buffer = std.ArrayList(u8).init(testing.allocator); defer buffer.deinit(); const writer = buffer.writer(); try serializeKeyValueFixedDepth(2, writer, "mykey", .{ .one = 1, .two = 2, .three = 3 }); try testing.expectEqualStrings( \\[mykey] \\one = 1 \\two = 2 \\three = 3 \\ , buffer.items); } { var buffer = std.ArrayList(u8).init(testing.allocator); defer buffer.deinit(); const writer = buffer.writer(); var stream = writeStreamFixedDepth(2, writer); defer stream.deinit(); const result = stream.writeKeyValue("mykey", .{ .one = 1, .child = .{ // oh no! .two = 2, }, }); try testing.expectError(error.MaxDepthReached, result); } } test "encoding works at comptime" { comptime { var alloc_buffer = [_]u8{0} ** 32; var fba = std.heap.FixedBufferAllocator.init(&alloc_buffer); const alloc = fba.allocator(); var buffer = std.ArrayList(u8).init(alloc); defer buffer.deinit(); const writer = buffer.writer(); try serializeKeyValueFixedDepth(1, writer, "key", "value"); if (!std.mem.eql(u8, alloc_buffer[0..14], "key = \"value\"\n")) { @compileLog("WriteStream no longer works at comptime. expected 'key = \"value\"\n' found '" ++ alloc_buffer ++ "'(includes garbage data, ignore all \\x00)"); } } }
0
repos/tomlz
repos/tomlz/src/integration_tests.zig
const std = @import("std"); const parser = @import("parser.zig"); const testing = std.testing; const failing_invalid_tests = [_][]const u8{}; const failing_valid_tests = [_][]const u8{ "comment/everywhere.toml", "spec-example-1.toml", "array/array.toml", "example.toml", "spec-example-1-compact.toml", "datetime/milliseconds.toml", "datetime/local-date.toml", "datetime/timezone.toml", "datetime/local-time.toml", "datetime/local.toml", "datetime/datetime.toml", }; const dbg = false; fn jsonValueEquality(actual: *const std.json.Value, expected: *const std.json.Value) bool { if (dbg) { std.debug.print("====\n", .{}); actual.dump(); std.debug.print("\n", .{}); expected.dump(); std.debug.print("\n====\n", .{}); } switch (actual.*) { .string => |s| return std.mem.eql(u8, s, expected.string), .integer => |i| return i == expected.integer, .bool => |a| return a == expected.bool, .float => |f| return switch (expected.*) { .float => |f2| f == f2, .integer => |i| f == @as(f64, @floatFromInt(i)), else => false, }, .null, .number_string => return false, else => return false, } } pub fn jsonEquality(gpa: std.mem.Allocator, actual: *const std.json.Value, expected: *const std.json.Value) !bool { if (expected.* == .object and expected.object.contains("type")) { const t = expected.object.get("type") orelse unreachable; const s = expected.object.get("value") orelse unreachable; if (std.mem.eql(u8, t.string, "string")) { if (actual.* != .string) return false; return std.mem.eql(u8, actual.string, s.string); } if (std.mem.eql(u8, t.string, "float")) { if (actual.* != .float) return false; if (std.mem.eql(u8, s.string, "inf") or (std.mem.eql(u8, s.string, "+inf") or (std.mem.eql(u8, s.string, "-inf")))) return std.math.inf(f64) == actual.float; if (std.mem.eql(u8, s.string, "nan") or (std.mem.eql(u8, s.string, "+nan") or (std.mem.eql(u8, s.string, "-nan")))) return std.math.isNan(actual.float); } var parsed = std.json.parseFromSlice(std.json.Value, gpa, s.string, .{}) catch { if (dbg) 
std.debug.print("could not parse '{s}'", .{s.string}); return false; }; defer parsed.deinit(); return jsonValueEquality(actual, &parsed.value); } switch (actual.*) { .array => { const arr_actual = actual.array.items; const arr_expected = expected.array.items; if (arr_actual.len != arr_expected.len) return false; for (arr_actual, 0..) |value_a, i| { const value_e = arr_expected[i]; if (dbg) std.debug.print("index: {}\n", .{i}); if (!try jsonEquality(gpa, &value_a, &value_e)) return false; } return true; }, .object => { var tbl_a = actual.object; var tbl_e = expected.object; if (tbl_a.count() != tbl_e.count()) { if (dbg) std.debug.print("wrong count\n", .{}); return false; } var it = tbl_a.iterator(); while (it.next()) |entry_a| { if (dbg) std.debug.print("key: {s}\n", .{entry_a.key_ptr.*}); const value_e = tbl_e.get(entry_a.key_ptr.*) orelse return false; if (!try jsonEquality(gpa, entry_a.value_ptr, &value_e)) return false; } return true; }, else => { if (dbg) std.debug.print("some other type\n", .{}); return false; }, } } pub fn tomlValueToJson(allocator: std.mem.Allocator, v: *parser.Value) !std.json.Value { return switch (v.*) { .string => |s| std.json.Value{ .string = s }, .integer => |s| std.json.Value{ .integer = s }, .float => |f| std.json.Value{ .float = f }, .boolean => |b| std.json.Value{ .bool = b }, .array => |*a| b: { var al = try std.json.Array.initCapacity(allocator, a.array.items.len); for (a.array.items) |*value| { al.appendAssumeCapacity(try tomlValueToJson(allocator, value)); } break :b std.json.Value{ .array = al }; }, .table => |*t| try tableToJson(allocator, t), }; } pub fn tableToJson(allocator: std.mem.Allocator, table: *parser.Table) error{OutOfMemory}!std.json.Value { var obj = std.json.ObjectMap.init(allocator); errdefer obj.deinit(); var it = table.table.iterator(); while (it.next()) |entry| { const v = try tomlValueToJson(allocator, entry.value_ptr); try obj.put(entry.key_ptr.*, v); } return std.json.Value{ .object = obj }; } fn 
expectParseEqualToJson(src: []const u8, json: []const u8) !void { var table = try parser.parse(testing.allocator, src); defer table.deinit(testing.allocator); var actual_al = std.ArrayList(u8).init(testing.allocator); defer actual_al.deinit(); var json_writer = std.json.writeStreamArbitraryDepth( testing.allocator, actual_al.writer(), .{ .whitespace = .indent_4 }, ); defer json_writer.deinit(); var arena = std.heap.ArenaAllocator.init(testing.allocator); defer arena.deinit(); var actual_json = try tableToJson(arena.allocator(), &table); try actual_json.jsonStringify(&json_writer); try testing.expectEqualStrings(json, actual_al.items); } fn testFile(dir: *const std.fs.Dir, basename: []const u8) !parser.Table { var f = try dir.openFile(basename, .{}); defer f.close(); const contents = try f.reader().readAllAlloc(testing.allocator, 5 * 1024 * 1024); defer testing.allocator.free(contents); return try parser.parse(testing.allocator, contents); } fn testInvalid(dir: *const std.fs.Dir, path: []const u8, basename: []const u8) !bool { for (failing_invalid_tests) |skip_path| if (std.mem.eql(u8, path, skip_path)) return false; const full_path = try dir.realpathAlloc(testing.allocator, basename); defer testing.allocator.free(full_path); var tbl = testFile(dir, basename) catch return false; defer tbl.deinit(testing.allocator); std.debug.print("{s} successfully parsed\n", .{full_path}); return true; } fn testValid(dir: *const std.fs.Dir, path: []const u8, basename: []const u8) !bool { for (failing_valid_tests) |skip_path| if (std.mem.eql(u8, path, skip_path)) return false; const full_path = try dir.realpathAlloc(testing.allocator, basename); defer testing.allocator.free(full_path); var tbl = testFile(dir, basename) catch |err| { std.debug.print("{s} failed to parse {}\n", .{ full_path, err }); return true; }; defer tbl.deinit(testing.allocator); var value = .{ .table = tbl }; var arena = std.heap.ArenaAllocator.init(testing.allocator); defer arena.deinit(); var actual = try 
tomlValueToJson(arena.allocator(), &value); var json_path = try testing.allocator.dupe(u8, basename); defer testing.allocator.free(json_path); std.mem.copyForwards(u8, json_path[basename.len - 4 ..], "json"); var f = try dir.openFile(json_path, .{}); defer f.close(); const contents = try f.reader().readAllAlloc(testing.allocator, 5 * 1024 * 1024); defer testing.allocator.free(contents); var expected = try std.json.parseFromSlice(std.json.Value, testing.allocator, contents, .{}); defer expected.deinit(); if (!try jsonEquality(arena.allocator(), &actual, &expected.value)) { std.debug.print("{s}\n", .{full_path}); return true; } return false; } // standard tests test "invalid" { var dir = try std.fs.cwd().makeOpenPath("tests/invalid", .{.iterate = true}); defer dir.close(); var fail = false; var walker = try dir.walk(testing.allocator); defer walker.deinit(); while (try walker.next()) |entry| { if (entry.kind != .file) continue; fail = fail or try testInvalid(&entry.dir, entry.path, entry.basename); } if (fail) return error.InvalidDidNotFail; } test "valid" { var dir = try std.fs.cwd().makeOpenPath("tests/valid", .{.iterate = true}); defer dir.close(); var fail = false; var walker = try dir.walk(testing.allocator); defer walker.deinit(); while (try walker.next()) |entry| { if (entry.kind != .file) continue; if (std.mem.endsWith(u8, entry.basename, "json")) continue; fail = fail or try testValid(&entry.dir, entry.path, entry.basename); } if (fail) return error.ValidDidNotPass; } // fuzz error case tests test "fuzz" { var dir = try std.fs.cwd().makeOpenPath("tests/fuzzing", .{.iterate = true}); defer dir.close(); var walker = try dir.walk(testing.allocator); defer walker.deinit(); while (try walker.next()) |entry| { if (entry.kind != .file) continue; const full_path = try entry.dir.realpathAlloc(testing.allocator, entry.basename); defer testing.allocator.free(full_path); var f = try entry.dir.openFile(full_path, .{}); defer f.close(); const contents = try 
f.reader().readAllAlloc(testing.allocator, 5 * 1024 * 1024); defer testing.allocator.free(contents); // We just want to make sure we don't crash when parsing these var tbl = parser.parse(testing.allocator, contents) catch continue; tbl.deinit(testing.allocator); } } // decode tests test "decode simple" { const S = struct { b: bool, i1: i32, i2: u8, f1: f32, f2: f64, }; const s = try parser.decode(S, testing.allocator, \\b = false \\i1 = 147 \\i2 = 14 \\f1 = 14.7 \\f2 = 14.7 ); try testing.expectEqual(S{ .b = false, .i1 = 147, .i2 = 14, .f1 = 14.7, .f2 = 14.7 }, s); } test "decode optional" { const S = struct { a: i64, b: ?bool }; const s = try parser.decode(S, testing.allocator, "a = 147"); try testing.expectEqual(S{ .a = 147, .b = null }, s); } test "decode array of ints" { const S = struct { vals: []const i64, }; const s = try parser.decode(S, testing.allocator, "vals = [1, 2, 3, 4, 5]"); defer testing.allocator.free(s.vals); try testing.expectEqualSlices(i64, &.{ 1, 2, 3, 4, 5 }, s.vals); } test "decode array of strings" { const S = struct { vals: []const []const u8, }; const s = try parser.decode(S, testing.allocator, \\vals = ["hello", ", ", "world"] ); defer { for (s.vals) |str| testing.allocator.free(str); testing.allocator.free(s.vals); } try testing.expectEqual(@as(usize, 3), s.vals.len); try testing.expectEqualStrings("hello", s.vals[0]); try testing.expectEqualStrings(", ", s.vals[1]); try testing.expectEqualStrings("world", s.vals[2]); } test "decode array of tables" { const B = struct { a: i64 }; const F = struct { bar: []const B }; const S = struct { foo: F }; const s = try parser.decode(S, testing.allocator, \\[[foo.bar]] \\a = 147 \\[[foo.bar]] \\a = 1 ); defer { testing.allocator.free(s.foo.bar); } try testing.expectEqualSlices(B, &.{ .{ .a = 147 }, .{ .a = 1 } }, s.foo.bar); } test "decode default value" { const S = struct { a: []const u8 = "hello world", b: i32 = 147, c: bool = false, }; const s = try parser.decode(S, testing.allocator, \\c = 
true ); try testing.expectEqualStrings("hello world", s.a); try testing.expectEqual(@as(i32, 147), s.b); try testing.expect(s.c); } // toml2json tests test "snooker" { try expectParseEqualToJson( \\name = "snooker" \\ \\[goat] \\name = "Ronnie o' Sullivan" \\age = 46 # as of Nov 2022 \\hobbies = ["running", "hustling at pool"] \\ \\[goat.triple-crowns] \\worlds = 7 \\masters = 7 \\uks = 7 , \\{ \\ "name": "snooker", \\ "goat": { \\ "triple-crowns": { \\ "uks": 7, \\ "masters": 7, \\ "worlds": 7 \\ }, \\ "name": "Ronnie o' Sullivan", \\ "age": 46, \\ "hobbies": [ \\ "running", \\ "hustling at pool" \\ ] \\ } \\} ); } test "decode snooker" { const TripleCrowns = struct { worlds: i64, masters: i64, uks: i64 }; const Player = struct { name: []const u8, age: i64, hobbies: []const []const u8, triplecrowns: TripleCrowns, const Self = @This(); pub fn deinit(self: *Self, gpa: std.mem.Allocator) void { gpa.free(self.name); for (self.hobbies) |hobby| { gpa.free(hobby); } gpa.free(self.hobbies); } }; const Game = struct { name: []const u8, goat: Player, const Self = @This(); pub fn deinit(self: *Self, gpa: std.mem.Allocator) void { gpa.free(self.name); self.goat.deinit(gpa); } }; var s = try parser.decode(Game, testing.allocator, \\name = "snooker" \\ \\[goat] \\name = "Ronnie o' Sullivan" \\age = 46 # as of Nov 2022 \\hobbies = ["running", "hustling at pool"] \\ \\[goat.triplecrowns] \\worlds = 7 \\masters = 7 \\uks = 7 ); defer s.deinit(testing.allocator); try testing.expectEqualStrings("snooker", s.name); try testing.expectEqualStrings("Ronnie o' Sullivan", s.goat.name); try testing.expectEqual(@as(i64, 46), s.goat.age); try testing.expectEqual(@as(usize, 2), s.goat.hobbies.len); try testing.expectEqualStrings("running", s.goat.hobbies[0]); try testing.expectEqualStrings("hustling at pool", s.goat.hobbies[1]); try testing.expectEqual(@as(i64, 7), s.goat.triplecrowns.worlds); try testing.expectEqual(@as(i64, 7), s.goat.triplecrowns.masters); try testing.expectEqual(@as(i64, 
7), s.goat.triplecrowns.uks); } test "helix config" { try expectParseEqualToJson( \\theme = "ayu_dark" \\ \\[editor] \\line-number = "relative" \\rulers = [80, 120] \\scrolloff = 0 \\true-color = true \\ \\[editor.cursor-shape] \\insert = "bar" \\normal = "block" \\select = "underline" \\ \\[editor.statusline] \\center = ["file-name"] \\left = ["mode", "spinner"] \\right = ["diagnostics", "selections", "position", "position-percentage", "file-encoding", "file-type"] \\separator = "│" \\ \\[keys.normal] \\"," = "collapse_selection" \\";" = "keep_primary_selection" \\A-J = "join_selections" \\A-K = "remove_selections" \\A-k = "keep_selections" \\B = "extend_prev_word_start" \\E = "extend_next_word_end" \\H = "extend_char_left" \\J = "extend_line_down" \\K = "extend_line_up" \\L = "extend_char_right" \\N = "extend_search_next" \\W = "extend_next_word_start" \\X = "extend_line_below" \\ \\[keys.normal.space] \\"," = "buffer_picker" \\space = "file_picker" \\ \\[keys.normal.space.c] \\D = "workspace_diagnostics_picker" \\R = "rename_symbol" \\S = "workspace_symbol_picker" \\a = "code_action" \\d = "diagnostics_picker" \\s = "symbol_picker" , \\{ \\ "keys": { \\ "normal": { \\ "A-K": "remove_selections", \\ "space": { \\ "c": { \\ "a": "code_action", \\ "R": "rename_symbol", \\ "S": "workspace_symbol_picker", \\ "s": "symbol_picker", \\ "D": "workspace_diagnostics_picker", \\ "d": "diagnostics_picker" \\ }, \\ "space": "file_picker", \\ ",": "buffer_picker" \\ }, \\ "K": "extend_line_up", \\ ";": "keep_primary_selection", \\ "H": "extend_char_left", \\ ",": "collapse_selection", \\ "A-k": "keep_selections", \\ "B": "extend_prev_word_start", \\ "W": "extend_next_word_start", \\ "X": "extend_line_below", \\ "L": "extend_char_right", \\ "J": "extend_line_down", \\ "N": "extend_search_next", \\ "E": "extend_next_word_end", \\ "A-J": "join_selections" \\ } \\ }, \\ "theme": "ayu_dark", \\ "editor": { \\ "line-number": "relative", \\ "true-color": true, \\ "statusline": { \\ 
"right": [ \\ "diagnostics", \\ "selections", \\ "position", \\ "position-percentage", \\ "file-encoding", \\ "file-type" \\ ], \\ "left": [ \\ "mode", \\ "spinner" \\ ], \\ "separator": "│", \\ "center": [ \\ "file-name" \\ ] \\ }, \\ "rulers": [ \\ 80, \\ 120 \\ ], \\ "cursor-shape": { \\ "select": "underline", \\ "insert": "bar", \\ "normal": "block" \\ }, \\ "scrolloff": 0 \\ } \\} ); } test "cargo" { try expectParseEqualToJson( \\[package] \\ \\name = "tiled" \\version = "0.9.3" \\description = "A rust crate for loading in maps created by the Tiled editor" \\repository = "https://github.com/mattyhall/rs-tiled.git" \\# documentation = "http://rust-ci.org/mattyhall/rs-tiled/doc/tiled/" \\readme = "README.md" \\license = "MIT" \\authors = ["Matthew Hall <[email protected]>"] \\edition = "2018" \\ \\keywords = ["tiled", "tmx", "map"] \\ \\[features] \\default = ["zstd"] \\ \\[lib] \\name = "tiled" \\path = "src/lib.rs" \\ \\[[example]] \\name = "example" \\path = "examples/main1.rs" \\ \\[[example]] \\name = "example" \\path = "examples/main2.rs" \\ \\[dependencies] \\base64 = "0.10" \\xml-rs = "0.8" \\libflate = "0.1.18" \\zstd = { version = "0.5", optional = true } , \\{ \\ "example": [ \\ { \\ "path": "examples/main1.rs", \\ "name": "example" \\ }, \\ { \\ "path": "examples/main2.rs", \\ "name": "example" \\ } \\ ], \\ "dependencies": { \\ "libflate": "0.1.18", \\ "xml-rs": "0.8", \\ "zstd": { \\ "version": "0.5", \\ "optional": true \\ }, \\ "base64": "0.10" \\ }, \\ "package": { \\ "repository": "https://github.com/mattyhall/rs-tiled.git", \\ "version": "0.9.3", \\ "license": "MIT", \\ "keywords": [ \\ "tiled", \\ "tmx", \\ "map" \\ ], \\ "authors": [ \\ "Matthew Hall <[email protected]>" \\ ], \\ "description": "A rust crate for loading in maps created by the Tiled editor", \\ "name": "tiled", \\ "edition": "2018", \\ "readme": "README.md" \\ }, \\ "lib": { \\ "path": "src/lib.rs", \\ "name": "tiled" \\ }, \\ "features": { \\ "default": [ \\ "zstd" \\ ] \\ } 
\\} ); } test "fruits" { try expectParseEqualToJson( \\[[fruits]] \\name = "apple" \\ \\[fruits.physical] # subtable \\color = "red" \\shape = "round" \\ \\[[fruits.varieties]] # nested array of tables \\name = "red delicious" \\ \\[fruits.varieties.rating] \\yumminess = 5 \\appearance = 6 \\ \\[[fruits.varieties]] \\name = "granny smith" \\ \\ \\[[fruits]] \\name = "banana" \\ \\[[fruits.varieties]] \\name = "plantain" , \\{ \\ "fruits": [ \\ { \\ "physical": { \\ "color": "red", \\ "shape": "round" \\ }, \\ "name": "apple", \\ "varieties": [ \\ { \\ "rating": { \\ "appearance": 6, \\ "yumminess": 5 \\ }, \\ "name": "red delicious" \\ }, \\ { \\ "name": "granny smith" \\ } \\ ] \\ }, \\ { \\ "name": "banana", \\ "varieties": [ \\ { \\ "name": "plantain" \\ } \\ ] \\ } \\ ] \\} ); }
0
repos/tomlz
repos/tomlz/src/lexer.zig
const std = @import("std"); const testing = std.testing; const AllocError = std.mem.Allocator.Error; pub const Loc = struct { line: u64, col: u64, }; pub const Tok = union(enum) { equals, newline, dot, open_square_bracket, close_square_bracket, open_curly_brace, close_curly_brace, comma, key: []const u8, string: []const u8, integer: i64, float: f64, boolean: bool, pub fn dupe(self: Tok, allocator: std.mem.Allocator) AllocError!Tok { return switch (self) { .key => |k| Tok{ .key = try allocator.dupe(u8, k) }, .string => |k| Tok{ .string = try allocator.dupe(u8, k) }, else => self, }; } pub fn deinit(self: Tok, allocator: std.mem.Allocator) void { switch (self) { .key => |k| allocator.free(k), .string => |s| allocator.free(s), else => {}, } } }; pub const TokLoc = struct { tok: Tok, loc: Loc }; pub const Diagnostic = struct { loc: Loc, msg: []const u8, }; const Quotation = enum { single, double }; /// Lexer splits its given source TOML file into tokens pub const Lexer = struct { arena: std.heap.ArenaAllocator, source: []const u8, loc: Loc, index: usize, diag: ?Diagnostic, pub const Error = error{ EOF, UnexpectedChar, OutOfMemory, StringNotEnded, InvalidCodepoint, }; pub fn init(allocator: std.mem.Allocator, src: []const u8) !Lexer { if (!std.unicode.utf8ValidateSlice(src)) return error.NotUTF8; const arena = std.heap.ArenaAllocator.init(allocator); return Lexer{ .arena = arena, .source = src, .loc = .{ .line = 1, .col = 1 }, .diag = null, .index = 0 }; } fn peek(self: *Lexer) Error!u8 { if (self.index >= self.source.len) { self.diag = .{ .loc = .{ .line = self.loc.line + 1, .col = 1 }, .msg = "end of file", }; return error.EOF; } return self.source[self.index]; } fn pop(self: *Lexer) Error!u8 { if (self.index >= self.source.len) { self.diag = .{ .loc = .{ .line = self.loc.line + 1, .col = 1 }, .msg = "end of file", }; return error.EOF; } const c = self.source[self.index]; self.index += 1; self.loc.col += 1; if (c == '\n') { self.loc.line += 1; self.loc.col = 1; } 
return c; } fn peek2(self: *Lexer) ?[]const u8 { if (self.index + 1 >= self.source.len) return null; return self.source[self.index .. self.index + 2]; } fn peek3(self: *Lexer) ?[]const u8 { if (self.index + 2 >= self.source.len) return null; return self.source[self.index .. self.index + 3]; } fn parseUnicode(self: *Lexer, len: u4, al: *std.ArrayListUnmanaged(u8)) Error!void { var i: usize = 1; var codepoint: u21 = 0; var buf: [16]u8 = undefined; while (true) : (i += 1) { const c = try self.peek(); if (c != '0') break; _ = self.pop() catch unreachable; } while (true) : (i += 1) { const c = try self.peek(); const n = std.fmt.parseInt(u21, &.{c}, 16) catch { if (i == @as(usize, @intCast(len)) + 1) { const written = std.unicode.utf8Encode(codepoint, &buf) catch return error.InvalidCodepoint; try al.appendSlice(self.arena.allocator(), buf[0..written]); return; } return error.UnexpectedChar; }; _ = self.pop() catch unreachable; if (std.math.maxInt(u21) < 16 * @as(u64, @intCast(codepoint)) + n) return error.InvalidCodepoint; codepoint = codepoint * 16 + n; if (i == len) { const written = std.unicode.utf8Encode(codepoint, &buf) catch return error.InvalidCodepoint; try al.appendSlice(self.arena.allocator(), buf[0..written]); return; } } } /// parseEscapeChar parses the valid escape codes allowed in TOML (i.e. 
those allowed in a string beginning with a /// backslash) fn parseEscapeChar(self: *Lexer, al: *std.ArrayListUnmanaged(u8)) Error!void { const c: u8 = switch (try self.peek()) { 'b' => 8, 't' => '\t', 'n' => '\n', 'r' => '\r', 'f' => '\x0c', 'e' => '\x1b', 'u' => { _ = self.pop() catch unreachable; return try self.parseUnicode(4, al); }, 'U' => { _ = self.pop() catch unreachable; return try self.parseUnicode(8, al); }, '"' => '"', '\'' => '\'', '\\' => '\\', else => { self.diag = Diagnostic{ .loc = self.loc, .msg = "unexpected escape character" }; return error.UnexpectedChar; }, }; _ = self.pop() catch unreachable; try al.append(self.arena.allocator(), c); } fn parseMultiline(self: *Lexer, typ: Quotation) Error!TokLoc { const loc = self.loc; var al = std.ArrayListUnmanaged(u8){}; var first = true; while (true) { const c = self.pop() catch |err| switch (err) { error.EOF => return error.StringNotEnded, else => return err, }; if (c == '\n' and first) continue; first = false; switch (c) { '"' => if (typ == .single) { try al.append(self.arena.allocator(), c); } else { if (self.peek2()) |two| { if (!std.mem.eql(u8, two, "\"\"")) { try al.append(self.arena.allocator(), c); continue; } } // If we are here we have at least three quotes in a row. We need to keep taking quotes until we are // at the last three var i: usize = 0; while (self.peek3()) |three| { if (three[2] != c) break; _ = try self.pop(); try al.append(self.arena.allocator(), c); i += 1; if (i == 2) break; } _ = try self.pop(); _ = try self.pop(); return TokLoc{ .loc = loc, .tok = .{ .string = al.items } }; }, '\'' => if (typ == .double) { try al.append(self.arena.allocator(), c); } else { if (self.peek2()) |two| { if (!std.mem.eql(u8, two, "''")) { try al.append(self.arena.allocator(), c); continue; } } // If we are here we have at least three quotes in a row. 
We need to keep taking quotes until we are // at the last three var i: usize = 0; while (self.peek3()) |three| { if (three[2] != c) break; _ = try self.pop(); try al.append(self.arena.allocator(), c); i += 1; if (i == 2) break; } _ = try self.pop(); _ = try self.pop(); return TokLoc{ .loc = loc, .tok = .{ .string = al.items } }; }, '\\' => { if (typ == .single) { try al.append(self.arena.allocator(), c); continue; } // A trailing slash in a multiline string means trim up to the next non-whitespace character var p = try self.peek(); if (std.ascii.isWhitespace(p)) { try self.skipWhitespaceAndComment(); if (self.pop() catch unreachable != '\n') return error.UnexpectedChar; while (true) { p = try self.peek(); if (std.ascii.isWhitespace(p)) { _ = self.pop() catch unreachable; continue; } break; } continue; } try self.parseEscapeChar(&al); }, else => { if ((c >= 0x0 and c <= 0x8) or c == 0x0B or c == 0x0C or (c >= 0xE and c <= 0x1f) or c == 0x7f) { self.diag = .{ .loc = loc, .msg = "unexpected control character in string" }; return error.UnexpectedChar; } try al.append(self.arena.allocator(), c); }, } } } fn parseString(self: *Lexer, typ: Quotation, force_key: bool) Error!TokLoc { if (self.peek2()) |two| { if ((typ == .double and std.mem.eql(u8, two, "\"\"")) or (std.mem.eql(u8, two, "''"))) { if (force_key) { self.diag = .{ .msg = "cannot use a multiline string as a key", .loc = self.loc }; return error.UnexpectedChar; } _ = self.pop() catch unreachable; _ = self.pop() catch unreachable; return try self.parseMultiline(typ); } } const loc = self.loc; var al = std.ArrayListUnmanaged(u8){}; while (true) { const c = self.pop() catch |err| switch (err) { error.EOF => return error.StringNotEnded, else => return err, }; switch (c) { '"' => if (typ == .single) try al.append(self.arena.allocator(), c) else return TokLoc{ .loc = loc, .tok = .{ .string = al.items } }, '\'' => if (typ == .double) try al.append(self.arena.allocator(), c) else return TokLoc{ .loc = loc, .tok = .{ 
.string = al.items } }, '\\' => { if (typ == .single) { try al.append(self.arena.allocator(), c); continue; } try self.parseEscapeChar(&al); }, else => { if ((c >= 0x0 and c <= 0x8) or (c >= 0xA and c <= 0x1f) or c == 0x7f) { self.diag = .{ .loc = loc, .msg = "unexpected control character in string" }; return error.UnexpectedChar; } try al.append(self.arena.allocator(), c); }, } } } /// parseKey parses a key. A key can only contain [A-Za-z0-9-_] fn parseKey(self: *Lexer) Error!TokLoc { const loc = self.loc; var al = std.ArrayListUnmanaged(u8){}; while (true) { const c = self.peek() catch |err| { switch (err) { error.EOF => return TokLoc{ .loc = loc, .tok = .{ .key = al.items } }, else => return err, } }; if ((c >= '0' and c <= '9') or (c >= 'A' and c <= 'Z') or (c >= 'a' and c <= 'z') or c == '-' or c == '_') { _ = try self.pop(); try al.append(self.arena.allocator(), c); continue; } if (std.mem.indexOf(u8, " \t\r.=]", &.{c}) == null) { self.diag = Diagnostic{ .loc = self.loc, .msg = "expected one of '\t', '\r', ' ', '.', '=', ']' after a key", }; return error.UnexpectedChar; } return TokLoc{ .loc = loc, .tok = .{ .key = al.items } }; } } fn skipComment(self: *Lexer) Error!void { while (true) { var c = try self.peek(); if (c == '\r') { _ = self.pop() catch unreachable; c = try self.peek(); if (c != '\n') { self.diag = .{ .loc = self.loc, .msg = "\\r can only appear at the end of a comment", }; return error.UnexpectedChar; } } if (c == '\n') return; const loc = self.loc; _ = self.pop() catch unreachable; // From the spec: "Control characters other than tab (U+0000 to U+0008, U+000A to U+001F, U+007F) are not // permitted in comments." if ((c >= 0x0 and c <= 0x8) or (c >= 0xA and c <= 0x1f) or c == 0x7f) { self.diag = .{ .loc = loc, .msg = "unexpected control character in comment" }; return error.UnexpectedChar; } } } /// skipWhitespace skips any non significant (i.e. 
not a newline) whitespace fn skipWhitespaceAndComment(self: *Lexer) Error!void { while (true) { const c = try self.peek(); if (c == '#') { _ = self.pop() catch unreachable; return try self.skipComment(); } if (c == '\r') { _ = self.pop() catch unreachable; const p = self.peek() catch |err| switch (err) { error.EOF => { self.diag = .{ .msg = "expected \\n after \\r", .loc = self.loc }; return error.UnexpectedChar; }, else => return err, }; if (p != '\n') { self.diag = .{ .msg = "expected \\n after \\r", .loc = self.loc }; return error.UnexpectedChar; } return; } if (c == '\n' or !std.ascii.isWhitespace(c)) return; _ = self.pop() catch unreachable; } } pub fn parseNumber(self: *Lexer) Error!TokLoc { var explicit_sign = false; var base: u8 = 10; const original_index = self.index; switch (self.peek() catch unreachable) { '-' => { explicit_sign = true; _ = self.pop() catch unreachable; }, '+' => { explicit_sign = true; _ = self.pop() catch unreachable; }, else => {}, } if (self.source.len - self.index > 3) { if (std.mem.eql(u8, self.source[self.index .. self.index + 3], "nan")) { _ = self.pop() catch unreachable; _ = self.pop() catch unreachable; _ = self.pop() catch unreachable; return TokLoc{ .tok = .{ .float = std.math.nan(f64) }, .loc = self.loc }; } if (std.mem.eql(u8, self.source[self.index .. self.index + 3], "inf")) { _ = self.pop() catch unreachable; _ = self.pop() catch unreachable; _ = self.pop() catch unreachable; return TokLoc{ .tok = .{ .float = std.math.inf(f64) }, .loc = self.loc }; } } var c = try self.pop(); var had_number = false; var is_float = false; if (c == '0') { had_number = true; const radix = self.peek() catch |err| switch (err) { error.EOF => return TokLoc{ .tok = .{ .integer = 0 }, .loc = self.loc }, else => return err, }; switch (radix) { 'b' => base = 2, 'o' => base = 8, 'e', 'E' => is_float = true, 'x' => base = 16, '.' 
=> is_float = true, else => { if (std.ascii.isWhitespace(radix)) return TokLoc{ .tok = .{ .integer = 0 }, .loc = self.loc }; self.diag = .{ .msg = "expected 'b', 'o' or 'x' after '0'", .loc = self.loc }; return error.UnexpectedChar; }, } if (explicit_sign and base != 10) { self.diag = .{ .msg = "only base 10 numbers can have an explicit sign", .loc = self.loc }; return error.UnexpectedChar; } c = try self.pop(); } var last_c: ?u8 = null; while (true) { if (c == '.') { if (!had_number) { self.diag = .{ .msg = "cannot have leading '.' in float", .loc = self.loc }; return error.UnexpectedChar; } is_float = true; } if ((c == 'e' or c == 'E') and base != 16) { if (last_c == @as(u8, '.')) { self.diag = .{ .msg = "number must follow dot", .loc = self.loc }; return error.UnexpectedChar; } is_float = true; } last_c = c; c = self.peek() catch |err| switch (err) { error.EOF => { if (last_c != @as(u8, '_')) break; self.diag = .{ .msg = "trailing underscores not allowed", .loc = self.loc }; return error.UnexpectedChar; }, else => return err, }; if (c == @as(u8, '_') and last_c == @as(u8, '_')) { self.diag = .{ .msg = "double underscores not allowed", .loc = self.loc }; return error.UnexpectedChar; } if ((c == '+' or c == '-') and last_c != @as(u8, 'e') and last_c != @as(u8, 'E')) { self.diag = .{ .msg = "'+' and '-' can only follow an 'e'", .loc = self.loc }; return error.UnexpectedChar; } if (!std.ascii.isAlphanumeric(c) and c != '.' 
and c != '_' and c != '+' and c != '-') { if (last_c != @as(u8, '_')) break; self.diag = .{ .msg = "trailing underscores not allowed", .loc = self.loc }; return error.UnexpectedChar; } had_number = true; _ = self.pop() catch unreachable; } const slice = self.source[original_index..self.index]; if (slice[slice.len - 1] == '.') { self.diag = .{ .msg = "trailing dot not allowed", .loc = self.loc }; return error.UnexpectedChar; } if (is_float) return TokLoc{ .tok = .{ .float = std.fmt.parseFloat(f64, slice) catch { return error.UnexpectedChar; } }, .loc = self.loc, }; return TokLoc{ .tok = .{ .integer = std.fmt.parseInt(i64, slice, 0) catch { return error.UnexpectedChar; } }, .loc = self.loc, }; } fn parseKeyword(self: *Lexer, rest: []const u8, tok: Tok) Error!TokLoc { const loc = self.loc; const full_len = rest.len + 1; if (self.source.len - self.index < full_len) return try self.parseKey(); if (!std.mem.eql(u8, rest, self.source[self.index + 1 .. self.index + full_len])) return try self.parseKey(); // The character after rest must be either whitespace, an equals, a comma or a close curly brace for this to be // a keyword if (self.source.len - self.index >= full_len + 1 and !std.ascii.isWhitespace(self.source[self.index + full_len]) and self.source[self.index + full_len] != '=' and self.source[self.index + full_len] != ',' and self.source[self.index + full_len] != '}' and self.source[self.index + full_len] != ']') return try self.parseKey(); self.index += full_len; self.loc.col += full_len; return TokLoc{ .loc = loc, .tok = tok }; } fn consume(self: *Lexer, tok: Tok) TokLoc { const loc = self.loc; _ = self.pop() catch unreachable; return .{ .tok = tok, .loc = loc }; } /// next gives the next token, or null if there are none left. Force key ensures that numbers/keywords are parsed as /// keys rather than their normal type. This is needed as TOML allows these values to be used as keys in assignments. /// /// NOTE: any memory returned in TokLoc (e.g. 
the []const u8 array in a key/string) is only valid until the next /// call of next pub fn next(self: *Lexer, force_key: bool) Error!?TokLoc { const child = self.arena.child_allocator; self.arena.deinit(); self.arena = std.heap.ArenaAllocator.init(child); self.skipWhitespaceAndComment() catch |err| switch (err) { error.EOF => return null, else => return err, }; const c = self.peek() catch |err| switch (err) { error.EOF => return null, else => return err, }; const loc = self.loc; switch (c) { '=' => return self.consume(.equals), '.' => return self.consume(.dot), ',' => return self.consume(.comma), '[' => return self.consume(.open_square_bracket), ']' => return self.consume(.close_square_bracket), '{' => return self.consume(.open_curly_brace), '}' => return self.consume(.close_curly_brace), '\n' => return self.consume(.newline), '\r' => { _ = self.pop() catch unreachable; if (try self.peek() == '\n') return self.consume(.newline); self.diag = .{ .msg = "expect a \\n after a \\r", .loc = loc }; return error.UnexpectedChar; }, '"' => { _ = self.pop() catch unreachable; return try self.parseString(.double, force_key); }, '\'' => { _ = self.pop() catch unreachable; return try self.parseString(.single, force_key); }, '-', '+', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9' => { if (force_key) return try self.parseKey(); return try self.parseNumber(); }, 't' => { if (force_key) return try self.parseKey(); return try self.parseKeyword("rue", .{ .boolean = true }); }, 'f' => { if (force_key) return try self.parseKey(); return try self.parseKeyword("alse", .{ .boolean = false }); }, 'n' => { if (force_key) return try self.parseKey(); return try self.parseKeyword("an", .{ .float = std.math.nan(f64) }); }, 'i' => { if (force_key) return try self.parseKey(); return try self.parseKeyword("nf", .{ .float = std.math.inf(f64) }); }, else => return try self.parseKey(), } } pub fn deinit(self: *Lexer) void { self.arena.deinit(); } }; pub const Fake = struct { toklocs: []TokLoc, 
index: usize = 0, pub fn next(self: *Fake) Lexer.Error!?TokLoc { if (self.index >= self.toklocs.len) return null; self.index += 1; return self.toklocs[self.index - 1]; } }; fn readAllTokens(src: []const u8) ![]const Tok { var lexer = try Lexer.init(testing.allocator, src); defer lexer.deinit(); var al = std.ArrayListUnmanaged(Tok){}; while (try lexer.next(false)) |tok_loc| { try al.append(testing.allocator, try tok_loc.tok.dupe(testing.allocator)); } return al.toOwnedSlice(testing.allocator); } fn testTokens(src: []const u8, expected: []const Tok) !void { const toks = try readAllTokens(src); defer { for (toks) |tok| { tok.deinit(testing.allocator); } testing.allocator.free(toks); } try testing.expectEqual(expected.len, toks.len); { var i: usize = 0; while (i < expected.len) : (i += 1) { const actual = toks[i]; const exp = expected[i]; try testing.expectEqual(std.meta.activeTag(exp), std.meta.activeTag(actual)); switch (exp) { .key => |k| try testing.expectEqualStrings(k, actual.key), .string => |s| try testing.expectEqualStrings(s, actual.string), else => try testing.expectEqual(exp, actual), } } } } test "normal keys" { try testTokens("foo", &.{.{ .key = "foo" }}); try testTokens("foo-bar", &.{.{ .key = "foo-bar" }}); try testTokens("foo_bar", &.{.{ .key = "foo_bar" }}); try testTokens("1234", &.{.{ .integer = 1234 }}); try testTokens("foo.bar", &.{ .{ .key = "foo" }, .dot, .{ .key = "bar" } }); } test "quotation works" { try testTokens("\"foo\"", &.{.{ .string = "foo" }}); try testTokens("\"!!!\"", &.{.{ .string = "!!!" }}); try testTokens("\"foo bar baz\"", &.{.{ .string = "foo bar baz" }}); try testTokens("\"foo.bar.baz\"", &.{.{ .string = "foo.bar.baz" }}); try testTokens("'foo'", &.{.{ .string = "foo" }}); try testTokens("'!!!'", &.{.{ .string = "!!!" 
}}); try testTokens("'foo bar baz'", &.{.{ .string = "foo bar baz" }}); try testTokens("'foo.bar.baz'", &.{.{ .string = "foo.bar.baz" }}); try testTokens( \\"foo \"bar\" baz" , &.{.{ .string = \\foo "bar" baz }}); try testTokens("\"foo \\n bar\"", &.{.{ .string = "foo \n bar" }}); try testTokens("\"foo \\t bar\"", &.{.{ .string = "foo \t bar" }}); try testTokens( "'This string has a \\b backspace character.'", &.{.{ .string = "This string has a \\b backspace character." }}, ); try testTokens("\"\"\" foo bar baz \"\"\"", &.{.{ .string = " foo bar baz " }}); try testTokens("''' foo bar baz '''", &.{.{ .string = " foo bar baz " }}); try testTokens("\"\"\"\"\"\"\"", &.{.{ .string = "\"" }}); try testTokens("'''''''", &.{.{ .string = "'" }}); } test "key/value" { try testTokens("foo = \"hi\"", &.{ .{ .key = "foo" }, .equals, .{ .string = "hi" } }); try testTokens("foo = \"hi\"", &.{ .{ .key = "foo" }, .equals, .{ .string = "hi" } }); try testTokens("foo \t=\"hi\"", &.{ .{ .key = "foo" }, .equals, .{ .string = "hi" } }); try testTokens("foo=\"hi\"", &.{ .{ .key = "foo" }, .equals, .{ .string = "hi" } }); try testTokens("foo=\"hi\"\n", &.{ .{ .key = "foo" }, .equals, .{ .string = "hi" }, .newline }); try testTokens("\"foo bar baz\"=\"hi\"\n", &.{ .{ .string = "foo bar baz" }, .equals, .{ .string = "hi" }, .newline }); try testTokens("foo.bar=\"hi\"", &.{ .{ .key = "foo" }, .dot, .{ .key = "bar" }, .equals, .{ .string = "hi" } }); } test "comments" { try testTokens("# foo bar baz\n", &.{.newline}); try testTokens("# foo bar\tbaz\n", &.{.newline}); try testTokens("foo # comment\n", &.{ .{ .key = "foo" }, .newline }); } test "integers" { try testTokens("0", &.{.{ .integer = 0 }}); try testTokens("147", &.{.{ .integer = 147 }}); try testTokens("+147", &.{.{ .integer = 147 }}); try testTokens("-147", &.{.{ .integer = -147 }}); try testTokens("0o147", &.{.{ .integer = 0o147 }}); try testTokens("0x147abc", &.{.{ .integer = 0x147abc }}); try testTokens("0b10010011", &.{.{ 
.integer = 0b10010011 }}); try testTokens("1_000_000", &.{.{ .integer = 1000000 }}); try testTokens("-1_000_000", &.{.{ .integer = -1000000 }}); } test "floats" { try testTokens("3.14", &.{.{ .float = 3.14 }}); try testTokens("0.1", &.{.{ .float = 0.1 }}); try testTokens("-3.14", &.{.{ .float = -3.14 }}); try testTokens("-0.1", &.{.{ .float = -0.1 }}); try testTokens("0e0", &.{.{ .float = 0.0 }}); try testTokens("-1E-1", &.{.{ .float = -0.1 }}); } test "booleans" { try testTokens("true", &.{.{ .boolean = true }}); try testTokens("true ", &.{.{ .boolean = true }}); try testTokens("true=", &.{ .{ .boolean = true }, .equals }); try testTokens("false", &.{.{ .boolean = false }}); try testTokens("false ", &.{.{ .boolean = false }}); try testTokens("false=", &.{ .{ .boolean = false }, .equals }); try testTokens("truee", &.{.{ .key = "truee" }}); try testTokens("falsee", &.{.{ .key = "falsee" }}); try testTokens("tr ue", &.{ .{ .key = "tr" }, .{ .key = "ue" } }); try testTokens("fal se", &.{ .{ .key = "fal" }, .{ .key = "se" } }); } test "square brackets" { try testTokens("[]", &.{ .open_square_bracket, .close_square_bracket }); try testTokens("[foo]", &.{ .open_square_bracket, .{ .key = "foo" }, .close_square_bracket }); try testTokens("[[foo]]", &.{ .open_square_bracket, .open_square_bracket, .{ .key = "foo" }, .close_square_bracket, .close_square_bracket, }); try testTokens( "[1,2]", &.{ .open_square_bracket, .{ .integer = 1 }, .comma, .{ .integer = 2 }, .close_square_bracket }, ); } test "curly braces" { try testTokens("{}", &.{ .open_curly_brace, .close_curly_brace }); try testTokens("{foo = 10}", &.{ .open_curly_brace, .{ .key = "foo" }, .equals, .{ .integer = 10 }, .close_curly_brace }); }
0
repos/tomlz
repos/tomlz/src/toml2json.zig
const std = @import("std"); const testing = std.testing; const lex = @import("lexer.zig"); const parser = @import("parser.zig"); pub fn main() !void { if (std.os.argv.len != 2) { std.debug.print("Please pass a TOML file as the second argument", .{}); } var gpa = std.heap.page_allocator; var f = try std.fs.openFileAbsoluteZ(std.os.argv[1], .{}); defer f.close(); var contents = try f.reader().readAllAlloc(gpa, 5 * 1024 * 1024); defer gpa.free(contents); var lexer = parser.Lexer{ .real = try lex.Lexer.init(gpa, contents) }; var p = try parser.Parser.init(gpa, lexer); defer p.deinit(); var table = p.parse() catch |err| { std.debug.print("error parsing {s}: {}\n", .{ std.os.argv[1], err }); std.debug.print("{?}\n", .{p.diag}); return err; }; defer table.deinit(gpa); const integration = @import("integration_tests.zig"); var json = try integration.tableToJson(gpa, &table); var al = std.ArrayList(u8).init(gpa); defer al.deinit(); try json.jsonStringify(.{ .whitespace = .{} }, al.writer()); std.debug.print("{s}", .{al.items}); }
0
repos/tomlz/examples
repos/tomlz/examples/simple/build.zig
const std = @import("std"); pub fn build(b: *std.Build) void { const target = b.standardTargetOptions(.{}); const optimize = b.standardOptimizeOption(.{}); const exe = b.addExecutable(.{ .name = "simple", .root_source_file = .{ .path = "src/main.zig" }, .target = target, .optimize = optimize, }); // If we have the project in our repository then we can just add it as a module const tomlz = b.addModule("tomlz", .{ .root_source_file = .{ .path = "../../src/main.zig" }, }); exe.root_module.addImport("tomlz", tomlz); const run_cmd = b.addRunArtifact(exe); run_cmd.step.dependOn(b.getInstallStep()); if (b.args) |args| { run_cmd.addArgs(args); } const run_step = b.step("run", "Run the app"); run_step.dependOn(&run_cmd.step); }
0
repos/tomlz/examples/simple
repos/tomlz/examples/simple/src/main.zig
const std = @import("std"); const tomlz = @import("tomlz"); pub fn main() !void { const toml = \\[goat] \\name = "Ronnie O' Sullivan" \\age = 46 \\world_titles = [2001, 2004, 2008, 2012, 2013, 2020, 2022] ; var gpa = std.heap.GeneralPurposeAllocator(.{}){}; defer _ = gpa.deinit(); var table = try tomlz.parse(gpa.allocator(), toml); defer table.deinit(gpa.allocator()); const goat = table.getTable("goat").?; std.debug.print("GOAT: {s} (age {})\n", .{ goat.getString("name").?, goat.getInteger("age").? }); std.debug.print("Number of world titles: {}\n", .{goat.getArray("world_titles").?.items().len}); }
0
repos/tomlz/examples
repos/tomlz/examples/serialize/build.zig
const std = @import("std"); pub fn build(b: *std.Build) void { const target = b.standardTargetOptions(.{}); const optimize = b.standardOptimizeOption(.{}); const exe = b.addExecutable(.{ .name = "simple", .root_source_file = .{ .path = "src/main.zig" }, .target = target, .optimize = optimize, }); // If we have the project in our repository then we can just add it as a module const tomlz = b.addModule("tomlz", .{ .root_source_file = .{ .path = "../../src/main.zig" }, }); exe.root_module.addImport("tomlz", tomlz); const run_cmd = b.addRunArtifact(exe); run_cmd.step.dependOn(b.getInstallStep()); if (b.args) |args| { run_cmd.addArgs(args); } const run_step = b.step("run", "Run the app"); run_step.dependOn(&run_cmd.step); }
0
repos/tomlz/examples/serialize
repos/tomlz/examples/serialize/src/main.zig
const std = @import("std"); const tomlz = @import("tomlz"); pub fn main() !void { // setup a basic allocator var gpa_instance = std.heap.GeneralPurposeAllocator(.{}){}; const gpa = gpa_instance.allocator(); defer _ = gpa_instance.deinit(); const stdout_writer = std.io.getStdOut().writer(); // anything that isn't a table(numbers, strings, etc) // need to be written with a key try stdout_writer.writeAll("# Simple value:\n"); try tomlz.serializeKeyValue(gpa, stdout_writer, "truth", 42); // serialize a simple struct like this const my_point = .{ .x = 5, .y = 5, }; // try switching this to a `tomlz.serializeKeyValue` and see why that's // a problem! try stdout_writer.writeAll("\n# Table:\n"); try tomlz.serialize(gpa, stdout_writer, my_point); // Finally lets look how we can avoid having to use an allocator // Every type has a certain "depth" to it, describing how many of its // fields need to be written in table form const my_nested_type = .{ .number1 = 1, // depth 1(default) .child = .{ // depth 2, this is a table .number2 = 2, .child = .{ // depth 3, we're even deeper .number3 = 3, }, }, .otherchild = .{ // depth 2, this is also a table .number4 = 4, }, }; // We can see the highest depth is 3, so that's what we need to prepare for try stdout_writer.writeAll("\n# No allocator:\n"); try tomlz.serializer.serializeFixedDepth( 3, // specify the depth here stdout_writer, my_nested_type, ); // In a lot of cases you can just use a "big" number like 64 here and it'll work // but we can't make that assumption for you, so the default uses an allocator to // allow for arbitrarily deep types. }
0
repos/tomlz/examples
repos/tomlz/examples/serialize-custom/build.zig
const std = @import("std"); pub fn build(b: *std.Build) void { const target = b.standardTargetOptions(.{}); const optimize = b.standardOptimizeOption(.{}); const exe = b.addExecutable(.{ .name = "simple", .root_source_file = .{ .path = "src/main.zig" }, .target = target, .optimize = optimize, }); // If we have the project in our repository then we can just add it as a module const tomlz = b.addModule("tomlz", .{ .root_source_file = .{ .path = "../../src/main.zig" }, }); exe.root_module.addImport("tomlz", tomlz); const run_cmd = b.addRunArtifact(exe); run_cmd.step.dependOn(b.getInstallStep()); if (b.args) |args| { run_cmd.addArgs(args); } const run_step = b.step("run", "Run the app"); run_step.dependOn(&run_cmd.step); }
0
repos/tomlz/examples/serialize-custom
repos/tomlz/examples/serialize-custom/src/main.zig
const std = @import("std"); const tomlz = @import("tomlz"); pub fn main() !void { // setup a basic allocator var gpa_instance = std.heap.GeneralPurposeAllocator(.{}){}; const gpa = gpa_instance.allocator(); defer _ = gpa_instance.deinit(); const stdout_writer = std.io.getStdOut().writer(); // structs and unions can have their default serialize function overwritten like this // see `tomlz.serializer.Writer` for documentation on the writer const MyCustomStruct = struct { my_fancy_number: usize, string1: []const u8, string2: []const u8, pub fn tomlzSerialize(self: *const @This(), writer: anytype) !void { // When writing a struct, you always need to do this. try writer.beginTable(); try writer.writeKeyValue("number", self.my_fancy_number); // can be used to rename fields for example // If you want to e.g. stitch two string together you'd normally need an allocator, // but sadly we don't have one. Instead use the underlying stream to do so. // You have to be VERY cautious with this, since it could easily produce an invalid // output. // You also need to push a key where this can be written to try writer.pushKey("string"); defer writer.popKey(); //...and remove it when you're done // This writes "key = " to the stream, you now have to fill in the value try writer.beginAssignment(); // The underlying stream is just a standard stdlib writer // (you can put all of this into a single `out_stream.print`, it's just easier to explain like this) try writer.out_stream.writeByte('"'); // don't forget the quotation marks! 
try writer.out_stream.print("{s}{s}", .{ self.string1, self.string2 }); try writer.out_stream.writeByte('"'); try writer.out_stream.writeByte('\n'); // you're also in charge of ending the line // yes this is a very raw API } }; const test_struct = MyCustomStruct{ .my_fancy_number = 42, .string1 = "the", .string2 = "truth", }; try stdout_writer.writeAll("# Custom serializer:\n"); try tomlz.serialize(gpa, stdout_writer, test_struct); // Finally lets have a look how to implement custom serialize logic for a type you dont own // (Such as `std.HashMap`) try stdout_writer.writeAll("\n# Custom serializer for foreign type:\n"); var my_map = std.StringHashMap(usize).init(gpa); defer my_map.deinit(); try my_map.put("key1", 1); try my_map.put("key2", 2); // We need to use the internall write stream, since we need // to access it directly and not just write a value. var stream = tomlz.serializer.writeStream(gpa, stdout_writer); defer stream.deinit(); var map_iter = my_map.iterator(); while (map_iter.next()) |entry| { try stream.writeKeyValue(entry.key_ptr.*, entry.value_ptr.*); } // Note: // If you'd want to properly wrap the type, you should create a wrapper struct. // See the example for "owned" types above. // // If you're now wondering "But what if it's a very big foreign type, // but only in a few cases the default doesnt work?". Sadly you're out of luck // and need to handle the whole type :/ }
0
repos
repos/zig-charm/README.md
# charm A tiny, self-contained cryptography library, implementing authenticated encryption and keyed hashing. Charm was especially designed for memory-constrained devices, but can also be used to add encryption support to WebAssembly modules with minimal overhead. Any number of hashing and authenticated encryption operations can be freely chained using a single rolling state. In this mode, each authentication tag authenticates the whole transcript since the beginning of the session. The [original implementation](https://github.com/jedisct1/charm) was written in C and is used by the [dsvpn](https://github.com/jedisct1/dsvpn) VPN software. This is a port to the [Zig](https://ziglang.org) language. It is fully compatible with the C version. ## Usage ### Setting up a session Charm requires a 256-bit key, and, if the key is reused for different sessions, a unique session identifier (`nonce`): ```zig var key: [Charm.key_length]u8 = undefined; std.crypto.random.bytes(&key); var charm = Charm.new(key, null); ``` ### Hashing ```zig const h = charm.hash("data"); ``` ### Authenticated encryption #### Encryption ```zig const tag = charm.encrypt(msg[0..]); ``` Encrypts `msg` in-place and returns a 128-bit authentication tag. #### Decryption Starting from the same state as the one used for encryption: ```zig try charm.decrypt(msg[0..], tag); ``` Returns `error.AuthenticationFailed` if the authentication tag is invalid for the given message and the previous transcript. ## Security guarantees 128-bit security, no practical limits on the size and length of messages. ## Other implementations: - [charm](https://github.com/jedisct1/charm) original implementation in C. - [charm.js](https://github.com/jedisct1/charm.js) a JavaScript (TypeScript) implementation.
0
repos
repos/zig-charm/build.zig
const std = @import("std"); pub fn build(b: *std.Build) void { const target = b.standardTargetOptions(.{}); const optimize = b.standardOptimizeOption(.{}); const lib = b.addStaticLibrary(.{ .name = "charm", .root_source_file = b.path("src/main.zig"), .target = target, .optimize = optimize, }); b.installArtifact(lib); const main_tests = b.addTest(.{ .root_source_file = b.path("src/main.zig"), .target = target, .optimize = optimize, }); const run_main_tests = b.addRunArtifact(main_tests); const test_step = b.step("test", "Run library tests"); test_step.dependOn(&run_main_tests.step); }
0
repos/zig-charm
repos/zig-charm/src/main.zig
const std = @import("std"); const builtin = @import("builtin"); const math = std.math; const mem = std.mem; const Xoodoo = struct { const rcs = [12]u32{ 0x058, 0x038, 0x3c0, 0x0d0, 0x120, 0x014, 0x060, 0x02c, 0x380, 0x0f0, 0x1a0, 0x012 }; const Lane = @Vector(4, u32); state: [3]Lane, inline fn asWords(self: *Xoodoo) *[12]u32 { return @as(*[12]u32, @ptrCast(&self.state)); } inline fn asBytes(self: *Xoodoo) *[48]u8 { return mem.asBytes(&self.state); } fn permute(self: *Xoodoo) void { const rot8x32 = comptime if (builtin.target.cpu.arch.endian() == .Big) [_]i32{ 9, 10, 11, 8, 13, 14, 15, 12, 1, 2, 3, 0, 5, 6, 7, 4 } else [_]i32{ 11, 8, 9, 10, 15, 12, 13, 14, 3, 0, 1, 2, 7, 4, 5, 6 }; var a = self.state[0]; var b = self.state[1]; var c = self.state[2]; inline for (rcs) |rc| { var p = @shuffle(u32, a ^ b ^ c, undefined, [_]i32{ 3, 0, 1, 2 }); var e = math.rotl(Lane, p, 5); p = math.rotl(Lane, p, 14); e ^= p; a ^= e; b ^= e; c ^= e; b = @shuffle(u32, b, undefined, [_]i32{ 3, 0, 1, 2 }); c = math.rotl(Lane, c, 11); a[0] ^= rc; a ^= ~b & c; b ^= ~c & a; c ^= ~a & b; b = math.rotl(Lane, b, 1); c = @as(Lane, @bitCast(@shuffle(u8, @as(@Vector(16, u8), @bitCast(c)), undefined, rot8x32))); } self.state[0] = a; self.state[1] = b; self.state[2] = c; } inline fn endianSwapRate(self: *Xoodoo) void { for (self.asWords()[0..4]) |*w| { w.* = mem.littleToNative(u32, w.*); } } inline fn endianSwapAll(self: *Xoodoo) void { for (self.asWords()) |*w| { w.* = mem.littleToNative(u32, w.*); } } fn squeezePermute(self: *Xoodoo) [16]u8 { self.endianSwapRate(); const rate = self.asBytes()[0..16].*; self.endianSwapRate(); self.permute(); return rate; } }; pub const Charm = struct { x: Xoodoo, pub const tag_length = 16; pub const key_length = 32; pub const nonce_length = 16; pub const hash_length = 32; pub fn new(key: [key_length]u8, nonce: ?[nonce_length]u8) Charm { var x = Xoodoo{ .state = undefined }; var bytes = x.asBytes(); if (nonce) |n| { mem.copy(u8, bytes[0..16], n[0..]); } else { 
mem.set(u8, bytes[0..16], 0); } mem.copy(u8, bytes[16..][0..32], key[0..]); x.endianSwapAll(); x.permute(); return Charm{ .x = x }; } fn xor128(out: *[16]u8, in: *const [16]u8) void { for (out, 0..) |*x, i| { x.* ^= in[i]; } } fn equal128(a: [16]u8, b: [16]u8) bool { var d: u8 = 0; for (a, 0..) |x, i| { d |= x ^ b[i]; } mem.doNotOptimizeAway(d); return d == 0; } pub fn nonceIncrement(nonce: *[nonce_length]u8, endian: builtin.Endian) void { const next = mem.readInt(u128, nonce, endian) +% 1; mem.writeInt(u128, nonce, next, endian); } pub fn encrypt(charm: *Charm, msg: []u8) [tag_length]u8 { var squeezed: [16]u8 = undefined; var bytes = charm.x.asBytes(); var off: usize = 0; while (off + 16 < msg.len) : (off += 16) { charm.x.endianSwapRate(); mem.copy(u8, squeezed[0..], bytes[0..16]); xor128(bytes[0..16], msg[off..][0..16]); charm.x.endianSwapRate(); xor128(msg[off..][0..16], squeezed[0..]); charm.x.permute(); } const leftover = msg.len - off; var padded = [_]u8{0} ** (16 + 1); mem.copy(u8, padded[0..leftover], msg[off..][0..leftover]); padded[leftover] = 0x80; charm.x.endianSwapRate(); mem.copy(u8, squeezed[0..], bytes[0..16]); xor128(bytes[0..16], padded[0..16]); charm.x.endianSwapRate(); charm.x.asWords()[11] ^= (@as(u32, 1) << 24 | @as(u32, @intCast(leftover)) >> 4 << 25 | @as(u32, 1) << 26); xor128(padded[0..16], squeezed[0..]); mem.copy(u8, msg[off..][0..leftover], padded[0..leftover]); charm.x.permute(); return charm.x.squeezePermute(); } pub fn decrypt(charm: *Charm, msg: []u8, expected_tag: [tag_length]u8) !void { var squeezed: [16]u8 = undefined; var bytes = charm.x.asBytes(); var off: usize = 0; while (off + 16 < msg.len) : (off += 16) { charm.x.endianSwapRate(); mem.copy(u8, squeezed[0..], bytes[0..16]); xor128(msg[off..][0..16], squeezed[0..]); xor128(bytes[0..16], msg[off..][0..16]); charm.x.endianSwapRate(); charm.x.permute(); } const leftover = msg.len - off; var padded = [_]u8{0} ** (16 + 1); mem.copy(u8, padded[0..leftover], 
msg[off..][0..leftover]); charm.x.endianSwapRate(); mem.set(u8, squeezed[0..], 0); mem.copy(u8, squeezed[0..leftover], bytes[0..leftover]); xor128(padded[0..16], squeezed[0..]); padded[leftover] = 0x80; xor128(bytes[0..16], padded[0..16]); charm.x.endianSwapRate(); charm.x.asWords()[11] ^= (@as(u32, 1) << 24 | @as(u32, @intCast(leftover)) >> 4 << 25 | @as(u32, 1) << 26); mem.copy(u8, msg[off..][0..leftover], padded[0..leftover]); charm.x.permute(); const tag = charm.x.squeezePermute(); if (!equal128(expected_tag, tag)) { mem.set(u8, msg, 0); return error.AuthenticationFailed; } } pub fn hash(charm: *Charm, msg: []const u8) [hash_length]u8 { var bytes = charm.x.asBytes(); var off: usize = 0; while (off + 16 < msg.len) : (off += 16) { charm.x.endianSwapRate(); xor128(bytes[0..16], msg[off..][0..16]); charm.x.endianSwapRate(); charm.x.permute(); } const leftover = msg.len - off; var padded = [_]u8{0} ** (16 + 1); mem.copy(u8, padded[0..leftover], msg[off..][0..leftover]); padded[leftover] = 0x80; charm.x.endianSwapRate(); xor128(bytes[0..16], padded[0..16]); charm.x.endianSwapRate(); charm.x.asWords()[11] ^= (@as(u32, 1) << 24 | @as(u32, @intCast(leftover)) >> 4 << 25); charm.x.permute(); var h: [hash_length]u8 = undefined; mem.copy(u8, h[0..16], charm.x.squeezePermute()[0..]); mem.copy(u8, h[16..32], charm.x.squeezePermute()[0..]); return h; } }; test "charm" { _ = @import("test.zig"); }
0
repos/zig-charm
repos/zig-charm/src/test.zig
const std = @import("std"); const debug = std.debug; const mem = std.mem; const random = std.crypto.random; const Charm = @import("main.zig").Charm; test "encrypt and hash in a session" { var key: [Charm.key_length]u8 = undefined; var nonce: [Charm.nonce_length]u8 = undefined; random.bytes(&key); random.bytes(&nonce); const msg1_0 = "message 1"; const msg2_0 = "message 2"; var msg1 = msg1_0.*; var msg2 = msg2_0.*; var charm = Charm.new(key, nonce); const tag1 = charm.encrypt(msg1[0..]); const tag2 = charm.encrypt(msg2[0..]); const h = charm.hash(msg1_0); charm = Charm.new(key, nonce); try charm.decrypt(msg1[0..], tag1); try charm.decrypt(msg2[0..], tag2); const hx = charm.hash(msg1_0); debug.assert(mem.eql(u8, msg1[0..], msg1_0[0..])); debug.assert(mem.eql(u8, msg2[0..], msg2_0[0..])); debug.assert(mem.eql(u8, h[0..], hx[0..])); }
0
repos
repos/ntp-client/README.md
<!-- -*- coding: utf-8 -*- --> [![Zig](https://img.shields.io/badge/-Zig-F7A41D?style=flat&logo=zig&logoColor=white)](https://ziglang.org/) [![tests](https://github.com/FObersteiner/ntp-client/actions/workflows/run_tests.yml/badge.svg)](https://github.com/FObersteiner/ntp-client/actions/workflows/run_tests.yml) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://github.com/FObersteiner/ntp-client/blob/master/LICENSE) # NTP Client Command line app to query an [NTP](https://datatracker.ietf.org/doc/html/rfc5905) server, e.g. to verify your OS clock setting. Or get the time independent of your OS clock. Or mess with your local NTP server. ## Usage ### Building the binary Note: `v0.0.18` and greater requires at least Zig `0.14.0-dev.1411+a670f5519` to build. ```sh zig build -Dexe [--release=[safe|small|fast]] # build and run, debug: zig build -Dexe run # library tests: zig build test ``` ### NTP library Currently targets SNTP ([RFC4330](https://datatracker.ietf.org/doc/html/rfc4330)), does not implement the full NTP spec. `src/ntp.zig` can be used independently in other projects; it is exposed via this project's `build.zig` and `build.zig.zon` files. Other dependencies of the binary are lazy, i.e. they won't be fetched if you use only the library in another project. 
### Usage of the binary ```sh Usage: ntp_client [options] Options: -s, --server NTP server to query (default: pool.ntp.org) -v, --protocol-version NTP protocol version, 3 or 4 (default: 4) -4, --ipv4 use IPv4 instead of the default IPv6 --src-ip IP address to use for sending the query (default: 0::0 / IPv6 auto-select) --src-port UDP port to use for sending the query (default: 0 / any port) --dst-port UDP port of destination server (default: 123) -z, --timezone Timezone to use in console output (default: UTC) -j, --json Print result in JSON -i, --interval Interval for repeated queries in seconds (default: null / one-shot operation) -a, --all Query all IP addresses found for a given server URL (default: false / stop after first) -h, --help Show this help and exit ``` ## Demo output ```sh zig build run -Dexe -- -4 -z local ``` ```text ---***--- Server name: "pool.ntp.org" Server address: "185.41.106.152:123" --- LI=0 VN=4 Mode=4 Stratum=2 Poll=0 (0 s) Precision=-25 (29 ns) ID: 0x6C6735C0 Server root dispersion: 518 us, root delay: 5599 us --- Server last synced : 2024-08-27T09:36:35.013046150+02:00 T1, packet created : 2024-08-27T09:44:24.203294803+02:00 T2, server received : 2024-08-27T09:44:24.209060683+02:00 T3, server replied : 2024-08-27T09:44:24.209271892+02:00 T4, reply received : 2024-08-27T09:44:24.215617157+02:00 (timezone displayed: Europe/Berlin) --- Offset to timserver: -0.000 s (-290 us) Round-trip delay: 0.012 s (12111 us) --- Result flags: 0 (OK) ---***--- ``` ## Compatibility and Requirements Developed & tested mostly on Debian Linux, on an x86 machine. Windows worked last time I tested (build.zig links libc for this), Mac OS might work (can't test this). ## Zig version This package tracks Zig `0.14.0-dev` (master); might not compile with older versions. 
## Dependencies - [flags](https://github.com/n0s4/flags) for command line argument parsing - [zdt](https://codeberg.org/FObersteiner/zdt) to display timestamps as UTC or timezone-local datetimes ## License MIT. See the LICENSE file in the root directory of the repository.
0
repos
repos/ntp-client/build.zig.zon
.{ .name = "ntp_client", .version = "0.0.18", .dependencies = .{ .flags = .{ .url = "https://github.com/n0s4/flags/archive/45b5e26a507cda792d92d0bf215af4a167142f68.tar.gz", .hash = "1220a600a82d95809de7cb67c53a08e18bc176f81f7028643116e4cb80c2db8dd06b", .lazy = true, }, .zdt = .{ .url = "https://github.com/FObersteiner/zdt/archive/refs/tags/v0.2.1.tar.gz", .hash = "1220422bde7bf871a4f8924d9674b47c43e376916a689f08108fce218b65e734bb34", .lazy = true, }, }, .paths = .{ "src/ntp.zig", "build.zig", "build.zig.zon", "LICENSE", "README.md", }, }
0
repos
repos/ntp-client/build.zig
const std = @import("std"); const log = std.log.scoped(.ntp_client_build); const version = std.SemanticVersion{ .major = 0, .minor = 0, .patch = 18 }; pub fn build(b: *std.Build) void { const target = b.standardTargetOptions(.{}); const optimize = b.standardOptimizeOption(.{}); // -Dexe option is required to build the executable. // This avoids leaking dependencies, if another project wants to use // ntp.zig as a library. const build_exe = b.option(bool, "exe", "build executable"); // expose ntp.zig as a library const ntplib_module = b.addModule("ntp_client", .{ .root_source_file = b.path("src/ntp.zig"), }); if (build_exe) |_| { const exe = b.addExecutable(.{ .name = "ntp_client", .root_source_file = b.path("src/main.zig"), .target = target, .optimize = optimize, .version = version, }); b.installArtifact(exe); // for Windows compatibility, required by sockets functionality exe.linkLibC(); // using lazy dependencies here so that another project can // use the NTP lib without having to fetch flags and zdt if (b.lazyDependency("flags", .{ .optimize = optimize, .target = target, })) |dep| { exe.root_module.addImport("flags", dep.module("flags")); } if (b.lazyDependency("zdt", .{ .optimize = optimize, .target = target, // use system zoneinfo: // .prefix_tzdb = @as([]const u8, "/usr/share/zoneinfo"), })) |dep| { exe.root_module.addImport("zdt", dep.module("zdt")); } const run_cmd = b.addRunArtifact(exe); run_cmd.step.dependOn(b.getInstallStep()); if (b.args) |args| run_cmd.addArgs(args); const run_step = b.step("run", "Run the app"); run_step.dependOn(&run_cmd.step); } // run unit tests for ntplib, as a client of the library const test_step = b.step("test", "Run ntplib unit tests"); const unit_tests = b.addTest(.{ .root_source_file = b.path("src/test_ntplib.zig"), .target = target, .optimize = optimize, }); unit_tests.root_module.addImport("ntplib", ntplib_module); const run_unit_tests = b.addRunArtifact(unit_tests); test_step.dependOn(&run_unit_tests.step); }
0
repos/ntp-client
repos/ntp-client/src/main.zig
//! NTP query CLI const std = @import("std"); const io = std.io; const net = std.net; const posix = std.posix; const flags = @import("flags"); const zdt = @import("zdt"); const Datetime = zdt.Datetime; const Timezone = zdt.Timezone; const Resolution = zdt.Duration.Resolution; const CliFlags = @import("cliflags.zig"); const ntp = @import("ntp.zig"); const pprint = @import("prettyprint.zig"); // ------------------------------------------------------------------------------------ const timeout_sec: isize = 5; // wait-for-reply timeout const mtu: usize = 1024; // buffer size for transmission const ip_default = "0::0"; // default to IPv6 // ------------------------------------------------------------------------------------ pub fn main() !void { // var gpa = std.heap.GeneralPurposeAllocator(.{}){}; // const allocator = gpa.allocator(); // defer _ = gpa.deinit(); var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); defer arena.deinit(); const allocator = arena.allocator(); // for Windows compatibility: feed an allocator for args parsing var args = try std.process.argsWithAllocator(allocator); defer args.deinit(); const cliflags = flags.parse(&args, CliFlags, .{ .command_name = "ntp_client" }); const proto_vers: u8 = cliflags.protocol_version; if (proto_vers < 3 or proto_vers > 4) { return errprintln("invalid protocol version: {d}", .{proto_vers}); } if (cliflags.interval != null and cliflags.all) { return errprintln("cannot query all servers repeatedly", .{}); } if (cliflags.interval) |interval| { if (interval > 60 * 60 * 4 or interval < 2) return errprintln("interval must be in range [2s..4h], got {d}s", .{interval}); } var tz: Timezone = Timezone.UTC; defer tz.deinit(); if (!std.mem.eql(u8, cliflags.timezone, "UTC")) { if (std.mem.eql(u8, cliflags.timezone, "local")) { tz = try Timezone.tzLocal(allocator); } else { tz = Timezone.fromTzdata( cliflags.timezone, allocator, ) catch Timezone.UTC; } } // --- prepare connection 
--------------------------------------------------------- // resolve hostname const addrlist = net.getAddressList(allocator, cliflags.server, cliflags.dst_port) catch { return errprintln("invalid hostname '{s}'", .{cliflags.server}); }; defer addrlist.deinit(); // only use default IPv4 if user specified to use IPv4 without setting a specific src IP: const src_ip = if (cliflags.ipv4 and std.mem.eql(u8, ip_default, cliflags.src_ip)) "0.0.0.0" // any available IPv4 else cliflags.src_ip; // from where to send the query. // Zig std docs: to handle IPv6 link-local unix addresses, // it is recommended to use `resolveIp` instead. const src_addr = try std.net.Address.parseIp(src_ip, cliflags.src_port); const sock = try posix.socket( src_addr.any.family, // CLOEXEC not strictly needed here; see open(2) man page. posix.SOCK.DGRAM | posix.SOCK.CLOEXEC, posix.IPPROTO.UDP, ); try posix.bind(sock, &src_addr.any, src_addr.getOsSockLen()); defer posix.close(sock); if (timeout_sec != 0) { // make this configurable ? ...0 would mean no timeout try posix.setsockopt( sock, posix.SOL.SOCKET, posix.SO.RCVTIMEO, &std.mem.toBytes(posix.timespec{ .sec = timeout_sec }), // zig 0.13 : .tv_sec ); } // --- query server(s) ------------------------------------------------------------ var buf: [mtu]u8 = std.mem.zeroes([mtu]u8); repeat: while (true) { iter_addrs: for (addrlist.addrs, 0..) 
|dst, i| { const result: ntp.Result = sample_ntp( &sock, &src_addr, &dst, &buf, proto_vers, ) catch |err| switch (err) { error.AddressFamilyMismatch => { errprintln( "Error: IP address family mismatch for server at {any} (src: {s}, dst: {s})", .{ dst, inet_family(src_addr.any.family), inet_family(dst.any.family) }, ); if (i < addrlist.addrs.len - 1) errprintln("Try next server...", .{}); continue :iter_addrs; // continue to iterate addresses, even if -a is not set }, error.WouldBlock => { errprintln("Error: connection timed out", .{}); if (i < addrlist.addrs.len - 1) errprintln("Try next server...", .{}); continue :iter_addrs; // continue to iterate addresses, even if -a is not set }, else => |e| return e, }; if (cliflags.json) { try pprint.json(io.getStdOut().writer(), result, cliflags.server, dst); } else { try pprint.humanfriendly(io.getStdOut().writer(), result, &tz, cliflags.server, dst); } if (!cliflags.all) break :iter_addrs; } // end loop 'iter_addrs' if (cliflags.interval) |interval| { std.time.sleep(interval * std.time.ns_per_s); } else break :repeat; } // end loop 'repeat' } // --- helpers ------------------------------------------------------------------------ /// Sample an NTP server at 'dst' from given socket and source address. /// Result gets written to the buffer 'buf'. fn sample_ntp(sock: *const posix.socket_t, src: *const net.Address, dst: *const net.Address, buf: []u8, protocol_version: u8) !ntp.Result { // Check src and dst addr if families match (both posix.AF.INET/v4 or posix.AF.INET6/v6). 
if (src.any.family != dst.any.family) return error.AddressFamilyMismatch; var dst_addr_sock: posix.sockaddr = undefined; // must not use dst.any var dst_addr_len: posix.socklen_t = dst.getOsSockLen(); ntp.Packet.initToBuffer(protocol_version, buf); const T1: ntp.Time = ntp.Time.fromUnixNanos(@as(u64, @intCast(std.time.nanoTimestamp()))); _ = try posix.sendto( sock.*, buf[0..ntp.packet_len], 0, &dst.any, dst_addr_len, ); const n_recv: usize = try posix.recvfrom( sock.*, buf[0..], 0, &dst_addr_sock, &dst_addr_len, ); const T4: ntp.Time = ntp.Time.fromUnixNanos(@as(u64, @intCast(std.time.nanoTimestamp()))); if (n_recv != ntp.packet_len) return error.invalidLength; return ntp.Result.fromPacket(ntp.Packet.parse(buf[0..ntp.packet_len].*), T1, T4); } /// Print an error to stderr. fn errprintln(comptime fmt: []const u8, args: anytype) void { const stderr = io.getStdErr().writer(); nosuspend stderr.print(fmt ++ "\n", args) catch return; } /// Turn AF flags into an appropriate text representation. fn inet_family(family: u16) []const u8 { const result = switch (family) { posix.AF.INET => "IPv4", posix.AF.INET6 => "IPv6", else => "unknown", }; return result; }
0
repos/ntp-client
repos/ntp-client/src/prettyprint.zig
const std = @import("std"); const bufPrint = std.fmt.bufPrint; const ntp = @import("ntp.zig"); const zdt = @import("zdt"); const Datetime = zdt.Datetime; const Timezone = zdt.Timezone; const Resolution = zdt.Duration.Resolution; const ns_per_s: u64 = 1_000_000_000; const ns_per_us: u64 = 1_000; pub fn json( writer: anytype, ntpr: ntp.Result, server_name: []const u8, server_addr: std.net.Address, ) !void { try writer.print( \\{{ \\ "server_name": "{s}", \\ "server_address": "{any}", \\ "leap_indicator": {d}, \\ "version": {d}, \\ "mode": {d}, \\ "stratum": {d}, \\ "poll": {d}, \\ "precision": {d}, \\ "ref_id": {d}, \\ "root_delay_ns": {d}, \\ "root_dispersion_ns": {d}, \\ "ts_ref": "{s}", \\ "T1": "{s}", \\ "T2": "{s}", \\ "T3": "{s}", \\ "T4": "{s}", \\ "offset_ns": {d}, \\ "delay_ns": {d} \\}} \\ , .{ server_name, server_addr, ntpr.leap_indicator, ntpr.version, ntpr.mode, ntpr.stratum, ntpr.poll, ntpr.precision, ntpr.ref_id, ntpr.root_delay, ntpr.root_dispersion, try Datetime.fromUnix(ntpr.Tref.toUnixNanos(), Resolution.nanosecond, Timezone.UTC), try Datetime.fromUnix(ntpr.T1.toUnixNanos(), Resolution.nanosecond, Timezone.UTC), try Datetime.fromUnix(ntpr.T2.toUnixNanos(), Resolution.nanosecond, Timezone.UTC), try Datetime.fromUnix(ntpr.T3.toUnixNanos(), Resolution.nanosecond, Timezone.UTC), try Datetime.fromUnix(ntpr.T4.toUnixNanos(), Resolution.nanosecond, Timezone.UTC), ntpr.offset, ntpr.delay, }, ); } pub fn humanfriendly( writer: anytype, ntpr: ntp.Result, tz: ?*Timezone, server_name: []const u8, server_addr: std.net.Address, ) !void { const offset_f: f64 = @as(f64, @floatFromInt(ntpr.offset)) / @as(f64, ns_per_s); const delay_f: f64 = @as(f64, @floatFromInt(ntpr.delay)) / @as(f64, ns_per_s); var z: *Timezone = if (tz == null) @constCast(&Timezone.UTC) else tz.?; // ref_id string looks like "0xFFFFFFFF (xxxx)" var refid_buf: [18]u8 = std.mem.zeroes([18]u8); if (ntpr.stratum < 2) { _ = try bufPrint(&refid_buf, "0x{X} ({s})", .{ ntpr.ref_id, ntpr.__ref_id }); } 
else { _ = try bufPrint(&refid_buf, "0x{X}", .{ntpr.ref_id}); } var flags_buf: [512]u8 = std.mem.zeroes([512]u8); _ = try ntp.Result.printFlags(ntpr.validate(), &flags_buf); try writer.print( \\---***--- \\Server name: "{s}" \\Server address: "{any}" \\--- \\LI={d} VN={d} Mode={d} Stratum={d} Poll={d} ({d} s) Precision={d} ({d} ns) \\ID: {s} \\Server root dispersion: {d} us, root delay: {d} us \\--- \\Server last synced : {s} \\T1, packet created : {s} \\T2, server received : {s} \\T3, server replied : {s} \\T4, reply received : {s} \\(timezone displayed: {s}) \\--- \\Offset to timserver: {d:.3} s ({d} us) \\Round-trip delay: {d:.3} s ({d} us) \\--- \\Result flags: {s} \\---***--- \\ , .{ server_name, server_addr, ntpr.leap_indicator, ntpr.version, ntpr.mode, ntpr.stratum, ntpr.poll, ntpr.poll_period, ntpr.precision, ntpr.precision_ns, refid_buf, ntpr.root_dispersion / ns_per_us, ntpr.root_delay / ns_per_us, try Datetime.fromUnix(ntpr.Tref.toUnixNanos(), Resolution.nanosecond, z.*), try Datetime.fromUnix(ntpr.T1.toUnixNanos(), Resolution.nanosecond, z.*), try Datetime.fromUnix(ntpr.T2.toUnixNanos(), Resolution.nanosecond, z.*), try Datetime.fromUnix(ntpr.T3.toUnixNanos(), Resolution.nanosecond, z.*), try Datetime.fromUnix(ntpr.T4.toUnixNanos(), Resolution.nanosecond, z.*), z.name(), offset_f, @divFloor(ntpr.offset, ns_per_us), delay_f, @divFloor(ntpr.delay, ns_per_us), flags_buf, }, ); }
0
repos/ntp-client
repos/ntp-client/src/ntp.zig
//! NTP client library const std = @import("std"); const mem = std.mem; const rand = std.crypto.random; const print = std.debug.print; const testing = std.testing; const assert = std.debug.assert; const ns_per_s: u64 = 1_000_000_000; const s_per_ntp_era: u64 = 1 << 32; const u64_max: u64 = 0xFFFFFFFFFFFFFFFF; /// NTP packet has 48 bytes if extension and key / digest fields are excluded pub const packet_len: usize = 48; // min/max constants see <https://datatracker.ietf.org/doc/html/rfc5905#section-7.2> pub const max_stratum: u8 = 16; // TODO : where is this applicable ? pub const max_dispersion: u64 = 16; // [s] /// [s]; ref. root distance (dispersion + delay/2) pub const max_dist: u64 = 1; /// [s]; server must have synced in last x seconds pub const max_refclock_age: i64 = 1024; /// 36 h pub const max_poll: i8 = 17; /// 16 s - ignored in result check pub const min_poll: i8 = 4; /// Offset between the Unix epoch and the NTP epoch, era zero, in seconds pub const epoch_offset: u32 = 2_208_988_800; /// The current NTP era, 0 = [1900-01-01T00:00:00Z..2036-02-07T06:28:15Z] pub const ntp_era: i8 = 0; pub const client_mode: u8 = 3; pub const server_mode: u8 = 4; /// NTP precision and poll interval come as period of log2 seconds pub fn periodToNanos(p: i8) u64 { if (p > 63) return u64_max; if (p < -63) return 0; if (p > 0) return ns_per_s << @as(u6, @intCast(p)); if (p < 0) return ns_per_s >> @as(u6, @intCast(-p)); return ns_per_s; } pub fn periodToSeconds(p: i8) u64 { if (p > 63) return u64_max; if (p > 0) return @as(u64, 1) << @as(u6, @intCast(p)); // ignore negative input (ceil period); cannot represent sub-second period return 1; } /// Time (duration, to be precise) since epoch. /// - 32 bits seconds: ~ 136 years per era (64 bits would be ~ 5.8e10 years) /// - 32 bits fraction: precision is ~ 2.3e-10 s (64 bits would be ~ 5.4e-20 s) /// - total nanoseconds: ~ 2^62 /// /// In an NTP packet, this represents a duration since the NTP epoch. 
/// Era 0 starts at zero hours on 1900-01-01. /// pub const Time = struct { t: u64 = 0, // upper 32 bits: seconds, lower 32 bits: fraction era: i8 = ntp_era, // this is only used for Unix time input / output /// from value as received in NTP packet. pub fn fromRaw(raw: u64) Time { return .{ .t = raw }; } /// from nanoseconds since epoch in current era pub fn encode(nanos: u64) Time { const sec: u64 = @truncate(@divFloor(nanos, ns_per_s)); const nsec: u64 = @intCast(@rem(nanos, ns_per_s)); const frac: u32 = @truncate((nsec << 32) / ns_per_s); return .{ .t = @as(u64, @intCast(sec << 32)) + frac }; } /// to nanoseconds since epoch in current era pub fn decode(self: Time) u64 { const sec: u64 = (self.t >> 32); const nsec = frac_to_nsec(self.t & 0xFFFFFFFF); return sec * ns_per_s + nsec; } // Addition is not permitted by NTP since might overflow. /// NTP time subtraction which works across era bounds; /// works as long as the absolute difference between A and B is < 2^(n-1) (~68 years for n=32). pub fn sub(this: Time, other: Time) i64 { const a_sec: u32 = @truncate(this.t >> 32); const a_nsec = frac_to_nsec(this.t & 0xFFFFFFFF); const b_sec: u32 = @truncate(other.t >> 32); const b_nsec = frac_to_nsec(other.t & 0xFFFFFFFF); const offset: i32 = @bitCast(a_sec +% (~b_sec +% 1)); return @as(i64, offset) * ns_per_s + (@as(i64, @intCast(a_nsec)) - @as(i64, @intCast(b_nsec))); } /// nanoseconds since the Unix epoch to NTP time since /// the NTP epoch / era 0. /// Cannot handle time before 1970-01-01 / negative Unix time. 
pub fn fromUnixNanos(nanos: i128) Time { var result: Time = .{ .era = @intCast(@divFloor(@divFloor(nanos, ns_per_s) + epoch_offset, s_per_ntp_era)) }; var ntp_nanos: i128 = @as(i128, nanos) + ns_per_s * epoch_offset; ntp_nanos -= result.era * @as(i128, ns_per_s * s_per_ntp_era); result.t = Time.encode(@intCast(ntp_nanos)).t; return result; } /// NTP time since epoch / era 0 to nanoseconds since the Unix epoch pub fn toUnixNanos(time: Time) i128 { const era_offset: i128 = time.era * @as(i128, s_per_ntp_era * ns_per_s); const epoch_offset_ns: i128 = @as(i128, epoch_offset) * @as(i128, ns_per_s); return @as(i128, @intCast(time.decode())) - epoch_offset_ns + era_offset; } // fraction to nanoseconds; // frac's lower (towards LSB) 32 bits hold the fraction from Time.t fn frac_to_nsec(frac: u64) u64 { const nsfrac: u64 = frac * ns_per_s; // >> N is the same as division by 2^N, // however we would have to ceil-divide if the nanoseconds fraction // fills equal to or more than 2^32 // 2 if (@as(u32, @truncate(nsfrac)) >= 0x80000000) return (nsfrac >> 32) + 1; return nsfrac >> 32; } }; /// Duration with lower resolution and smaller range pub const TimeShort = struct { t: u32 = 0, // upper 16 bits: seconds, lower 16 bits: fraction /// from value as received in NTP packet. pub fn fromRaw(raw: u32) TimeShort { return .{ .t = raw }; } /// from nanoseconds pub fn encode(nanos: u32) TimeShort { const sec: u32 = @truncate(@divFloor(nanos, ns_per_s)); const nsec: u32 = @intCast(@rem(nanos, ns_per_s)); const frac: u16 = @truncate((nsec << 16) / ns_per_s); return .{ .t = @as(u32, @intCast(sec << 16)) + frac }; } /// to nanoseconds pub fn decode(ts: TimeShort) u64 { const nanos: u64 = @as(u64, ts.t >> 16) * ns_per_s; const frac: u64 = @as(u64, ts.t & 0xFFFF) * ns_per_s; const nsec = if (@as(u16, @truncate(frac)) > 0x8000) (frac >> 16) + 1 else frac >> 16; return nanos + nsec; } }; /// Struct equivalent of the NTP packet definition. 
/// Byte order is considered if a Packet instance is serialized to bytes /// or parsed from bytes. Bytes representation is big endian (network). pub const Packet = packed struct { li_vers_mode: u8, // 2 bits leap second indicator, 3 bits protocol version, 3 bits mode stratum: u8 = 0, poll: i8 = 0, precision: i8 = 0x20, root_delay: u32 = 0, root_dispersion: u32 = 0, ref_id: u32 = 0, ts_ref: u64 = 0, ts_org: u64 = 0, ts_rec: u64 = 0, ts_xmt: u64 = 0, // extension field #1 // extension field #2 // key identifier // digest /// Create a client mode NTP packet to query the time from a server. /// Random bytes are used as client transmit timestamp (xmt), /// see <https://www.ietf.org/archive/id/draft-ietf-ntp-data-minimization-04.txt>. /// For a single query, the poll interval should be 0. pub fn init(version: u8) Packet { var b: [8]u8 = undefined; rand.bytes(&b); return .{ .li_vers_mode = 0 << 6 | version << 3 | client_mode, .ts_xmt = @bitCast(b), }; } /// Create an NTP packet and fill it into a bytes buffer. /// 'buf' must be sufficiently large to store ntp.packet_len bytes. /// Considers endianess; fields > 1 byte are in big endian byte order. pub fn initToBuffer(version: u8, buf: []u8) void { assert(buf.len >= packet_len); var p: Packet = Packet.init(version); p.ts_xmt = mem.nativeToBig(u64, p.ts_xmt); const ntp_bytes: [packet_len]u8 = @bitCast(p); mem.copyForwards(u8, buf, ntp_bytes[0..]); } /// Parse bytes of the reply received from the server. /// Adjusts for byte order. /// ref_id is NOT byte-swapped even if native is little-endian. pub fn parse(bytes: [packet_len]u8) Packet { var p: Packet = @bitCast(bytes); p.root_delay = mem.bigToNative(u32, p.root_delay); p.root_dispersion = mem.bigToNative(u32, p.root_dispersion); p.ts_ref = mem.bigToNative(u64, p.ts_ref); p.ts_org = mem.bigToNative(u64, p.ts_org); p.ts_rec = mem.bigToNative(u64, p.ts_rec); p.ts_xmt = mem.bigToNative(u64, p.ts_xmt); return p; } }; /// Analyze an NTP packet received from a server. 
pub const Result = struct {
    // --- decoded header fields from the reply packet ---
    leap_indicator: u2 = 0, // LI bits from the packet header; 3 = clock unsynchronized
    version: u3 = 0, // NTP protocol version of the reply
    mode: u3 = 0, // association mode of the sender; a reply should be server mode
    stratum: u8 = 0, // distance to the reference clock; 1 = primary server
    poll: i8 = 0, // log2 seconds
    poll_period: i32 = 0, // poll interval decoded to seconds; 0 = unspecified, -1 = out of range
    precision: i8 = 0, // log2 seconds
    precision_ns: u64 = 0, // server clock precision converted to nanoseconds
    root_delay: u64 = 0, // server's round-trip delay to the reference clock, ns
    root_delay_client: u64 = 0, // root_delay plus this client's (non-negative) delay to the server, ns
    root_dispersion: u64 = 0, // server's dispersion relative to the reference clock, ns
    ref_id: u32 = 0, // raw reference ID word from the packet
    __ref_id: [4]u8 = undefined, // ref_id as ASCII bytes; all-zero unless refIDprintable()
    // Unix timestamps
    /// time when the server's clock was last updated
    Tref: Time = .{},
    /// T1, when the packet was created by client
    T1: Time = .{},
    /// T2, when the server received the request packet
    T2: Time = .{},
    /// T3, when the server sent the reply
    T3: Time = .{},
    /// T4, when the packet was received and processed
    T4: Time = .{},
    /// offset in ns of the local machine vs. the server
    offset: i64 = 0,
    /// round-trip delay in ns (network)
    delay: i64 = 0,
    // TODO : inter-arrival jitter (RFC 3550)
    ia_jitter: f64 = 0, // NOTE(review): never assigned in this struct — confirm it is set elsewhere
    /// dispersion / clock error estimate in ns
    disp: u64 = 0, // NOTE(review): never assigned in this struct — confirm it is set elsewhere

    /// results from a server reply packet.
    /// client org and rec times must be provided by the caller.
    pub fn fromPacket(p: Packet, T1: Time, T4: Time) Result {
        var result = Result{};
        // li_vers_mode packs LI (top 2 bits), version (next 3), mode (low 3).
        result.leap_indicator = @truncate((p.li_vers_mode >> 6) & 3);
        result.version = @truncate((p.li_vers_mode >> 3) & 0x7);
        result.mode = @truncate(p.li_vers_mode & 7);
        result.stratum = p.stratum;
        result.precision = p.precision;
        result.poll = p.poll;
        result.ref_id = p.ref_id;
        // 32-bit NTP "short" fixed-point fields, decoded to nanoseconds.
        result.root_dispersion = TimeShort.fromRaw(p.root_dispersion).decode();
        result.root_delay = TimeShort.fromRaw(p.root_delay).decode();
        result.Tref = Time.fromRaw(p.ts_ref);
        result.T1 = T1;
        result.T2 = Time.fromRaw(p.ts_rec);
        result.T3 = Time.fromRaw(p.ts_xmt);
        result.T4 = T4;

        // poll interval comes as log2 seconds and should be 4...17 or 0
        result.poll_period = switch (p.poll) {
            0 => 0, // unspecified
            1...17 => @intCast(periodToSeconds(p.poll)),
            else => -1,
        };

        result.precision_ns = periodToNanos(result.precision);

        // offset = T(B) - T(A) = 1/2 * [(T2-T1) + (T3-T4)]
        result.offset = @divFloor((result.T2.sub(result.T1) + result.T3.sub(result.T4)), 2);

        // roundtrip delay = T(ABA) = (T4-T1) - (T3-T2)
        result.delay = result.T4.sub(result.T1) - result.T3.sub(result.T2);

        // Client delay to the root, as sum of delay to timeserver and timeserver root delay.
        // Client delay might be negative in edge case of very close proximity to server.
        const delay_normalized: u64 = if (result.delay < 0) 0 else @intCast(result.delay);
        result.root_delay_client = result.root_delay + delay_normalized;

        // from RFC5905: For packet stratum 0 (unspecified or invalid), this
        // is a four-character ASCII [RFC1345] string, called the "kiss code",
        // used for debugging and monitoring purposes. For stratum 1 (reference
        // clock), this is a four-octet, left-justified, zero-padded ASCII
        // string assigned to the reference clock.
        result.__ref_id = std.mem.zeroes([4]u8);
        if (result.refIDprintable()) {
            result.__ref_id = @bitCast(result.ref_id);
        }

        return result;
    }

    /// current time in nanoseconds since the Unix epoch corrected by offset reported
    /// by NTP server.
    pub fn correctTime(result: Result, uncorrected: i128) i128 {
        return uncorrected + result.offset;
    }

    // TODO : stratum 0 --> Kiss of Death --> check code

    /// ref_id might be a 4-letter ASCII string.
    /// Only applicable if stratum 0 (kiss code) or stratum 1.
    /// Returns true when every byte is printable ASCII (' '..'~') or NUL padding.
    pub fn refIDprintable(result: Result) bool {
        if (result.stratum >= 2) return false;
        const data: [4]u8 = @bitCast(result.ref_id);
        for (data) |c| {
            if ((c < ' ' or c > '~') and c != 0) return false;
        }
        return true;
    }

    /// NTP query result flagging
    /// bit | meaning
    /// ----|------------------
    ///  0  | there is an unsynchronized leapsecond
    ///  1  | incorrect NTP version, must be 3 or 4
    ///  2  | mode in received packet is not server-mode
    ///  3  | stratum is too large (> 16)
    ///  4  | poll frequency incorrect
    ///  5  | sync distance of server too large (> 16s)
    ///  6  | server last synced long ago
    ///  7  | client send time after client receive time
    ///  8  | server send time after server receive time
    ///  9  | round-trip time must be positive
    pub const result_flag = enum(u32) {
        OK = 0,
        unsynchronized_leapsecond = 1,
        incorrect_version = (1 << 1),
        incorrect_mode = (1 << 2),
        stratum_too_large = (1 << 3),
        incorrect_poll_freq = (1 << 4),
        server_sync_dist_too_large = (1 << 5),
        server_sync_outdated = (1 << 6),
        client_send_after_receive = (1 << 7),
        server_send_after_receive = (1 << 8),
        negative_rtt = (1 << 9),
    };

    /// Render 'flags' into 'buf' as a comma-separated list of set flag names,
    /// or "0 (OK)" when no flag is set. Caller must supply a large enough buffer;
    /// std.fmt.bufPrint errors with NoSpaceLeft otherwise.
    pub fn printFlags(flags: u32, buf: []u8) !void {
        if (flags == 0) {
            _ = try std.fmt.bufPrint(buf, "0 (OK)", .{});
            return;
        }
        var idx: usize = 0;
        for (std.enums.values(result_flag)) |v| {
            // prefix only once something has already been written
            const prefix = if (idx > 0) ", " else "";
            if ((@intFromEnum(v) & flags) > 0) {
                const s = try std.fmt.bufPrint(buf[idx..], "{s}{s}", .{ prefix, @tagName(v) });
                idx += s.len;
            }
        }
    }

    /// Validate result from an NTP query. Returns a set of flags as a u32.
    /// A result of zero means OK. If a bit is set, something is wrong.
    /// See 'result_flag'.
    pub fn validate(result: Result) u32 {
        var flags: u32 = @intFromEnum(result_flag.OK);

        // # 0 - unsynchronized leapsecond
        if (result.leap_indicator == 3)
            flags |= @intFromEnum(result_flag.unsynchronized_leapsecond);

        // # 1 - version not 3 or 4
        if (result.version > 4 or result.version < 3)
            flags |= @intFromEnum(result_flag.incorrect_version);

        // # 2 - mode not server-mode
        if (result.mode != server_mode)
            flags |= @intFromEnum(result_flag.incorrect_mode);

        // # 3 - stratum > max_stratum
        if (result.stratum > max_stratum)
            flags |= @intFromEnum(result_flag.stratum_too_large);

        // # 4 - incorrect_poll_freq = (1 << 4),
        // Note: RFC5905 specifies a min poll of 4, we ignore this deliberately
        if (result.poll > max_poll)
            flags |= @intFromEnum(result_flag.incorrect_poll_freq);

        // # 5 - sync distance of the server;
        // Note: root_dispersion and _delay as found in the NTP packet only refer to the
        // server. To get the actual root distance, the client's delay / dispersion
        // to the root would have to be used.
        // (saturating add guards against overflow on hostile/random packets)
        if ((result.root_dispersion +| result.root_delay / 2) > max_dist * ns_per_s)
            flags |= @intFromEnum(result_flag.server_sync_dist_too_large);

        // # 6 - server_sync_outdated = (1 << 6),
        if (result.T2.sub(result.Tref) > 1024 * ns_per_s)
            flags |= @intFromEnum(result_flag.server_sync_outdated);

        // # 7 - T1>T4: cannot receive before send
        // Note: #1 this is incorrect across an NTP era boundary
        //       #2 this might be incorrect due to poor clock resolution / accuracy
        if (result.T1.decode() > result.T4.decode())
            flags |= @intFromEnum(result_flag.client_send_after_receive);

        // # 8 - T2>T3: cannot receive before send
        // Note: #1 this is incorrect across an NTP era boundary
        //       #2 this might be incorrect due to poor clock resolution / accuracy
        if (result.T2.decode() > result.T3.decode())
            flags |= @intFromEnum(result_flag.server_send_after_receive);

        // # 9 - round-trip time must not be negative
        if (result.delay < 0)
            flags |= @intFromEnum(result_flag.negative_rtt);

        // TODO : ?
        // pub const max_dispersion: u64 = 16; // [s]

        return flags;
    }
};
0
repos/ntp-client
repos/ntp-client/src/cliflags.zig
//! config struct for the flags package argument parser
const CliFlags = @This();

// defaults:
server: []const u8 = "pool.ntp.org", // NTP server to query
protocol_version: u8 = 4, // NTP protocol version; 3 or 4
ipv4: bool = false, // force IPv4 instead of the default IPv6
src_ip: []const u8 = "0::0", // source IP; "0::0" lets the OS auto-select (IPv6)
src_port: u16 = 0, // source UDP port; 0 = any port
dst_port: u16 = 123, // destination UDP port; 123 is the standard NTP port
timezone: []const u8 = "UTC", // timezone name used for console output
json: bool = false, // print the result as JSON instead of plain text
interval: ?u64 = null, // repeat-query interval in seconds; null = one-shot
all: bool = false, // query all resolved addresses, not just the first

// Per-field help text, consumed by the flags argument parser.
pub const descriptions = .{
    .server = "NTP server to query (default: pool.ntp.org)",
    .protocol_version = "NTP protocol version, 3 or 4 (default: 4)",
    .ipv4 = "use IPv4 instead of the default IPv6",
    .src_ip = "IP address to use for sending the query (default: 0::0 / IPv6 auto-select)",
    .src_port = "UDP port to use for sending the query (default: 0 / any port)",
    .dst_port = "UDP port of destination server (default: 123)",
    .timezone = "Timezone to use in console output (default: UTC)",
    .json = "Print result in JSON",
    .interval = "Interval for repeated queries in seconds (default: null / one-shot operation)",
    .all = "Query all IP addresses found for a given server URL (default: false / stop after first)",
};

// Single-character short options per field, consumed by the flags parser.
pub const switches = .{
    .server = 's',
    .protocol_version = 'v',
    .timezone = 'z',
    .json = 'j',
    .interval = 'i',
    .all = 'a',
    .ipv4 = '4',
};
0
repos/ntp-client
repos/ntp-client/src/test_ntplib.zig
//! Unit tests for the ntplib module: time encoding/decoding, packet parsing,
//! offset/delay computation and result validation.
const std = @import("std");
const testing = std.testing;
const print = std.debug.print;
const rand = std.crypto.random;
const ntp = @import("ntplib");

const ns_per_s = 1_000_000_000;

// poll/precision exponents (log2 seconds) converted to seconds / nanoseconds
test "period (poll)" {
    var s = ntp.periodToSeconds(17);
    try testing.expectEqual(std.math.powi(u64, 2, 17), s);
    s = ntp.periodToSeconds(0);
    try testing.expectEqual(1, s);
    // negative exponents clamp to 1 second
    s = ntp.periodToSeconds(-1);
    try testing.expectEqual(1, s);
    const ns = ntp.periodToNanos(5);
    const want = try std.math.powi(u64, 2, 5);
    try testing.expectEqual(want * ns_per_s, ns);
}

// raw 64-bit NTP timestamps (32.32 fixed point) decoded to nanoseconds
test "NTP time, set directly" {
    var t = ntp.Time{ .t = 0x0 };
    try testing.expectEqual(@as(u64, 0), t.decode());
    t = ntp.Time{ .t = 0x0000000080000000 }; // fraction 0.5 s
    try testing.expectEqual(@as(u64, 500_000_000), t.decode());
    t = ntp.Time{ .t = 0x00000000FFFFFFFB };
    try testing.expectEqual(@as(u64, 999_999_999), t.decode());
    t.t += t.t;
    try testing.expectEqual(@as(u64, 1_999_999_998), t.decode());
    t = ntp.Time{ .t = 0x0000000180000000 };
    try testing.expectEqual(@as(u64, 1_500_000_000), t.decode());
    // 2036-02-07T06:28:16+00:00, last second of NTP era 0
    t = ntp.Time{ .t = 0xFFFFFFFFFFFFFFFB };
    try testing.expectEqual(@as(u64, 4294967295999999999), t.decode());
    // overflow:
    // t.t += (1 << 32);
}

// encode/decode must round-trip nanosecond inputs
test "NTP time encode decode" {
    try testing.expectEqual(@as(u64, ns_per_s), ntp.Time.encode(ns_per_s).decode());
    try testing.expectEqual(
        @as(u64, ns_per_s * 2 + 10),
        ntp.Time.encode(ns_per_s * 2 + 10).decode(),
    );
    const ts: u64 = @as(u64, @intCast(std.time.nanoTimestamp()));
    try testing.expectEqual(ts, ntp.Time.encode(ts).decode());
}

// Unix epoch <-> NTP epoch conversion, including era +1 and era -1
test "NTP time vs. Unix time" {
    var ts: i128 = 0;
    try testing.expectEqual(ntp.epoch_offset, ntp.Time.fromUnixNanos(ts).decode() / ns_per_s);
    ts = std.time.nanoTimestamp();
    try testing.expectEqual(ts, ntp.Time.fromUnixNanos(ts).toUnixNanos());
    // 2036-02-07T06:28:17+00:00 --> NTP era 1
    ts = 2085978497000000000;
    try testing.expectEqual(ts, ntp.Time.fromUnixNanos(ts).toUnixNanos());
    // 1899-12-31T23:59:59+00:00 --> NTP era -1
    ts = -2208988801000000000;
    try testing.expectEqual(ts, ntp.Time.fromUnixNanos(ts).toUnixNanos());
}

// signed differences between timestamps, including across an era boundary
test "NTP time arithmetic" {
    try testing.expectEqual(
        @as(i64, 0),
        ntp.Time.encode(ns_per_s + 1).sub(ntp.Time.encode(ns_per_s + 1)),
    );
    try testing.expectEqual(
        @as(i64, ns_per_s),
        ntp.Time.encode(ns_per_s * 2).sub(ntp.Time.encode(ns_per_s)),
    );
    try testing.expectEqual(
        -@as(i64, ns_per_s),
        ntp.Time.encode(ns_per_s).sub(ntp.Time.encode(ns_per_s * 2)),
    );
    try testing.expectEqual(
        @as(i64, -1),
        ntp.Time.encode(ns_per_s - 1).sub(ntp.Time.encode(ns_per_s)),
    );
    try testing.expectEqual(
        @as(i64, 1),
        ntp.Time.encode(ns_per_s).sub(ntp.Time.encode(ns_per_s - 1)),
    );
    // across era bounds
    var a = ntp.Time{ .t = 0xFFFFFFFF << 32 }; // last second of era n
    var b = ntp.Time{ .t = 1 << 32 }; // first second of era n+1
    try testing.expectEqual(@as(i64, -2_000_000_000), a.sub(b));
    try testing.expectEqual(@as(i64, 2_000_000_000), b.sub(a));
    // 2024-06-07, 2044-06-07
    a, b = .{ ntp.Time.encode(3926707200000000000), ntp.Time.encode(4557859200000000000) };
    try testing.expectEqual(@as(i64, -631152000000000000), a.sub(b));
    try testing.expectEqual(@as(i64, 631152000000000000), b.sub(a));
}

// 16.16 fixed-point "short" format used for root delay / dispersion
test "NTP time short / 32 bits" {
    var t = ntp.TimeShort{ .t = 0x00000000 };
    try testing.expectEqual(@as(u64, 0), t.decode());
    t = ntp.TimeShort{ .t = 0x00000001 }; // smallest fraction step, ~15259 ns
    try testing.expectEqual(@as(u64, 15259), t.decode());
    t = ntp.TimeShort{ .t = 0x00008000 };
    try testing.expectEqual(@as(u64, 500_000_000), t.decode());
    t = ntp.TimeShort{ .t = 0x00018000 };
    try testing.expectEqual(@as(u64, 1_500_000_000), t.decode());
    t = ntp.TimeShort{ .t = 0xffff0000 };
    try testing.expectEqual(@as(u64, 65535 * ns_per_s), t.decode());
}

// fuzz: fromPacket must not crash or overflow on arbitrary packet bytes
test "query result - random bytes" {
    var b: [ntp.packet_len]u8 = undefined;
    var i: usize = 0;
    while (i < 1_000_000) : (i += 1) {
        rand.bytes(&b);
        const r: ntp.Result = ntp.Result.fromPacket(ntp.Packet.parse(b), ntp.Time{}, ntp.Time{});
        std.mem.doNotOptimizeAway(r);
    }
}

// default header byte for a fresh client packet: LI=0, version=3, mode=3 -> 27
test "NTP packet" {
    const p = ntp.Packet.init(3);
    try testing.expectEqual(@as(i8, 32), p.precision);
    const b: [ntp.packet_len]u8 = @bitCast(p);
    try testing.expectEqual(@as(u8, 27), b[0]);
}

// offset = ((T2-T1)+(T3-T4))/2, delay = (T4-T1)-(T3-T2)
test "Result - delay, offset" {
    const now: u64 = @intCast(std.time.nanoTimestamp());
    var p = ntp.Packet.init(4);
    // client | server | client
    //   T1  ->  T2  ->  T3  ->  T4
    //    1       0       0       3
    // => offset -2, roundtrip 2
    var T1 = ntp.Time.fromUnixNanos(now + 1 * ns_per_s);
    p.ts_rec = ntp.Time.fromUnixNanos(now).t;
    p.ts_xmt = ntp.Time.fromUnixNanos(now).t;
    var T4 = ntp.Time.fromUnixNanos(now + 3 * ns_per_s);
    var res = ntp.Result.fromPacket(p, T1, T4);
    try testing.expectEqual(@as(i64, 2 * ns_per_s), res.delay);
    try testing.expectEqual(-@as(i64, 2 * ns_per_s), res.offset);
    try testing.expectEqual(@as(i128, now - 2 * ns_per_s), res.correctTime(@as(i128, now)));
    //    0       2       2       1
    // => offset 1.5, roundtrip 1
    T1 = ntp.Time.fromUnixNanos(now);
    p.ts_rec = ntp.Time.fromUnixNanos(now + 2 * ns_per_s).t;
    p.ts_xmt = ntp.Time.fromUnixNanos(now + 2 * ns_per_s).t;
    T4 = ntp.Time.fromUnixNanos(now + 1 * ns_per_s);
    res = ntp.Result.fromPacket(p, T1, T4);
    try testing.expectEqual(@as(i64, 1 * ns_per_s), res.delay);
    try testing.expectEqual(@as(i64, 15 * ns_per_s / 10), res.offset);
    try testing.expectEqual(@as(i128, now + 15 * ns_per_s / 10), res.correctTime(@as(i128, now)));
    //    0      20      21       5
    // => offset 18, roundtrip 4
    T1 = ntp.Time.fromUnixNanos(now);
    p.ts_rec = ntp.Time.fromUnixNanos(now + 20 * ns_per_s).t;
    p.ts_xmt = ntp.Time.fromUnixNanos(now + 21 * ns_per_s).t;
    T4 = ntp.Time.fromUnixNanos(now + 5 * ns_per_s);
    res = ntp.Result.fromPacket(p, T1, T4);
    try testing.expectEqual(@as(i64, 4 * ns_per_s), res.delay);
    try testing.expectEqual(@as(i64, 18 * ns_per_s), res.offset);
    try testing.expectEqual(@as(i128, now + 18 * ns_per_s), res.correctTime(@as(i128, now)));
    //  101     102     103     105
    // => offset -0.5, roundtrip 3
    T1 = ntp.Time.fromUnixNanos(now + 101 * ns_per_s);
    p.ts_rec = ntp.Time.fromUnixNanos(now + 102 * ns_per_s).t;
    p.ts_xmt = ntp.Time.fromUnixNanos(now + 103 * ns_per_s).t;
    T4 = ntp.Time.fromUnixNanos(now + 105 * ns_per_s);
    res = ntp.Result.fromPacket(p, T1, T4);
    try testing.expectEqual(@as(i64, 3 * ns_per_s), res.delay);
    try testing.expectEqual(-@as(i64, ns_per_s / 2), res.offset);
    try testing.expectEqual(@as(i128, now - 5 * ns_per_s / 10), res.correctTime(@as(i128, now)));
}

// ref_id is only exposed as ASCII for stratum <= 1
test "Result - stratum, ref-id" {
    const now: u64 = @intCast(std.time.nanoTimestamp());
    var p = ntp.Packet.init(4);
    p.stratum = 1;
    // ref id is ASCII string, left-justified, 0-terminated
    p.ref_id = 5460039; // GPS\0
    const T1 = ntp.Time.fromUnixNanos(now + 1 * ns_per_s);
    p.ts_rec = ntp.Time.fromUnixNanos(now).t;
    p.ts_xmt = ntp.Time.fromUnixNanos(now).t;
    const T4 = ntp.Time.fromUnixNanos(now + 3 * ns_per_s);
    var res = ntp.Result.fromPacket(p, T1, T4);
    try testing.expect(res.refIDprintable());
    try testing.expectEqual([4]u8{ 'G', 'P', 'S', 0x0 }, res.__ref_id);
    p.ref_id = std.mem.nativeToBig(u32, 0x44524f50); // DROP
    res = ntp.Result.fromPacket(p, T1, T4);
    try testing.expect(res.refIDprintable());
    p.ref_id = 0x00000000;
    res = ntp.Result.fromPacket(p, T1, T4);
    try testing.expect(res.refIDprintable());
    // stratum >= 2: ref_id is an address, not ASCII
    p.stratum = 2;
    res = ntp.Result.fromPacket(p, T1, T4);
    try testing.expect(!res.refIDprintable());
    try testing.expectEqual([4]u8{ 0x0, 0x0, 0x0, 0x0 }, res.__ref_id);
}

// validate() flag bits and their printFlags rendering
// NOTE(review): 'buf' is not re-zeroed between printFlags calls; the asserts
// only pass because each expected string is longer than the previous one.
test "Result - validate / flags" {
    const now: u64 = @intCast(std.time.nanoTimestamp());
    var p = ntp.Packet.init(4);
    p.stratum = 1;
    p.li_vers_mode = 0 << 6 | 3 << 3 | 4; // server mode
    const T1 = ntp.Time.fromUnixNanos(now + 1 * ns_per_s);
    p.ts_ref = ntp.Time.fromUnixNanos(now - 5 * ns_per_s).t;
    p.ts_rec = ntp.Time.fromUnixNanos(now).t;
    p.ts_xmt = ntp.Time.fromUnixNanos(now).t;
    const T4 = ntp.Time.fromUnixNanos(now + 3 * ns_per_s);
    var res = ntp.Result.fromPacket(p, T1, T4);
    var buf: [256]u8 = std.mem.zeroes([256]u8);
    var flags = res.validate();
    // stratum 1 is good
    try testing.expectEqual(@intFromEnum(ntp.Result.result_flag.OK), flags);
    _ = try ntp.Result.printFlags(flags, &buf);
    try testing.expectEqualStrings("0 (OK)", std.mem.sliceTo(buf[0..], 0));
    p.stratum = 17;
    res = ntp.Result.fromPacket(p, T1, T4);
    flags = res.validate();
    try testing.expectEqual(@intFromEnum(ntp.Result.result_flag.stratum_too_large), flags);
    p.stratum = 1;
    //                        v---- client !
    p.li_vers_mode = 0 << 6 | 3 << 3 | 3;
    res = ntp.Result.fromPacket(p, T1, T4);
    flags = res.validate();
    try testing.expectEqual(@intFromEnum(ntp.Result.result_flag.incorrect_mode), flags);
    _ = try ntp.Result.printFlags(flags, &buf);
    try testing.expectEqualStrings("incorrect_mode", std.mem.sliceTo(buf[0..], 0));
    //                 v---------------------- leap !
    //                        v---- client !
    p.li_vers_mode = 3 << 6 | 3 << 3 | 3;
    res = ntp.Result.fromPacket(p, T1, T4);
    flags = res.validate();
    _ = try ntp.Result.printFlags(flags, &buf);
    try testing.expectEqualStrings("unsynchronized_leapsecond, incorrect_mode", std.mem.sliceTo(buf[0..], 0));
    // poll exponent above max_poll (17)
    p.poll = 18;
    res = ntp.Result.fromPacket(p, T1, T4);
    flags = res.validate();
    try testing.expect(flags & @intFromEnum(ntp.Result.result_flag.incorrect_poll_freq) > 0);
}
0
repos/ntp-client
repos/ntp-client/docs/NTP_notes.md
<!-- -*- coding: utf-8 -*- -->

# NTP

## inspired by

- <https://www.eecis.udel.edu/~mills/exec.html>
- <https://github.com/beevik/ntp>
- <https://lettier.github.io/posts/2016-04-26-lets-make-a-ntp-client-in-c.html>

## limits

Only fields up to and including Transmit Timestamp are used further on.
Extensions are not supported (yet).

## general procedure of NTP query

1. Client creates a request. This request contains the current time of the local machine, as transmit timestamp (xmt).
2. Request struct gets packed into bytes and sent to the server.
3. Server receives the packet and does its magic.
   - origin timestamp (org, T1): the transmit timestamp of the client
   - receive timestamp (rec, T2): moment of message reception
   - reference timestamp (ref): moment when server was last synced
   - transmit timestamp (xmt, T3): moment when the reply packet leaves the server
4. Client receives the reply and stores the moment when the reply was received (dst, T4).
5. Client can calculate the round-trip delay, local clock offset etc.
## on Linux On a Linux running `timedatectl`, check via `timedatectl timesync-status` ## Specs NTP v4 data format, <https://datatracker.ietf.org/doc/html/rfc5905> ### Packet ```text 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |LI | VN |Mode | Stratum | Poll | Precision | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Root Delay | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Root Dispersion | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Reference ID | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | + Reference Timestamp (64) + | | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | + Origin Timestamp (64) + | | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | + Receive Timestamp (64) + | | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | + Transmit Timestamp (64) + | | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | . . . Extension Field 1 (variable) . . . | | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | . . . Extension Field 2 (variable) . . . | | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Key Identifier | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | | dgst (128) | | | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ``` ### Kiss Codes (stratum = 0, monitoring, debugging) ```text +------+------------------------------------------------------------+ | Code | Meaning | +------+------------------------------------------------------------+ | ACST | The association belongs to a unicast server. | | AUTH | Server authentication failed. | | AUTO | Autokey sequence failed. | | BCST | The association belongs to a broadcast server. | | CRYP | Cryptographic authentication or identification failed. 
| | DENY | Access denied by remote server. | | DROP | Lost peer in symmetric mode. | | RSTR | Access denied due to local policy. | | INIT | The association has not yet synchronized for the first | | | time. | | MCST | The association belongs to a dynamically discovered server.| | NKEY | No key found. Either the key was never installed or is | | | not trusted. | | RATE | Rate exceeded. The server has temporarily denied access | | | because the client exceeded the rate threshold. | | RMOT | Alteration of association from a remote host running | | | ntpdc. | | STEP | A step change in system time has occurred, but the | | | association has not yet resynchronized. | +------+------------------------------------------------------------+ ``` ### Globals / Boundaries ```text +-----------+-------+----------------------------------+ | Name | Value | Description | +-----------+-------+----------------------------------+ | PORT | 123 | NTP port number | | VERSION | 4 | NTP version number | | TOLERANCE | 15e-6 | frequency tolerance PHI (s/s) | | MINPOLL | 4 | minimum poll exponent (16 s) | | MAXPOLL | 17 | maximum poll exponent (36 h) | | MAXDISP | 16 | maximum dispersion (16 s) | | MINDISP | .005 | minimum dispersion increment (s) | | MAXDIST | 1 | distance threshold (1 s) | | MAXSTRAT | 16 | maximum stratum number | +-----------+-------+----------------------------------+ ```
0
repos
repos/zelda/LICENSE-MIT.md
MIT License Copyright (c) 2021 Haze Booth Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.