Unnamed: 0
int64
0
0
repo_id
stringlengths
5
186
file_path
stringlengths
15
223
content
stringlengths
1
32.8M
0
repos/xmake/xmake/toolchains
repos/xmake/xmake/toolchains/msvc/check.lua
--!A cross-toolchain build utility based on Lua -- -- Licensed under the Apache License, Version 2.0 (the "License"); -- you may not use this file except in compliance with the License. -- You may obtain a copy of the License at -- -- http://www.apache.org/licenses/LICENSE-2.0 -- -- Unless required by applicable law or agreed to in writing, software -- distributed under the License is distributed on an "AS IS" BASIS, -- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -- See the License for the specific language governing permissions and -- limitations under the License. -- -- Copyright (C) 2015-present, TBOOX Open Source Group. -- -- @author ruki -- @file check.lua -- -- imports import("core.base.option") import("core.project.config") import("detect.sdks.find_vstudio") import("lib.detect.find_tool") -- attempt to check vs environment function _check_vsenv(toolchain) -- have been checked? local vs = toolchain:config("vs") or config.get("vs") if vs then vs = tostring(vs) end local vcvars = toolchain:config("vcvars") if vs and vcvars then return vs end -- find vstudio local vs_toolset = toolchain:config("vs_toolset") or config.get("vs_toolset") local vs_sdkver = toolchain:config("vs_sdkver") or config.get("vs_sdkver") local vstudio = find_vstudio({vcvars_ver = vs_toolset, sdkver = vs_sdkver}) if vstudio then -- make order vsver local vsvers = {} for vsver, _ in pairs(vstudio) do if not vs or vs ~= vsver then table.insert(vsvers, vsver) end end table.sort(vsvers, function (a, b) return tonumber(a) > tonumber(b) end) if vs then table.insert(vsvers, 1, vs) end -- get vcvarsall for _, vsver in ipairs(vsvers) do local vcvarsall = (vstudio[vsver] or {}).vcvarsall or {} local vcvars = vcvarsall[toolchain:arch()] if vcvars and vcvars.PATH and vcvars.INCLUDE and vcvars.LIB then -- save vcvars toolchain:config_set("vcvars", vcvars) toolchain:config_set("vcarchs", table.orderkeys(vcvarsall)) toolchain:config_set("vs_toolset", vcvars.VCToolsVersion) 
toolchain:config_set("vs_sdkver", vcvars.WindowsSDKVersion) -- check compiler local program = nil local tool = find_tool("cl.exe", {version = true, force = true, envs = vcvars}) if tool then program = tool.program end if program then return vsver, tool end end end end end -- check the visual studio function _check_vstudio(toolchain) local vs, msvc = _check_vsenv(toolchain) if vs then if toolchain:is_global() then config.set("vs", vs, {force = true, readonly = true}) end toolchain:config_set("vs", vs) toolchain:configs_save() cprint("checking for Microsoft Visual Studio (%s) version ... ${color.success}%s", toolchain:arch(), vs) if msvc and msvc.version then cprint("checking for Microsoft C/C++ Compiler (%s) version ... ${color.success}%s", toolchain:arch(), msvc.version) end else cprint("checking for Microsoft Visual Studio (%s) version ... ${color.nothing}${text.nothing}", toolchain:arch()) end return vs end -- main entry function main(toolchain) -- only for windows if not is_host("windows") then return end -- @see https://github.com/xmake-io/xmake/pull/679 local cc = path.basename(config.get("cc") or "cl"):lower() local cxx = path.basename(config.get("cxx") or "cl"):lower() local mrc = path.basename(config.get("mrc") or "rc"):lower() if cc == "cl" or cxx == "cl" or mrc == "rc" then return _check_vstudio(toolchain) end end
0
repos/xmake/xmake/toolchains
repos/xmake/xmake/toolchains/c51/xmake.lua
--!A cross-platform build utility based on Lua -- -- Licensed under the Apache License, Version 2.0 (the "License"); -- you may not use this file except in compliance with the License. -- You may obtain a copy of the License at -- -- http://www.apache.org/licenses/LICENSE-2.0 -- -- Unless required by applicable law or agreed to in writing, software -- distributed under the License is distributed on an "AS IS" BASIS, -- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -- See the License for the specific language governing permissions and -- limitations under the License. -- -- Copyright (C) 2015-present, TBOOX Open Source Group. -- -- @author DawnMagnet -- @file xmake.lua -- toolchain("c51") set_homepage("https://www.keil.com/c51/") set_description("Keil development tools for the 8051 Microcontroller Architecture") set_kind("cross") set_kind("standalone") set_toolset("cc", "c51") set_toolset("cxx", "c51") set_toolset("ld", "bl51") on_check(function (toolchain) import("lib.detect.find_tool") import("detect.sdks.find_c51") local c51 = find_c51() if c51 and c51.sdkdir and find_tool("c51") then toolchain:config_set("sdkdir", c51.sdkdir) toolchain:configs_save() return true end end)
0
repos/xmake/xmake/toolchains
repos/xmake/xmake/toolchains/clang-16/xmake.lua
includes(path.join(os.scriptdir(), "../clang/xmake.lua")) toolchain_clang("16")
0
repos/xmake/xmake/toolchains
repos/xmake/xmake/toolchains/clang/xmake.lua
--!A cross-platform build utility based on Lua -- -- Licensed under the Apache License, Version 2.0 (the "License"); -- you may not use this file except in compliance with the License. -- You may obtain a copy of the License at -- -- http://www.apache.org/licenses/LICENSE-2.0 -- -- Unless required by applicable law or agreed to in writing, software -- distributed under the License is distributed on an "AS IS" BASIS, -- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -- See the License for the specific language governing permissions and -- limitations under the License. -- -- Copyright (C) 2015-present, TBOOX Open Source Group. -- -- @author ruki -- @file xmake.lua -- -- define toolchain function toolchain_clang(version) local suffix = "" if version then suffix = suffix .. "-" .. version end toolchain("clang" .. suffix) set_kind("standalone") set_homepage("https://clang.llvm.org/") set_description("A C language family frontend for LLVM" .. (version and (" (" .. version .. ")") or "")) set_runtimes("c++_static", "c++_shared", "stdc++_static", "stdc++_shared") set_toolset("cc", "clang" .. suffix) set_toolset("cxx", "clang" .. suffix, "clang++" .. suffix) set_toolset("ld", "clang++" .. suffix, "clang" .. suffix) set_toolset("sh", "clang++" .. suffix, "clang" .. suffix) set_toolset("ar", "ar", "llvm-ar") set_toolset("strip", "strip", "llvm-strip") set_toolset("ranlib", "ranlib", "llvm-ranlib") set_toolset("objcopy", "objcopy", "llvm-objcopy") set_toolset("mm", "clang" .. suffix) set_toolset("mxx", "clang" .. suffix, "clang++" .. suffix) set_toolset("as", "clang" .. suffix) set_toolset("mrc", "llvm-rc") on_check(function (toolchain) return import("lib.detect.find_tool")("clang" .. 
suffix) end) on_load(function (toolchain) local march if toolchain:is_arch("x86_64", "x64") then march = "-m64" elseif toolchain:is_arch("i386", "x86") then march = "-m32" end if march then toolchain:add("cxflags", march) toolchain:add("mxflags", march) toolchain:add("asflags", march) toolchain:add("ldflags", march) toolchain:add("shflags", march) end if toolchain:is_plat("windows") then toolchain:add("runtimes", "MT", "MTd", "MD", "MDd") end end) end toolchain_clang()
0
repos/xmake/xmake/toolchains
repos/xmake/xmake/toolchains/armclang/xmake.lua
--!A cross-platform build utility based on Lua -- -- Licensed under the Apache License, Version 2.0 (the "License"); -- you may not use this file except in compliance with the License. -- You may obtain a copy of the License at -- -- http://www.apache.org/licenses/LICENSE-2.0 -- -- Unless required by applicable law or agreed to in writing, software -- distributed under the License is distributed on an "AS IS" BASIS, -- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -- See the License for the specific language governing permissions and -- limitations under the License. -- -- Copyright (C) 2015-present, TBOOX Open Source Group. -- -- @author ruki -- @file xmake.lua -- toolchain("armclang") set_homepage("https://www2.keil.com/mdk5/compiler/6") set_description("ARM Compiler Version 6 of Keil MDK") set_kind("cross") set_toolset("cc", "armclang") set_toolset("cxx", "armclang") set_toolset("ld", "armlink") set_toolset("ar", "armar") on_check(function (toolchain) import("core.base.semver") import("lib.detect.find_tool") import("detect.sdks.find_mdk") local mdk = find_mdk() if mdk and mdk.sdkdir_armclang then toolchain:config_set("sdkdir", mdk.sdkdir_armclang) -- different assembler choices for different versions of armclang local armclang = find_tool("armclang", {version = true, force = true, paths = path.join(mdk.sdkdir_armclang, "bin")}) if armclang and semver.compare(armclang.version, "6.13") > 0 then toolchain:config_set("toolset_as", "armclang") else toolchain:config_set("toolset_as", "armasm") end toolchain:configs_save() return true end end) on_load(function (toolchain) local arch = toolchain:arch() if arch then local arch_cpu = arch:lower() local arch_cpu_ld = "" local arch_target = "" if arch_cpu:startswith("cortex-m") then arch_cpu_ld = arch_cpu:replace("cortex-m", "Cortex-M", {plain = true}) arch_target = "arm-arm-none-eabi" end if arch_cpu:startswith("cortex-a") then arch_cpu_ld = arch_cpu:replace("cortex-a", "Cortex-A", {plain = 
true}) arch_target = "aarch64-arm-none-eabi" end local as = toolchain:config("toolset_as") toolchain:set("toolset", "as", as) toolchain:add("cxflags", "--target=" .. arch_target) toolchain:add("cxflags", "-mcpu=" .. arch_cpu) toolchain:add("asflags", "--target=" .. arch_target) toolchain:add("asflags", (as == "armclang" and "-mcpu=" or "--cpu=") .. arch_cpu) toolchain:add("ldflags", "--cpu " .. arch_cpu_ld) end end)
0
repos/xmake/xmake/toolchains
repos/xmake/xmake/toolchains/wasi/xmake.lua
--!A cross-platform build utility based on Lua -- -- Licensed under the Apache License, Version 2.0 (the "License"); -- you may not use this file except in compliance with the License. -- You may obtain a copy of the License at -- -- http://www.apache.org/licenses/LICENSE-2.0 -- -- Unless required by applicable law or agreed to in writing, software -- distributed under the License is distributed on an "AS IS" BASIS, -- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -- See the License for the specific language governing permissions and -- limitations under the License. -- -- Copyright (C) 2015-present, TBOOX Open Source Group. -- -- @author wsw0108 -- @file xmake.lua -- -- define toolchain toolchain("wasi") -- set homepage set_homepage("https://github.com/WebAssembly/wasi-sdk") set_description("WASI-enabled WebAssembly C/C++ toolchain.") -- mark as standalone toolchain set_kind("standalone") -- set toolset set_toolset("cc", "clang") set_toolset("cxx", "clang", "clang++") set_toolset("cpp", "clang -E") set_toolset("as", "clang") set_toolset("ld", "clang++", "clang") set_toolset("sh", "clang++", "clang") set_toolset("ar", "llvm-ar") set_toolset("ranlib", "llvm-ranlib") set_toolset("strip", "llvm-strip") -- check toolchain on_check(function (toolchain) import("lib.detect.find_tool") import("detect.sdks.find_wasisdk") local wasisdk = find_wasisdk(toolchain:sdkdir()) if wasisdk then toolchain:config_set("bindir", wasisdk.bindir) toolchain:config_set("sdkdir", wasisdk.sdkdir) toolchain:configs_save() return wasisdk end return import("lib.detect.find_tool")("clang", {paths = toolchain:bindir()}) end) -- on load on_load(function (toolchain) local sdkdir = toolchain:sdkdir() local sysroot = path.join(sdkdir, "share", "wasi-sysroot") toolchain:add("cxflags", "--sysroot=" .. sysroot) toolchain:add("mxflags", "--sysroot=" .. sysroot) toolchain:add("ldflags", "--sysroot=" .. sysroot) toolchain:add("shflags", "--sysroot=" .. 
sysroot) -- add bin search library for loading some dependent .dll files windows local bindir = toolchain:bindir() if bindir and is_host("windows") then toolchain:add("runenvs", "PATH", bindir) end end)
0
repos/xmake/xmake/toolchains
repos/xmake/xmake/toolchains/ifx/load.lua
--!A cross-toolchain build utility based on Lua -- -- Licensed under the Apache License, Version 2.0 (the "License"); -- you may not use this file except in compliance with the License. -- You may obtain a copy of the License at -- -- http://www.apache.org/licenses/LICENSE-2.0 -- -- Unless required by applicable law or agreed to in writing, software -- distributed under the License is distributed on an "AS IS" BASIS, -- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -- See the License for the specific language governing permissions and -- limitations under the License. -- -- Copyright (C) 2015-present, TBOOX Open Source Group. -- -- @author ruki -- @file load.lua -- -- imports import("core.base.option") import("core.project.config") import("detect.sdks.find_vstudio") -- add the given vs environment function _add_vsenv(toolchain, name, curenvs) -- get vcvars local vcvars = toolchain:config("vcvars") if not vcvars then return end -- get the paths for the vs environment local new = vcvars[name] if new then -- fix case naming conflict for cmake/msbuild between the new msvc envs and current environment, if we are running xmake in vs prompt. -- @see https://github.com/xmake-io/xmake/issues/4751 for k, c in pairs(curenvs) do if name:lower() == k:lower() and name ~= k then name = k break end end toolchain:add("runenvs", name, table.unpack(path.splitenv(new))) end end -- add the given ifx environment function _add_ifxenv(toolchain, name, curenvs) -- get ifxvarsall local ifxvarsall = toolchain:config("varsall") if not ifxvarsall then return end -- get ifx environment for the current arch local arch = toolchain:arch() local ifxenv = ifxvarsall[arch] or {} -- get the paths for the ifx environment local new = ifxenv[name] if new then -- fix case naming conflict for cmake/msbuild between the new msvc envs and current environment, if we are running xmake in vs prompt. 
-- @see https://github.com/xmake-io/xmake/issues/4751 for k, c in pairs(curenvs) do if name:lower() == k:lower() and name ~= k then name = k break end end toolchain:add("runenvs", name, table.unpack(path.splitenv(new))) end end -- load intel on windows function _load_intel_on_windows(toolchain) -- set toolset toolchain:set("toolset", "fc", "ifx.exe") toolchain:set("toolset", "mrc", "rc.exe") if toolchain:is_arch("x64") then toolchain:set("toolset", "as", "ml64.exe") else toolchain:set("toolset", "as", "ml.exe") end toolchain:set("toolset", "fcld", "ifx.exe") toolchain:set("toolset", "fcsh", "ifx.exe") toolchain:set("toolset", "ar", "link.exe") -- add vs/ifx environments local expect_vars = {"PATH", "LIB", "INCLUDE", "LIBPATH"} local curenvs = os.getenvs() for _, name in ipairs(expect_vars) do _add_vsenv(toolchain, name, curenvs) _add_ifxenv(toolchain, name, curenvs) end for _, name in ipairs(find_vstudio.get_vcvars()) do if not table.contains(expect_vars, name:upper()) then _add_vsenv(toolchain, name, curenvs) end end end -- load intel on linux function _load_intel_on_linux(toolchain) -- set toolset toolchain:set("toolset", "fc", "ifx") toolchain:set("toolset", "fcld", "ifx") toolchain:set("toolset", "fcsh", "ifx") toolchain:set("toolset", "ar", "ar") -- add march flags local march if toolchain:is_arch("x86_64", "x64") then march = "-m64" elseif toolchain:is_arch("i386", "x86") then march = "-m32" end if march then toolchain:add("fcflags", march) toolchain:add("fcldflags", march) toolchain:add("fcshflags", march) end -- get ifx environments local ifxenv = toolchain:config("ifxenv") if ifxenv then local ldname = is_host("macosx") and "DYLD_LIBRARY_PATH" or "LD_LIBRARY_PATH" toolchain:add("runenvs", ldname, ifxenv.libdir) end end -- main entry function main(toolchain) if is_host("windows") then return _load_intel_on_windows(toolchain) else return _load_intel_on_linux(toolchain) end end
0
repos/xmake/xmake/toolchains
repos/xmake/xmake/toolchains/ifx/xmake.lua
--!A cross-platform build utility based on Lua -- -- Licensed under the Apache License, Version 2.0 (the "License"); -- you may not use this file except in compliance with the License. -- You may obtain a copy of the License at -- -- http://www.apache.org/licenses/LICENSE-2.0 -- -- Unless required by applicable law or agreed to in writing, software -- distributed under the License is distributed on an "AS IS" BASIS, -- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -- See the License for the specific language governing permissions and -- limitations under the License. -- -- Copyright (C) 2015-present, TBOOX Open Source Group. -- -- @author ruki -- @file xmake.lua -- -- define toolchain toolchain("ifx") -- set homepage set_homepage("https://www.intel.com/content/www/us/en/developer/articles/tool/oneapi-standalone-components.html#fortran") set_description("Intel LLVM Fortran Compiler") -- mark as standalone toolchain set_kind("standalone") -- check toolchain on_check("check") -- on load on_load("load")
0
repos/xmake/xmake/toolchains
repos/xmake/xmake/toolchains/ifx/check.lua
--!A cross-toolchain build utility based on Lua -- -- Licensed under the Apache License, Version 2.0 (the "License"); -- you may not use this file except in compliance with the License. -- You may obtain a copy of the License at -- -- http://www.apache.org/licenses/LICENSE-2.0 -- -- Unless required by applicable law or agreed to in writing, software -- distributed under the License is distributed on an "AS IS" BASIS, -- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -- See the License for the specific language governing permissions and -- limitations under the License. -- -- Copyright (C) 2015-present, TBOOX Open Source Group. -- -- @author ruki -- @file check.lua -- -- imports import("core.base.option") import("core.project.config") import("detect.sdks.find_ifxenv") import("detect.sdks.find_vstudio") import("lib.detect.find_tool") -- attempt to check vs environment function _check_vsenv(toolchain) -- has been checked? local vs = toolchain:config("vs") or config.get("vs") if vs then vs = tostring(vs) end local vcvars = toolchain:config("vcvars") if vs and vcvars then return vs end -- find vstudio local vs_toolset = toolchain:config("vs_toolset") or config.get("vs_toolset") local vs_sdkver = toolchain:config("vs_sdkver") or config.get("vs_sdkver") local vstudio = find_vstudio({vcvars_ver = vs_toolset, sdkver = vs_sdkver}) if vstudio then -- make order vsver local vsvers = {} for vsver, _ in pairs(vstudio) do if not vs or vs ~= vsver then table.insert(vsvers, vsver) end end table.sort(vsvers, function (a, b) return tonumber(a) > tonumber(b) end) if vs then table.insert(vsvers, 1, vs) end -- get vcvarsall for _, vsver in ipairs(vsvers) do local vcvarsall = (vstudio[vsver] or {}).vcvarsall or {} local vcvars = vcvarsall[toolchain:arch()] if vcvars and vcvars.PATH and vcvars.INCLUDE and vcvars.LIB then -- save vcvars toolchain:config_set("vcvars", vcvars) toolchain:config_set("vcarchs", table.orderkeys(vcvarsall)) 
toolchain:config_set("vs_toolset", vcvars.VCToolsVersion) toolchain:config_set("vs_sdkver", vcvars.WindowsSDKVersion) -- check compiler local program local paths local pathenv = os.getenv("PATH") if pathenv then paths = path.splitenv(pathenv) end local tool = find_tool("cl.exe", {version = true, force = true, paths = paths, envs = vcvars}) if tool then program = tool.program end if program then return vsver, tool end end end end end -- check the visual studio function _check_vstudio(toolchain) local vs = _check_vsenv(toolchain) if vs then if toolchain:is_global() then config.set("vs", vs, {force = true, readonly = true}) end toolchain:config_set("vs", vs) toolchain:configs_save() cprint("checking for Microsoft Visual Studio (%s) version ... ${color.success}%s", toolchain:arch(), vs) else cprint("checking for Microsoft Visual Studio (%s) version ... ${color.nothing}${text.nothing}", toolchain:arch()) end return vs end -- check intel on windows function _check_intel_on_windows(toolchain) -- have been checked? local varsall = toolchain:config("varsall") if varsall then return true end -- find intel llvm c/c++ compiler environment local ifxenv = find_ifxenv() if ifxenv and ifxenv.ifxvars then local ifxvarsall = ifxenv.ifxvars local ifxenv = ifxvarsall[toolchain:arch()] if ifxenv and ifxenv.PATH and ifxenv.INCLUDE and ifxenv.LIB then local tool = find_tool("ifx.exe", {force = true, envs = ifxenv, version = true}) if tool then cprint("checking for Intel LLVM Fortran Compiler (%s) ... 
${color.success}${text.success}", toolchain:arch()) toolchain:config_set("varsall", ifxvarsall) toolchain:configs_save() return _check_vstudio(toolchain) end end end end -- check intel on linux function _check_intel_on_linux(toolchain) local ifxenv = toolchain:config("ifxenv") if ifxenv then return true end ifxenv = find_ifxenv() if ifxenv then local ldname = is_host("macosx") and "DYLD_LIBRARY_PATH" or "LD_LIBRARY_PATH" local tool = find_tool("ifx", {force = true, envs = {[ldname] = ifxenv.libdir}, paths = ifxenv.bindir}) if tool then cprint("checking for Intel Fortran Compiler (%s) ... ${color.success}${text.success}", toolchain:arch()) toolchain:config_set("ifxenv", ifxenv) toolchain:config_set("bindir", ifxenv.bindir) toolchain:configs_save() return true end return true end end -- main entry function main(toolchain) if is_host("windows") then return _check_intel_on_windows(toolchain) else return _check_intel_on_linux(toolchain) end end
0
repos/xmake/xmake/toolchains
repos/xmake/xmake/toolchains/gdc/xmake.lua
--!A cross-platform build utility based on Lua -- -- Licensed under the Apache License, Version 2.0 (the "License"); -- you may not use this file except in compliance with the License. -- You may obtain a copy of the License at -- -- http://www.apache.org/licenses/LICENSE-2.0 -- -- Unless required by applicable law or agreed to in writing, software -- distributed under the License is distributed on an "AS IS" BASIS, -- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -- See the License for the specific language governing permissions and -- limitations under the License. -- -- Copyright (C) 2015-present, TBOOX Open Source Group. -- -- @author ruki -- @file xmake.lua -- -- define toolchain toolchain("gdc") set_homepage("https://gdcproject.org/") set_description("The GNU D Compiler (GDC).") on_check("check") on_load(function (toolchain) local cross = toolchain:cross() or "" toolchain:add("toolset", "dc", cross .. "gdc") toolchain:add("toolset", "dcld", cross .. "gdc") toolchain:add("toolset", "dcsh", cross .. "gdc") toolchain:add("toolset", "dcar", cross .. "gcc-ar") local march if toolchain:is_arch("x86_64", "x64") then march = "-m64" elseif toolchain:is_arch("i386", "x86") then march = "-m32" end toolchain:add("dcflags", march or "") toolchain:add("dcshflags", march or "") toolchain:add("dcldflags", march or "") end)
0
repos/xmake/xmake/toolchains
repos/xmake/xmake/toolchains/gdc/check.lua
--!A cross-toolchain build utility based on Lua -- -- Licensed under the Apache License, Version 2.0 (the "License"); -- you may not use this file except in compliance with the License. -- You may obtain a copy of the License at -- -- http://www.apache.org/licenses/LICENSE-2.0 -- -- Unless required by applicable law or agreed to in writing, software -- distributed under the License is distributed on an "AS IS" BASIS, -- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -- See the License for the specific language governing permissions and -- limitations under the License. -- -- Copyright (C) 2015-present, TBOOX Open Source Group. -- -- @author ruki -- @file check.lua -- -- imports import("core.project.config") import("lib.detect.find_tool") import("detect.sdks.find_cross_toolchain") function main(toolchain) -- we attempt to find gdc in $PATH if find_tool("gdc") then return true end -- we need to find gdc2 in the given toolchain sdk directory local sdkdir = toolchain:sdkdir() local bindir = toolchain:bindir() local cross = toolchain:cross() if not sdkdir and not bindir and not cross then return end -- find cross toolchain local cross_toolchain = find_cross_toolchain(sdkdir, {bindir = bindir, cross = cross}) if cross_toolchain then toolchain:config_set("cross", cross_toolchain.cross) toolchain:config_set("bindir", cross_toolchain.bindir) toolchain:config_set("sdkdir", cross_toolchain.sdkdir) toolchain:configs_save() else raise("cross toolchain not found!") end return cross_toolchain end
0
repos/xmake/xmake/toolchains
repos/xmake/xmake/toolchains/clang-15/xmake.lua
includes(path.join(os.scriptdir(), "../clang/xmake.lua")) toolchain_clang("15")
0
repos/xmake/xmake/toolchains
repos/xmake/xmake/toolchains/gcc-4.8/xmake.lua
includes(path.join(os.scriptdir(), "../gcc/xmake.lua")) toolchain_gcc("4.8")
0
repos/xmake/xmake/toolchains
repos/xmake/xmake/toolchains/sdcc/xmake.lua
--!A cross-platform build utility based on Lua -- -- Licensed under the Apache License, Version 2.0 (the "License"); -- you may not use this file except in compliance with the License. -- You may obtain a copy of the License at -- -- http://www.apache.org/licenses/LICENSE-2.0 -- -- Unless required by applicable law or agreed to in writing, software -- distributed under the License is distributed on an "AS IS" BASIS, -- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -- See the License for the specific language governing permissions and -- limitations under the License. -- -- Copyright (C) 2015-present, TBOOX Open Source Group. -- -- @author ruki -- @file xmake.lua -- -- define toolchain toolchain("sdcc") -- set homepage set_homepage("http://sdcc.sourceforge.net/") set_description("Small Device C Compiler") -- mark as standalone toolchain set_kind("standalone") -- set toolset set_toolset("cc", "sdcc") set_toolset("cxx", "sdcc") set_toolset("cpp", "sdcpp") set_toolset("as", "sdcc") set_toolset("ld", "sdcc") set_toolset("sh", "sdcc") set_toolset("ar", "sdar") -- set archs set_archs("stm8", "mcs51", "z80", "z180", "r2k", "r3ka", "s08", "hc08") -- set formats set_formats("static", "$(name).lib") set_formats("object", "$(name).rel") set_formats("binary", "$(name).bin") set_formats("symbol", "$(name).sym") -- check toolchain on_check("check") -- on load on_load(function (toolchain) local arch = toolchain:arch() if arch then toolchain:add("cxflags", "-m" .. arch) toolchain:add("ldflags", "-m" .. arch) end end)
0
repos/xmake/xmake/toolchains
repos/xmake/xmake/toolchains/sdcc/check.lua
--!A cross-toolchain build utility based on Lua -- -- Licensed under the Apache License, Version 2.0 (the "License"); -- you may not use this file except in compliance with the License. -- You may obtain a copy of the License at -- -- http://www.apache.org/licenses/LICENSE-2.0 -- -- Unless required by applicable law or agreed to in writing, software -- distributed under the License is distributed on an "AS IS" BASIS, -- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -- See the License for the specific language governing permissions and -- limitations under the License. -- -- Copyright (C) 2015-present, TBOOX Open Source Group. -- -- @author ruki -- @file check.lua -- -- imports import("core.project.config") import("detect.sdks.find_cross_toolchain") -- check the cross toolchain function main(toolchain) local sdkdir = toolchain:sdkdir() local bindir = toolchain:bindir() local cross_toolchain = find_cross_toolchain(sdkdir, {bindir = bindir}) if cross_toolchain then toolchain:config_set("cross", cross_toolchain.cross) toolchain:config_set("bindir", cross_toolchain.bindir) toolchain:configs_save() else raise("sdcc toolchain not found!") end return cross_toolchain end
0
repos
repos/weekend-raytracer-zig/README.md
# Ray Tracing in One Weekend (Zig) This is a fairly straightforward implementation of Peter Shirley's "Ray Tracing in One Weekend" book in the Zig programming language To run: ``` $ zig build run -Drelease-fast ``` ![cover_image](https://raw.githubusercontent.com/Nelarius/weekend-raytracer-rust/master/img/cover_image.png) # Dependencies - [email protected]: https://ziglang.org/ - SDL2: https://wiki.libsdl.org/Installation
0
repos
repos/weekend-raytracer-zig/build.zig
const Builder = @import("std").build.Builder; pub fn build(b: *Builder) void { const exe = b.addExecutable("zig-tracer", "src/main.zig"); exe.setBuildMode(b.standardReleaseOptions()); exe.linkSystemLibrary("c"); if (exe.target.isDarwin()) { exe.addIncludeDir("/Library/Frameworks/SDL2.framework/Headers"); exe.linkFramework("SDL2"); } else { exe.linkSystemLibrary("SDL2"); } exe.install(); const run = b.step("run", "Run the project"); const run_cmd = exe.run(); run_cmd.step.dependOn(b.getInstallStep()); run.dependOn(&run_cmd.step); }
0
repos/weekend-raytracer-zig
repos/weekend-raytracer-zig/src/camera.zig
const std = @import("std"); const math = std.math; const rand = std.rand; const Ray = @import("ray.zig").Ray; const Vec3f = @import("vector.zig").Vec3f; pub const Camera = struct { const Self = @This(); eye: Vec3f, lower_left_corner: Vec3f, horizontal: Vec3f, vertical: Vec3f, u: Vec3f, v: Vec3f, lens_radius: f32, pub fn new(lookfrom: Vec3f, lookat: Vec3f, vup: Vec3f, vfov: f32, aspect: f32, aperture: f32, focus_distance: f32) Camera { const lens_radius = 0.5 * aperture; // TODO: numerical constant for PI const theta = vfov * 3.14159 / 180.0; const half_height = math.tan(0.5 * theta); const half_width = aspect * half_height; const w = lookfrom.sub(lookat).makeUnitVector(); const u = vup.cross(w).makeUnitVector(); const v = w.cross(u); const lower_left_corner = lookfrom.sub(u.mul(half_width * focus_distance)).sub(v.mul(half_height * focus_distance)).sub(w.mul(focus_distance)); const horizontal = u.mul(2.0 * half_width * focus_distance); const vertical = v.mul(2.0 * half_height * focus_distance); return Self{ .eye = lookfrom, .lower_left_corner = lower_left_corner, .horizontal = horizontal, .vertical = vertical, .u = u, .v = v, .lens_radius = lens_radius }; } pub fn makeRay(self: *const Self, r: *rand.Random, u: f32, v: f32) Ray { const rd = Vec3f.randomInUnitDisk(r).mul(self.lens_radius); const offset = self.u.mul(rd.x).add(self.v.mul(rd.y)); const lens_pos = self.eye.add(offset); return Ray.new(lens_pos, self.lower_left_corner.add(self.horizontal.mul(u)).add(self.vertical.mul(v)).sub(lens_pos).makeUnitVector()); } };
0
repos/weekend-raytracer-zig
repos/weekend-raytracer-zig/src/main.zig
const Camera = @import("camera.zig").Camera; const hitable = @import("hitable.zig"); const mat = @import("material.zig"); const Material = mat.Material; const std = @import("std"); const ArrayList = std.ArrayList; const rand = std.rand; const Ray = @import("ray.zig").Ray; const Vec3f = @import("vector.zig").Vec3f; const Sphere = hitable.Sphere; const World = hitable.World; const c = @cImport({ @cInclude("SDL.h"); }); // See https://github.com/zig-lang/zig/issues/565 // SDL_video.h:#define SDL_WINDOWPOS_UNDEFINED SDL_WINDOWPOS_UNDEFINED_DISPLAY(0) // SDL_video.h:#define SDL_WINDOWPOS_UNDEFINED_DISPLAY(X) (SDL_WINDOWPOS_UNDEFINED_MASK|(X)) // SDL_video.h:#define SDL_WINDOWPOS_UNDEFINED_MASK 0x1FFF0000u const SDL_WINDOWPOS_UNDEFINED = @bitCast(c_int, c.SDL_WINDOWPOS_UNDEFINED_MASK); const window_width: c_int = 640; const window_height: c_int = 320; const num_threads: i32 = 16; const num_samples: i32 = 256; const max_depth: i32 = 16; // For some reason, this isn't parsed automatically. According to SDL docs, the // surface pointer returned is optional! 
// Declared manually so the call site gets an unwrappable optional pointer.
extern fn SDL_GetWindowSurface(window: *c.SDL_Window) ?*c.SDL_Surface;

/// Write one 32-bit pixel into a locked SDL surface.
/// Assumes 4 bytes per pixel -- TODO confirm against the surface's format.
fn setPixel(surf: *c.SDL_Surface, x: c_int, y: c_int, pixel: u32) void {
    const target_pixel = @ptrToInt(surf.pixels) +
        @intCast(usize, y) * @intCast(usize, surf.pitch) +
        @intCast(usize, x) * 4;
    @intToPtr(*u32, target_pixel).* = pixel;
}

/// Background gradient: white at the horizon blending to light blue overhead.
fn skyColor(r: Ray) Vec3f {
    const unit_direction = r.direction.makeUnitVector();
    const t = 0.5 * (unit_direction.y + 1.0);
    return Vec3f.new(1.0, 1.0, 1.0).mul(1.0 - t).add(Vec3f.new(0.5, 0.7, 1.0).mul(t));
}

/// Debug shading: map the (unit) surface normal from [-1,1] into RGB [0,1].
fn colorNormal(r: Ray, w: *const World) Vec3f {
    const maybe_hit = w.hit(r, 0.001, 10000.0);
    if (maybe_hit) |hit| {
        const n = hit.n.makeUnitVector();
        return n.add(Vec3f.one()).mul(0.5);
    } else {
        return skyColor(r);
    }
}

/// Debug shading: flat albedo of the hit material (dielectrics render white).
fn colorAlbedo(r: Ray, w: *const World) Vec3f {
    const maybe_hit = w.hit(r, 0.001, 10000.0);
    if (maybe_hit) |hit| {
        return switch (hit.material) {
            Material.Lambertian => |l| l.albedo,
            Material.Metal => |m| m.albedo,
            Material.Dielectric => |l| Vec3f.one(),
        };
    } else {
        return skyColor(r);
    }
}

/// Follow scattered rays recursively and report how deep the bounce chain got.
fn colorDepthHelper(r: Ray, w: *const World, random: *rand.Random, depth: i32) i32 {
    const maybe_hit = w.hit(r, 0.001, 10000.0);
    if (maybe_hit) |hit| {
        if (depth < max_depth) {
            const scatter = switch (hit.material) {
                Material.Lambertian => |l| l.scatter(hit, random),
                Material.Metal => |m| m.scatter(r, hit, random),
                Material.Dielectric => |d| d.scatter(r, hit, random),
            };
            return colorDepthHelper(scatter.ray, w, random, depth + 1);
        } else {
            return depth; // reached max depth
        }
    } else {
        return depth; // hit the sky
    }
}

/// Debug shading: bounce depth visualized in the red channel.
fn colorDepth(r: Ray, w: *const World, random: *rand.Random) Vec3f {
    const depth = colorDepthHelper(r, w, random, 0);
    return Vec3f.new(@intToFloat(f32, depth) / @intToFloat(f32, max_depth), 0.0, 0.0);
}

/// Debug shading: direction of the first scattered ray mapped into RGB.
fn colorScattering(r: Ray, w: *const World, random: *rand.Random) Vec3f {
    const maybe_hit = w.hit(r, 0.001, 10000.0);
    if (maybe_hit) |hit| {
        const scatter = switch (hit.material) {
            Material.Lambertian => |l| l.scatter(hit, random),
            Material.Metal => |m| m.scatter(r, hit, random),
            Material.Dielectric => |d| d.scatter(r, hit, random),
        };
        const dir = scatter.ray.direction.makeUnitVector();
        return dir.add(Vec3f.one()).mul(0.5);
    } else {
        return skyColor(r);
    }
}

/// Full path tracing: recursively attenuate along scattered rays until the
/// ray escapes to the sky or max_depth bounces are reached (then black).
fn color(r: Ray, world: *const World, random: *rand.Random, depth: i32) Vec3f {
    const maybe_hit = world.hit(r, 0.001, 10000.0);
    if (maybe_hit) |hit| {
        if (depth < max_depth) {
            const scatter = switch (hit.material) {
                Material.Lambertian => |l| l.scatter(hit, random),
                Material.Metal => |m| m.scatter(r, hit, random),
                Material.Dielectric => |d| d.scatter(r, hit, random),
            };
            return color(scatter.ray, world, random, depth + 1).elementwiseMul(scatter.attenuation);
        } else {
            return Vec3f.zero();
        }
    } else {
        return skyColor(r);
    }
}

/// Pack 8-bit channel values into a BGRA word with full alpha.
fn toBgra(r: u32, g: u32, b: u32) u32 {
    return 255 << 24 | r << 16 | g << 8 | b;
}

/// Per-thread render state; each thread owns a disjoint chunk of pixels and
/// its own PRNG, so no synchronization is needed beyond the join.
const ThreadContext = struct {
    thread_index: i32,
    num_pixels: i32,
    chunk_size: i32,
    rng: rand.DefaultPrng,
    surface: *c.SDL_Surface,
    world: *const World,
    camera: *const Camera,
};

/// Render this thread's contiguous pixel range [start, end) with
/// num_samples jittered samples per pixel, writing directly into the surface.
fn renderFn(context: *ThreadContext) void {
    const start_index = context.thread_index * context.chunk_size;
    const end_index = if (start_index + context.chunk_size <= context.num_pixels)
        start_index + context.chunk_size
    else
        context.num_pixels;

    var idx: i32 = start_index;
    while (idx < end_index) : (idx += 1) {
        const w = @mod(idx, window_width);
        const h = @divTrunc(idx, window_width);

        var sample: i32 = 0;
        var color_accum = Vec3f.zero();
        while (sample < num_samples) : (sample += 1) {
            const v = (@intToFloat(f32, h) + context.rng.random.float(f32)) / @intToFloat(f32, window_height);
            const u = (@intToFloat(f32, w) + context.rng.random.float(f32)) / @intToFloat(f32, window_width);
            const r = context.camera.makeRay(&context.rng.random, u, v);
            const color_sample = color(r, context.world, &context.rng.random, 0);
            // const color_sample = colorScattering(r, context.world, &context.rng.random);
            // const color_sample = colorDepth(r, context.world, &context.rng.random);
            // const color_sample = colorNormal(r, context.world);
            // const color_sample = colorAlbedo(r, context.world);
            color_accum = color_accum.add(color_sample);
        }
        color_accum = color_accum.mul(1.0 / @intToFloat(f32, num_samples));
        // Flip vertically: pixel rows grow downward, world "v" grows upward.
        setPixel(context.surface, w, window_height - h - 1, toBgra(
            @floatToInt(u32, 255.99 * color_accum.x),
            @floatToInt(u32, 255.99 * color_accum.y),
            @floatToInt(u32, 255.99 * color_accum.z),
        ));
    }
}

pub fn main() !void {
    if (c.SDL_Init(c.SDL_INIT_VIDEO) != 0) {
        c.SDL_Log("Unable to initialize SDL: %s", c.SDL_GetError());
        return error.SDLInitializationFailed;
    }
    defer c.SDL_Quit();

    const window = c.SDL_CreateWindow("weekend raytracer", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, window_width, window_height, c.SDL_WINDOW_OPENGL) orelse {
        c.SDL_Log("Unable to create window: %s", c.SDL_GetError());
        return error.SDLInitializationFailed;
    };
    const surface = SDL_GetWindowSurface(window) orelse {
        c.SDL_Log("Unable to get window surface: %s", c.SDL_GetError());
        return error.SDLInitializationFailed;
    };

    // Camera setup.
    const lookfrom = Vec3f.new(16.0, 2.0, 4.0);
    const lookat = Vec3f.new(0.0, 0.0, 0.0);
    const vfov = 15.0;
    const focus_distance = lookfrom.sub(lookat).length();
    const aperture = 0.4;
    // 640 by 320
    const aspect_ratio = @intToFloat(f32, window_width) / @intToFloat(f32, window_height);
    const camera = Camera.new(lookfrom, lookat, Vec3f.new(0.0, 1.0, 0.0), vfov, aspect_ratio, aperture, focus_distance);

    // Fixed scene: ground sphere plus three showcase spheres.
    var world = World.init();
    defer world.deinit();
    try world.spheres.append(Sphere.new(Vec3f.new(0.0, -1000.0, -1.0), 1000.0, Material.lambertian(Vec3f.new(0.5, 0.5, 0.5))));
    try world.spheres.append(Sphere.new(Vec3f.new(0.0, 1.0, 0.0), 1.0, Material.dielectric(1.5)));
    try world.spheres.append(Sphere.new(Vec3f.new(-4.0, 1.0, 0.0), 1.0, Material.lambertian(Vec3f.new(0.4, 0.2, 0.1))));
    try world.spheres.append(Sphere.new(Vec3f.new(4.0, 1.0, 0.0), 1.0, Material.metal(Vec3f.new(0.7, 0.6, 0.5), 0.0)));

    // Scatter small random spheres on a 10x10 jittered grid, skipping a
    // region near sphere_offset so they do not overlap the metal sphere.
    var prng = rand.DefaultPrng.init(0);
    const sphere_offset = Vec3f.new(4.0, 0.2, 0.0);
    var i: i32 = -5;
    while (i < 5) : (i += 1) {
        var j: i32 = -5;
        while (j < 5) : (j += 1) {
            const a = @intToFloat(f32, i);
            const b = @intToFloat(f32, j);
            const center = Vec3f.new(a + 0.9 * prng.random.float(f32), 0.2, b + 0.9 * prng.random.float(f32));
            const choose_mat = prng.random.float(f32);
            if (center.sub(sphere_offset).length() > 0.9) {
                if (choose_mat < 0.8) {
                    // diffuse
                    const random_albedo = Vec3f.new(prng.random.float(f32), prng.random.float(f32), prng.random.float(f32));
                    try world.spheres.append(Sphere.new(center, 0.2, Material.lambertian(random_albedo)));
                } else if (choose_mat < 0.95) {
                    // metal
                    const random_albedo = Vec3f.new(prng.random.float(f32), prng.random.float(f32), prng.random.float(f32));
                    try world.spheres.append(Sphere.new(center, 0.2, Material.metal(random_albedo, 0.5 * prng.random.float(f32))));
                } else {
                    try world.spheres.append(Sphere.new(center, 0.2, Material.dielectric(1.5)));
                }
            }
        }
    }

    {
        _ = c.SDL_LockSurface(surface);

        var tasks = ArrayList(*std.Thread).init(std.testing.allocator);
        defer tasks.deinit();
        var contexts = ArrayList(ThreadContext).init(std.testing.allocator);
        defer contexts.deinit();

        // Ceiling division so the last chunk absorbs the remainder.
        const chunk_size = blk: {
            const num_pixels = window_width * window_height;
            const n = num_pixels / num_threads;
            const rem = num_pixels % num_threads;
            if (rem > 0) {
                break :blk n + 1;
            } else {
                break :blk n;
            }
        };

        {
            // BUG FIX: all contexts are appended BEFORE any thread is
            // spawned. Appending can reallocate the ArrayList buffer, which
            // previously left already-spawned threads holding dangling
            // *ThreadContext pointers.
            var ithread: i32 = 0;
            while (ithread < num_threads) : (ithread += 1) {
                try contexts.append(ThreadContext{
                    .thread_index = ithread,
                    .num_pixels = window_width * window_height,
                    .chunk_size = chunk_size,
                    .rng = rand.DefaultPrng.init(@intCast(u64, ithread)),
                    .surface = surface,
                    .world = &world,
                    .camera = &camera,
                });
            }
            ithread = 0;
            while (ithread < num_threads) : (ithread += 1) {
                const thread = try std.Thread.spawn(&contexts.items[@intCast(usize, ithread)], renderFn);
                try tasks.append(thread);
            }
        }
        for (tasks.items) |task| {
            task.wait();
        }
        c.SDL_UnlockSurface(surface);
    }

    if (c.SDL_UpdateWindowSurface(window) != 0) {
        c.SDL_Log("Error updating window surface: %s", c.SDL_GetError());
        return error.SDLUpdateWindowFailed;
    }

    // Keep the window alive until the user closes it.
    var running = true;
    while (running) {
        var event: c.SDL_Event = undefined;
        while (c.SDL_PollEvent(&event) != 0) {
            switch (event.@"type") {
                c.SDL_QUIT => {
                    running = false;
                },
                else => {},
            }
        }
        c.SDL_Delay(16);
    }
}
0
repos/weekend-raytracer-zig
repos/weekend-raytracer-zig/src/vector.zig
const math = @import("std").math;
const Random = @import("std").rand.Random;

/// Generic 3-component vector used throughout the ray tracer.
pub fn Vector3(comptime T: type) type {
    return packed struct {
        const Self = @This();

        x: T,
        y: T,
        z: T,

        pub fn new(x: T, y: T, z: T) Self {
            return Self{
                .x = x,
                .y = y,
                .z = z,
            };
        }

        pub fn zero() Self {
            return Self{
                .x = 0.0,
                .y = 0.0,
                .z = 0.0,
            };
        }

        pub fn one() Self {
            return Self{
                .x = 1.0,
                .y = 1.0,
                .z = 1.0,
            };
        }

        pub fn add(a: Self, b: Self) Self {
            return Self{
                .x = a.x + b.x,
                .y = a.y + b.y,
                .z = a.z + b.z,
            };
        }

        pub fn sub(a: Self, b: Self) Self {
            return Self{
                .x = a.x - b.x,
                .y = a.y - b.y,
                .z = a.z - b.z,
            };
        }

        /// Scalar multiplication.
        pub fn mul(self: Self, s: T) Self {
            return Self{
                .x = s * self.x,
                .y = s * self.y,
                .z = s * self.z,
            };
        }

        /// Hadamard (component-wise) product.
        pub fn elementwiseMul(lhs: Self, rhs: Self) Self {
            return Self{
                .x = lhs.x * rhs.x,
                .y = lhs.y * rhs.y,
                .z = lhs.z * rhs.z,
            };
        }

        pub fn length(self: Self) T {
            return math.sqrt(self.x * self.x + self.y * self.y + self.z * self.z);
        }

        pub fn lengthSquared(self: Self) T {
            return self.x * self.x + self.y * self.y + self.z * self.z;
        }

        pub fn dot(a: Self, b: Self) T {
            return a.x * b.x + a.y * b.y + a.z * b.z;
        }

        pub fn cross(a: Self, b: Self) Self {
            return Self{
                .x = a.y * b.z - a.z * b.y,
                .y = a.z * b.x - a.x * b.z,
                .z = a.x * b.y - a.y * b.x,
            };
        }

        pub fn makeUnitVector(self: Self) Self {
            const inv_n = 1.0 / self.length();
            return Self{
                .x = inv_n * self.x,
                .y = inv_n * self.y,
                .z = inv_n * self.z,
            };
        }

        /// Rejection sampling: draw candidates from the cube [-1, 1)^3 until
        /// one lands inside the unit sphere.
        /// BUG FIX: the previous version sampled from [0, 1)^3, which only
        /// ever produced directions in the positive octant and biased the
        /// diffuse scattering.
        pub fn randomInUnitSphere(r: *Random) Self {
            return while (true) {
                const p = Vec3f.new(
                    2.0 * r.float(f32) - 1.0,
                    2.0 * r.float(f32) - 1.0,
                    2.0 * r.float(f32) - 1.0,
                );
                if (p.lengthSquared() < 1.0) {
                    break p;
                }
                // A `while` used as an expression needs an `else` branch to
                // give it a value when the loop ends without `break`; with
                // `while (true)` this is unreachable but required by the type
                // checker.
            } else Vec3f.zero();
        }

        /// Rejection sampling of a point in the unit disk (z = 0).
        pub fn randomInUnitDisk(r: *Random) Self {
            return while (true) {
                const p = Vec3f.new(2.0 * r.float(f32) - 1.0, 2.0 * r.float(f32) - 1.0, 0.0);
                if (p.lengthSquared() < 1.0) {
                    break p;
                }
            } else Vec3f.zero();
        }

        /// Mirror reflection about the (unit) normal n: v - 2*(v.n)*n.
        pub fn reflect(self: Self, n: Self) Self {
            return self.sub(n.mul(2.0 * self.dot(n)));
        }
    };
}

pub const Vec3f = Vector3(f32);

const assert = @import("std").debug.assert;
const epsilon: f32 = 0.00001;

test "Vector3.add" {
    const lhs = Vec3f.new(1.0, 2.0, 3.0);
    const rhs = Vec3f.new(2.0, 3.0, 4.0);
    const r = lhs.add(rhs);
    assert(math.fabs(r.x - 3.0) < epsilon);
    assert(math.fabs(r.y - 5.0) < epsilon);
    assert(math.fabs(r.z - 7.0) < epsilon);
}

test "Vector3.sub" {
    const lhs = Vec3f.new(2.0, 3.0, 4.0);
    const rhs = Vec3f.new(2.0, 4.0, 3.0);
    const r = lhs.sub(rhs);
    assert(math.fabs(r.x) < epsilon);
    assert(math.fabs(r.y + 1.0) < epsilon);
    assert(math.fabs(r.z - 1.0) < epsilon);
}

test "Vector3.makeUnitVector" {
    const v = Vec3f.new(1.0, 2.0, 3.0);
    const uv = v.makeUnitVector();
    assert(math.fabs(uv.length() - 1.0) < epsilon);
}

test "Vector3.cross" {
    const lhs = Vec3f.new(1.0, 0.0, 2.0);
    const rhs = Vec3f.new(2.0, 1.0, 2.0);
    const res = lhs.cross(rhs);
    assert(math.fabs(res.x + 2.0) < epsilon);
    assert(math.fabs(res.y - 2.0) < epsilon);
    assert(math.fabs(res.z - 1.0) < epsilon);
}
0
repos/weekend-raytracer-zig
repos/weekend-raytracer-zig/src/hitable.zig
const std = @import("std");
const math = std.math;
const ArrayList = std.ArrayList;
const debug = std.debug;
const mat = @import("material.zig");
const Material = @import("material.zig").Material;
const Ray = @import("ray.zig").Ray;
const Vec3f = @import("vector.zig").Vec3f;

/// Result of a ray/surface intersection: parameter t along the ray, the hit
/// point p, the (unnormalized, outward) surface normal n, and the material.
pub const HitRecord = struct { t: f32, p: Vec3f, n: Vec3f, material: mat.Material };

pub const Sphere = struct {
    center: Vec3f,
    radius: f32,
    material: Material,

    pub fn new(center: Vec3f, radius: f32, material: Material) Sphere {
        return Sphere{
            .center = center,
            .radius = radius,
            .material = material,
        };
    }

    /// Intersect `ray` with the sphere, returning the closest hit whose
    /// parameter lies strictly inside (t_min, t_max), or null.
    pub fn hit(self: Sphere, ray: Ray, t_min: f32, t_max: f32) ?HitRecord {
        // C: circle center
        // r: circle radius
        // O: ray origin
        // D: ray direction
        // (t*D + O - C)^2 = r^2
        // t^2 * D^2 + 2 * t * D * (O - C) + (O - C) * (O - C) = r^2
        const oc = ray.origin.sub(self.center);
        const a = ray.direction.dot(ray.direction);
        const b = oc.dot(ray.direction); // the factor 2.0 was moved out of b
        const c = oc.dot(oc) - self.radius * self.radius;
        const discriminant = b * b - a * c;
        if (discriminant > 0.0) {
            // Compute the square root once instead of once per candidate root.
            const sqrt_d = math.sqrt(discriminant);
            {
                // Nearer root first.
                const t = (-b - sqrt_d) / a;
                if (t < t_max and t > t_min) {
                    const hit_point = ray.pointAtParameter(t);
                    return HitRecord{ .t = t, .p = hit_point, .n = (hit_point.sub(self.center)).mul(1.0 / self.radius), .material = self.material };
                }
            }
            {
                const t = (-b + sqrt_d) / a;
                if (t < t_max and t > t_min) {
                    const hit_point = ray.pointAtParameter(t);
                    return HitRecord{ .t = t, .p = hit_point, .n = (hit_point.sub(self.center)).mul(1.0 / self.radius), .material = self.material };
                }
            }
        }
        return null;
    }
};

pub const World = struct {
    spheres: ArrayList(Sphere),

    pub fn init() World {
        // NOTE(review): uses std.testing.allocator outside of tests -- confirm
        // this is intentional for leak detection during development.
        return World{ .spheres = ArrayList(Sphere).init(std.testing.allocator) };
    }

    pub fn deinit(self: *World) void {
        self.spheres.deinit();
    }

    /// Return the closest hit across all spheres in (t_min, t_max), or null.
    pub fn hit(self: *const World, ray: Ray, t_min: f32, t_max: f32) ?HitRecord {
        var maybe_hit: ?HitRecord = null;
        var closest_so_far = t_max;
        for (self.spheres.items) |sphere| {
            // Shrink the search interval as closer hits are found, so each
            // sphere test can reject early (previously every sphere was
            // intersected against the full [t_min, t_max] range).
            if (sphere.hit(ray, t_min, closest_so_far)) |hit_rec| {
                maybe_hit = hit_rec;
                closest_so_far = hit_rec.t;
            }
        }
        return maybe_hit;
    }
};
0
repos/weekend-raytracer-zig
repos/weekend-raytracer-zig/src/material.zig
const HitRecord = @import("hitable.zig").HitRecord;
const math = @import("std").math;
const Random = @import("std").rand.Random;
const Ray = @import("ray.zig").Ray;
const Vec3f = @import("vector.zig").Vec3f;

/// Result of a material interaction: the color attenuation to apply and the
/// next ray to trace.
pub const Scatter = struct {
    attenuation: Vec3f,
    ray: Ray,

    pub fn new(attenuation: Vec3f, ray: Ray) Scatter {
        return Scatter{
            .attenuation = attenuation,
            .ray = ray,
        };
    }
};

/// Diffuse material: scatters toward a random point in the unit sphere
/// centered on the normal tip.
pub const Lambertian = struct {
    albedo: Vec3f,

    pub fn scatter(self: Lambertian, hit: HitRecord, rand: *Random) Scatter {
        const target = hit.p.add(hit.n.add(Vec3f.randomInUnitSphere(rand)));
        const attenuation = self.albedo;
        const scattered_ray = Ray.new(hit.p, target.sub(hit.p).makeUnitVector());
        return Scatter.new(attenuation, scattered_ray);
    }
};

/// Reflective material; `fuzz` perturbs the mirror direction (0 = perfect).
pub const Metal = struct {
    albedo: Vec3f,
    fuzz: f32,

    pub fn scatter(self: Metal, ray: Ray, hit: HitRecord, rand: *Random) Scatter {
        const reflected = ray.direction.reflect(hit.n.makeUnitVector());
        const attenuation = self.albedo;
        const scattered = Ray.new(hit.p, reflected.add(Vec3f.randomInUnitSphere(rand).mul(self.fuzz)).makeUnitVector());
        return Scatter.new(attenuation, scattered);
    }
};

/// Snell refraction of v about normal n; returns null on total internal
/// reflection.
fn refract(v: Vec3f, n: Vec3f, ni_over_nt: f32) ?Vec3f {
    // ni * sin(i) = nt * sin(t)
    // sint(t) = sin(i) * (ni / nt)
    const uv = v.makeUnitVector();
    const dt = uv.dot(n);
    const discriminant = 1.0 - ni_over_nt * ni_over_nt * (1.0 - dt * dt);
    if (discriminant > 0.0) {
        // ni_over_nt * (uv - dt * n) - (n * sqrt(discriminant))
        return uv.sub(n.mul(dt)).mul(ni_over_nt).sub(n.mul(math.sqrt(discriminant)));
    }
    return null;
}

/// Schlick's polynomial approximation of the Fresnel reflectance.
fn schlick(cosine: f32, refraction_index: f32) f32 {
    var r0 = (1.0 - refraction_index) / (1.0 + refraction_index);
    r0 = r0 * r0;
    return r0 + (1.0 - r0) * math.pow(f32, (1.0 - cosine), 5.0);
}

/// Glass-like material: probabilistically reflects or refracts.
pub const Dielectric = struct {
    refraction_index: f32,

    pub fn scatter(self: Dielectric, ray: Ray, hit: HitRecord, rand: *Random) Scatter {
        // If the ray direction and hit normal are in the same half-sphere
        // the ray is exiting the medium, so flip the normal and index ratio.
        var outward_normal: Vec3f = undefined;
        var ni_over_nt: f32 = undefined;
        var cosine: f32 = undefined;
        if (ray.direction.dot(hit.n) > 0.0) {
            outward_normal = Vec3f.new(-hit.n.x, -hit.n.y, -hit.n.z);
            ni_over_nt = self.refraction_index;
            cosine = self.refraction_index * ray.direction.dot(hit.n) / ray.direction.length();
        } else {
            outward_normal = hit.n;
            ni_over_nt = 1.0 / self.refraction_index;
            cosine = -ray.direction.dot(hit.n) / ray.direction.length();
        }
        if (refract(ray.direction, outward_normal, ni_over_nt)) |refracted_dir| {
            const reflection_prob = schlick(cosine, self.refraction_index);
            // NOTE(review): the reflection branches mirror about hit.n rather
            // than the outward (flipped) normal -- confirm this is intended.
            return if (rand.float(f32) < reflection_prob)
                Scatter.new(Vec3f.one(), Ray.new(hit.p, ray.direction.reflect(hit.n).makeUnitVector()))
            else
                Scatter.new(Vec3f.one(), Ray.new(hit.p, refracted_dir.makeUnitVector()));
        } else {
            return Scatter.new(Vec3f.one(), Ray.new(hit.p, ray.direction.reflect(hit.n).makeUnitVector()));
        }
    }
};

/// Tagged union over the three material kinds, with factory helpers.
pub const Material = union(enum) {
    Lambertian: Lambertian,
    Metal: Metal,
    Dielectric: Dielectric,

    pub fn lambertian(albedo: Vec3f) Material {
        return Material{ .Lambertian = Lambertian{ .albedo = albedo } };
    }

    pub fn metal(albedo: Vec3f, fuzz: f32) Material {
        return Material{ .Metal = Metal{ .albedo = albedo, .fuzz = fuzz } };
    }

    pub fn dielectric(refraction_index: f32) Material {
        return Material{ .Dielectric = Dielectric{ .refraction_index = refraction_index } };
    }
};

const std = @import("std");
const assert = std.debug.assert;

test "complex union" {
    // BUG FIX: Lambertian's field is `albedo`; the test previously used a
    // nonexistent `attenuation` field and did not compile.
    const complex_union = Material{ .Lambertian = Lambertian{ .albedo = Vec3f.new(1.0, 0.0, 0.0) } };
    assert(complex_union.Lambertian.albedo.x == 1.0);
}

test "switch expression" {
    const complex_union = Material{ .Lambertian = Lambertian{ .albedo = Vec3f.new(1.0, 0.0, 0.0) } };
    assert(complex_union.Lambertian.albedo.x == 1.0);
    // BUG FIX: the switch must be exhaustive; the Dielectric arm was missing.
    const val = switch (complex_union) {
        Material.Lambertian => |l| l.albedo.x,
        Material.Metal => |m| m.albedo.x,
        Material.Dielectric => |d| d.refraction_index,
    };
    assert(val == 1.0);
}
0
repos/weekend-raytracer-zig
repos/weekend-raytracer-zig/src/ray.zig
const Vec3f = @import("vector.zig").Vec3f; pub const Ray = struct { origin: Vec3f, direction: Vec3f, pub fn new(origin: Vec3f, direction: Vec3f) Ray { return Ray{ .origin = origin, .direction = direction, }; } pub fn pointAtParameter(self: Ray, t: f32) Vec3f { return self.origin.add(self.direction.mul(t)); } }; const assert = @import("std").debug.assert; const math = @import("std").math; const epsilon: f32 = 0.00001; test "Ray.pointAtParameter" { const r = Ray.new(Vec3f.zero(), Vec3f.one()); const p = r.pointAtParameter(1.0); assert(math.fabs(p.x - 1.0) < epsilon); assert(math.fabs(p.y - 1.0) < epsilon); assert(math.fabs(p.z - 1.0) < epsilon); }
0
repos/advent-of-code
repos/advent-of-code/2023/build.zig.zon
.{ .name = "2023", .version = "0.0.0", .dependencies = .{}, .paths = .{ "", }, }
0
repos/advent-of-code
repos/advent-of-code/2023/build.zig
const std = @import("std"); pub fn build(b: *std.Build) void { const target = b.standardTargetOptions(.{}); const optimize = b.standardOptimizeOption(.{}); for (0..4) |day| { const name = b.fmt("day{d:0>2}", .{day + 1}); const exe = b.addExecutable(.{ .name = name, .root_source_file = .{ .path = b.fmt("src/{s}.zig", .{name}) }, .target = target, .optimize = optimize, }); b.installArtifact(exe); const run_cmd = b.addRunArtifact(exe); run_cmd.step.dependOn(b.getInstallStep()); if (b.args) |args| { run_cmd.addArgs(args); } const run_step = b.step(b.fmt("{s}", .{name}), b.fmt("Run {s}", .{name})); run_step.dependOn(&run_cmd.step); } }
0
repos/advent-of-code/2023
repos/advent-of-code/2023/src/day03.zig
const std = @import("std");
const print = std.debug.print;
const ArrayList = std.ArrayList;
const Allocator = std.mem.Allocator;
const isDigit = std.ascii.isDigit;

/// Expand left/right from `idx` over consecutive digits and parse the whole
/// number. If `line[idx]` is not a digit itself, only digits strictly to the
/// left are taken, so an empty span yields a parse error.
fn findNum(line: []const u8, idx: usize) std.fmt.ParseIntError!usize {
    var begin = idx;
    while (begin > 0 and isDigit(line[begin - 1])) {
        begin -= 1;
    }
    var end = if (isDigit(line[idx])) idx + 1 else idx;
    while (end < line.len and isDigit(line[end])) {
        end += 1;
    }
    return std.fmt.parseUnsigned(usize, line[begin..end], 10);
}

pub fn solve(allocator: std.mem.Allocator, input_path: []const u8) !void {
    const input_file = try std.fs.cwd().openFile(input_path, .{ .mode = .read_only });
    defer input_file.close();
    var buffered = std.io.bufferedReader(input_file.reader());
    var reader = buffered.reader();

    // Read the whole grid up front; part 1/2 need random access to neighbors.
    var lines_array = ArrayList([]const u8).init(allocator);
    defer lines_array.deinit();
    defer {
        for (lines_array.items) |line| {
            allocator.free(line);
        }
    }
    var temp = std.ArrayList(u8).init(allocator);
    defer temp.deinit();
    while (true) {
        defer temp.clearRetainingCapacity();
        reader.streamUntilDelimiter(temp.writer(), '\n', null) catch |err| switch (err) {
            error.EndOfStream => break,
            else => return err,
        };
        try lines_array.append(try temp.toOwnedSlice());
    }

    var part_one: usize = 0;
    var part_two: usize = 0;
    const lines = lines_array.items;
    // NOTE(review): neighbor-row indexing assumes all rows have equal width.
    for (lines, 0..) |line, line_idx| {
        for (line, 0..) |char, char_idx| {
            if (char != '.' and !isDigit(char)) {
                // BUG FIX: the vertical bounds previously compared line_idx
                // against line.len (the row WIDTH); the correct limit is
                // lines.len (the number of rows). It only worked by accident
                // on square grids.
                const has_above = line_idx > 0;
                const has_below = line_idx + 1 < lines.len;
                const has_left = char_idx > 0;
                const has_right = char_idx < line.len - 1;

                // Check for numbers around the symbol.
                const l = has_left and isDigit(line[char_idx - 1]);
                const r = has_right and isDigit(line[char_idx + 1]);
                const t = has_above and isDigit(lines[line_idx - 1][char_idx]);
                const b = has_below and isDigit(lines[line_idx + 1][char_idx]);
                const tl = has_above and has_left and isDigit(lines[line_idx - 1][char_idx - 1]);
                const tr = has_above and has_right and isDigit(lines[line_idx - 1][char_idx + 1]);
                const bl = has_below and has_left and isDigit(lines[line_idx + 1][char_idx - 1]);
                const br = has_below and has_right and isDigit(lines[line_idx + 1][char_idx + 1]);

                var count: usize = 0;
                var ratio: usize = 1;
                // A digit directly above/below shares its number with the
                // diagonal on the same row, hence the `and !tl`-style guards
                // against double counting.
                if (l) {
                    const num = findNum(line, char_idx - 1);
                    part_one += num catch 0;
                    ratio *= num catch 1;
                    count += 1;
                }
                if (tl) {
                    const num = findNum(lines[line_idx - 1], char_idx - 1);
                    part_one += num catch 0;
                    ratio *= num catch 1;
                    count += 1;
                }
                if (t and !tl) {
                    const num = findNum(lines[line_idx - 1], char_idx);
                    part_one += num catch 0;
                    ratio *= num catch 1;
                    count += 1;
                }
                if (tr and !t) {
                    const num = findNum(lines[line_idx - 1], char_idx + 1);
                    part_one += num catch 0;
                    ratio *= num catch 1;
                    count += 1;
                }
                if (r) {
                    const num = findNum(line, char_idx + 1);
                    part_one += num catch 0;
                    ratio *= num catch 1;
                    count += 1;
                }
                if (bl) {
                    const num = findNum(lines[line_idx + 1], char_idx - 1);
                    part_one += num catch 0;
                    ratio *= num catch 1;
                    count += 1;
                }
                if (b and !bl) {
                    const num = findNum(lines[line_idx + 1], char_idx);
                    part_one += num catch 0;
                    ratio *= num catch 1;
                    count += 1;
                }
                if (br and !b) {
                    const num = findNum(lines[line_idx + 1], char_idx + 1);
                    part_one += num catch 0;
                    ratio *= num catch 1;
                    count += 1;
                }
                // A '*' adjacent to exactly two numbers is a gear (part 2).
                if (char == '*' and count == 2) {
                    part_two += ratio;
                }
            }
        }
    }
    print("part_one: {}\n", .{part_one});
    print("part_two: {}\n", .{part_two});
}

pub fn main() !void {
    // Set up the General Purpose allocator, this will track memory leaks, etc.
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    const allocator = gpa.allocator();
    defer _ = gpa.deinit();

    // Parse the command line arguments to get the input file
    const args = try std.process.argsAlloc(allocator);
    defer std.process.argsFree(allocator, args);
    if (args.len < 2) {
        std.debug.print("No input file passed\n", .{});
        return;
    }
    try solve(allocator, args[1]);
}
0
repos/advent-of-code/2023
repos/advent-of-code/2023/src/day01.zig
const std = @import("std");

/// Spelled-out digit names; each enum value is the digit it names.
const Digit = enum(usize) {
    one = 1,
    two,
    three,
    four,
    five,
    six,
    seven,
    eight,
    nine,

    /// The digit whose name is a prefix of `slice`, or null if none matches.
    fn fromSlice(slice: []const u8) ?Digit {
        return for (std.meta.tags(Digit)) |tag| {
            if (std.mem.startsWith(u8, slice, @tagName(tag))) {
                break tag;
            }
        } else null;
    }
};

pub fn solve(allocator: std.mem.Allocator, input_path: []const u8) !void {
    // https://www.openmsevenymind.net/Performance-of-reading-a-file-line-by-line-in-Zig/
    const input_file = try std.fs.cwd().openFile(input_path, .{ .mode = .read_only });
    defer input_file.close();
    var buffered = std.io.bufferedReader(input_file.reader());
    var reader = buffered.reader();

    var line = std.ArrayList(u8).init(allocator);
    defer line.deinit();

    var total: usize = 0;
    while (true) {
        defer line.clearRetainingCapacity();
        reader.streamUntilDelimiter(line.writer(), '\n', null) catch |err| switch (err) {
            error.EndOfStream => break,
            else => return err,
        };

        var seen_first: bool = false;
        var last_digit: usize = 0;
        var i: usize = 0;
        while (i < line.items.len) : (i += 1) {
            const ch = line.items[i];
            if (std.ascii.isDigit(ch)) {
                // Part 1: literal digit characters.
                last_digit = try std.fmt.charToDigit(ch, 10);
                if (!seen_first) {
                    seen_first = true;
                    total += 10 * last_digit;
                }
            } else if (Digit.fromSlice(line.items[i..])) |digit| {
                // Part 2: spelled-out digit names.
                last_digit = @intFromEnum(digit);
                if (!seen_first) {
                    seen_first = true;
                    total += 10 * last_digit;
                }
                // We need to decrement index by two:
                // 1 is added by the while loop
                // 1 is the maximum overlap between number names: sevenine, eighthree
                // This is completely optional, but allows us to skip parts of the line.
                i += @tagName(digit).len - 2;
            }
        }
        total += last_digit;
    }
    std.debug.print("total: {}\n", .{total});
}

pub fn main() !void {
    // Set up the General Purpose allocator, this will track memory leaks, etc.
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    const allocator = gpa.allocator();
    defer _ = gpa.deinit();

    // Parse the command line arguments to get the input file
    const args = try std.process.argsAlloc(allocator);
    defer std.process.argsFree(allocator, args);
    if (args.len < 2) {
        std.debug.print("No input file passed\n", .{});
        return;
    }
    try solve(allocator, args[1]);
}
0
repos/advent-of-code/2023
repos/advent-of-code/2023/src/day00.zig
const std = @import("std"); const print = std.debug.print; const ArrayList = std.ArrayList; const Allocator = std.mem.Allocator; pub fn solve(allocator: Allocator, input_path: []const u8) !void { // https://www.openmsevenymind.net/Performance-of-reading-a-file-line-by-line-in-Zig/ const input_file = try std.fs.cwd().openFile(input_path, .{ .mode = .read_only }); defer input_file.close(); var buffered = std.io.bufferedReader(input_file.reader()); var reader = buffered.reader(); var line = ArrayList(u8).init(allocator); defer line.deinit(); while (true) { defer line.clearRetainingCapacity(); reader.streamUntilDelimiter(line.writer(), '\n', null) catch |err| switch (err) { error.EndOfStream => break, else => return err, }; } } pub fn main() !void { // Set up the General Purpose allocator, this will track memory leaks, etc. var gpa = std.heap.GeneralPurposeAllocator(.{}){}; const allocator = gpa.allocator(); defer _ = gpa.deinit(); // Parse the command line arguments to get the input file const args = try std.process.argsAlloc(allocator); defer std.process.argsFree(allocator, args); if (args.len < 2) { std.debug.print("No input file passed\n", .{}); return; } try solve(allocator, args[1]); }
0
repos/advent-of-code/2023
repos/advent-of-code/2023/src/day04.zig
const std = @import("std");
const print = std.debug.print;
const ArrayList = std.ArrayList;
const Allocator = std.mem.Allocator;
const Set = std.AutoHashMap(usize, void);

/// One scratchcard: how many of its numbers won, and how many copies we hold.
const Scratchcard = struct {
    winners: usize,
    amount: usize = 1,
};

pub fn solve(allocator: Allocator, input_path: []const u8) !void {
    // https://www.openmsevenymind.net/Performance-of-reading-a-file-line-by-line-in-Zig/
    const input_file = try std.fs.cwd().openFile(input_path, .{ .mode = .read_only });
    defer input_file.close();
    var buffered = std.io.bufferedReader(input_file.reader());
    var reader = buffered.reader();

    var line = ArrayList(u8).init(allocator);
    defer line.deinit();

    var part_one: usize = 0;
    var scratchcards = ArrayList(Scratchcard).init(allocator);
    defer scratchcards.deinit();

    while (true) {
        defer line.clearRetainingCapacity();
        reader.streamUntilDelimiter(line.writer(), '\n', null) catch |err| switch (err) {
            error.EndOfStream => break,
            else => return err,
        };

        // Everything before the ':' is the card label; skip it.
        const colon_pos = std.mem.indexOfScalar(u8, line.items, ':') orelse line.items.len;
        var halves = std.mem.split(u8, line.items[colon_pos + 1 ..], "|");

        // Left half: the winning numbers, collected into a set.
        var winning_numbers = blk: {
            var set = Set.init(allocator);
            var nums = std.mem.tokenize(u8, halves.next().?, " ");
            while (nums.next()) |num| {
                try set.put(try std.fmt.parseInt(usize, num, 10), {});
            }
            break :blk set;
        };
        defer winning_numbers.deinit();

        // Right half: count how many of our numbers are winners.
        // u6 because the count is used as a shift amount below.
        var match_count: u6 = 0;
        var owned_numbers = std.mem.tokenize(u8, halves.next().?, " ");
        while (owned_numbers.next()) |num_str| {
            const num = try std.fmt.parseInt(usize, num_str, 10);
            if (winning_numbers.contains(num)) {
                match_count += 1;
            }
        }
        try scratchcards.append(.{ .winners = match_count });

        // Part 1 scoring: first match is worth 1, each further match doubles.
        switch (match_count) {
            0 => continue,
            1 => part_one += 1,
            else => part_one += @as(usize, 1) << (match_count - 1),
        }
    }

    // Part 2: each card's copies spawn copies of the following `winners` cards.
    var part_two: usize = 0;
    for (scratchcards.items, 0..) |scratchcard, current_id| {
        for (1..scratchcard.winners + 1) |i| {
            scratchcards.items[current_id + i].amount += scratchcard.amount;
        }
        part_two += scratchcard.amount;
    }

    print("part_one: {}\n", .{part_one});
    print("part_two: {}\n", .{part_two});
}

pub fn main() !void {
    // Leak-checking general purpose allocator.
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    const allocator = gpa.allocator();
    defer _ = gpa.deinit();

    // The input file path is the first command line argument.
    const args = try std.process.argsAlloc(allocator);
    defer std.process.argsFree(allocator, args);
    if (args.len < 2) {
        std.debug.print("No input file passed\n", .{});
        return;
    }
    try solve(allocator, args[1]);
}
0
repos/advent-of-code/2023
repos/advent-of-code/2023/src/day02.zig
const std = @import("std");
const print = std.debug.print;
const ArrayList = std.ArrayList;
const Allocator = std.mem.Allocator;

const Color = enum {
    red,
    green,
    blue,
};

/// One "N color" reveal within a game.
const Subset = struct {
    color: Color,
    amount: usize,
};

// IDIOM FIX: this was `fn Game() type` with no comptime parameters -- a
// zero-argument generic that always returns the same type. A plain struct
// declaration is equivalent and simpler; Game is private to this file.
const Game = struct {
    const Self = @This();

    id: usize,
    subsets: ArrayList(Subset),

    pub fn init(allocator: Allocator) Self {
        return Self{ .id = 0, .subsets = ArrayList(Subset).init(allocator) };
    }

    pub fn deinit(self: *Self) void {
        self.subsets.deinit();
    }

    /// Part 1: true when no reveal exceeds the allowed per-color amounts.
    fn check(self: Self, subsets: []const Subset) bool {
        for (subsets) |subset| {
            for (self.subsets.items) |candidate| {
                if (subset.color == candidate.color) {
                    if (candidate.amount > subset.amount) {
                        return false;
                    }
                }
            }
        }
        return true;
    }

    /// Part 2: product of the per-color maxima over all reveals.
    fn power(self: Self) usize {
        var max_red: usize = 0;
        var max_green: usize = 0;
        var max_blue: usize = 0;
        for (self.subsets.items) |item| {
            switch (item.color) {
                .red => max_red = @max(max_red, item.amount),
                .green => max_green = @max(max_green, item.amount),
                .blue => max_blue = @max(max_blue, item.amount),
            }
        }
        return max_red * max_green * max_blue;
    }

    /// Parse "Game N" into self.id.
    fn parseId(self: *Self, string: []const u8) !void {
        var parts = std.mem.split(u8, string, " ");
        _ = parts.first(); // skip the literal "Game"
        self.id = try std.fmt.parseUnsigned(usize, parts.next().?, 10);
    }

    /// Parse "N color, N color; N color, ..." into self.subsets.
    fn parseSubsets(self: *Self, string: []const u8) !void {
        var subgames = std.mem.tokenize(u8, string, ";");
        while (subgames.next()) |subgame| {
            var pairs = std.mem.tokenize(u8, subgame, ",");
            while (pairs.next()) |pair| {
                var parts = std.mem.tokenize(u8, pair, " ");
                try self.subsets.append(.{
                    .amount = try std.fmt.parseUnsigned(usize, parts.next().?, 10),
                    .color = std.meta.stringToEnum(Color, parts.next().?).?,
                });
            }
        }
    }
};

pub fn solve(allocator: std.mem.Allocator, input_path: []const u8) !void {
    // https://www.openmsevenymind.net/Performance-of-reading-a-file-line-by-line-in-Zig/
    const input_file = try std.fs.cwd().openFile(input_path, .{ .mode = .read_only });
    defer input_file.close();
    var buffered = std.io.bufferedReader(input_file.reader());
    var reader = buffered.reader();

    var line = std.ArrayList(u8).init(allocator);
    defer line.deinit();

    var part_one: usize = 0;
    var part_two: usize = 0;
    while (true) {
        defer line.clearRetainingCapacity();
        reader.streamUntilDelimiter(line.writer(), '\n', null) catch |err| switch (err) {
            error.EndOfStream => break,
            else => return err,
        };

        var parts = std.mem.split(u8, line.items, ":");
        var game = Game.init(allocator);
        defer game.deinit();
        try game.parseId(parts.first());
        try game.parseSubsets(parts.next().?);

        // Part 1 bag contents from the puzzle statement.
        const contents: []const Subset = &.{
            .{ .amount = 12, .color = .red },
            .{ .amount = 13, .color = .green },
            .{ .amount = 14, .color = .blue },
        };
        part_one += if (game.check(contents)) game.id else 0;
        part_two += game.power();
    }
    print("part_one: {}\n", .{part_one});
    print("part_two: {}\n", .{part_two});
}

pub fn main() !void {
    // Set up the General Purpose allocator, this will track memory leaks, etc.
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    const allocator = gpa.allocator();
    defer _ = gpa.deinit();

    // Parse the command line arguments to get the input file
    const args = try std.process.argsAlloc(allocator);
    defer std.process.argsFree(allocator, args);
    if (args.len < 2) {
        std.debug.print("No input file passed\n", .{});
        return;
    }
    try solve(allocator, args[1]);
}
0
repos
repos/http2.0/old.build.zig
// NOTE(review): this is "old.build.zig" — an earlier build script that built
// a static library named "hpack" from a root-level main.zig. The current
// build.zig uses the src/ layout instead; kept here for reference only.
const std = @import("std");

// Although this function looks imperative, note that its job is to
// declaratively construct a build graph that will be executed by an external
// runner.
pub fn build(b: *std.Build) void {
    // Standard target options allows the person running `zig build` to choose
    // what target to build for. Here we do not override the defaults, which
    // means any target is allowed, and the default is native. Other options
    // for restricting supported target set are available.
    const target = b.standardTargetOptions(.{});

    // Standard optimization options allow the person running `zig build` to select
    // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall. Here we do not
    // set a preferred release mode, allowing the user to decide how to optimize.
    const optimize = b.standardOptimizeOption(.{});

    const lib = b.addStaticLibrary(.{
        .name = "hpack",
        // In this case the main source file is merely a path, however, in more
        // complicated build scripts, this could be a generated file.
        .root_source_file = b.path("main.zig"),
        .target = target,
        .optimize = optimize,
    });

    // This declares intent for the library to be installed into the standard
    // location when the user invokes the "install" step (the default step when
    // running `zig build`).
    b.installArtifact(lib);

    // Creates a step for unit testing. This only builds the test executable
    // but does not run it.
    const lib_unit_tests = b.addTest(.{
        .root_source_file = b.path("main.zig"),
        .target = target,
        .optimize = optimize,
    });

    const run_lib_unit_tests = b.addRunArtifact(lib_unit_tests);

    // Similar to creating the run step earlier, this exposes a `test` step to
    // the `zig build --help` menu, providing a way for the user to request
    // running the unit tests.
    const test_step = b.step("test", "Run unit tests");
    test_step.dependOn(&run_lib_unit_tests.step);
}
0
repos
repos/http2.0/main.zig
const std = @import("std");
const hpack = @import("../main.zig");

/// The seven HTTP/2 stream lifecycle states (RFC 7540 §5.1).
pub const StreamState = enum {
    idle,
    reservedlocal,
    reservedremote,
    open,
    halfclosedlocal,
    halfclosedremote,
    closed,
};

/// Events that move a stream between states: sending/receiving
/// PUSH_PROMISE, HEADERS, END_STREAM ("eos") and RST_STREAM ("rst").
pub const StreamTransition = enum { recvpushpromise, sendpushpromise, recvheaders, sendheaders, recveos, sendeos, sendrst, recvrst };

pub const Stream = struct {
    state: StreamState = .idle,
    id: u32,

    /// Apply transition `st` to this stream, panicking on any event that
    /// is illegal in the current state. The switch mirrors the RFC 7540
    /// §5.1 state diagram (exhaustive over both state and event).
    pub fn transition(self: *Stream, st: StreamTransition) void {
        self.state = switch (self.state) {
            // A closed stream accepts no further events at all.
            .closed => switch (st) {
                .sendpushpromise, .recvpushpromise, .sendheaders, .sendeos, .recveos, .recvheaders, .recvrst, .sendrst => @panic("invalid transition"),
            },
            .halfclosedlocal => switch (st) {
                .sendpushpromise, .recvpushpromise, .sendheaders, .sendeos, .recvheaders => @panic("invalid transition"),
                .recveos, .recvrst, .sendrst => .closed,
            },
            .halfclosedremote => switch (st) {
                .sendpushpromise, .recvpushpromise, .sendheaders, .recveos, .recvheaders => @panic("invalid transition"),
                .sendeos, .recvrst, .sendrst => .closed,
            },
            .idle => switch (st) {
                .sendpushpromise => .reservedlocal,
                .recvpushpromise => .reservedremote,
                .sendheaders, .recvheaders => .open,
                .sendeos, .recveos, .recvrst, .sendrst => @panic("invalid transition"),
            },
            .open => switch (st) {
                .sendpushpromise, .recvpushpromise, .sendheaders, .recvheaders => @panic("invalid transition"),
                .recvrst, .sendrst => .closed,
                .sendeos => .halfclosedlocal,
                .recveos => .halfclosedremote,
            },
            .reservedlocal => switch (st) {
                .sendpushpromise, .recvpushpromise, .sendeos, .recveos, .recvheaders => @panic("invalid transition"),
                .sendheaders => .halfclosedremote,
                .recvrst, .sendrst => .closed,
            },
            .reservedremote => switch (st) {
                .sendpushpromise, .recvpushpromise, .sendheaders, .recveos, .sendeos => @panic("invalid transition"),
                .recvheaders => .halfclosedlocal,
                .recvrst, .sendrst => .closed,
            },
        };
    }
};

pub fn main() !void {
    // BUG FIX: `id` has no default value, so the original literal
    // `Stream{ .state = .reservedlocal }` failed to compile
    // ("missing struct field: id"); an id must be supplied explicitly.
    var a = Stream{ .state = .reservedlocal, .id = 0 };
    a.transition(.recvrst);
    std.debug.print("{}\n", .{a.state});
}
0
repos
repos/http2.0/README.md
### Toy HTTP 2 #### server ```zig const std = @import("std"); const hpack = @import("./hpack.zig"); const Connection = @import("connection.zig"); const Stream = @import("stream.zig"); const frames = @import("frames.zig"); const Headers = frames.Headers; const Head = frames.Head; const Code = @import("errors.zig").Code; var gpa = std.heap.GeneralPurposeAllocator(.{}){}; pub fn main() !void { const address = try std.net.Address.parseIp4("127.0.0.1", 3000); var server = try address.listen(.{ .reuse_address = true, .reuse_port = true }); while (true) { var con = try Connection.init(gpa.allocator(), (try server.accept()).stream, .server); defer con.close() catch {}; con.onStream(struct { pub fn f(stream: *Stream) void { stream.onHeaders(struct { pub fn cb(headers: []hpack.HeaderField, strm: *Stream) void { for (headers) |h| { h.display(); } var reply = [_]hpack.HeaderField{ .{ .name = ":status", .value = "200" }, .{ .name = "content-type", .value = "text-plain" } }; strm.sendHeaders(reply[0..], false) catch {}; strm.write("i wonder but kalao", "hello world", true) catch {}; } }.cb); stream.onData(struct { pub fn cb(strm: *Stream, head: frames.Head) void { _ = head; var buf = [_]u8{0} ** 512; var n = strm.read(buf[0..]) catch @panic("err reading"); while (n > 0) { n = strm.read(buf[0..]) catch @panic("err reading"); std.debug.print("Data: {s}, [{}]\n", .{ buf[0..n], n }); } } }.cb); stream.onRst(struct { pub fn cb(strm: *Stream, code: Code) void { _ = strm; std.debug.print("RST: code[{}]\n", .{code}); } }.cb); } }.f); con.onPing(struct { pub fn f(c: *Connection, payload: [8]u8) void { c.pong(payload) catch {}; std.debug.print("PING: {s}", .{payload}); } }.f); con.onSettings(struct { pub fn f(c: *Connection, settings: frames.Settings) void { std.debug.print("SET: {}\n", .{settings}); c.acceptSettings(settings) catch {}; } }.f); con.onGoAway(struct { pub fn f(_: *Connection, payload: frames.GoAway.PayLoad) void { std.debug.print("GOAWAY: {}", .{payload}); } }.f); try 
con.processFrames(); } } ``` #### client ```zig const std = @import("std"); const hpack = @import("./hpack.zig"); const Connection = @import("connection.zig"); const Stream = @import("stream.zig"); const frames = @import("frames.zig"); const Headers = frames.Headers; const Head = frames.Head; const Code = @import("errors.zig").Code; var gpa = std.heap.GeneralPurposeAllocator(.{}){}; pub fn main() !void { const address = try std.net.Address.parseIp4("127.0.0.1", 3000); const st = try std.net.tcpConnectToAddress(address); var con = try Connection.init(gpa.allocator(), st, .client); defer con.close() catch {}; var reply = [_]hpack.HeaderField{ .{ .name = ":method", .value = "GET" }, .{ .name = ":authority", .value = "127.0.0.1:3000" }, .{ .name = ":scheme", .value = "http" }, .{ .name = ":path", .value = "/mogoka/baze" }, }; var stream = try con.request(reply[0..]); stream.onHeaders(struct { pub fn cb(headers: []hpack.HeaderField, _: *Stream) void { for (headers) |h| { h.display(); } } }.cb); const DataProcesser = struct { pub fn cb(strm: *Stream, head: frames.Head) void { _ = head; std.debug.print("DATA...\n", .{}); var buf = [_]u8{0} ** 512; var n = strm.read(buf[0..]) catch |e| @panic(@errorName(e)); std.debug.print("Data: {s}\n", .{buf[0..n]}); while (n > 0) { n = strm.read(buf[0..]) catch @panic("err reading"); } } }; stream.onData(DataProcesser.cb); stream.onRst(struct { pub fn cb(strm: *Stream, code: Code) void { _ = strm; std.debug.print("RST: code[{}]\n", .{code}); } }.cb); con.onPushPromise(struct { pub fn f(strm: *Stream, headers: []hpack.HeaderField) void { //_ = strm; for (headers) |value| value.display(); //strm.terminate(.no_error) catch @panic("err terminating"); strm.onData(DataProcesser.cb); } }.f); con.onPing(struct { pub fn f(c: *Connection, payload: [8]u8) void { c.pong(payload) catch {}; std.debug.print("PING: {s}", .{payload}); } }.f); con.onSettings(struct { pub fn f(c: *Connection, settings: frames.Settings) void { std.debug.print("SET: {}\n", 
.{settings}); c.acceptSettings(settings) catch {}; } }.f); con.onGoAway(struct { pub fn f(_: *Connection, payload: frames.GoAway.PayLoad) void { std.debug.print("GOAWAY: {}", .{payload}); } }.f); try con.processFrames(); } ```
0
repos
repos/http2.0/build.zig.zon
.{ .name = "http2.0", // This is a [Semantic Version](https://semver.org/). // In a future version of Zig it will be used for package deduplication. .version = "0.0.0", // This field is optional. // This is currently advisory only; Zig does not yet do anything // with this value. //.minimum_zig_version = "0.11.0", // This field is optional. // Each dependency must either provide a `url` and `hash`, or a `path`. // `zig build --fetch` can be used to fetch all dependencies of a package, recursively. // Once all dependencies are fetched, `zig build` no longer requires // internet connectivity. .dependencies = .{ // See `zig fetch --save <url>` for a command-line interface for adding dependencies. //.example = .{ // // When updating this field to a new URL, be sure to delete the corresponding // // `hash`, otherwise you are communicating that you expect to find the old hash at // // the new URL. // .url = "https://example.com/foo.tar.gz", // // // This is computed from the file contents of the directory of files that is // // obtained after fetching `url` and applying the inclusion rules given by // // `paths`. // // // // This field is the source of truth; packages do not come from a `url`; they // // come from a `hash`. `url` is just one of many possible mirrors for how to // // obtain a package matching this `hash`. // // // // Uses the [multihash](https://multiformats.io/multihash/) format. // .hash = "...", // // // When this is provided, the package is found in a directory relative to the // // build root. In this case the package's hash is irrelevant and therefore not // // computed. This field and `url` are mutually exclusive. // .path = "foo", // // When this is set to `true`, a package is declared to be lazily // // fetched. This makes the dependency only get fetched if it is // // actually used. // .lazy = false, //}, }, // Specifies the set of files and directories that are included in this package. 
// Only files and directories listed here are included in the `hash` that // is computed for this package. Only files listed here will remain on disk // when using the zig package manager. As a rule of thumb, one should list // files required for compilation plus any license(s). // Paths are relative to the build root. Use the empty string (`""`) to refer to // the build root itself. // A directory listed here means that all files within, recursively, are included. .paths = .{ "build.zig", "build.zig.zon", "src", // For example... //"LICENSE", //"README.md", }, }
0
repos
repos/http2.0/build.zig
const std = @import("std"); // Although this function looks imperative, note that its job is to // declaratively construct a build graph that will be executed by an external // runner. pub fn build(b: *std.Build) void { // Standard target options allows the person running `zig build` to choose // what target to build for. Here we do not override the defaults, which // means any target is allowed, and the default is native. Other options // for restricting supported target set are available. const target = b.standardTargetOptions(.{}); // Standard optimization options allow the person running `zig build` to select // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall. Here we do not // set a preferred release mode, allowing the user to decide how to optimize. const optimize = b.standardOptimizeOption(.{}); const lib = b.addStaticLibrary(.{ .name = "http2.0", // In this case the main source file is merely a path, however, in more // complicated build scripts, this could be a generated file. .root_source_file = b.path("src/root.zig"), .target = target, .optimize = optimize, }); // This declares intent for the library to be installed into the standard // location when the user invokes the "install" step (the default step when // running `zig build`). b.installArtifact(lib); const exe = b.addExecutable(.{ .name = "http2.0", .root_source_file = b.path("src/main.zig"), .target = target, .optimize = optimize, }); // This declares intent for the executable to be installed into the // standard location when the user invokes the "install" step (the default // step when running `zig build`). b.installArtifact(exe); // This *creates* a Run step in the build graph, to be executed when another // step is evaluated that depends on it. The next line below will establish // such a dependency. 
const run_cmd = b.addRunArtifact(exe); // By making the run step depend on the install step, it will be run from the // installation directory rather than directly from within the cache directory. // This is not necessary, however, if the application depends on other installed // files, this ensures they will be present and in the expected location. run_cmd.step.dependOn(b.getInstallStep()); // This allows the user to pass arguments to the application in the build // command itself, like this: `zig build run -- arg1 arg2 etc` if (b.args) |args| { run_cmd.addArgs(args); } // This creates a build step. It will be visible in the `zig build --help` menu, // and can be selected like this: `zig build run` // This will evaluate the `run` step rather than the default, which is "install". const run_step = b.step("run", "Run the app"); run_step.dependOn(&run_cmd.step); // Creates a step for unit testing. This only builds the test executable // but does not run it. const lib_unit_tests = b.addTest(.{ .root_source_file = b.path("src/root.zig"), .target = target, .optimize = optimize, }); const run_lib_unit_tests = b.addRunArtifact(lib_unit_tests); const exe_unit_tests = b.addTest(.{ .root_source_file = b.path("src/main.zig"), .target = target, .optimize = optimize, }); const run_exe_unit_tests = b.addRunArtifact(exe_unit_tests); // Similar to creating the run step earlier, this exposes a `test` step to // the `zig build --help` menu, providing a way for the user to request // running the unit tests. const test_step = b.step("test", "Run unit tests"); test_step.dependOn(&run_lib_unit_tests.step); test_step.dependOn(&run_exe_unit_tests.step); }
0
repos
repos/http2.0/scrap.zig
const std = @import("std"); const hpack = @import("./hpack.zig"); //const Head = @import("frames/frames.zig").Head; //const Pp = @import("frames/pp.zig"); const frames = @import("frames.zig"); const Headers = frames.Headers; const Head = frames.Head; var gpa = std.heap.GeneralPurposeAllocator(.{}){}; pub fn maintt() !void { //var buf = [_]u8{0} ** 24; // const hd = frames.Head{ // .ty = .rst, // .len = 4, // .streamid = 89, // .flags = .{} // }; var buf = [_]u8{0} ** 4096; //var stream = std.io.fixedBufferStream(buf[0..]); var ctx = try hpack.Tables.init(gpa.allocator()); //var p = hpack.Parser.init(&ctx, buf[0..]); var headerheap = [_]u8{0} ** 4096; var streamr = std.io.fixedBufferStream(headerheap[0..]); var pp = frames.PushPromise{ .builder = hpack.Builder.init(&ctx, buf[0..]), .parser = hpack.Parser.init(&ctx, headerheap[0..]) }; var expected = [_]hpack.HeaderField{ .{ .name = ":status", .value = "302" }, .{ .name = "cache-control", .value = "private" }, .{ .name = "date", .value = "Mon, 21 Oct 2013 20:13:21 GMT" }, .{ .name = "location", .value = "https://www.example.com" } }; try pp.write(streamr.writer(), 90, 100, expected[0..]); //frames.PushPromise.write(self: *PushPromise, stream: anytype, id: u31, max_header_list: u24, headers: hpack.HeaderField) } //var gpa = std.heap.GeneralPurposeAllocator(.{}){}; pub fn main() !void { var buf = [_]u8{0} ** 4096; var ctx = try hpack.Tables.init(gpa.allocator()); //var p = hpack.Parser.init(&ctx, buf[0..]); var headerheap = [_]u8{0} ** 4096; var h = Headers{ .builder = hpack.Builder.init(&ctx, buf[0..]), .parser = hpack.Parser.init(&ctx, headerheap[0..]) }; //try h.builder.add(.{.name ="hello", .value = "world"}, false, false); //try h.builder.add(.{.name ="hello", .value = "world"}, false, false); //try h.builder.add(.{.name ="hello", .value = "world"}, false, false); //try h.builder.add(.{.name ="hello", .value = "world"}, false, false); //try h.builder.add(.{.name ="hello", .value = "world"}, false, false); //try 
h.builder.add(.{.name ="hello", .value = "world"}, false, false); //try h.builder.add(.{.name ="hello", .value = "world"}, false, false); //try h.builder.add(.{.name ="hello", .value = "world"}, false, false); var streambuf = [_]u8{0} ** 4096; var stream = std.io.fixedBufferStream(streambuf[0..]); var expected = [_]hpack.HeaderField{ .{ .name = "", .value = "" }, .{ .name = "", .value = "" }, .{ .name = "three", .value = "Mon, 21 Oct 2013 20:13:21 GMT" }, .{ .name = "four", .value = "https://www.example.com" } }; try h.write(stream.writer(), 100, expected[0..], 4, true); //var headerbuf = [_]u8{0}**400; stream.pos = 0; const hh = try Head.read(stream.reader()); //const n = try read(stream.reader(), headerbuf[0..],hh); var headers = [_]hpack.HeaderField{.{}} ** 10; const kuku = try h.readAndParse(stream.reader(), hh, headers[0..]); //parser.parse(headerbuf[0..n], headers[0..]); for (kuku) |k| { std.debug.print("{s}: {s}\n", .{ k.name, k.value }); } std.debug.print("{}\n", .{kuku.len}); }
0
repos/http2.0
repos/http2.0/src/server.zig
const std = @import("std");
const hpack = @import("./hpack.zig");
const Connection = @import("connection.zig");
const Stream = @import("stream.zig");
const frames = @import("frames.zig");
const Headers = frames.Headers;
const Head = frames.Head;
const Code = @import("errors.zig").Code;

var gpa = std.heap.GeneralPurposeAllocator(.{}){};

/// Demo HTTP/2 server: accepts connections on 127.0.0.1:3000 and, for each
/// stream, echoes received headers/data to stderr and replies with a fixed
/// 200 response. Blocks forever in the accept loop.
pub fn main() !void {
    const address = try std.net.Address.parseIp4("127.0.0.1", 3000);
    var server = try address.listen(.{ .reuse_address = true, .reuse_port = true });
    while (true) {
        // One connection is fully processed before the next accept.
        var con = try Connection.init(gpa.allocator(), (try server.accept()).stream, .server);
        defer con.close() catch {};
        con.onStream(struct {
            pub fn f(stream: *Stream) void {
                stream.onHeaders(struct {
                    pub fn cb(headers: []hpack.HeaderField, strm: *Stream) void {
                        for (headers) |h| {
                            h.display();
                        }
                        // BUG FIX: "text-plain" is not a valid MIME type;
                        // the correct media type is "text/plain".
                        var reply = [_]hpack.HeaderField{ .{ .name = ":status", .value = "200" }, .{ .name = "content-type", .value = "text/plain" } };
                        strm.sendHeaders(reply[0..], false) catch {};
                        strm.write("i wonder but kalao", "hello world", true) catch {};
                    }
                }.cb);
                stream.onData(struct {
                    pub fn cb(strm: *Stream, head: frames.Head) void {
                        _ = head;
                        var buf = [_]u8{0} ** 512;
                        // BUG FIX: print each chunk BEFORE the next read
                        // overwrites `buf`. The original re-read first, so the
                        // first chunk was never printed and a bogus empty
                        // "Data: , [0]" line was emitted at end of stream.
                        var n = strm.read(buf[0..]) catch @panic("err reading");
                        while (n > 0) {
                            std.debug.print("Data: {s}, [{}]\n", .{ buf[0..n], n });
                            n = strm.read(buf[0..]) catch @panic("err reading");
                        }
                    }
                }.cb);
                stream.onRst(struct {
                    pub fn cb(strm: *Stream, code: Code) void {
                        _ = strm;
                        std.debug.print("RST: code[{}]\n", .{code});
                    }
                }.cb);
            }
        }.f);
        con.onPing(struct {
            pub fn f(c: *Connection, payload: [8]u8) void {
                // Echo the ping payload back as the mandatory PONG ack.
                c.pong(payload) catch {};
                std.debug.print("PING: {s}", .{payload});
            }
        }.f);
        con.onSettings(struct {
            pub fn f(c: *Connection, settings: frames.Settings) void {
                std.debug.print("SET: {}\n", .{settings});
                c.acceptSettings(settings) catch {};
            }
        }.f);
        con.onGoAway(struct {
            pub fn f(_: *Connection, payload: frames.GoAway.PayLoad) void {
                std.debug.print("GOAWAY: {}", .{payload});
            }
        }.f);
        try con.processFrames();
    }
}
0
repos/http2.0
repos/http2.0/src/hpack.zig
//! Aggregator module for the HPACK (header compression) implementation:
//! re-exports the codec, tables, builder and parser sub-modules so callers
//! only need to `@import("hpack.zig")`.
//pub const hpack = @import("hpack/main.zig");
const std = @import("std");

pub const Codec = @import("hpack/codec.zig");
pub const staticTable = @import("hpack/static_table.zig");
pub const DynamicTable = @import("hpack/dyn_table.zig");
pub const HeaderField = staticTable.HeaderField;
pub const Tables = @import("hpack/tables.zig");
pub const Builder = @import("hpack/builder.zig");
pub const Parser = @import("hpack/parser.zig");
pub const Field = @import("hpack/field.zig");

// Reference the sub-modules' declarations so their `test` blocks are
// compiled and run as part of this module's test step.
test {
    _ = std.testing.refAllDecls(Builder);
    _ = std.testing.refAllDecls(Tables);
    _ = std.testing.refAllDecls(Parser);
    _ = std.testing.refAllDecls(Codec);
}
0
repos/http2.0
repos/http2.0/src/errors.zig
/// HTTP/2 error codes carried in RST_STREAM and GOAWAY frames
/// (RFC 7540 §7, "Error Codes"). Numeric values match the wire format.
pub const Code = enum(u32) {
    no_error = 0,
    protocol_error = 1,
    internal_error = 2,
    flow_control_error = 3,
    settings_timeout = 4,
    stream_closed = 5,
    frame_size_error = 6,
    // NOTE(review): RFC 7540 names 0x7 REFUSED_STREAM, not "reused" —
    // likely a typo; renaming would break callers, so only flagged here.
    reused_stream = 7,
    cancel = 8,
    compression_error = 9,
    connect_error = 10,
    // NOTE(review): RFC 7540 names 0xb ENHANCE_YOUR_CALM — rename candidate.
    enhance_your_cum = 11,
    inadequate_security = 12,
    http_1_1_required = 13,
};
0
repos/http2.0
repos/http2.0/src/stream.zig
const std = @import("std"); const frames = @import("frames.zig"); const Connection = @import("connection.zig"); const errors = @import("./errors.zig"); const hpack = @import("./hpack.zig"); pub const StreamState = enum { idle, reservedlocal, reservedremote, open, halfclosedlocal, halfclosedremote, closed, }; pub const DataState = enum { recv_chunks, idle }; pub const StreamTransition = enum { recvpushpromise, sendpushpromise, recvheaders, sendheaders, recveos, sendeos, sendrst, recvrst }; pub const HeadersCallBack = fn ([]hpack.HeaderField, *Stream) void; pub const DataCallBack = fn (*Stream, frames.Head) void; pub const RstCallBack = fn (*Stream, errors.Code) void; const Stream = @This(); state: StreamState = .idle, id: u31, parent: *Connection, datastate: DataState = .idle, head: frames.Head = .{ .ty = .data, .len = 0, .streamid = 0, .flags = .{} }, paddlen: usize = 0, remaining: usize = 0, peer_window_size: isize = 0, window_size: isize = 0, headerscb: ?*const HeadersCallBack = null, datacb: ?*const DataCallBack = null, rstcb: ?*const RstCallBack = null, buffer: std.ArrayList(u8), pendingbuf: []const u8 = "", pub fn onHeaders(self: *Stream, cb: *const HeadersCallBack) void { self.headerscb = cb; } pub fn onData(self: *Stream, cb: *const DataCallBack) void { self.datacb = cb; } pub fn onRst(self: *Stream, cb: *const RstCallBack) void { self.rstcb = cb; } pub fn transition(self: *Stream, st: StreamTransition) void { self.state = switch (self.state) { .closed => switch (st) { .sendpushpromise, .recvpushpromise, .sendheaders, .sendeos, .recveos, .recvheaders, .recvrst, .sendrst => @panic("invalid transition"), }, .halfclosedlocal => switch (st) { .sendpushpromise, .recvpushpromise, .sendheaders, .sendeos, .recvheaders => @panic("invalid transition"), .recveos, .recvrst, .sendrst => .closed, }, .halfclosedremote => switch (st) { .sendpushpromise, .recvpushpromise, .sendheaders, .recveos, .recvheaders => @panic("invalid transition"), .sendeos, .recvrst, .sendrst => 
.closed, }, .idle => switch (st) { .sendpushpromise => .reservedlocal, .recvpushpromise => .reservedremote, .sendheaders, .recvheaders => .open, .sendeos, .recveos, .recvrst, .sendrst => @panic("invalid transition"), }, .open => switch (st) { .sendpushpromise, .recvpushpromise, .sendheaders, .recvheaders => @panic("invalid transition"), .recvrst, .sendrst => .closed, .sendeos => .halfclosedlocal, .recveos => .halfclosedremote, }, .reservedlocal => switch (st) { .sendpushpromise, .recvpushpromise, .sendeos, .recveos, .recvheaders => @panic("invalid transition"), .sendheaders => .halfclosedremote, .recvrst, .sendrst => .closed, }, .reservedremote => switch (st) { .sendpushpromise, .recvpushpromise, .sendheaders, .recveos, .sendeos => @panic("invalid transition"), .recvheaders => .halfclosedlocal, .recvrst, .sendrst => .closed, }, }; } pub fn init(parent: *Connection, id: u31, allocator: std.mem.Allocator) Stream { return Stream{ .id = id, .parent = parent, .peer_window_size = parent.peer_settings.initial_window_size, .state = .idle, .buffer = std.ArrayList(u8).init(allocator) }; } pub fn terminate(self: *Stream, code: errors.Code) !void { if (self.state != .idle) { self.transition(.sendrst); try frames.Rst.write(self.id, code, self.parent.stream.writer()); } else unreachable; } pub fn sendHeaders(self: *Stream, headers: []hpack.HeaderField, eos: bool) !void { var hproc = frames.Headers{ .builder = &self.parent.builder, .parser = &self.parent.parser }; try hproc.write(self.parent.stream.writer(), @truncate(self.parent.peer_settings.max_header_list), headers[0..], self.id, eos); if (eos) self.transition(.sendeos); } /// send 200 OK pub fn ok(self: *Stream, eos: bool) !void { var reply = [_]hpack.HeaderField{.{ .name = ":status", .value = "200" }}; var hproc = frames.Headers{ .builder = &self.parent.builder, .parser = &self.parent.parser }; try hproc.write(self.parent.stream.writer(), @truncate(self.parent.peer_settings.max_header_list), reply[0..], self.id, eos); if 
(eos) self.transition(.sendeos); } pub fn setReadable(self: *Stream, head: frames.Head) !void { if (!(self.state == .open or self.state == .halfclosedlocal)) @panic("Invalid state to receive data"); self.head = head; self.datastate = .recv_chunks; self.paddlen = if (head.flags.padded) try self.parent.stream.reader().readInt(u8, .big) else 0; self.remaining = head.len - self.paddlen; } pub fn decrementWindows(self: *Stream, dec: isize) void { self.peer_window_size -= dec; self.parent.peer_window_size -= dec; } pub fn read(self: *Stream, buf: []u8) !usize { std.debug.assert(self.datastate == .recv_chunks); if (self.remaining == 0) { if (self.paddlen > 0) { try self.parent.stream.reader().skipBytes(self.paddlen, .{ .buf_size = 256 }); try self.updateWindows(@truncate(self.paddlen + 1)); } if (self.head.flags.ack) { self.transition(.recveos); } self.datastate = .idle; return 0; } const n = try self.parent.stream.readAll(buf[0..@min(buf.len, self.remaining)]); self.remaining -= n; try self.updateWindows(n); return n; } pub fn updateWindows(self: *Stream, n: usize) !void { try self.windowUpdate(@truncate(n)); try self.parent.updateWindow(@truncate(n)); } pub fn windowAvailable(self: *Stream) bool { return self.peer_window_size > 0 and self.parent.peer_window_size > 0; } pub fn write(self: *Stream, buf: []const u8, padding: []const u8, eos: bool) !void { _ = padding; if (!(self.state == .halfclosedremote or self.state == .open)) @panic("Invalid state to send data"); var tosend = buf; while (tosend.len > 0 and self.windowAvailable()) { const offset = @min(tosend.len, @as(usize, @intCast(self.peer_window_size))); try frames.Data.write(self.parent.stream.writer(), self.id, tosend[0..offset], "", if (offset == tosend.len) eos else false); tosend = tosend[offset..]; self.decrementWindows(@intCast(offset)); } if (tosend.len > 0) { try self.buffer.appendSlice(tosend[0..]); self.pendingbuf = self.buffer.items; try self.parent.pending_data.put(self, {}); } } pub fn 
writePending(self: *Stream) !void { if (!(self.state == .halfclosedremote or self.state == .open)) @panic("Invalid state to send data"); while (self.pendingbuf.len > 0 and self.windowAvailable()) { const offset = @min(self.pendingbuf.len, @as(usize, @intCast(self.peer_window_size))); try frames.Data.write(self.parent.stream.writer(), self.id, self.pendingbuf[0..offset], "", if (offset == self.pendingbuf.len) true else false); self.pendingbuf = self.pendingbuf[offset..]; self.decrementWindows(@intCast(offset)); } if (self.pendingbuf.len <= 0) _ = self.parent.pending_data.remove(self); } pub fn bufferEmpty(self: *Stream) bool { return self.pendingbuf.len > 0; } pub fn closing(self: *Stream) bool { return self.state == .halfclosedremote or self.state == .closed; } pub fn opening(self: *Stream) bool { return self.state == .halfclosedlocal or self.state == .open; } pub fn windowUpdate(self: *Stream, increment: u31) !void { std.debug.print("WIndow update: {} -> {}\n", .{ increment, self.peer_window_size }); try frames.WindowUpdate.write(self.parent.stream.writer(), self.id, increment); } pub fn pushRequest(self: *Stream, headers: []hpack.HeaderField) !*Stream { std.debug.assert(self.parent.peer_settings.enable_push == 1); std.debug.assert(self.parent.contype == .server); std.debug.assert(self.state == .open or self.state == .halfclosedremote); std.debug.assert(self.parent.streamoffset < self.parent.peer_settings.max_concurrent_streams); if (self.parent.streamoffset == 0) { self.parent.streamoffset = 2; } else { self.parent.streamoffset += 2; } try self.parent.openStream(self.parent.streamoffset); const s = self.parent.streams.getPtr(self.parent.streamoffset).?; var pproc = frames.PushPromise{ .builder = &self.parent.builder, .parser = &self.parent.parser }; try pproc.write(self.parent.stream.writer(), self.id, s.id, @truncate(self.parent.peer_settings.header_table_size), headers); return s; }
0
repos/http2.0
repos/http2.0/src/client.zig
const std = @import("std");
const hpack = @import("./hpack.zig");
const Connection = @import("connection.zig");
const Stream = @import("stream.zig");
const frames = @import("frames.zig");
const Headers = frames.Headers;
const Head = frames.Head;
const Code = @import("errors.zig").Code;

var gpa = std.heap.GeneralPurposeAllocator(.{}){};

/// Demo HTTP/2 client: connects to 127.0.0.1:3000, sends a GET request for
/// /mogoka/baze, and logs headers, data, pushed streams and connection-level
/// frames to stderr until the connection winds down.
pub fn main() !void {
    const address = try std.net.Address.parseIp4("127.0.0.1", 3000);
    const st = try std.net.tcpConnectToAddress(address);
    var con = try Connection.init(gpa.allocator(), st, .client);
    defer con.close() catch {};
    // Pseudo-headers for the request; order matters per RFC 7540 §8.1.2.1.
    var reply = [_]hpack.HeaderField{
        .{ .name = ":method", .value = "GET" },
        .{ .name = ":authority", .value = "127.0.0.1:3000" },
        .{ .name = ":scheme", .value = "http" },
        .{ .name = ":path", .value = "/mogoka/baze" },
    };
    var stream = try con.request(reply[0..]);
    stream.onHeaders(struct {
        pub fn cb(headers: []hpack.HeaderField, _: *Stream) void {
            for (headers) |h| {
                h.display();
            }
        }
    }.cb);
    // Shared by the request stream and any server-pushed streams.
    const DataProcesser = struct {
        pub fn cb(strm: *Stream, head: frames.Head) void {
            _ = head;
            std.debug.print("DATA...\n", .{});
            var buf = [_]u8{0} ** 512;
            // BUG FIX: the original printed only the first chunk and then
            // silently discarded the rest of the body while draining it.
            // Print every chunk before the next read overwrites `buf`.
            var n = strm.read(buf[0..]) catch |e| @panic(@errorName(e));
            while (n > 0) {
                std.debug.print("Data: {s}\n", .{buf[0..n]});
                n = strm.read(buf[0..]) catch @panic("err reading");
            }
        }
    };
    stream.onData(DataProcesser.cb);
    stream.onRst(struct {
        pub fn cb(strm: *Stream, code: Code) void {
            _ = strm;
            std.debug.print("RST: code[{}]\n", .{code});
        }
    }.cb);
    con.onPushPromise(struct {
        pub fn f(strm: *Stream, headers: []hpack.HeaderField) void {
            //_ = strm;
            for (headers) |value| value.display();
            //strm.terminate(.no_error) catch @panic("err terminating");
            strm.onData(DataProcesser.cb);
        }
    }.f);
    con.onPing(struct {
        pub fn f(c: *Connection, payload: [8]u8) void {
            // Echo the ping payload back as the mandatory PONG ack.
            c.pong(payload) catch {};
            std.debug.print("PING: {s}", .{payload});
        }
    }.f);
    con.onSettings(struct {
        pub fn f(c: *Connection, settings: frames.Settings) void {
            std.debug.print("SET: {}\n", .{settings});
            c.acceptSettings(settings) catch {};
        }
    }.f);
    con.onGoAway(struct {
        pub fn f(_: *Connection, payload: frames.GoAway.PayLoad) void {
            std.debug.print("GOAWAY: {}", .{payload});
        }
    }.f);
    try con.processFrames();
}
0
repos/http2.0
repos/http2.0/src/frames.zig
//! Aggregator module for HTTP/2 frame types: re-exports one sub-module per
//! frame kind so callers only need to `@import("frames.zig")`.
pub const Data = @import("frames/data.zig");
pub const Head = @import("frames/head.zig").Head;
pub const Headers = @import("frames/headers.zig");
pub const Rst = @import("frames/rst.zig").Rst;
pub const Settings = @import("frames/settings.zig").Settings;
pub const PushPromise = @import("frames/pp.zig");
pub const GoAway = @import("frames/go.zig");
pub const Ping = @import("frames/ping.zig");
pub const WindowUpdate = @import("frames/wupdate.zig");
0
repos/http2.0
repos/http2.0/src/connection.zig
const std = @import("std");
const Stream = @import("stream.zig");
const hpack = @import("hpack.zig");
const frames = @import("frames.zig");
const Allocator = std.mem.Allocator;
const errcodes = @import("errors.zig");

// This file is a struct-file: the fields below are the Connection's own fields.
streams: std.AutoHashMap(u31, Stream),
settings: frames.Settings = .{},       // our advertised settings
peer_settings: frames.Settings = .{},  // last settings received from the peer
stream: std.net.Stream,                // underlying TCP stream
allocator: Allocator,
streamoffset: u31 = 0,                 // last locally-initiated stream id (client: odd ids)
parse_buff: [4096]u8 = [_]u8{0} ** 4096,
build_buff: [4096]u8 = [_]u8{0} ** 4096,
recv_buff: [4096]u8 = [_]u8{0} ** 4096,
ctx: hpack.Tables,
builder: hpack.Builder,
parser: hpack.Parser,
contype: Type,
peer_window_size: isize = 0,           // connection-level flow-control credit toward the peer
pending_pongs: std.AutoHashMap([8]u8, void),
pending_data: std.AutoHashMap(*Stream, void),
pending_settings: usize = 0,           // SETTINGS frames we sent that still await an ACK
closing: bool = false,
pingcb: ?*const PingCallBack = null,
streamcb: ?*const StreamCallBack = null,
settingscb: ?*const SettingsCallBack = null,
goawaycb: ?*const GoAwayCallBack = null,
ppcb: ?*const PushPromiseCallBack = null,

pub const StreamCallBack = fn (*Stream) void;
pub const PingCallBack = fn (*Self, payload: [8]u8) void;
pub const SettingsCallBack = fn (*Self, frames.Settings) void;
pub const GoAwayCallBack = fn (*Self, frames.GoAway.PayLoad) void;
pub const PushPromiseCallBack = fn (*Stream, []hpack.HeaderField) void;
pub const Type = enum { server, client };
const Self = @This();

/// Builds a Connection and performs the HTTP/2 connection preface.
/// Server side: expects the client preface then sends our SETTINGS.
/// Client side: sends the preface + SETTINGS, reads the peer SETTINGS,
/// ACKs them, and pre-opens stream 1.
/// NOTE(review): the result is returned by value while `ctx`, `parser` and
/// `builder` hold pointers/slices into this local `self` (parse_buff etc.) —
/// after the copy those point at dead stack memory. Confirm callers keep the
/// returned value pinned, or refactor to init-in-place.
pub fn init(allocator: Allocator, stream: std.net.Stream, contype: Type) !Self {
    var self: Self = undefined;
    self.pingcb = null;
    self.streamcb = null;
    self.settingscb = null;
    self.goawaycb = null;
    self.ppcb = null;
    self.contype = contype;
    self.settings = .{};
    self.peer_settings = .{};
    self.pending_pongs = std.AutoHashMap([8]u8, void).init(allocator);
    // waiting for one ack
    self.pending_settings = 1;
    self.stream = stream;
    self.streams = std.AutoHashMap(u31, Stream).init(allocator);
    self.pending_data = std.AutoHashMap(*Stream, void).init(allocator);
    // NOTE(review): an unusually small initial window (10 bytes) — presumably
    // to exercise WINDOW_UPDATE handling; confirm before production use.
    self.settings.initial_window_size = 10;
    if (self.contype == .server) {
        const n = try stream.reader().readAll(self.recv_buff[0..24]);
        std.debug.assert(n == 24);
        std.debug.assert(std.mem.eql(u8, self.recv_buff[0..n], "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"));
        try self.settings.write(stream.writer(), false);
    } else {
        self.settings.enable_push = 1;
        try self.stream.writer().writeAll("PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n");
        try self.settings.write(stream.writer(), false);
        const head = try frames.Head.read(stream.reader());
        std.debug.assert(head.ty == .settings and !head.flags.ack);
        self.peer_settings = try frames.Settings.read(stream.reader(), self.settings, head.len);
        try frames.Settings.writeAck(stream.writer());
        try self.openStream(1);
    }
    self.streamoffset = 0;
    self.allocator = allocator;
    self.peer_window_size = self.peer_settings.initial_window_size;
    self.ctx = try hpack.Tables.init(allocator, self.settings.header_table_size);
    self.parser = hpack.Parser.init(&self.ctx, self.parse_buff[0..]);
    self.builder = hpack.Builder.init(&self.ctx, self.build_buff[0..]);
    return self;
}

// --- callback registration -------------------------------------------------
pub fn onStream(self: *Self, cb: *const StreamCallBack) void {
    self.streamcb = cb;
}
pub fn onPing(self: *Self, cb: *const PingCallBack) void {
    self.pingcb = cb;
}
pub fn onSettings(self: *Self, cb: *const SettingsCallBack) void {
    self.settingscb = cb;
}
pub fn onGoAway(self: *Self, cb: *const GoAwayCallBack) void {
    self.goawaycb = cb;
}
pub fn onPushPromise(self: *Self, cb: *const PushPromiseCallBack) void {
    self.ppcb = cb;
}

/// Registers a new Stream for `id` in the `open` state.
pub fn openStream(self: *Self, id: u31) !void {
    var stream = Stream.init(self, id, self.allocator);
    stream.state = .open;
    try self.streams.put(id, stream);
}

/// True once at least one stream exists and all streams are closed.
pub fn shouldClose(self: *Self) bool {
    if (self.streams.count() == 0) return false;
    var iter = self.streams.valueIterator();
    while (iter.next()) |stream| {
        if (stream.state != .closed) return false;
    }
    return true;
}

/// Main frame dispatch loop: reads one frame header at a time and routes the
/// payload to the per-type handler / user callbacks. After each frame, flushes
/// any streams with pending outgoing data. Returns when all streams closed,
/// a GOAWAY arrived, or `closing` was set.
pub fn processFrames(self: *Self) !void {
    while (!self.shouldClose()) {
        const head = try frames.Head.read(self.stream.reader());
        switch (head.ty) {
            .goaway => {
                const payload = try frames.GoAway.read(self.stream.reader(), head);
                if (self.goawaycb) |cb| cb(self, payload);
                break;
            },
            .headers => {
                // Reuse the stream if we know it, otherwise implicitly open it
                // (peer-initiated stream).
                const affected_stream = if (self.streams.getPtr(head.streamid)) |stream| blk: {
                    if (!stream.opening()) @panic("Invalid state to receive headers");
                    break :blk stream;
                } else blk: {
                    try self.openStream(head.streamid);
                    break :blk self.streams.getPtr(head.streamid).?;
                };
                var headerbuff = [_]hpack.HeaderField{.{}} ** 100;
                var hproc = frames.Headers{ .builder = &self.builder, .parser = &self.parser };
                const headers = try hproc.readAndParse(self.stream.reader(), head, headerbuff[0..]);
                // `.ack` doubles as END_STREAM on non-SETTINGS frames (see Head.Flags).
                if (head.flags.ack) affected_stream.transition(.recveos);
                if (self.streamcb) |cb| cb(affected_stream);
                if (affected_stream.headerscb) |cb| cb(headers, affected_stream);
                if (affected_stream.closing()) break;
            },
            .settings => {
                if (head.streamid != 0) @panic("SETTING: stream id not 0");
                if (head.flags.ack) {
                    std.debug.assert(head.len == 0);
                    if (self.pending_settings > 0) self.pending_settings -= 1;
                } else {
                    const settings = try frames.Settings.read(self.stream.reader(), self.peer_settings, head.len);
                    // The callback is expected to call acceptSettings() to ACK.
                    if (self.settingscb) |cb| cb(self, settings);
                }
            },
            .ping => {
                var payload = try frames.Ping.read(self.stream.reader(), head);
                if (head.flags.ack) {
                    // A PONG must match a PING we previously sent.
                    if (!self.pending_pongs.remove(payload)) {
                        std.debug.panic("invalid pong: {s}\n", .{&payload});
                    }
                } else {
                    // Default behavior: echo the ping if no user callback is set.
                    if (self.pingcb) |cb| cb(self, payload) else try frames.Ping.pong(self.stream.writer(), payload);
                }
            },
            .rst => {
                const affected_stream = self.streams.getPtr(head.streamid);
                const rst = try frames.Rst.read(self.stream.reader());
                if (affected_stream) |s| {
                    if (s.rstcb) |cb| cb(s, rst.code);
                    if (s.state != .idle) {
                        s.transition(.recvrst);
                        // Drop connection-level bookkeeping tied to in-flight work.
                        self.pending_pongs.clearAndFree();
                        self.pending_settings = 0;
                    } else unreachable;
                } else @panic("RST: Stream not found");
            },
            .data => {
                const affected_stream = self.streams.getPtr(head.streamid) orelse @panic("DATA: stream not found");
                // Arms the stream so its read() can consume this frame's payload.
                try affected_stream.setReadable(head);
                if (affected_stream.datacb) |cb| cb(affected_stream, head);
                // The callback must fully drain the frame before returning.
                std.debug.assert(affected_stream.datastate == .idle);
            },
            .priority => {
                // PRIORITY is ignored; just consume the payload.
                try self.stream.reader().skipBytes(head.len, .{});
            },
            .pushpromise => {
                var pproc = frames.PushPromise{ .builder = &self.builder, .parser = &self.parser };
                var headerbuff = [_]hpack.HeaderField{.{}} ** 100;
                const promise = try pproc.readAndParse(self.stream.reader(), head, headerbuff[0..]);
                try self.openStream(promise.streamId);
                const stream = self.streams.getPtr(promise.streamId).?;
                if (self.ppcb) |cb| cb(stream, promise.headers);
            },
            .windowupdate => {
                const inc = try frames.WindowUpdate.read(self.stream.reader(), head);
                // Stream id 0 credits the connection window, otherwise the stream's.
                if (head.streamid == 0) self.peer_window_size += @intCast(inc) else {
                    if (self.streams.getPtr(head.streamid)) |s| {
                        s.peer_window_size += @intCast(inc);
                    }
                }
            },
            .continuation => unreachable,
        }
        // Give every stream with queued writes a chance to make progress.
        var iter = self.pending_data.keyIterator();
        while (iter.next()) |stream| {
            try stream.*.writePending();
        }
        if (self.closing) break;
    }
    // Final flush after leaving the loop.
    var iter = self.pending_data.keyIterator();
    while (iter.next()) |stream| {
        try stream.*.writePending();
    }
}

/// Sends a PING and records the payload so the matching PONG can be verified.
pub fn ping(self: *Self, payload: [8]u8) !void {
    try self.pending_pongs.put(payload, {});
    try frames.Ping.ping(self.stream.writer(), payload);
}

/// Sends a PING ACK echoing `payload`.
pub fn pong(self: *Self, payload: [8]u8) !void {
    try frames.Ping.pong(self.stream.writer(), payload);
}

/// Pumps the frame loop until no stream has pending outgoing data.
pub fn flush(self: *Self) !void {
    while (self.pending_data.count() > 0) {
        try self.processFrames();
    }
}

/// Graceful shutdown: flush pending data, then keep processing frames until
/// every outstanding PING and SETTINGS has been acknowledged, then close TCP.
pub fn close(self: *Self) !void {
    try self.flush();
    self.closing = true;
    while (self.pending_pongs.count() != 0 or self.pending_settings != 0) {
        try self.processFrames();
    }
    self.stream.close();
}

/// Sends GOAWAY and closes the socket. Servers first round-trip a PING to
/// give in-flight frames a chance to drain.
pub fn goAway(self: *Self, last_processed_id: u31, code: errcodes.Code, debug_info: ?[]const u8) !void {
    _ = self.streams.getPtr(last_processed_id) orelse @panic("invalid stream id");
    if (self.contype == .server) {
        const payload = [_]u8{ 'c', 'l', 'o', 's', 'i', 'n', 'g', '!' };
        try self.ping(payload);
        while (self.pending_pongs.get(payload)) |_| try self.processFrames();
    }
    try frames.GoAway.write(self.stream.writer(), frames.GoAway.PayLoad{ .code = code, .debug_info = debug_info, .last_id = last_processed_id });
    self.stream.close();
}

/// Grants the peer `increment` bytes of connection-level window (stream 0).
pub fn updateWindow(self: *Self, increment: u31) !void {
    try frames.WindowUpdate.write(self.stream.writer(), 0, increment);
}

/// Applies a peer SETTINGS frame: adjusts per-stream windows by the delta in
/// initial_window_size, resizes the HPACK dynamic table if needed, stores the
/// new settings and writes the SETTINGS ACK.
pub fn acceptSettings(self: *Self, settings: frames.Settings) !void {
    if (settings.initial_window_size != self.peer_settings.initial_window_size) {
        var stream_iter = self.streams.valueIterator();
        while (stream_iter.next()) |stream| {
            stream.peer_window_size += settings.initial_window_size - self.peer_settings.initial_window_size;
        }
    }
    if (settings.header_table_size != self.peer_settings.header_table_size) self.resizeDynamicTable(settings.header_table_size);
    self.peer_settings = settings;
    try frames.Settings.writeAck(self.stream.writer());
}

pub fn resizeDynamicTable(self: *Self, new_size: u32) void {
    self.ctx.dynamic_table.resize(new_size);
}

/// Client-only: opens the next odd-numbered stream and sends `headers`
/// with END_STREAM set. Returns the new stream.
pub fn request(self: *Self, headers: []hpack.HeaderField) !*Stream {
    std.debug.assert(self.contype == .client);
    if (self.streamoffset == 0) self.streamoffset = 1 else self.streamoffset += 2;
    try self.openStream(self.streamoffset);
    var s = self.streams.getPtr(self.streamoffset).?;
    try s.sendHeaders(headers[0..], true);
    return s;
}
0
repos/http2.0/src
repos/http2.0/src/frames/pp.zig
const std = @import("std");
const hpack = @import("../hpack.zig");
const frame = @import("../frames.zig");
const Head = frame.Head;
const sort = std.sort;

// PUSH_PROMISE frame reader/writer (RFC 7540 §6.6). This is a struct-file:
// `builder`/`parser` are borrowed HPACK codecs shared with the Connection.
builder: *hpack.Builder,
parser: *hpack.Parser,

// Orders header fields smallest-first so chunking packs greedily.
fn cmp(_: void, lhs: hpack.HeaderField, rhs: hpack.HeaderField) bool {
    return lhs.size() < rhs.size();
}

/// Mask for the reserved top bit preceding the 31-bit stream id.
pub const idmask: u32 = (1 << 31);

// todo; add padding
/// Writes a PUSH_PROMISE for stream `id` promising `promiseId`, splitting the
/// HPACK block into CONTINUATION frames when it exceeds `max_header_list`.
/// The 4-byte Promised Stream ID is carried only by the first frame.
pub fn write(
    self: *@This(),
    stream: anytype,
    id: u31,
    promiseId: u32,
    max_header_list: u24,
    headers: []hpack.HeaderField,
) !void {
    sort.block(hpack.HeaderField, headers, {}, cmp);
    if (hpack.Field.getHeaderFieldLen(headers) <= max_header_list) {
        // Everything fits in a single frame: END_HEADERS set immediately.
        self.builder.clear();
        try self.builder.addSlice(headers);
        const hfin = self.builder.final();
        var header = frame.Head{ .flags = .{ .endheaders = true }, .len = @intCast(hfin.len + 4), .streamid = id, .ty = .pushpromise };
        try header.write(stream);
        try stream.writeInt(u32, promiseId, .big);
        try stream.writeAll(hfin);
    } else {
        var fitting = hpack.Field.sliceOnFieldlen(headers, max_header_list);
        var first_round = true;
        while (fitting.b.len > 0) {
            self.builder.clear();
            try self.builder.addSlice(fitting.a);
            const hfin = self.builder.final();
            // Only the first frame is PUSH_PROMISE and carries the promised id.
            var h = frame.Head{ .flags = .{}, .len = @intCast(if (first_round) hfin.len + 4 else hfin.len), .streamid = id, .ty = if (first_round) .pushpromise else .continuation };
            try h.write(stream);
            if (first_round) try stream.writeInt(u32, promiseId, .big);
            try stream.writeAll(hfin);
            fitting = hpack.Field.sliceOnFieldlen(fitting.b, max_header_list);
            first_round = false;
        }
        // Last fragment closes the header block with END_HEADERS.
        self.builder.clear();
        try self.builder.addSlice(fitting.a);
        const hfin = self.builder.final();
        var finalhead = frame.Head{ .flags = .{ .endheaders = true }, .len = @intCast(if (first_round) hfin.len + 4 else hfin.len), .streamid = id, .ty = if (first_round) .pushpromise else .continuation };
        try finalhead.write(stream);
        if (first_round) try stream.writeInt(u32, promiseId, .big);
        try stream.writeAll(hfin);
    }
}

/// Reads the complete header block of a PUSH_PROMISE (plus any CONTINUATION
/// frames) into `out`, storing the promised stream id in `id`. Returns the
/// number of header-block bytes read.
///
/// FIX: the Promised Stream ID lives in the payload of the FIRST frame
/// (the PUSH_PROMISE itself, RFC 7540 §6.6); the previous version read it
/// from the last (END_HEADERS) frame, corrupting both the id and the header
/// block whenever CONTINUATION frames were present.
pub fn read(instream: anytype, out: []u8, head: Head, id: *u31) !usize {
    var hd = head;
    var r: usize = 0;
    var first = true;
    while (true) {
        const paddlen = if (hd.flags.padded) try instream.readInt(u8, .big) else 0;
        var toread = hd.len;
        if (hd.flags.padded) toread -= (1 + paddlen);
        if (first) {
            // Reserved bit + 31-bit promised stream id, first frame only.
            id.* = @truncate(try instream.readInt(u32, .big) & ~idmask);
            toread -= 4;
            first = false;
        }
        r += try instream.readAll(out[r .. r + toread]);
        try instream.skipBytes(paddlen, .{});
        if (hd.flags.endheaders) break;
        hd = try Head.read(instream);
    }
    return r;
}

pub const Promise = struct { headers: []hpack.HeaderField, streamId: u31 };

/// Reads a full PUSH_PROMISE header block and HPACK-decodes it into `out`.
/// `out` must outlive the returned Promise; the raw bytes use a stack buffer.
pub fn readAndParse(
    self: *@This(),
    instream: anytype,
    head: Head,
    out: []hpack.HeaderField,
) !Promise {
    var buf = [_]u8{0} ** 10000;
    var id: u31 = 0;
    const n = try read(instream, buf[0..], head, &id);
    return .{ .headers = try self.parser.parse(buf[0..n], out[0..]), .streamId = id };
}
0
repos/http2.0/src
repos/http2.0/src/frames/rst.zig
const std = @import("std"); pub const Rst = @This(); const Head = @import("../frames.zig").Head; const errors = @import("../errors.zig"); code: errors.Code, pub fn read(in: anytype) !Rst { return Rst{ .code = @enumFromInt((try in.readInt(u32, .big))) }; } pub fn write(streamid: u31, code: errors.Code, out: anytype) !void { var head = Head{ .flags = .{}, .ty = .rst, .streamid = streamid, .len = 4 }; try Head.write(&head, out); try out.writeInt(u32, @intFromEnum(code), .big); } // pub fn main() !void { // var buf = [_]u8{0} ** 24; // var stream = std.io.fixedBufferStream(buf[0..]); // // const h = frames.Head{ // .ty = .rst, // .len = 4, // .streamid = 89, // .flags = .{} // }; // // try frames.Rst.write(h, 78, stream.writer()); // // stream.pos = 0; // // //var bufr = [_]u8{0} ** 24; // //var streamr = std.io.fixedBufferStream(bufr[0..]); // // const hd = try frames.Head.read(stream.reader()); // _ = hd; // // const rst = try frames.Rst.read(stream.reader()); // // std.debug.print("{}\n", .{rst}); // }
0
repos/http2.0/src
repos/http2.0/src/frames/settings.zig
const std = @import("std");
const Head = @import("../frames.zig").Head;

/// SETTINGS frame payload (RFC 7540 §6.5). Field order matters: `write`
/// derives each setting's wire identifier (0x01..0x06) from its position.
pub const Settings = struct {
    // 0x01
    header_table_size: u32 = 4096,
    // 0x02
    enable_push: u32 = 0,
    // 0x03
    max_concurrent_streams: u32 = 100,
    // 0x04
    initial_window_size: u32 = 65535,
    // 0x05
    max_frame_size: u32 = 16384,
    // 0x06
    max_header_list: u32 = std.math.maxInt(u32),

    /// Writes an empty SETTINGS frame with the ACK flag set.
    pub fn writeAck(stream: anytype) !void {
        var header = Head{
            .flags = .{ .ack = true },
            .len = 0,
            .streamid = 0,
            .ty = .settings,
        };
        try header.write(stream);
    }

    /// Writes this SETTINGS frame. A non-ACK frame carries all six settings
    /// as (u16 id, u32 value) pairs; an ACK carries no payload.
    pub fn write(self: *Settings, stream: anytype, ack: bool) !void {
        var header = Head{
            .flags = .{ .ack = ack },
            .len = if (!ack) 6 * 6 else 0,
            .streamid = 0,
            .ty = .settings,
        };
        try header.write(stream);
        if (!ack) {
            const T = @typeInfo(Settings);
            // Setting ids are 1-based field positions (0x01..0x06).
            inline for (T.Struct.fields, 1..) |f, i| {
                try stream.writeInt(u16, @intCast(i), .big);
                try stream.writeInt(u32, @field(self, f.name), .big);
            }
        }
    }

    /// Parses `len` bytes of SETTINGS payload, overlaying received values on
    /// top of `old` (unmentioned settings keep their previous value).
    pub fn read(stream: anytype, old: Settings, len: usize) !Settings {
        var s = old;
        if (len % 6 != 0) std.debug.panic("invalid setting size: {}", .{len});
        for (0..len / 6) |_| {
            switch (try stream.readInt(u16, .big)) {
                0x01 => s.header_table_size = try stream.readInt(u32, .big),
                0x02 => s.enable_push = (try stream.readInt(u32, .big)),
                0x03 => s.max_concurrent_streams = try stream.readInt(u32, .big),
                0x04 => s.initial_window_size = try stream.readInt(u32, .big),
                0x05 => s.max_frame_size = try stream.readInt(u32, .big),
                0x06 => s.max_header_list = try stream.readInt(u32, .big),
                else => @panic("invalid setting"),
            }
        }
        return s;
    }
};

/// Copies every field of `new` into `self`.
pub fn update(self: *Settings, new: Settings) void {
    const T = @typeInfo(Settings);
    inline for (T.Struct.fields) |f| @field(self, f.name) = @field(new, f.name);
}

/// Smoke test: round-trips a SETTINGS frame through a fixed buffer.
/// FIX: previously wrote an ACK (empty payload) and then called
/// `Settings.read` with two arguments against its three-parameter signature —
/// neither compiled nor had a payload to parse. Now writes a real SETTINGS
/// frame and reads it back over defaults.
pub fn main() !void {
    var s = Settings{};
    var buf = [_]u8{0} ** 256;
    var b = std.io.fixedBufferStream(buf[0..]);
    try s.write(b.writer(), false);
    b.pos = 0;
    const h = try Head.read(b.reader());
    const set = try Settings.read(b.reader(), Settings{}, h.len);
    std.debug.print("{}\n", .{h});
    std.debug.print("{}\n", .{set});
}
0
repos/http2.0/src
repos/http2.0/src/frames/data.zig
const std = @import("std"); const frame = @import("../frames.zig"); pub fn readAll(in: anytype, out: anytype, head: frame.Head) !void { // todo var buf = [_]u8{0} ** 512; const paddlen: u8 = if (head.flags.padded) try in.readInt(u8, .big) else 0; var rem: usize = (head.len - paddlen); if (head.flags.padded) rem -= 1; while (rem > 0) { const n = try in.read(buf[0..@min(buf.len, rem)]); try out.writeAll(buf[0..n]); rem -= @intCast(n); } try in.skipBytes(paddlen, .{}); } pub fn writeStream(in: anytype, out: anytype, head: frame.Head) !void { var hd = head; hd.flags.padded = false; var buf = [_]u8{0} ** 512; var rem = head.len; try hd.write(out); while (rem > 0) { const n = try in.read(buf[0..@min(buf.len, rem)]); try out.writeAll(buf[0..n]); rem -= @intCast(n); } } pub fn writeEmpty(stream: anytype, id: u31) !void { var hd = frame.Head{ .flags = .{ .ack = true }, .streamid = id, .len = 0, .ty = .data }; try hd.write(stream); } pub fn write(out: anytype, id: u31, buf: []const u8, padding: []const u8, eos: bool) !void { _ = padding; var hd = frame.Head{ .ty = .data, .len = @truncate(buf.len //+ padding.len ), .streamid = id, .flags = .{ .ack = eos }, }; //if(padding.len > 0) hd.flags.padded = true; try hd.write(out); try out.writeAll(buf); //if(padding.len > 0) try out.writeAll(padding); //std.debug.print("--------------------------> {any}\n", .{hd}); } pub fn main() !void { const message = "hello world!!"; var messagestream = std.io.fixedBufferStream(message); const head = frame.Head{ .flags = .{}, .len = @intCast(message.len), .streamid = 90, .ty = .data }; var buf = [_]u8{0} ** 256; var stream = std.io.fixedBufferStream(buf[0..]); try write(messagestream.reader(), stream.writer(), head); stream.pos = 0; var recvbuf = [_]u8{0} ** 256; var recstream = std.io.fixedBufferStream(recvbuf[0..]); const rechead = try frame.Head.read(stream.reader()); try readAll(stream.reader(), recstream.writer(), rechead); std.debug.print("{s}\n", .{recvbuf}); }
0
repos/http2.0/src
repos/http2.0/src/frames/head.zig
const std = @import("std");
pub const hpack = @import("../hpack.zig");

pub const preface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";
/// Mask for the reserved top bit preceding the 31-bit stream id.
pub const idmask: u32 = (1 << 31);

/// 9-byte HTTP/2 frame header (RFC 7540 §4.1):
/// 24-bit length, 8-bit type, 8-bit flags, 1 reserved bit + 31-bit stream id.
pub const Head = struct {
    pub const Type = enum(u8) {
        data = 0x00,
        headers = 0x01,
        priority = 0x02,
        rst = 0x03,
        settings = 0x04,
        pushpromise = 0x05,
        ping = 0x06,
        goaway = 0x07,
        windowupdate = 0x08,
        continuation = 0x09,
    };
    // Packed LSB-first; named fields map to the HTTP/2 flag bits, the
    // quoted fields are unused placeholder bits.
    pub const Flags = packed struct(u8) {
        // bit 0: ACK (SETTINGS/PING) — doubles as END_STREAM on HEADERS/DATA
        ack: bool = false,
        @"6": bool = false,
        // bit 2: END_HEADERS
        endheaders: bool = false,
        // bit 3: PADDED
        padded: bool = false,
        @"3": bool = false,
        // bit 5: PRIORITY
        priority: bool = false,
        @"1": bool = false,
        @"0": bool = false,
    };
    len: u24,
    streamid: u31,
    ty: Type,
    flags: Flags,

    /// Reads the 9 header bytes: the first u32 holds length (top 24 bits)
    /// and type (low byte); then flags; then reserved bit + stream id.
    pub fn read(stream: anytype) !Head {
        var len = try stream.readInt(u32, .big);
        const ty: u8 = @intCast(len & 0xff);
        len >>= 8;
        const f = try stream.readInt(u8, .big);
        const id = try stream.readInt(u32, .big);
        return Head{ .len = @truncate(len), .ty = @enumFromInt(ty), .flags = @bitCast(f), .streamid = @truncate(id & ~idmask) };
    }

    /// Writes the 9 header bytes.
    /// FIX: `self.len << 8` was computed in u24, silently discarding the top
    /// 8 bits of any length >= 65536 (Zig's `<<` drops shifted-out bits);
    /// widen to u32 before shifting so the full 24-bit length survives.
    pub fn write(self: *Head, stream: anytype) !void {
        const lt: u32 = (@as(u32, self.len) << 8) | @intFromEnum(self.ty);
        try stream.writeInt(u32, lt, .big);
        try stream.writeInt(u8, @bitCast(self.flags), .big);
        try stream.writeInt(u32, @intCast(self.streamid), .big);
    }
};

var gpa = std.heap.GeneralPurposeAllocator(.{}){};

/// Manual integration experiment: accepts one HTTP/2 connection, echoes
/// SETTINGS, decodes the request headers and answers `:status: 404`.
/// Kept as a scratch harness; not part of the library surface.
/// NOTE(review): `hpack.Tables.init` is called here with one argument but
/// with two (allocator, table size) in connection.zig — one of the call
/// sites is stale; confirm against hpack.zig.
pub fn main() !void {
    // `buf` is shared by the parser and (later) the builder — fine here only
    // because they are never used concurrently.
    var buf = [_]u8{0} ** 4096;
    var ctx = try hpack.Tables.init(gpa.allocator());
    var p = hpack.Parser.init(&ctx, buf[0..]);
    const address = try std.net.Address.parseIp4("127.0.0.1", 3000);
    var server = try address.listen(.{ .reuse_address = true, .reuse_port = true });
    var con = try server.accept();
    var recvbuf = [_]u8{0} ** 4096;
    const n = try con.stream.reader().readAll(recvbuf[0..24]);
    std.debug.assert(n == preface.len);
    var settings_ack = Head{ .flags = .{ .ack = true }, .ty = .settings, .streamid = 0, .len = 0 };
    var settings = try Head.read(con.stream.reader());
    std.debug.print("{}\n", .{settings});
    // Echo the client's SETTINGS header back, then ACK.
    try settings.write(con.stream.writer());
    try settings_ack.write(con.stream.writer());
    const settin = try Head.read(con.stream.reader());
    std.debug.print("HHH -> {any}\n", .{settin});
    const hlen = try con.stream.reader().readAll(recvbuf[0..settin.len]);
    std.debug.print("hlen => {}\n", .{hlen});
    var headers = [_]hpack.HeaderField{.{}} ** 24;
    const heads = try p.parse(recvbuf[0..hlen], headers[0..]);
    for (heads) |value| {
        std.debug.print("{s}: {s}\n", .{ value.name, value.value });
    }
    const jaba = try Head.read(con.stream.reader());
    std.debug.print("====={}======\n", .{jaba});
    var builder = hpack.Builder.init(&ctx, buf[0..]);
    try builder.add(.{
        .name = ":status",
        .value = "404",
    }, false, false);
    const hfin = builder.final();
    var ddd = Head{ .flags = .{ .endheaders = true, .ack = true }, .ty = .headers, .streamid = 1, .len = @intCast(hfin.len) };
    try ddd.write(con.stream.writer());
    try con.stream.writer().writeAll(hfin);
    std.debug.print("{}\n", .{settin});
}

/// Disabled round-trip check for Head.write/Head.read (note the deliberate
/// `maitn` spelling keeps it from being the entry point).
pub fn maitn() !void {
    var buf = [_]u8{0} ** 50;
    var b = std.io.fixedBufferStream(buf[0..]);
    var a = Head{ .flags = .{ .ack = true }, .streamid = 45, .len = 7, .ty = .data };
    try a.write(b.writer());
    b.pos = 0;
    const c = try Head.read(b.reader());
    std.debug.print("{any}\n", .{c});
}
0
repos/http2.0/src
repos/http2.0/src/frames/ping.zig
const std = @import("std"); const frame = @import("../frames.zig"); const hpack = @import("../hpack.zig"); const Head = frame.Head; pub fn read(stream: anytype, head: frame.Head) ![8]u8 { std.debug.assert(head.len == 8); var buf: [8]u8 = undefined; _ = try stream.readAll(buf[0..]); //std.debug.print("READ PING: {any}\n", .{buf}); return buf; } pub fn ping(stream: anytype, opaque_data: [8]u8) !void { var p = Head{ .flags = .{}, .len = 8, .streamid = 0, .ty = .ping }; try p.write(stream); _ = try stream.writeAll(opaque_data[0..]); } pub fn pong(stream: anytype, payload: [8]u8) !void { var p = Head{ .flags = .{ .ack = true }, .len = 8, .streamid = 0, .ty = .ping }; try p.write(stream); _ = try stream.writeAll(payload[0..]); } const Self = @This(); pub const Ping = struct { payload: [8]u8, };
0
repos/http2.0/src
repos/http2.0/src/frames/wupdate.zig
const std = @import("std"); const frame = @import("../frames.zig"); const hpack = @import("../hpack.zig"); const Head = frame.Head; pub const idmask: u32 = (1 << 31); pub fn read(stream: anytype, head: frame.Head) !u64 { std.debug.assert(head.len == 4); return try stream.readInt(u32, .big) & ~idmask; } pub fn write(stream: anytype, id: u31, increment: u31) !void { var p = Head{ .flags = .{}, .len = 4, .streamid = id, .ty = .windowupdate }; try p.write(stream); _ = try stream.writeInt(u32, increment, .big); }
0
repos/http2.0/src
repos/http2.0/src/frames/go.zig
const std = @import("std"); const frame = @import("../frames.zig"); const hpack = @import("../hpack.zig"); const Head = frame.Head; const errors = @import("../errors.zig"); pub const PayLoad = struct { last_id: u31, code: errors.Code, debug_info: ?[]const u8 = null }; const idmask: u32 = (1 << 31); pub fn read(stream: anytype, head: frame.Head) !PayLoad { const last_id = try stream.readInt(u32, .big) & ~idmask; const code = try stream.readInt(u32, .big); try stream.skipBytes(head.len - 8, .{}); return PayLoad{ .code = @enumFromInt(code), .last_id = @truncate(last_id) }; } pub fn write(stream: anytype, payload: PayLoad) !void { var p = Head{ .flags = .{}, .len = @truncate(8 + if (payload.debug_info) |info| info.len else 0), .streamid = 0, .ty = .goaway }; try p.write(stream); try stream.writeInt(u32, payload.last_id, .big); try stream.writeInt(u32, @intFromEnum(payload.code), .big); if (payload.debug_info) |info| _ = try stream.writeAll(info[0 .. p.len - 8]); }
0
repos/http2.0/src
repos/http2.0/src/frames/headers.zig
const std = @import("std");
const frame = @import("../frames.zig");
const hpack = @import("../hpack.zig");
const Head = frame.Head;
pub const Headers = @This();
const sort = std.sort;

// Orders header fields smallest-first so chunking packs greedily.
fn cmp(_: void, lhs: hpack.HeaderField, rhs: hpack.HeaderField) bool {
    return lhs.size() < rhs.size();
}

// Borrowed HPACK codecs, shared with the Connection.
builder: *hpack.Builder,
parser: *hpack.Parser,

// todo; add padding
/// Writes `headers` as a HEADERS frame on stream `id`, splitting into
/// CONTINUATION frames when the encoded block exceeds `max_header_list`.
/// `endstream` sets END_STREAM (the `.ack` bit) on the HEADERS frame.
///
/// FIX: `first_round` was initialized to `false`, so in the chunked path the
/// FIRST fragment was emitted as CONTINUATION (and without END_STREAM)
/// instead of HEADERS — a protocol violation. pp.zig's parallel code
/// correctly starts with `true`; this now matches it.
pub fn write(self: *Headers, stream: anytype, max_header_list: u24, headers: []hpack.HeaderField, id: u31, endstream: bool) !void {
    sort.block(hpack.HeaderField, headers, {}, cmp);
    if (hpack.Field.getHeaderFieldLen(headers) <= max_header_list) {
        // Single-frame fast path: END_HEADERS set immediately.
        self.builder.clear();
        try self.builder.addSlice(headers);
        const hfin = self.builder.final();
        var header = frame.Head{ .flags = .{ .endheaders = true, .ack = endstream }, .len = @intCast(hfin.len), .streamid = id, .ty = .headers };
        try header.write(stream);
        try stream.writeAll(hfin);
        return;
    }
    var fitting = hpack.Field.sliceOnFieldlen(headers, max_header_list);
    var first_round = true;
    while (fitting.b.len > 0) {
        self.builder.clear();
        try self.builder.addSlice(fitting.a);
        const hfin = self.builder.final();
        // Only the first fragment is a HEADERS frame (and carries END_STREAM).
        var h = frame.Head{ .flags = if (first_round) .{ .ack = endstream } else .{}, .len = @intCast(hfin.len), .streamid = id, .ty = if (first_round) .headers else .continuation };
        try h.write(stream);
        try stream.writeAll(hfin);
        fitting = hpack.Field.sliceOnFieldlen(fitting.b, max_header_list);
        first_round = false;
    }
    // Last fragment closes the header block with END_HEADERS.
    self.builder.clear();
    try self.builder.addSlice(fitting.a);
    const hfin = self.builder.final();
    var finalhead = frame.Head{ .flags = if (first_round) .{ .ack = endstream, .endheaders = true } else .{ .endheaders = true }, .len = @intCast(hfin.len), .streamid = id, .ty = if (first_round) .headers else .continuation };
    try finalhead.write(stream);
    try stream.writeAll(hfin);
}

/// Reads the raw header block of a HEADERS frame plus any CONTINUATION
/// frames into `out`, skipping padding and priority fields. Returns the
/// number of header-block bytes read.
pub fn read(instream: anytype, out: []u8, head: Head) !usize {
    var hd = head;
    var r: usize = 0;
    while (!hd.flags.endheaders) {
        var toread = hd.len;
        const paddlen = if (hd.flags.padded) try instream.readInt(u8, .big) else 0;
        if (hd.flags.padded) toread -= (1 + paddlen);
        if (hd.flags.priority) {
            // Priority field: 4-byte dependency + 1-byte weight.
            try instream.skipBytes(5, .{});
            toread -= 5;
        }
        r += try instream.readAll(out[r .. r + toread]);
        try instream.skipBytes(paddlen, .{});
        hd = try Head.read(instream);
    }
    const paddlen = if (hd.flags.padded) try instream.readInt(u8, .big) else 0;
    var toread = hd.len - paddlen;
    if (hd.flags.padded) toread -= 1;
    // Todo
    if (hd.flags.priority) {
        try instream.skipBytes(5, .{});
        toread -= 5;
    }
    r += try instream.readAll(out[r .. r + toread]);
    try instream.skipBytes(paddlen, .{});
    return r;
}

/// Reads a full header block and HPACK-decodes it into `out`.
/// Raw bytes go through a large stack buffer.
pub fn readAndParse(
    self: *Headers,
    instream: anytype,
    head: Head,
    out: []hpack.HeaderField,
) ![]hpack.HeaderField {
    var buf = [_]u8{0} ** 10000;
    const n = try read(instream, buf[0..], head);
    return try self.parser.parse(buf[0..n], out[0..]);
}

var gpa = std.heap.GeneralPurposeAllocator(.{}){};

/// Smoke test: writes 8 header fields with a tiny max_header_list (forcing
/// CONTINUATION frames) and parses them back.
/// FIX: previously initialized `Headers` with codec VALUES against its
/// pointer-typed fields and called `h.write` without the `headers` slice —
/// neither compiled. NOTE(review): `hpack.Tables.init` is called here with
/// one argument but with two in connection.zig — confirm against hpack.zig.
pub fn main() !void {
    var buf = [_]u8{0} ** 4096;
    var ctx = try hpack.Tables.init(gpa.allocator());
    var headerheap = [_]u8{0} ** 400;
    var b = hpack.Builder.init(&ctx, buf[0..]);
    var p = hpack.Parser.init(&ctx, headerheap[0..]);
    var h = Headers{ .builder = &b, .parser = &p };
    var hdrs = [_]hpack.HeaderField{.{ .name = "hello", .value = "world" }} ** 8;
    var streambuf = [_]u8{0} ** 4096;
    var stream = std.io.fixedBufferStream(streambuf[0..]);
    try h.write(stream.writer(), 10, hdrs[0..], 4, true);
    stream.pos = 0;
    const hh = try Head.read(stream.reader());
    var headers = [_]hpack.HeaderField{.{}} ** 10;
    const kuku = try h.readAndParse(stream.reader(), hh, headers[0..]);
    for (kuku) |k| {
        std.debug.print("{s}: {s}\n", .{ k.name, k.value });
    }
    std.debug.print("{}\n", .{kuku.len});
}
0
repos/http2.0/src
repos/http2.0/src/hpack/codes.zig
// HPACK static Huffman table from RFC 7541, Appendix B.
// Entry i (0..255) is the canonical Huffman code for byte value i, stored
// right-aligned in a u32; entry 256 is the EOS symbol. The bit width of each
// code is given by the matching entry in `huffman_code_lengths` below.
pub const huffman_codes = [_]u32{
    0x1ff8,     0x7fffd8,   0xfffffe2,  0xfffffe3,  0xfffffe4,  0xfffffe5,  0xfffffe6,  0xfffffe7,
    0xfffffe8,  0xffffea,   0x3ffffffc, 0xfffffe9,  0xfffffea,  0x3ffffffd, 0xfffffeb,  0xfffffec,
    0xfffffed,  0xfffffee,  0xfffffef,  0xffffff0,  0xffffff1,  0xffffff2,  0x3ffffffe, 0xffffff3,
    0xffffff4,  0xffffff5,  0xffffff6,  0xffffff7,  0xffffff8,  0xffffff9,  0xffffffa,  0xffffffb,
    0x14,       0x3f8,      0x3f9,      0xffa,      0x1ff9,     0x15,       0xf8,       0x7fa,
    0x3fa,      0x3fb,      0xf9,       0x7fb,      0xfa,       0x16,       0x17,       0x18,
    0x0,        0x1,        0x2,        0x19,       0x1a,       0x1b,       0x1c,       0x1d,
    0x1e,       0x1f,       0x5c,       0xfb,       0x7ffc,     0x20,       0xffb,      0x3fc,
    0x1ffa,     0x21,       0x5d,       0x5e,       0x5f,       0x60,       0x61,       0x62,
    0x63,       0x64,       0x65,       0x66,       0x67,       0x68,       0x69,       0x6a,
    0x6b,       0x6c,       0x6d,       0x6e,       0x6f,       0x70,       0x71,       0x72,
    0xfc,       0x73,       0xfd,       0x1ffb,     0x7fff0,    0x1ffc,     0x3ffc,     0x22,
    0x7ffd,     0x3,        0x23,       0x4,        0x24,       0x5,        0x25,       0x26,
    0x27,       0x6,        0x74,       0x75,       0x28,       0x29,       0x2a,       0x7,
    0x2b,       0x76,       0x2c,       0x8,        0x9,        0x2d,       0x77,       0x78,
    0x79,       0x7a,       0x7b,       0x7ffe,     0x7fc,      0x3ffd,     0x1ffd,     0xffffffc,
    0xfffe6,    0x3fffd2,   0xfffe7,    0xfffe8,    0x3fffd3,   0x3fffd4,   0x3fffd5,   0x7fffd9,
    0x3fffd6,   0x7fffda,   0x7fffdb,   0x7fffdc,   0x7fffdd,   0x7fffde,   0xffffeb,   0x7fffdf,
    0xffffec,   0xffffed,   0x3fffd7,   0x7fffe0,   0xffffee,   0x7fffe1,   0x7fffe2,   0x7fffe3,
    0x7fffe4,   0x1fffdc,   0x3fffd8,   0x7fffe5,   0x3fffd9,   0x7fffe6,   0x7fffe7,   0xffffef,
    0x3fffda,   0x1fffdd,   0xfffe9,    0x3fffdb,   0x3fffdc,   0x7fffe8,   0x7fffe9,   0x1fffde,
    0x7fffea,   0x3fffdd,   0x3fffde,   0xfffff0,   0x1fffdf,   0x3fffdf,   0x7fffeb,   0x7fffec,
    0x1fffe0,   0x1fffe1,   0x3fffe0,   0x1fffe2,   0x7fffed,   0x3fffe1,   0x7fffee,   0x7fffef,
    0xfffea,    0x3fffe2,   0x3fffe3,   0x3fffe4,   0x7ffff0,   0x3fffe5,   0x3fffe6,   0x7ffff1,
    0x3ffffe0,  0x3ffffe1,  0xfffeb,    0x7fff1,    0x3fffe7,   0x7ffff2,   0x3fffe8,   0x1ffffec,
    0x3ffffe2,  0x3ffffe3,  0x3ffffe4,  0x7ffffde,  0x7ffffdf,  0x3ffffe5,  0xfffff1,   0x1ffffed,
    0x7fff2,    0x1fffe3,   0x3ffffe6,  0x7ffffe0,  0x7ffffe1,  0x3ffffe7,  0x7ffffe2,  0xfffff2,
    0x1fffe4,   0x1fffe5,   0x3ffffe8,  0x3ffffe9,  0xffffffd,  0x7ffffe3,  0x7ffffe4,  0x7ffffe5,
    0xfffec,    0xfffff3,   0xfffed,    0x1fffe6,   0x3fffe9,   0x1fffe7,   0x1fffe8,   0x7ffff3,
    0x3fffea,   0x3fffeb,   0x1ffffee,  0x1ffffef,  0xfffff4,   0xfffff5,   0x3ffffea,  0x7ffff4,
    0x3ffffeb,  0x7ffffe6,  0x3ffffec,  0x3ffffed,  0x7ffffe7,  0x7ffffe8,  0x7ffffe9,  0x7ffffea,
    0x7ffffeb,  0xffffffe,  0x7ffffec,  0x7ffffed,  0x7ffffee,  0x7ffffef,  0x7fffff0,  0x3ffffee,
    0x3fffffff, // EOS
};

// Bit lengths for `huffman_codes`: entry i is the number of significant
// (low) bits of huffman_codes[i]. Same layout: 0..255 byte values, then EOS.
pub const huffman_code_lengths = [_]u8{
    13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28,
    28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28,
    6,  10, 10, 12, 13, 6,  8,  11, 10, 10, 8,  11, 8,  6,  6,  6,
    5,  5,  5,  6,  6,  6,  6,  6,  6,  6,  7,  8,  15, 6,  12, 10,
    13, 6,  7,  7,  7,  7,  7,  7,  7,  7,  7,  7,  7,  7,  7,  7,
    7,  7,  7,  7,  7,  7,  7,  7,  8,  7,  8,  13, 19, 13, 14, 6,
    15, 5,  6,  5,  6,  5,  6,  6,  6,  5,  7,  7,  6,  6,  6,  5,
    6,  7,  6,  5,  5,  6,  7,  7,  7,  7,  7,  15, 11, 14, 13, 28,
    20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23,
    24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24,
    22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23,
    21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23,
    26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25,
    19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27,
    20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23,
    26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26,
    30, // EOS
};
0
repos/http2.0/src
repos/http2.0/src/hpack/codec.zig
const codes = @import("codes.zig"); const std = @import("std"); const math = std.math; const Malloc = std.mem.Allocator; const hcodes = codes.huffman_codes; const lens = codes.huffman_code_lengths; const Self = @This(); tree: *Node = undefined, treeHeap: [256 * @sizeOf(Node)]Node = [1]Node{.{}} ** (256 * @sizeOf(Node)), heapIdx: usize = 0, pub fn init() Self { var self = Self{}; self.tree = self.makeNode(); for (hcodes, lens, 0..) |code, len, sym| try self.tree.insert(&self, code, @truncate(sym), @intCast(len)); return self; } inline fn makeNode(self: *Self) *Node { self.treeHeap[self.heapIdx] = .{}; self.heapIdx += 1; return &self.treeHeap[self.heapIdx - 1]; } /// encode and return encoded length. /// `buf` = writer pub fn encode(source: []const u8, buf: anytype) !usize { var byteoffset: u8 = 0; var acc: u32 = 0; var idx: usize = 0; for (source) |value| { const codeseq = codes.huffman_codes[value]; const codelen = codes.huffman_code_lengths[value]; const rem: u8 = @intCast(32 - byteoffset); idx += codelen; if (rem == 0) { try buf.writeInt(u32, acc, .big); } else if (rem == codelen) { acc <<= @intCast(codelen); acc |= codeseq; try buf.writeInt(u32, acc, .big); byteoffset = 0; acc = 0; } else if (rem > codelen) { acc <<= @intCast(codelen); acc |= codeseq; byteoffset += codelen; } else if (rem < codelen) { acc <<= @intCast(rem); acc |= (codeseq >> @intCast(codelen - rem)); try buf.writeInt(u32, acc, .big); acc = codeseq & (math.pow(u32, 2, codelen - rem) - 1); byteoffset = codelen - rem; } } if (byteoffset != 0) { const f: u32 = 0xffffffff; acc <<= @intCast(32 - byteoffset); acc = (f >> @intCast(byteoffset)) | acc; const bytes = std.mem.asBytes(&acc); const k = std.mem.alignForward(u64, byteoffset, 8) / 8; var n: usize = 4; for (0..k) |_| { try buf.writeInt(u8, bytes[n - 1], .big); n -= 1; } } return std.mem.alignForward(u64, idx, 8) / 8; } const Node = struct { symbol: u16 = 0, bits: u8 = 0, left: ?*Node = null, right: ?*Node = null, fn isLeaf(self: *Node) bool { 
return self.left == null and self.left == null; } fn insert(self: *Node, codec: *Self, c: u32, symbol: u16, len: u8) !void { //_ = symbol; var code = c << @intCast(32 - len); const mask: u32 = 0x80000000; var current = self; for (0..len) |_| { var new_node: ?*Node = null; if (mask & code > 0) { new_node = if (current.right) |n| n else blk: { const n = codec.makeNode(); current.right = n; break :blk n; }; } else { new_node = if (current.left) |n| n else blk: { const n = codec.makeNode(); current.left = n; break :blk n; }; } new_node.?.symbol = symbol; new_node.?.bits = len; current = new_node.?; code <<= 1; } } fn getBranch(self: *Node, code: u8) *Node { if (code == 1) { return self.right.?; } else if (code == 0) { return self.left.?; } @panic("unfukabol"); } fn debug(self: *Node) void { if (self.right) |r| r.debug(); if (self.left) |l| l.debug(); if (self.isLeaf()) std.debug.print("leaf: {c}\n", .{self.symbol}); } }; //var gpa = std.heap.GeneralPurposeAllocator(.{}){}; pub fn decode(self: *Self, input: []const u8, output: anytype) !usize { var outidx: usize = 0; var space: usize = 0; var t = self.tree; var bitlen: i32 = 0; for (input) |v| { var value = v; for (0..8) |_| { t = t.getBranch(value >> 7); if (t.isLeaf()) { if (t.symbol == hcodes[256]) @panic("EOS has been detected!!! 
call 911"); try output.writeInt(u8, @truncate(t.symbol), .little); bitlen += t.bits; t = self.tree; outidx += 1; } value <<= 1; } space += 1; } return space; } pub fn encodeInt(value: u64, n: u4, stream: anytype) !usize { std.debug.assert(n > 0); var pos: usize = 0; var v = value; const max_int = math.pow(usize, 2, n); if (value < max_int - 1) { try stream.writeInt(u8, @intCast(value), .little); pos += 1; } else { try stream.writeInt(u8, @intCast(max_int - 1), .little); pos += 1; v = v - (max_int - 1); while (v >= 128) : (v /= 128) { try stream.writeInt(u8, @intCast(v % 128 + 128), .little); pos += 1; } try stream.writeInt(u8, @intCast(v), .little); pos += 1; } return pos; } pub fn decodeInt(value: []const u8, n: u4, end: *usize) u64 { end.* = 1; const max_int = math.pow(usize, 2, n); var result = value[0] & (max_int - 1); if (result < max_int - 1) return result; var m: usize = 0; while (true) { const b = value[end.*]; result = result + (b & 127) * math.pow(usize, 2, m); m = m + 7; end.* += 1; if (b & 128 != 128) break; } return result; } pub fn decodePlainString(source: []const u8, stream: anytype) !usize { if (source[0] & 128 > 0) { std.debug.panic("Huffamn encoded\n", .{}); } var end: usize = 0; const len = decodeInt(source, 7, &end); const slice = source[end .. len + 1]; try stream.writer().writeAll(); return slice.len; } pub fn decodeHuffmanString(ctx: *Self, source: []const u8, stream: anytype) usize { if (source[0] & 128 == 0) std.debug.panic("Plain string detected\n", .{}); var end: usize = 0; const len = decodeInt(source, 7, &end); return ctx.decode(source[end .. 
len + 1], stream); } pub fn decodeString(ctx: *Self, source: []const u8, output: anytype) !usize { if (source[0] & 128 == 0) return try decodePlainString(source, output); return ctx.decodeHuffmanString(source, output); } test "fuckin tree" { const tv1 = [_]u8{ '>', 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1 }; const tv2 = [_]u8{ '?', 1, 1, 1, 1, 1, 1, 1, 1, 0, 0 }; const tv3 = [_]u8{ '@', 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0 }; const tv4 = [_]u8{ 'A', 1, 0, 0, 0, 0, 1 }; const tv5 = [_]u8{ 'D', 1, 0, 1, 1, 1, 1, 1 }; const tv6 = [_]u8{ 'Q', 1, 1, 0, 1, 1, 0, 0 }; const tv7 = [_]u8{ 'm', 1, 0, 1, 0, 0, 1 }; const tv8 = [_]u8{ ']', 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0 }; const tvs = [_][]const u8{ tv1[0..], tv2[0..], tv3[0..], tv4[0..], tv5[0..], tv6[0..], tv7[0..], tv8[0..] }; //const malloc = gpa.allocator(); //var tree = try Node.init(malloc); //for (hcodes, lens, 0..) |code, len, sym| // try tree.insert(code, @truncate(sym), @intCast(len)); const codec = Self.init(); for (tvs) |value| { var t = codec.tree; for (value[1..]) |bit| { t = t.getBranch(bit); } try std.testing.expectEqual(value[0], t.symbol); try std.testing.expect(t.isLeaf()); } } test "enc dec" { var bufs: [255][200]u8 = undefined; for (&bufs) |*value| { std.crypto.random.bytes(value[0..]); } //const malloc = std.testing.allocator; var ctx = Self.init(); //defer ctx.deinit(); var enc = [_]u8{0} ** 1000; var dec = [_]u8{0} ** 1000; var encstream = std.io.fixedBufferStream(enc[0..]); var decstream = std.io.fixedBufferStream(dec[0..]); for (bufs[0..]) |buf| { defer { encstream.reset(); decstream.reset(); } const l = calcEncodedLength(buf[0..]); const out = try encode(buf[0..], encstream.writer()); try std.testing.expectEqual(out, l); _ = try decode(&ctx, enc[0..out], decstream.writer()); try std.testing.expectEqualSlices(u8, buf[0..], dec[0..buf.len]); } } pub fn calcEncodedLength(source: []const u8) usize { var idx: usize = 0; for (source) |value| { const codelen = codes.huffman_code_lengths[value]; idx += 
codelen; } return std.mem.alignForward(u64, idx, 8) / 8; } test "leaf ones" { //const malloc = gpa.allocator(); //var tree = try Node.init(malloc); //for (hcodes, lens, 0..) |code, len, sym| // try tree.insert(code, @truncate(sym), @intCast(len)); const codec = Self.init(); var t = codec.tree; for (0..24) |_| { t = t.getBranch(1); try std.testing.expect(!t.isLeaf()); } } test "integer" { var buf = [_]u8{0} ** 10; var stream = std.io.fixedBufferStream(buf[0..]); var int: usize = 0; var a = try encodeInt(500, 3, stream.writer()); var out = decodeInt(buf[0..a], 3, &int); try std.testing.expect(out == 500); stream.reset(); a = try encodeInt(500, 1, stream.writer()); out = decodeInt(buf[0..a], 1, &int); try std.testing.expect(out == 500); stream.reset(); a = try encodeInt(500, 8, stream.writer()); out = decodeInt(buf[0..a], 8, &int); try std.testing.expect(out == 500); stream.reset(); a = try encodeInt(50000000000, 8, stream.writer()); out = decodeInt(buf[0..a], 8, &int); try std.testing.expect(out == 50000000000); stream.reset(); } test "integer doubt" { var buf = [_]u8{0} ** 10; var stream = std.io.fixedBufferStream(buf[0..]); for (1..9) |j| { for (0..1000000) |i| { var int: usize = 0; const a = try encodeInt(i, @truncate(j), stream.writer()); const out = decodeInt(buf[0..a], @truncate(j), &int); try std.testing.expectEqual(out, i); stream.reset(); } } }
0
repos/http2.0/src
repos/http2.0/src/hpack/static_table.zig
const std = @import("std"); const Map = std.AutoHashMap; const whash = std.hash.Wyhash; table: std.HashMap(HeaderField, usize, HashCtx, 75), //Map(HeaderField, usize), const HashCtx = struct { pub fn hash(_: HashCtx, header: HeaderField) u64 { var hashfn = whash.init(0); hashfn.update(header.name); hashfn.update(header.value); return hashfn.final(); } pub fn eql(_: HashCtx, a: HeaderField, b: HeaderField) bool { var hashfn = whash.init(0); hashfn.update(a.name); hashfn.update(a.value); const ha = hashfn.final(); hashfn = whash.init(0); hashfn.update(b.name); hashfn.update(b.value); return ha == hashfn.final(); } }; const Self = @This(); pub const size = headers.len; pub fn init(allocator: std.mem.Allocator) !Self { var table = std.HashMap(HeaderField, usize, HashCtx, 75).init(allocator); //Map(HeaderField, usize).init(allocator); for (headers, 1..) |h, i| { try table.put(h, i); } return Self{ .table = table }; } pub fn deinit(self: *Self) void { self.table.deinit(); } pub inline fn getByValue(self: *Self, field: HeaderField) ?usize { // for(headers, 0..)|h, i| { // //std.debug.print("{s} -> {s}\n", .{h.name, field.name}); // if(std.mem.eql(u8, field.name, h.name) and std.mem.eql(u8, field.value, h.value)) // return i + 1; // } // return null; return self.table.get(field); } pub inline fn get(idx: usize) ?HeaderField { return headers[idx]; } pub const HeaderField = struct { name: []const u8 = "", value: []const u8 = "", pub fn size(self: *const HeaderField) usize { return self.name.len + self.value.len + 32; } pub fn eql(self: *const HeaderField, h: HeaderField) bool { return std.mem.eql(u8, self.name, h.name) and std.mem.eql(u8, self.value, h.value); } pub fn display(self: *const HeaderField) void { std.debug.print("{s}: {s}\n", .{ self.name, self.value }); } }; pub const headers = [_]HeaderField{ .{ .name = ":authority" }, .{ .name = ":method", .value = "GET" }, .{ .name = ":method", .value = "POST" }, .{ .name = ":path", .value = "/" }, .{ .name = ":path", .value 
= "/index.html" }, .{ .name = ":scheme", .value = "http" }, .{ .name = ":scheme", .value = "https" }, .{ .name = ":status", .value = "200" }, .{ .name = ":status", .value = "204" }, .{ .name = ":status", .value = "206" }, .{ .name = ":status", .value = "304" }, .{ .name = ":status", .value = "400" }, .{ .name = ":status", .value = "404" }, .{ .name = ":status", .value = "500" }, .{ .name = "accept-charset" }, .{ .name = "accept-encoding", .value = "gzip, deflate" }, .{ .name = "accept-language" }, .{ .name = "accept-ranges" }, .{ .name = "accept" }, .{ .name = "access-control-allow-origin" }, .{ .name = "age" }, .{ .name = "allow" }, .{ .name = "authorization" }, .{ .name = "cache-control" }, .{ .name = "content-disposition" }, .{ .name = "content-encoding" }, .{ .name = "content-language" }, .{ .name = "content-length" }, .{ .name = "content-location" }, .{ .name = "content-range" }, .{ .name = "content-type" }, .{ .name = "cookie" }, .{ .name = "date" }, .{ .name = "etag" }, .{ .name = "expect" }, .{ .name = "expires" }, .{ .name = "from" }, .{ .name = "host" }, .{ .name = "if-match" }, .{ .name = "if-modified-since" }, .{ .name = "if-none-match" }, .{ .name = "if-range" }, .{ .name = "if-unmodified-since" }, .{ .name = "last-modified" }, .{ .name = "link" }, .{ .name = "location" }, .{ .name = "max-forwards" }, .{ .name = "proxy-authenticate" }, .{ .name = "proxy-authorization" }, .{ .name = "range" }, .{ .name = "referer" }, .{ .name = "refresh" }, .{ .name = "retry-after" }, .{ .name = "server" }, .{ .name = "set-cookie" }, .{ .name = "strict-transport-security" }, .{ .name = "transfer-encoding" }, .{ .name = "user-agent" }, .{ .name = "vary" }, .{ .name = "via" }, .{ .name = "www-authenticate" } };
0
repos/http2.0/src
repos/http2.0/src/hpack/main.zig
const std = @import("std"); pub const Codec = @import("codec.zig"); pub const staticTable = @import("static_table.zig"); pub const DynamicTable = @import("dyn_table.zig"); pub const HeaderField = staticTable.HeaderField; pub const Tables = @import("tables.zig"); pub const Builder = @import("builder.zig"); pub const Parser = @import("parser.zig"); test { _ = std.testing.refAllDecls(Builder); _ = std.testing.refAllDecls(Tables); _ = std.testing.refAllDecls(Parser); _ = std.testing.refAllDecls(Codec); }
0
repos/http2.0/src
repos/http2.0/src/hpack/parser.zig
const std = @import("std"); //const math = std.math; //const testing = std.testing; const codec = @import("codec.zig"); const stable = @import("static_table.zig"); //const dtable = @import("dyn_table.zig"); //const Allocator = std.mem.Allocator; const t = @import("tables.zig"); const builder = @import("builder.zig"); const Parser = @import("parser.zig"); //var gpa = std.heap.GeneralPurposeAllocator(.{}){}; heap: std.io.FixedBufferStream([]u8), heapidx: usize = 0, ctx: *t, pub fn init(ctx: *t, heap: []u8) @This() { return @This(){ .heap = std.io.fixedBufferStream(heap), .ctx = ctx }; } pub fn parse(self: *@This(), in: []const u8, output: []stable.HeaderField) ![]stable.HeaderField { var input = in; var outidx: usize = 0; while (input.len > 0) { var step: bool = true; if (input[0] & 128 == 128) { //6.1. Indexed Header Field Representation const idx = decodeInt(&input, 7); if (self.ctx.at(idx - 1)) |h| output[outidx] = h; } else if (input[0] & 64 == 64) { //6.2.1. Literal Header Field with Incremental Indexing const idx = decodeInt(&input, 6); if (idx > 0) { if (self.ctx.at(idx - 1)) |h| output[outidx] = h; } else output[outidx].name = try self.decodeString(&input); output[outidx].value = try self.decodeString(&input); try self.ctx.dynamic_table.put(output[outidx]); } else if (input[0] & 32 == 32) { step = false; //6.3. Dynamic Table Size Update const new_size = decodeInt(&input, 5); if (new_size <= self.ctx.dynamic_table.set_capcity) { self.ctx.dynamic_table.resize(new_size); } else @panic("too large size"); } else if (input[0] & 16 == 16) { //6.2.3. Literal Header Field Never Indexed const idx = decodeInt(&input, 4); if (idx > 0) { if (self.ctx.at(idx - 1)) |h| output[outidx] = h; // else @panic("greeeat errrooorrr"); } else output[outidx].name = try self.decodeString(&input); output[outidx].value = try self.decodeString(&input); } else if (input[0] & 240 == 0) { //6.2.2. 
Literal Header Field without Indexing const idx = decodeInt(&input, 4); if (idx > 0) { if (self.ctx.at(idx - 1)) |h| output[outidx] = h; } else output[outidx].name = try self.decodeString(&input); output[outidx].value = try self.decodeString(&input); } //std.debug.print("{s} -> {s}\n", .{output[outidx].name, output[outidx].value}); if (step) outidx += 1; } return output[0..outidx]; } pub fn decodeString(self: *@This(), input: *[]const u8) ![]const u8 { var end: usize = 0; const compressed = input.*[0] & 128 == 128; const len = codec.decodeInt(input.*, 7, &end); input.* = input.*[end..]; const pos = self.heap.pos; if (compressed) _ = try self.ctx.codec.decode(input.*[0..len], self.heap.writer()) else try self.heap.writer().writeAll(input.*[0..len]); input.* = input.*[len..]; return self.heap.buffer[pos..self.heap.pos]; } pub fn decodeInt(input: *[]const u8, n: u4) u64 { var end: usize = 0; const res = codec.decodeInt(input.*, n, &end); input.* = input.*[end..]; return res; } var gpa = std.heap.GeneralPurposeAllocator(.{}){}; const tst = std.testing; test "plain" { const malloc = gpa.allocator(); var ctx = try t.init(malloc, 256); ctx.dynamic_table.max_capacity = 256; var heap = [_]u8{0} ** 4096; var p = @This().init(&ctx, heap[0..]); var headers = [_]stable.HeaderField{.{}} ** 50; { const expected = [_]stable.HeaderField{ .{ .name = ":status", .value = "302" }, .{ .name = "cache-control", .value = "private" }, .{ .name = "date", .value = "Mon, 21 Oct 2013 20:13:21 GMT" }, .{ .name = "location", .value = "https://www.example.com" } }; const out = try p.parse(&.{ 0x48, 0x03, 0x33, 0x30, 0x32, 0x58, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x61, 0x1d, 0x4d, 0x6f, 0x6e, 0x2c, 0x20, 0x32, 0x31, 0x20, 0x4f, 0x63, 0x74, 0x20, 0x32, 0x30, 0x31, 0x33, 0x20, 0x32, 0x30, 0x3a, 0x31, 0x33, 0x3a, 0x32, 0x31, 0x20, 0x47, 0x4d, 0x54, 0x6e, 0x17, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x2e, 0x63, 
0x6f, 0x6d, }, headers[0..]); for (out, expected) |a, b| try tst.expect(a.eql(b)); try tst.expectEqual(222, ctx.dynamic_table.capacity); } { const expected = [_]stable.HeaderField{ .{ .name = ":status", .value = "307" }, .{ .name = "cache-control", .value = "private" }, .{ .name = "date", .value = "Mon, 21 Oct 2013 20:13:21 GMT" }, .{ .name = "location", .value = "https://www.example.com" } }; const out = try p.parse(&.{ 0x48, 0x03, 0x33, 0x30, 0x37, 0xc1, 0xc0, 0xbf }, headers[0..]); for (out, expected) |a, b| try tst.expect(a.eql(b)); try tst.expectEqual(222, ctx.dynamic_table.capacity); } { const expected = [_]stable.HeaderField{ .{ .name = ":status", .value = "200" }, .{ .name = "cache-control", .value = "private" }, .{ .name = "date", .value = "Mon, 21 Oct 2013 20:13:22 GMT" }, .{ .name = "location", .value = "https://www.example.com" }, .{ .name = "content-encoding", .value = "gzip" }, .{ .name = "set-cookie", .value = "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1" } }; const out = try p.parse(&.{ 0x88, 0xc1, 0x61, 0x1d, 0x4d, 0x6f, 0x6e, 0x2c, 0x20, 0x32, 0x31, 0x20, 0x4f, 0x63, 0x74, 0x20, 0x32, 0x30, 0x31, 0x33, 0x20, 0x32, 0x30, 0x3a, 0x31, 0x33, 0x3a, 0x32, 0x32, 0x20, 0x47, 0x4d, 0x54, 0xc0, 0x5a, 0x04, 0x67, 0x7a, 0x69, 0x70, 0x77, 0x38, 0x66, 0x6f, 0x6f, 0x3d, 0x41, 0x53, 0x44, 0x4a, 0x4b, 0x48, 0x51, 0x4b, 0x42, 0x5a, 0x58, 0x4f, 0x51, 0x57, 0x45, 0x4f, 0x50, 0x49, 0x55, 0x41, 0x58, 0x51, 0x57, 0x45, 0x4f, 0x49, 0x55, 0x3b, 0x20, 0x6d, 0x61, 0x78, 0x2d, 0x61, 0x67, 0x65, 0x3d, 0x33, 0x36, 0x30, 0x30, 0x3b, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3d, 0x31 }, headers[0..]); for (out, expected) |a, b| try tst.expect(a.eql(b)); try tst.expectEqual(215, ctx.dynamic_table.capacity); } } test "compress" { const malloc = gpa.allocator(); var ctx = try t.init(malloc, 256); ctx.dynamic_table.max_capacity = 256; var heap = [_]u8{0} ** 4096; var p = @This().init(&ctx, heap[0..]); var headers = [_]stable.HeaderField{.{}} ** 50; { const 
expected = [_]stable.HeaderField{ .{ .name = ":status", .value = "302" }, .{ .name = "cache-control", .value = "private" }, .{ .name = "date", .value = "Mon, 21 Oct 2013 20:13:21 GMT" }, .{ .name = "location", .value = "https://www.example.com" } }; const out = try p.parse(&.{ 0x48, 0x82, 0x64, 0x02, 0x58, 0x85, 0xae, 0xc3, 0x77, 0x1a, 0x4b, 0x61, 0x96, 0xd0, 0x7a, 0xbe, 0x94, 0x10, 0x54, 0xd4, 0x44, 0xa8, 0x20, 0x05, 0x95, 0x04, 0x0b, 0x81, 0x66, 0xe0, 0x82, 0xa6, 0x2d, 0x1b, 0xff, 0x6e, 0x91, 0x9d, 0x29, 0xad, 0x17, 0x18, 0x63, 0xc7, 0x8f, 0x0b, 0x97, 0xc8, 0xe9, 0xae, 0x82, 0xae, 0x43, 0xd3 }, headers[0..]); for (out, expected) |a, b| try tst.expect(a.eql(b)); try tst.expectEqual(222, ctx.dynamic_table.capacity); } { const expected = [_]stable.HeaderField{ .{ .name = ":status", .value = "307" }, .{ .name = "cache-control", .value = "private" }, .{ .name = "date", .value = "Mon, 21 Oct 2013 20:13:21 GMT" }, .{ .name = "location", .value = "https://www.example.com" } }; const out = try p.parse(&.{ 0x48, 0x83, 0x64, 0x0e, 0xff, 0xc1, 0xc0, 0xbf }, headers[0..]); for (out, expected) |a, b| try tst.expect(a.eql(b)); try tst.expectEqual(222, ctx.dynamic_table.capacity); } { const expected = [_]stable.HeaderField{ .{ .name = ":status", .value = "200" }, .{ .name = "cache-control", .value = "private" }, .{ .name = "date", .value = "Mon, 21 Oct 2013 20:13:22 GMT" }, .{ .name = "location", .value = "https://www.example.com" }, .{ .name = "content-encoding", .value = "gzip" }, .{ .name = "set-cookie", .value = "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1" } }; const out = try p.parse(&.{ 0x88, 0xc1, 0x61, 0x96, 0xd0, 0x7a, 0xbe, 0x94, 0x10, 0x54, 0xd4, 0x44, 0xa8, 0x20, 0x05, 0x95, 0x04, 0x0b, 0x81, 0x66, 0xe0, 0x84, 0xa6, 0x2d, 0x1b, 0xff, 0xc0, 0x5a, 0x83, 0x9b, 0xd9, 0xab, 0x77, 0xad, 0x94, 0xe7, 0x82, 0x1d, 0xd7, 0xf2, 0xe6, 0xc7, 0xb3, 0x35, 0xdf, 0xdf, 0xcd, 0x5b, 0x39, 0x60, 0xd5, 0xaf, 0x27, 0x08, 0x7f, 0x36, 0x72, 0xc1, 0xab, 0x27, 0x0f, 0xb5, 0x29, 
0x1f, 0x95, 0x87, 0x31, 0x60, 0x65, 0xc0, 0x03, 0xed, 0x4e, 0xe5, 0xb1, 0x06, 0x3d, 0x50, 0x07 }, headers[0..]); for (out, expected) |a, b| try tst.expect(a.eql(b)); try tst.expectEqual(215, ctx.dynamic_table.capacity); } { const expected = [_]stable.HeaderField{.{ .name = "password", .value = "secret" }}; const out = try p.parse(&.{ 0x10, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74 }, headers[0..]); for (out, expected) |a, b| try tst.expect(a.eql(b)); } } test "table resize" { const malloc = gpa.allocator(); var ctx = try t.init(malloc, 4096); var heap = [_]u8{0} ** 4096; var buildbuf = [_]u8{0} ** 4096; var p = Parser.init(&ctx, heap[0..]); var b = builder.init(&ctx, buildbuf[0..]); try b.addDynResize(4096); try b.addDynResize(405); try b.addDynResize(789); try b.addDynResize(45); try b.addDynResize(678); var headers = [_]stable.HeaderField{.{}} ** 50; { const out = try p.parse(b.final(), headers[0..]); try tst.expectEqual(out.len, 0); try tst.expectEqual(678, ctx.dynamic_table.max_capacity); } }
0
repos/http2.0/src
repos/http2.0/src/hpack/tables.zig
const std = @import("std"); const math = std.math; const testing = std.testing; const codec = @import("codec.zig"); const stable = @import("static_table.zig"); const dtable = @import("dyn_table.zig"); const Allocator = std.mem.Allocator; //var gpa = std.heap.GeneralPurposeAllocator(.{}){}; dynamic_table: dtable, static_table: stable, allocator: Allocator, codec: codec, pub fn init(allocator: Allocator, dynamic_capacity: usize) !@This() { return @This(){ .allocator = allocator, .dynamic_table = dtable.init(allocator, dynamic_capacity), .static_table = try stable.init(allocator), .codec = codec.init() }; } pub fn get(self: *@This(), header: stable.HeaderField) ?usize { if (self.dynamic_table.getByValue(header)) |h| { return h; } if (self.static_table.getByValue(header)) |h| return h; return null; } pub fn at(self: *@This(), idx: usize) ?stable.HeaderField { if (idx >= stable.size + self.dynamic_table.table.items.len) return null; if (idx < stable.size) return stable.get(idx); return self.dynamic_table.table.items[idx - stable.size]; } pub fn deinit(self: *@This()) void { self.dynamic_table.deinit(); self.static_table.deinit(); } pub fn clear(self: *@This()) void { self.dynamic_table.clear(); }
0
repos/http2.0/src
repos/http2.0/src/hpack/field.zig
const HeaderField = @import("static_table.zig").HeaderField; pub fn getHeaderFieldLen(headers: []HeaderField) usize { var l: usize = 0; for (headers) |h| l += h.size(); return l; } fn cmp(_: void, lhs: HeaderField, rhs: HeaderField) bool { return lhs.size() > rhs.size(); } pub const SliceResult = struct { a: []HeaderField, b: []HeaderField, }; pub fn sliceOnFieldlen(headers: []HeaderField, max: usize) SliceResult { var l: usize = 0; for (headers, 0..) |value, i| { if (value.size() + l > max) { const res = SliceResult{ .a = headers[0..i], .b = headers[i..] }; if (res.a.len == 0) @panic("failed to find headers of max size and below"); return res; } l += value.size(); } if (l != getHeaderFieldLen(headers)) @panic("failed to find headers of max size and below"); return .{ .a = headers[0..], .b = headers[0..0] }; }
0
repos/http2.0/src
repos/http2.0/src/hpack/builder.zig
const std = @import("std"); const math = std.math; const testing = std.testing; const codec = @import("codec.zig"); const stable = @import("static_table.zig"); const dtable = @import("dyn_table.zig"); const Allocator = std.mem.Allocator; const t = @import("tables.zig"); //var gpa = std.heap.GeneralPurposeAllocator(.{}){}; const Self = @This(); //allocator: std.mem.Allocator, buf: std.io.FixedBufferStream([]u8), //std.ArrayList(u8), ctx: *t, header_list_len: u24 = 0, pub fn init(ctx: *t, buf: []u8) Self { return Self{ .buf = std.io.fixedBufferStream(buf), .ctx = ctx }; } pub fn add(self: *Self, header: stable.HeaderField, index: bool, never_index: bool) !void { self.header_list_len += @intCast(header.size()); if (self.ctx.get(header)) |idx| { const pos = self.buf.pos; _ = try codec.encodeInt(idx, 7, self.buf.writer()); self.buf.buffer[pos] |= 128; } else if (self.ctx.get(.{ .name = header.name, .value = if (std.mem.eql(u8, ":path", header.name)) "/" else "" })) |idx| { var pos = self.buf.pos; if (never_index) { _ = try codec.encodeInt(idx, 4, self.buf.writer()); self.buf.buffer[pos] |= 16; } else if (index) { try self.ctx.dynamic_table.put(header); _ = try codec.encodeInt(idx, 6, self.buf.writer()); self.buf.buffer[pos] |= 64; } else { _ = try codec.encodeInt(idx, 4, self.buf.writer()); self.buf.buffer[pos] &= 0xf; } pos = self.buf.pos; _ = try codec.encodeInt(header.value.len, 7, self.buf.writer()); try self.buf.writer().writeAll(header.value); self.buf.buffer[pos] &= 127; } else { if (never_index) { try self.buf.writer().writeInt(u8, 16, .little); } else if (index) { try self.ctx.dynamic_table.put(header); try self.buf.writer().writeInt(u8, 64, .little); } else { try self.buf.writer().writeInt(u8, 0, .little); } var pos = self.buf.pos; _ = try codec.encodeInt(header.name.len, 7, self.buf.writer()); try self.buf.writer().writeAll(header.name); self.buf.buffer[pos] &= 127; pos = self.buf.pos; _ = try codec.encodeInt(header.value.len, 7, self.buf.writer()); try 
self.buf.writer().writeAll(header.value); self.buf.buffer[pos] &= 127; } } pub fn addCompress(self: *Self, header: stable.HeaderField, index: bool, never_index: bool) !void { if (self.ctx.get(header)) |idx| { const pos = self.buf.pos; _ = try codec.encodeInt(idx, 7, self.buf.writer()); self.buf.buffer[pos] |= 128; } else if (self.ctx.get(.{ .name = header.name, .value = if (std.mem.eql(u8, ":path", header.name)) "/" else "" })) |idx| { var pos = self.buf.pos; if (never_index) { _ = try codec.encodeInt(idx, 4, self.buf.writer()); self.buf.buffer[pos] |= 16; } else if (index) { try self.ctx.dynamic_table.put(header); _ = try codec.encodeInt(idx, 6, self.buf.writer()); self.buf.buffer[pos] |= 64; } else { _ = try codec.encodeInt(idx, 4, self.buf.writer()); self.buf.buffer[pos] &= 0xf; } pos = self.buf.pos; _ = try codec.encodeInt(codec.calcEncodedLength(header.value), 7, self.buf.writer()); //try self.buf.writer().writeAll(header.value); _ = try codec.encode(header.value, self.buf.writer()); self.buf.buffer[pos] |= 128; } else { if (never_index) { try self.buf.writer().writeInt(u8, 16, .little); } else if (index) { try self.ctx.dynamic_table.put(header); try self.buf.writer().writeInt(u8, 64, .little); } else { try self.buf.writer().writeInt(u8, 0, .little); } var pos = self.buf.pos; _ = try codec.encodeInt(codec.calcEncodedLength(header.name), 7, self.buf.writer()); //try self.buf.writer().writeAll(header.name); _ = try codec.encode(header.name, self.buf.writer()); self.buf.buffer[pos] |= 128; pos = self.buf.pos; _ = try codec.encodeInt(codec.calcEncodedLength(header.value), 7, self.buf.writer()); //try self.buf.writer().writeAll(header.value); _ = try codec.encode(header.value, self.buf.writer()); self.buf.buffer[pos] |= 128; } } pub fn addDynResize(self: *Self, size: usize) !void { const pos = self.buf.pos; _ = try codec.encodeInt(size, 5, self.buf.writer()); self.buf.buffer[pos] |= 32; } pub fn addSlice(self: *Self, headers: []stable.HeaderField) !void { for 
(headers) |value| try self.add(value, false, false); } pub fn final(self: *Self) []const u8 { return self.buf.buffer[0..self.buf.pos]; } pub fn clear(self: *Self) void { //try self.buf.seekTo(0); self.buf.pos = 0; } pub fn deinit(_: *Self) void { //self.buf.deinit(); } test "plain" { const malloc = std.testing.allocator; var ctx = try t.init(malloc, 4096); defer ctx.deinit(); var header_buf = [_]u8{0} ** 4096; var b = @This().init(&ctx, header_buf[0..]); defer b.deinit(); { defer b.clear(); try b.add(.{ .name = "custom-key", .value = "custom-header" }, true, false); try std.testing.expectEqualSlices(u8, &.{ 0x40, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x2d, 0x6b, 0x65, 0x79, 0x0d, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x2d, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72 }, b.final()); } { defer b.clear(); try b.add(.{ .name = ":path", .value = "/sample/path" }, false, false); try std.testing.expectEqualSlices(u8, &.{ 0x04, 0x0c, 0x2f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x2f, 0x70, 0x61, 0x74, 0x68 }, b.final()); } { defer b.clear(); try b.add(.{ .name = "password", .value = "secret" }, false, true); try std.testing.expectEqualSlices(u8, &.{ 0x10, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74 }, b.final()); } { defer b.clear(); defer ctx.clear(); try b.add(.{ .name = ":method", .value = "GET" }, false, false); try std.testing.expectEqualSlices(u8, &.{0x82}, b.final()); } { defer b.clear(); //defer ctx.clear(); try b.add(.{ .name = ":method", .value = "GET" }, false, true); try b.add(.{ .name = ":scheme", .value = "http" }, false, true); try b.add(.{ .name = ":path", .value = "/" }, false, true); try b.add(.{ .name = ":authority", .value = "www.example.com" }, true, false); try std.testing.expectEqualSlices(u8, &.{ 0x82, 0x86, 0x84, 0x41, 0x0f, 0x77, 0x77, 0x77, 0x2e, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d }, b.final()); try testing.expectEqual(57, ctx.dynamic_table.capacity); } { defer b.clear(); 
//defer ctx.clear(); try b.add(.{ .name = ":method", .value = "GET" }, false, true); try b.add(.{ .name = ":scheme", .value = "http" }, false, true); try b.add(.{ .name = ":path", .value = "/" }, false, true); try b.add(.{ .name = ":authority", .value = "www.example.com" }, true, false); try b.add(.{ .name = "cache-control", .value = "no-cache" }, true, false); try std.testing.expectEqualSlices(u8, &.{ 0x82, 0x86, 0x84, 0xbe, 0x58, 0x08, 0x6e, 0x6f, 0x2d, 0x63, 0x61, 0x63, 0x68, 0x65 }, b.final()); try testing.expectEqual(110, ctx.dynamic_table.capacity); } { defer b.clear(); //defer ctx.clear(); try b.add(.{ .name = ":method", .value = "GET" }, false, true); try b.add(.{ .name = ":scheme", .value = "https" }, false, true); try b.add(.{ .name = ":path", .value = "/index.html" }, false, true); try b.add(.{ .name = ":authority", .value = "www.example.com" }, true, false); try b.add(.{ .name = "custom-key", .value = "custom-value" }, true, false); try std.testing.expectEqualSlices(u8, &.{ 0x82, 0x87, 0x85, 0xbf, 0x40, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x2d, 0x6b, 0x65, 0x79, 0x0c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x2d, 0x76, 0x61, 0x6c, 0x75, 0x65 }, b.final()); try testing.expectEqual(164, ctx.dynamic_table.capacity); } } test "compress" { const malloc = std.testing.allocator; var ctx = try t.init(malloc, 4096); var header_buf = [_]u8{0} ** 4096; defer ctx.deinit(); var b = @This().init(&ctx, header_buf[0..]); defer b.deinit(); { defer b.clear(); //defer ctx.clear(); try b.addCompress(.{ .name = ":method", .value = "GET" }, false, true); try b.addCompress(.{ .name = ":scheme", .value = "http" }, false, true); try b.addCompress(.{ .name = ":path", .value = "/" }, false, true); try b.addCompress(.{ .name = ":authority", .value = "www.example.com" }, true, false); try std.testing.expectEqualSlices(u8, &.{ 0x82, 0x86, 0x84, 0x41, 0x8c, 0xf1, 0xe3, 0xc2, 0xe5, 0xf2, 0x3a, 0x6b, 0xa0, 0xab, 0x90, 0xf4, 0xff }, b.final()); try testing.expectEqual(57, 
ctx.dynamic_table.capacity); } { defer b.clear(); //defer ctx.clear(); try b.addCompress(.{ .name = ":method", .value = "GET" }, false, true); try b.addCompress(.{ .name = ":scheme", .value = "http" }, false, true); try b.addCompress(.{ .name = ":path", .value = "/" }, false, true); try b.addCompress(.{ .name = ":authority", .value = "www.example.com" }, true, false); try b.addCompress(.{ .name = "cache-control", .value = "no-cache" }, true, false); try std.testing.expectEqualSlices(u8, &.{ 0x82, 0x86, 0x84, 0xbe, 0x58, 0x86, 0xa8, 0xeb, 0x10, 0x64, 0x9c, 0xbf }, b.final()); try testing.expectEqual(110, ctx.dynamic_table.capacity); } { defer b.clear(); //defer ctx.clear(); try b.addCompress(.{ .name = ":method", .value = "GET" }, false, true); try b.addCompress(.{ .name = ":scheme", .value = "https" }, false, true); try b.addCompress(.{ .name = ":path", .value = "/index.html" }, false, true); try b.addCompress(.{ .name = ":authority", .value = "www.example.com" }, true, false); try b.addCompress(.{ .name = "custom-key", .value = "custom-value" }, true, false); try std.testing.expectEqualSlices(u8, &.{ 0x82, 0x87, 0x85, 0xbf, 0x40, 0x88, 0x25, 0xa8, 0x49, 0xe9, 0x5b, 0xa9, 0x7d, 0x7f, 0x89, 0x25, 0xa8, 0x49, 0xe9, 0x5b, 0xb8, 0xe8, 0xb4, 0xbf }, b.final()); try testing.expectEqual(164, ctx.dynamic_table.capacity); } }
0
repos/http2.0/src
repos/http2.0/src/hpack/dyn_table.zig
const std = @import("std"); const static = @import("static_table.zig"); const List = std.ArrayList(HeaderField); const HeaderField = static.HeaderField; var gpa = std.heap.GeneralPurposeAllocator(.{}){}; set_capcity: usize = 4096, capacity: usize = 0, max_capacity: usize, table: List, const Self = @This(); pub fn init(allocator: std.mem.Allocator, max_capacity: usize) Self { return Self{ .max_capacity = max_capacity, .table = List.init(allocator), .set_capcity = max_capacity }; } pub fn deinit(self: *Self) void { self.table.deinit(); } pub fn put(self: *Self, header: HeaderField) !void { const header_size = header.size(); var gap = self.max_capacity - self.capacity; if (header_size > self.max_capacity) { self.capacity = 0; return self.table.clearRetainingCapacity(); } else if (header_size > gap) { while (header_size > gap) { const s = self.table.pop().size(); gap += s; self.capacity -= s; } } try self.table.insert(0, header); self.capacity += header_size; } pub fn get(self: *Self, idx: usize) HeaderField { return self.table.items[idx]; } pub fn getByValue(self: *Self, field: HeaderField) ?usize { for (self.table.items, static.size + 1..) |h, i| { if (std.mem.eql(u8, field.name, h.name) and std.mem.eql(u8, field.value, h.value)) return i; } return null; } pub fn resize(self: *Self, new_size: u64) void { self.max_capacity = new_size; while (self.capacity > self.max_capacity) self.max_capacity -= self.table.pop().size(); } pub fn clear(self: *Self) void { self.capacity = 0; self.table.clearRetainingCapacity(); }
0
repos
repos/pluto/fat32_cp.sh
#!/usr/bin/env bash set -ex IMAGE_PATH_DIR=$1 mkdir test/fat32/mnt whoami if [ "$(whoami)" = "root" ]; then echo "Am root" mount -o utf8=true $IMAGE_PATH_DIR test/fat32/mnt/ cp -r test/fat32/test_files/. test/fat32/mnt/ umount test/fat32/mnt/ else echo "Not root" sudo mount -o utf8=true $IMAGE_PATH_DIR test/fat32/mnt/ sudo cp -r test/fat32/test_files/. test/fat32/mnt/ sudo umount test/fat32/mnt/ fi rm -rf test/fat32/mnt
0
repos
repos/pluto/mkfat32.zig
const std = @import("std"); // This is the assembly for the FAT bootleader. // [bits 16] // [org 0x7C00] // // jmp short _start // nop // // times 87 db 0xAA // // _start: // jmp long 0x0000:start_16bit // // start_16bit: // cli // mov ax, cs // mov ds, ax // mov es, ax // mov ss, ax // mov sp, 0x7C00 // lea si, [message] // .print_string_with_new_line: // mov ah, 0x0E // xor bx, bx // .print_string_loop: // lodsb // cmp al, 0 // je .print_string_done // int 0x10 // jmp short .print_string_loop // .print_string_done: // mov al, 0x0A // int 0x10 // mov al, 0x0D // int 0x10 // // .reboot: // xor ah, ah // int 0x16 // int 0x19 // // .loop_forever: // hlt // jmp .loop_forever // message db "This is not a bootable disk. Please insert a bootable floppy and press any key to try again", 0 // times 510 - ($ - $$) db 0 // dw 0xAA55 /// A FAT32 static structure for creating a empty FAT32 image. This contains helper functions /// for creating a empty FAT32 image. /// For more information, see: https://en.wikipedia.org/wiki/Design_of_the_FAT_file_system pub const Fat32 = struct { /// The FAT32 boot sector header (without the jmp and boot code) including the extended /// parameter block for FAT32 with the extended signature 0x29. const Header = struct { /// The OEM. oem: [8]u8 = "ZystemOS".*, /// The number of bytes per sector. This needs to be initialised by the user or use the /// default value at runtime. Use `getDefaultSectorSize` for the default. bytes_per_sector: u16, /// The number of sectors per cluster. This needs to be initialised by the user or use the /// default value at runtime. Use `getDefaultSectorsPerCluster` for the default. sectors_per_cluster: u8, /// The number of reserved sectors at the beginning of the image. This is where the fat /// header, FSInfo and FAT is stored. The default is 32 sectors. /// TODO: Investigate if this can be reduced if not all sectors are needed. reserved_sectors: u16 = getReservedSectors(), /// The number of FAT's. This is always 2. 
fat_count: u8 = 2, /// The size in bytes of the root directory. This is only used by FAT12 and FAT16, so is /// always 0 for FAT32. zero_root_directory_size: u16 = 0, /// The total number of sectors. This is only used for FAT12 and FAT16, so is always 0 for /// FAT32. zero_total_sectors_16: u16 = 0, /// The media type. This is used to identify the type of media this image is. As all FAT's /// that are created as a fixed disk, non of that old school floppy, this is always 0xF8. media_descriptor_type: u8 = 0xF8, /// The total number of sectors of the FAT. This is only used for FAT12 and FAT16, so /// always 0 for FAT32. zero_sectors_per_fat: u16 = 0, /// The number of sectors per track for Int 13h. This isn't really needed as we are /// creating a fixed disk. An example used 63. sectors_per_track: u16 = 63, /// The number of heads for Int 13h. This isn't really needed as we are creating a fixed /// disk. An example used 255. head_count: u16 = 255, /// The number of hidden sectors. As there is no support for partitions, this is set ot zero. hidden_sectors: u32 = 0, /// The total number of sectors of the image. This is determined at runtime by the size of /// image and the sector size. total_sectors: u32, /// The number of sectors the FAT takes up. This is set based on the size of the image. sectors_per_fat: u32, /// Mirror flags. If bit 7 is set, then bits 0-3 represent the number of active FAT entries /// (zero based) and if clear, all FAT's are mirrored. Always mirror, so zero. mirror_flags: u16 = 0x0000, /// The version. This is usually always zero (0.0). If version is 1.2, this will be stored /// as 0x0201. version_number: u16 = 0x0000, /// The start cluster of the root directory. This is usually always 2 unless there is a bad /// sector, 0 or 1 are invalid. root_directory_cluster: u32 = 2, /// The sector where is the FS information sector is located. A value of 0x0000 or 0xFFFF /// indicates that there isn't a FSInfo structure. 
A value of 0x0000 should not be treated /// as a valid FSInfo sector. Without a FS Information Sector, the minimum allowed logical /// sector size of FAT32 volumes can be reduced down to 128 bytes for special purposes. /// This is usually sector 1. fsinfo_sector: u16 = 1, /// The sector for the backup boot record where the first 3 sectors are copied. A value of /// 0x000 or 0xFFFF indicate there is no backup. This usually 6. backup_boot_sector: u16 = 6, /// Reserved,. All zero. reserved0: u96 = 0x000000000000, /// The physical drive number for Int 13h in the BIOS. 0x80 is for fixed disks, but as this /// is used for bootloaders, this isn't really needed. drive_number: u8 = 0x80, /// Reserved. All zero. reserved1: u8 = 0x00, /// The extended boot signature. 0x29, can be 0x28, but will only accept 0x29. If 0x28, /// then the below (serial_number, volume_label and filesystem_type) will be unavailable. signature: u8 = 0x29, /// The serial number of the FAT image at format. This is a function of the current /// timestamp of creation. serial_number: u32, /// The partitioned volume label. This can be set by the user else the default name will be /// give: "NO NAME ". volume_label: [11]u8, /// The file system type, "FAT32 " for FAT32. filesystem_type: [8]u8 = "FAT32 ".*, }; /// Options used for initialising a new FAT image. pub const Options = struct { /// The FAT32 image size in bytes. This is not the formatted size. /// The default (recommenced smallest) is 34090496 bytes (~32.5KB). This can be reduced to /// the lowest value of 17920 bytes (17.5KB). This image size is the smallest possible size /// where you can only create files, but not write to them. This image size can be mounted /// in Linux and ZystemOS. image_size: u32 = getDefaultImageSize(), /// The sector size in bytes. The minimum value for this is 512, and a maximum of 4096. /// This also must be a multiple of 512. Default 512. 
sector_size: u16 = getDefaultSectorSize(), /// The number of sectors per clusters. A default is chosen depending on the image size and /// sector size. Valid value are: 1, 2, 4, 8, 16, 32, 64 and 128. Default 1. cluster_size: u8 = getDefaultSectorsPerCluster(getDefaultImageSize(), getDefaultSectorSize()) catch unreachable, /// The formatted volume name. Volume names shorter than 11 characters must have trailing /// spaces. Default NO MANE. volume_name: [11]u8 = getDefaultVolumeName(), }; /// The error set for the static functions for creating a FAT32 image. pub const Error = error{ /// If the FAT image that the user want's to create is too small: < 17.5KB. TooSmall, /// If the FAT image that the user want's to create is too large: > 2TB. TooLarge, /// The value in a option provided from the user is invalid. InvalidOptionValue, }; /// The log function for printing errors when creating a FAT32 image. const log = std.log.scoped(.mkfat32); /// The bootloader code for the FAT32 boot sector. const bootsector_boot_code = [512]u8{ 0xEB, 0x58, 0x90, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0x66, 0xEA, 0x62, 0x7C, 0x00, 0x00, 0x00, 0x00, 0xFA, 0x8C, 0xC8, 0x8E, 0xD8, 0x8E, 0xC0, 0x8E, 0xD0, 0xBC, 0x00, 0x7C, 0x8D, 0x36, 0x8F, 0x7C, 0xB4, 0x0E, 0x31, 0xDB, 0xAC, 0x3C, 0x00, 0x74, 0x04, 0xCD, 0x10, 0xEB, 0xF7, 0xB0, 0x0A, 0xCD, 0x10, 0xB0, 0x0D, 0xCD, 0x10, 0x30, 0xE4, 0xCD, 0x16, 0xCD, 0x19, 0xEB, 0xFE, 0x54, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x6E, 0x6F, 0x74, 0x20, 0x61, 0x20, 0x62, 0x6F, 0x6F, 
0x74, 0x61, 0x62, 0x6C, 0x65, 0x20, 0x64, 0x69, 0x73, 0x6B, 0x2E, 0x20, 0x50, 0x6C, 0x65, 0x61, 0x73, 0x65, 0x20, 0x69, 0x6E, 0x73, 0x65, 0x72, 0x74, 0x20, 0x61, 0x20, 0x62, 0x6F, 0x6F, 0x74, 0x61, 0x62, 0x6C, 0x65, 0x20, 0x66, 0x6C, 0x6F, 0x70, 0x70, 0x79, 0x20, 0x61, 0x6E, 0x64, 0x20, 0x70, 0x72, 0x65, 0x73, 0x73, 0x20, 0x61, 0x6E, 0x79, 0x20, 0x6B, 0x65, 0x79, 0x20, 0x74, 0x6F, 0x20, 0x74, 0x72, 0x79, 0x20, 0x61, 0x67, 0x61, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0xAA, }; /// /// A convenient function for returning the error types for reading, writing and seeking a stream. /// /// Argument: /// IN comptime StreamType: type - The stream to get the error set from. /// /// Return: type /// The Error set for reading, writing and seeking the stream. /// fn ErrorSet(comptime StreamType: type) type { const WriteError = switch (@typeInfo(StreamType)) { .Pointer => |p| p.child.WriteError, else => StreamType.WriteError, }; const SeekError = switch (@typeInfo(StreamType)) { .Pointer => |p| p.child.SeekError, else => StreamType.SeekError, }; return WriteError || SeekError; } /// /// Get the number of reserved sectors. The number of reserved sectors doesn't have to be 32, /// but this is a commonly used value. /// /// Return: u16 /// The number of reserved sectors. /// fn getReservedSectors() u16 { return 32; } /// /// Get the default sector size for a FAT32 image. (512) /// /// Return: u16 /// The default sector size (512). /// fn getDefaultSectorSize() u16 { // TODO: Look into if this could be different for different scenarios return 512; } /// /// Based on the image size, the sector size per cluster can vary. See /// https://support.microsoft.com/en-us/help/140365/default-cluster-size-for-ntfs-fat-and-exfat /// but with some modifications to allow for smaller size images (The smallest image size possible is 17.5KB): /// 17.5KB -> 64MB: 512 bytes /// 64MB -> 128MB: 1024 bytes (1KB) /// 128MB -> 256MB: 2048 bytes (2KB) /// 256MB -> 8GB: 4096 bytes (4KB) /// 8GB -> 16GB: 8192 bytes (8KB) /// 16GB -> 32GB: 16384 bytes (16KB) /// 32GB -> 2TB: 32768 bytes (32KB) /// else: Error /// /// The smallest size is calculated as follows: /// (reserved_sectors + num_of_fats + root_directory) * smallest_sector_size = (32 + 2 + 1) * 512 = 35 * 512 /// /// Arguments: /// IN image_size: u64 - The image size the user wants to create. 
/// IN bytes_per_sector: u16 - The bytes per sector the user defined, or the default value. /// /// Return: u16 /// The default sectors per cluster based on the image size and sector size. /// /// Error: Error /// error.TooSmall - If the image size is too small (below 35 * 512 (17.5KB)) /// error.TooLarge - If the image size is too large (above 2TB) /// fn getDefaultSectorsPerCluster(image_size: u64, bytes_per_sector: u16) Error!u8 { // A couple of constants to help const KB = 1024; const MB = 1024 * KB; const GB = 1024 * MB; const TB = 1024 * GB; return switch (image_size) { 0...35 * 512 - 1 => Error.TooSmall, 35 * 512...64 * MB - 1 => @intCast(u8, std.math.max(512, bytes_per_sector) / bytes_per_sector), 64 * MB...128 * MB - 1 => @intCast(u8, std.math.max(1024, bytes_per_sector) / bytes_per_sector), 128 * MB...256 * MB - 1 => @intCast(u8, std.math.max(2048, bytes_per_sector) / bytes_per_sector), 256 * MB...8 * GB - 1 => @intCast(u8, std.math.max(4096, bytes_per_sector) / bytes_per_sector), 8 * GB...16 * GB - 1 => @intCast(u8, std.math.max(8192, bytes_per_sector) / bytes_per_sector), 16 * GB...32 * GB - 1 => @intCast(u8, std.math.max(16384, bytes_per_sector) / bytes_per_sector), 32 * GB...2 * TB - 1 => @intCast(u8, std.math.max(32768, bytes_per_sector) / bytes_per_sector), else => Error.TooLarge, }; } /// /// Get the default volume name of a FAT image: "NO NAME ". /// /// Return: [11]u8 /// The default volume name. /// fn getDefaultVolumeName() [11]u8 { return "NO NAME ".*; } /// /// Get the default image size: 34090496 (~32.5MB). This is the recommended minimum image size /// for FAT32. (Valid cluster values + (sectors per FAT * 2) + reserved sectors) * bytes per sector. /// /// Return: u32 /// The smallest recommended FAT32 image size. /// fn getDefaultImageSize() u32 { // The 513*2 was pre calculated return (0xFFF5 + 1026 + @as(u32, getReservedSectors())) * @as(u32, getDefaultSectorSize()); } /// /// Create the FAT32 serial number. 
This is generated from the date. Reference: /// https://www.digital-detective.net/documents/Volume%20Serial%20Numbers.pdf /// /// Return: u32 /// Serial number for a new FAT32 image. /// fn createSerialNumber() u32 { // TODO: Get the actual date. Currently there is no std lib for human readable date. const year = 2020; const month = 09; const day = 27; const hour = 13; const minute = 46; const second = 53; const millisecond_10 = 54; const p1 = (@as(u16, month) << 8) | day; const p2 = (@as(u16, second) << 8) | millisecond_10; const p3 = (@as(u16, hour) << 8) | minute; const p4 = p1 + p2; const p5 = p3 + year; return (@as(u32, p4) << 16) + p5; } /// /// Write the FSInfo and backup FSInfo sector to a stream. This uses a valid FAT32 boot sector /// header for creating the FSInfo sector. /// /// Argument: /// IN/OUT stream: anytype - The stream to write the FSInfo to. /// IN fat32_header: Header - A valid FAT32 boot header for creating the FSInfo sector. /// IN free_cluster_num: u32 - The number of free data clusters on the image. /// IN next_free_cluster: u32 - The next free data cluster to start looking for when writing /// files. /// /// Error @TypeOf(stream).WriteError || @TypeOf(stream).SeekError /// @TypeOf(stream).WriteError - If there is an error when writing. See the relevant error for the stream. /// @TypeOf(stream).SeekError - If there is an error when seeking. See the relevant error for the stream. 
/// fn writeFSInfo(stream: anytype, fat32_header: Header, free_cluster_num: u32, next_free_cluster: u32) ErrorSet(@TypeOf(stream))!void { const seekable_stream = stream.seekableStream(); const writer = stream.writer(); // Seek to the correct location try seekable_stream.seekTo(fat32_header.fsinfo_sector * fat32_header.bytes_per_sector); // First signature try writer.writeIntLittle(u32, 0x41615252); // These next bytes are reserved and unused try seekable_stream.seekBy(480); // Second signature try writer.writeIntLittle(u32, 0x61417272); // Last know free cluster count try writer.writeIntLittle(u32, free_cluster_num); // Cluster number to start looking for available clusters try writer.writeIntLittle(u32, next_free_cluster); // These next bytes are reserved and unused try seekable_stream.seekBy(12); // Trail signature try writer.writeIntLittle(u32, 0xAA550000); // Repeat again for the backup try seekable_stream.seekTo((fat32_header.backup_boot_sector + fat32_header.fsinfo_sector) * fat32_header.bytes_per_sector); try writer.writeIntLittle(u32, 0x41615252); try seekable_stream.seekBy(480); try writer.writeIntLittle(u32, 0x61417272); try writer.writeIntLittle(u32, free_cluster_num); try writer.writeIntLittle(u32, next_free_cluster); try seekable_stream.seekBy(12); try writer.writeIntLittle(u32, 0xAA550000); } /// /// Write the FAT to the stream. This sets up a blank FAT with end marker of 0x0FFFFFFF and root /// directory cluster of 0x0FFFFFFF (one cluster chain). /// /// Argument: /// IN/OUT stream: anytype - The stream to write the FSInfo to. /// IN fat32_header: Header - A valid FAT32 boot header for creating the FAT. /// /// Error @TypeOf(stream).WriteError || @TypeOf(stream).SeekError /// @TypeOf(stream).WriteError - If there is an error when writing. See the relevant error for the stream. /// @TypeOf(stream).SeekError - If there is an error when seeking. See the relevant error for the stream. 
/// fn writeFAT(stream: anytype, fat32_header: Header) ErrorSet(@TypeOf(stream))!void { const seekable_stream = stream.seekableStream(); const writer = stream.writer(); // This FAT is below the reserved sectors try seekable_stream.seekTo(fat32_header.reserved_sectors * fat32_header.bytes_per_sector); // Last byte is the same as the media descriptor, the rest are 1's. try writer.writeIntLittle(u32, 0xFFFFFF00 | @as(u32, fat32_header.media_descriptor_type)); // End of chain indicator. This can be 0x0FFFFFF8 - 0x0FFFFFFF, but 0x0FFFFFFF is better supported. try writer.writeIntLittle(u32, 0x0FFFFFFF); // Root director cluster, 0x0FFFFFFF = initially only one cluster for root directory try writer.writeIntLittle(u32, 0x0FFFFFFF); // Write the second FAT, same as the first try seekable_stream.seekTo(fat32_header.reserved_sectors * fat32_header.bytes_per_sector + (fat32_header.sectors_per_fat * fat32_header.bytes_per_sector)); try writer.writeIntLittle(u32, 0xFFFFFF00 | @as(u32, fat32_header.media_descriptor_type)); try writer.writeIntLittle(u32, 0x0FFFFFFF); try writer.writeIntLittle(u32, 0x0FFFFFFF); } /// /// Write the FAT boot sector with the boot code and FAT32 header to the stream. /// /// Argument: /// IN/OUT stream: anytype - The stream to write the FSInfo to. /// IN fat32_header: Header - A valid FAT32 boot header for creating the FAT. /// /// Error: @TypeOf(stream).WriteError || @TypeOf(stream).SeekError /// @TypeOf(stream).WriteError - If there is an error when writing. See the relevant error for the stream. /// @TypeOf(stream).SeekError - If there is an error when seeking. See the relevant error for the stream. 
/// fn writeBootSector(stream: anytype, fat32_header: Header) ErrorSet(@TypeOf(stream))!void { const seekable_stream = stream.seekableStream(); const writer = stream.writer(); var boot_sector: [512]u8 = undefined; std.mem.copy(u8, &boot_sector, &bootsector_boot_code); // Write the header into the boot sector variable var fat32_header_stream = std.io.fixedBufferStream(boot_sector[3..90]); inline for (std.meta.fields(Header)) |item| { switch (@typeInfo(item.field_type)) { .Array => |info| switch (info.child) { u8 => try fat32_header_stream.writer().writeAll(&@field(fat32_header, item.name)), else => @compileError("Unexpected field type: " ++ @typeName(info.child)), }, .Int => try fat32_header_stream.writer().writeIntLittle(item.field_type, @field(fat32_header, item.name)), else => @compileError("Unexpected field type: " ++ @typeName(@typeInfo(item.field_type))), } } // Write the bootstrap and header sector to the image try seekable_stream.seekTo(0); try writer.writeAll(&boot_sector); // Also write the backup sector to the image try seekable_stream.seekTo(fat32_header.backup_boot_sector * fat32_header.bytes_per_sector); try writer.writeAll(&boot_sector); } /// /// Create a valid FAT32 header with out the bootstrap code with either user defined parameters /// or default FAT32 parameters. /// /// Argument: /// IN options: Options - The values provided by the user or default values. /// /// Return: Fat32.Header /// A valid FAT32 header without the bootstrap code. /// /// Error: Error /// Error.InvalidOptionValue - If the values provided by the user are invalid and/or out of /// bounds. A log message will be printed to display the valid /// ranges. /// fn createFATHeader(options: Options) Error!Header { // 512 is the smallest sector size for FAT32 if (options.sector_size < 512 or options.sector_size > 4096 or options.sector_size % 512 != 0) { log.err("Bytes per sector is invalid. Must be greater then 512 and a multiple of 512. 
Found: {}", .{options.sector_size}); return Error.InvalidOptionValue; } // Valid clusters are 1, 2, 4, 8, 16, 32, 64 and 128 if (options.cluster_size < 1 or options.cluster_size > 128 or !std.math.isPowerOfTwo(options.cluster_size)) { log.err("Sectors per cluster is invalid. Must be less then or equal to 128 and a power of 2. Found: {}", .{options.cluster_size}); return Error.InvalidOptionValue; } // Ensure the image is aligned to the bytes per sector // Backwards as if being imaged to a device, we can't can't go over const image_size = std.mem.alignBackward(options.image_size, options.sector_size); // This is the bare minimum. It wont hold any data in a file, but can create some files. But it still can be mounted in Linux // +1 for the root director sector, 2 TB // FAT count for FAT32 is always 2 if (image_size < ((getReservedSectors() + 2 + 1) * options.sector_size) or image_size >= 2 * 1024 * 1024 * 1024 * 1024) { log.err("Image size is invalid. Must be greater then 17919 (~18KB). Found: {}", .{image_size}); return Error.InvalidOptionValue; } // See: https://board.flatassembler.net/topic.php?t=12680 var sectors_per_fat = @intCast(u32, (image_size - getReservedSectors() + (2 * options.cluster_size)) / ((options.cluster_size * (options.sector_size / 4)) + 2)); // round up sectors sectors_per_fat = (sectors_per_fat + options.sector_size - 1) / options.sector_size; return Header{ .bytes_per_sector = options.sector_size, .sectors_per_cluster = options.cluster_size, .total_sectors = @intCast(u32, @divExact(image_size, options.sector_size)), .sectors_per_fat = sectors_per_fat, .serial_number = createSerialNumber(), .volume_label = options.volume_name, }; } /// /// Clear the stream. This is the same as writeByteNTimes but with a bigger buffer (4096 bytes). /// This improves performance a lot. /// /// Arguments: /// IN stream: anytype - The stream to clear. 
/// IN size: usize - The size to clear from the beginning /// /// Error: @TypeOf(stream).WriteError /// @TypeOf(stream).WriteError - Error writing to the stream. /// fn clearStream(stream: anytype, size: usize) ErrorSet(@TypeOf(stream))!void { const buff_size = 4096; const bytes: [buff_size]u8 = [_]u8{0x00} ** buff_size; var remaining: usize = size; while (remaining > 0) { const to_write = std.math.min(remaining, bytes.len); try stream.writer().writeAll(bytes[0..to_write]); remaining -= to_write; } } /// /// Make a FAT32 image. This will either use the default options or modified defaults from the /// user. The file will be saved to the path specified. If quick format is on, then the entire /// stream is zeroed else the reserved and FAT sectors are zeroed. /// /// Argument: /// IN options: Options - The FAT32 options that the user can provide to change the /// parameters of a FAT32 image. /// IN stream: anytype - The stream to create a new FAT32 image. This stream must /// support reader(), writer() and seekableStream() interfaces. /// IN quick_format: bool - Whether to completely zero the stream initially or zero just /// the important sectors. /// /// Error: @TypeOf(stream).WriteError || @TypeOf(stream).SeekError || Error /// @TypeOf(stream).WriteError - If there is an error when writing. See the relevant error for the stream. /// @TypeOf(stream).SeekError - If there is an error when seeking. See the relevant error for the stream. /// Error.InvalidOptionValue - In the user has provided invalid options. /// Error.TooLarge - The stream size is too small. < 17.5KB. /// Error.TooSmall - The stream size is to large. > 2TB. 
/// pub fn make(options: Options, stream: anytype, quick_format: bool) (ErrorSet(@TypeOf(stream)) || Error)!void { // First set up the header const fat32_header = try Fat32.createFATHeader(options); // Initialise the stream with all zeros try stream.seekableStream().seekTo(0); if (quick_format) { // Zero just the reserved and FAT sectors try clearStream(stream, (fat32_header.reserved_sectors + (fat32_header.sectors_per_fat * 2)) * fat32_header.bytes_per_sector); } else { const image_size = std.mem.alignBackward(options.image_size, fat32_header.bytes_per_sector); try clearStream(stream, image_size); } // Write the boot sector with the bootstrap code and header and the backup boot sector. try Fat32.writeBootSector(stream, fat32_header); // Write the FAT and second FAT try Fat32.writeFAT(stream, fat32_header); // Calculate the usable clusters. const usable_sectors = fat32_header.total_sectors - fat32_header.reserved_sectors - (fat32_header.fat_count * fat32_header.sectors_per_fat); const usable_clusters = @divFloor(usable_sectors, fat32_header.sectors_per_cluster) - 1; // Write the FSInfo and backup FSInfo sectors try Fat32.writeFSInfo(stream, fat32_header, usable_clusters, 2); } };
0
repos
repos/pluto/README.md
# Pluto [![Build Status](https://github.com/SamTebbs33/pluto/workflows/CI/badge.svg)](https://github.com/SamTebbs33/pluto/actions) Pluto is a kernel written almost entirely in [Zig](https://github.com/ziglang/zig) and supports x86, with aarch64 and x64 backends being planned. ![Hello image](hello.jpg) ## Goals * **Should be written in Zig as much as possible**. Assembly should only be used where required for functionality or performance reasons. * **Light and performant**. The kernel should be usable both on embedded and desktop class CPUs, made possible by it being lightweight and modular. * **Basic utilities will be written in Zig**. This includes a basic text editor and shell, and will be part of the filesystem external to the kernel itself. * **Easy to port**. The kernel is oblivious to the underlying architecture, meaning that ports only need to implement the defined interface and they should work without a hitch. All of these goals will benefit from the features of Zig. ## Build Requires a master build of Zig 0.9.1([downloaded](https://ziglang.org/download) or [built from source](https://github.com/ziglang/zig#building-from-source)), *xorriso* and the grub tools (such as *grub-mkrescue*). A *qemu-system* binary compatible with your chosen target is required to run the kernel (e.g. *qemu-system-i386*). ```Shell zig build ``` ## Run ```Shell zig build run ``` or if you want to wait for a gdb connection: ```Shell zig build debug-run ``` ## Debug Launch a gdb-multiarch instance and connect to qemu. ```Shell zig build debug ``` ## Unit testing Run the unit tests. ```Shell zig build test ``` ## Runtime testing Run the runtime tests. ```Shell zig build rt-test -Dtest-mode=<MODE> ``` Available test modes: * `None`: This is the default, this will run the OS normally. * `Initialisation`: Run the OS's initialisation runtime tests to ensure the OS is properly set up. * `Panic`: Run the panic runtime test. ## Options * `-D[build-mode]=`: Boolean (default `false`). 
* **build**: Build a certain build mode (*release-safe*, *release-fast*, *release-small*). Don't set in order to use the *debug* build mode. * **test**: Test a certain build mode (*release-safe*, *release-fast*, *release-small*). Don't set in order to use the *debug* build mode. * `-Dtarget=`: String (default `i386-freestanding`). The standard target options for building with zig. Currently supported targets: * `i386-freestanding` * `-Ddisable-display`: Boolean (default `false`) * This disables the display output of QEMU. ## Contribution We welcome all contributions, be it bug reports, feature suggestions or pull requests. We follow the style mandated by zig fmt so make sure you've run `zig fmt` on your code before submitting it. We also like to order a file's members (public after non-public): 1. imports 2. type definitions 3. constants 4. variables 5. inline functions 6. functions 7. entry point/init function More styling information is available on the [wiki](https://github.com/SamTebbs33/pluto/wiki/Code-Styling-(Detailed))
0
repos
repos/pluto/build.zig
const std = @import("std"); const log = std.log.scoped(.builder); const builtin = @import("builtin"); const rt = @import("test/runtime_test.zig"); const RuntimeStep = rt.RuntimeStep; const Allocator = std.mem.Allocator; const Builder = std.build.Builder; const Step = std.build.Step; const Target = std.Target; const CrossTarget = std.zig.CrossTarget; const fs = std.fs; const File = fs.File; const Mode = std.builtin.Mode; const TestMode = rt.TestMode; const ArrayList = std.ArrayList; const Fat32 = @import("mkfat32.zig").Fat32; const x86_i686 = CrossTarget{ .cpu_arch = .i386, .os_tag = .freestanding, .cpu_model = .{ .explicit = &Target.x86.cpu._i686 }, }; pub fn build(b: *Builder) !void { const target = b.standardTargetOptions(.{ .whitelist = &[_]CrossTarget{x86_i686}, .default_target = x86_i686 }); const arch = switch (target.getCpuArch()) { .i386 => "x86", else => unreachable, }; const fmt_step = b.addFmt(&[_][]const u8{ "build.zig", "mkfat32.zig", "src", "test", }); b.default_step.dependOn(&fmt_step.step); const main_src = "src/kernel/kmain.zig"; const arch_root = "src/kernel/arch"; const linker_script_path = try fs.path.join(b.allocator, &[_][]const u8{ arch_root, arch, "link.ld" }); const output_iso = try fs.path.join(b.allocator, &[_][]const u8{ b.install_path, "pluto.iso" }); const iso_dir_path = try fs.path.join(b.allocator, &[_][]const u8{ b.install_path, "iso" }); const boot_path = try fs.path.join(b.allocator, &[_][]const u8{ b.install_path, "iso", "boot" }); const modules_path = try fs.path.join(b.allocator, &[_][]const u8{ b.install_path, "iso", "modules" }); const ramdisk_path = try fs.path.join(b.allocator, &[_][]const u8{ b.install_path, "initrd.ramdisk" }); const fat32_image_path = try fs.path.join(b.allocator, &[_][]const u8{ b.install_path, "fat32.img" }); const test_fat32_image_path = try fs.path.join(b.allocator, &[_][]const u8{ "test", "fat32", "test_fat32.img" }); const build_mode = b.standardReleaseOptions(); comptime var test_mode_desc: 
[]const u8 = "\n "; inline for (@typeInfo(TestMode).Enum.fields) |field| { const tm = @field(TestMode, field.name); test_mode_desc = test_mode_desc ++ field.name ++ " (" ++ TestMode.getDescription(tm) ++ ")"; test_mode_desc = test_mode_desc ++ "\n "; } const test_mode = b.option(TestMode, "test-mode", "Run a specific runtime test. This option is for the rt-test step. Available options: " ++ test_mode_desc) orelse .None; const disable_display = b.option(bool, "disable-display", "Disable the qemu window") orelse false; const exec = b.addExecutable("pluto.elf", main_src); const exec_output_path = try fs.path.join(b.allocator, &[_][]const u8{ b.install_path, "pluto.elf" }); exec.setOutputDir(b.install_path); const exec_options = b.addOptions(); exec.addOptions("build_options", exec_options); exec_options.addOption(TestMode, "test_mode", test_mode); exec.setBuildMode(build_mode); exec.setLinkerScriptPath(std.build.FileSource{ .path = linker_script_path }); exec.setTarget(target); const make_iso = switch (target.getCpuArch()) { .i386 => b.addSystemCommand(&[_][]const u8{ "./makeiso.sh", boot_path, modules_path, iso_dir_path, exec_output_path, ramdisk_path, output_iso }), else => unreachable, }; make_iso.step.dependOn(&exec.step); var fat32_builder_step = Fat32BuilderStep.create(b, .{}, fat32_image_path); make_iso.step.dependOn(&fat32_builder_step.step); var ramdisk_files_al = ArrayList([]const u8).init(b.allocator); defer ramdisk_files_al.deinit(); if (test_mode == .Initialisation) { // Add some test files for the ramdisk runtime tests try ramdisk_files_al.append("test/ramdisk_test1.txt"); try ramdisk_files_al.append("test/ramdisk_test2.txt"); } else if (test_mode == .Scheduler) { inline for (&[_][]const u8{ "user_program_data", "user_program" }) |user_program| { // Add some test files for the user mode runtime tests const user_program_step = b.addExecutable(user_program ++ ".elf", null); user_program_step.setLinkerScriptPath(.{ .path = "test/user_program.ld" }); 
user_program_step.addAssemblyFile("test/" ++ user_program ++ ".s"); user_program_step.setOutputDir(b.install_path); user_program_step.setTarget(target); user_program_step.setBuildMode(build_mode); user_program_step.strip = true; exec.step.dependOn(&user_program_step.step); const user_program_path = try std.mem.join(b.allocator, "/", &[_][]const u8{ b.install_path, user_program ++ ".elf" }); try ramdisk_files_al.append(user_program_path); } } const ramdisk_step = RamdiskStep.create(b, target, ramdisk_files_al.toOwnedSlice(), ramdisk_path); make_iso.step.dependOn(&ramdisk_step.step); b.default_step.dependOn(&make_iso.step); const test_step = b.step("test", "Run tests"); const unit_tests = b.addTest(main_src); unit_tests.setBuildMode(build_mode); unit_tests.setMainPkgPath("."); const unit_test_options = b.addOptions(); unit_tests.addOptions("build_options", unit_test_options); unit_test_options.addOption(TestMode, "test_mode", test_mode); unit_tests.setTarget(.{ .cpu_arch = target.cpu_arch }); if (builtin.os.tag != .windows) { b.enable_qemu = true; } // Run the mock gen const mock_gen = b.addExecutable("mock_gen", "test/gen_types.zig"); mock_gen.setMainPkgPath("."); const mock_gen_run = mock_gen.run(); unit_tests.step.dependOn(&mock_gen_run.step); // Create test FAT32 image const test_fat32_img_step = Fat32BuilderStep.create(b, .{}, test_fat32_image_path); const copy_test_files_step = b.addSystemCommand(&[_][]const u8{ "./fat32_cp.sh", test_fat32_image_path }); copy_test_files_step.step.dependOn(&test_fat32_img_step.step); unit_tests.step.dependOn(&copy_test_files_step.step); test_step.dependOn(&unit_tests.step); const rt_test_step = b.step("rt-test", "Run runtime tests"); var qemu_args_al = ArrayList([]const u8).init(b.allocator); defer qemu_args_al.deinit(); switch (target.getCpuArch()) { .i386 => try qemu_args_al.append("qemu-system-i386"), else => unreachable, } try qemu_args_al.append("-serial"); try qemu_args_al.append("stdio"); switch (target.getCpuArch()) { 
.i386 => { try qemu_args_al.append("-boot"); try qemu_args_al.append("d"); try qemu_args_al.append("-cdrom"); try qemu_args_al.append(output_iso); }, else => unreachable, } if (disable_display) { try qemu_args_al.append("-display"); try qemu_args_al.append("none"); } var qemu_args = qemu_args_al.toOwnedSlice(); const rt_step = RuntimeStep.create(b, test_mode, qemu_args); rt_step.step.dependOn(&make_iso.step); rt_test_step.dependOn(&rt_step.step); const run_step = b.step("run", "Run with qemu"); const run_debug_step = b.step("debug-run", "Run with qemu and wait for a gdb connection"); const qemu_cmd = b.addSystemCommand(qemu_args); const qemu_debug_cmd = b.addSystemCommand(qemu_args); qemu_debug_cmd.addArgs(&[_][]const u8{ "-s", "-S" }); qemu_cmd.step.dependOn(&make_iso.step); qemu_debug_cmd.step.dependOn(&make_iso.step); run_step.dependOn(&qemu_cmd.step); run_debug_step.dependOn(&qemu_debug_cmd.step); const debug_step = b.step("debug", "Debug with gdb and connect to a running qemu instance"); const symbol_file_arg = try std.mem.join(b.allocator, " ", &[_][]const u8{ "symbol-file", exec_output_path }); const debug_cmd = b.addSystemCommand(&[_][]const u8{ "gdb-multiarch", "-ex", symbol_file_arg, "-ex", "set architecture auto", }); debug_cmd.addArgs(&[_][]const u8{ "-ex", "target remote localhost:1234", }); debug_step.dependOn(&debug_cmd.step); } /// The FAT32 step for creating a FAT32 image. const Fat32BuilderStep = struct { /// The Step, that is all you need to know step: Step, /// The builder pointer, also all you need to know builder: *Builder, /// The path to where the ramdisk will be written to. out_file_path: []const u8, /// Options for creating the FAT32 image. options: Fat32.Options, /// /// The make function that is called by the builder. /// /// Arguments: /// IN step: *Step - The step of this step. 
/// /// Error: error{EndOfStream} || File.OpenError || File.ReadError || File.WriteError || File.SeekError || Allocator.Error || Fat32.Error || Error /// error{EndOfStream} || File.OpenError || File.ReadError || File.WriteError || File.SeekError - Error related to file operations. See std.fs.File. /// Allocator.Error - If there isn't enough memory to allocate for the make step. /// Fat32.Error - If there was an error creating the FAT image. This will be invalid options. /// fn make(step: *Step) (error{EndOfStream} || File.OpenError || File.ReadError || File.WriteError || File.SeekError || Fat32.Error)!void { const self = @fieldParentPtr(Fat32BuilderStep, "step", step); // Open the out file const image = try std.fs.cwd().createFile(self.out_file_path, .{ .read = true }); // If there was an error, delete the image as this will be invalid errdefer (std.fs.cwd().deleteFile(self.out_file_path) catch unreachable); defer image.close(); try Fat32.make(self.options, image, false); } /// /// Create a FAT32 builder step. /// /// Argument: /// IN builder: *Builder - The build builder. /// IN options: Options - Options for creating FAT32 image. /// /// Return: *Fat32BuilderStep /// The FAT32 builder step pointer to add to the build process. /// pub fn create(builder: *Builder, options: Fat32.Options, out_file_path: []const u8) *Fat32BuilderStep { const fat32_builder_step = builder.allocator.create(Fat32BuilderStep) catch unreachable; fat32_builder_step.* = .{ .step = Step.init(.custom, builder.fmt("Fat32BuilderStep", .{}), builder.allocator, make), .builder = builder, .options = options, .out_file_path = out_file_path, }; return fat32_builder_step; } }; /// The ramdisk make step for creating the initial ramdisk. 
const RamdiskStep = struct { /// The Step, that is all you need to know step: Step, /// The builder pointer, also all you need to know builder: *Builder, /// The target for the build target: CrossTarget, /// The list of files to be added to the ramdisk files: []const []const u8, /// The path to where the ramdisk will be written to. out_file_path: []const u8, /// The possible errors for creating a ramdisk const Error = (error{EndOfStream} || File.ReadError || File.SeekError || Allocator.Error || File.WriteError || File.OpenError); /// /// Create and write the files to a raw ramdisk in the format: /// (NumOfFiles:usize)[(name_length:usize)(name:u8[name_length])(content_length:usize)(content:u8[content_length])]* /// /// Argument: /// IN comptime Usize: type - The usize type for the architecture. /// IN self: *RamdiskStep - Self. /// /// Error: Error /// Errors for opening, reading and writing to and from files and for allocating memory. /// fn writeRamdisk(comptime Usize: type, self: *RamdiskStep) Error!void { // 1GB, don't think the ram disk should be very big const max_file_size = 1024 * 1024 * 1024; // Open the out file var ramdisk = try fs.cwd().createFile(self.out_file_path, .{}); defer ramdisk.close(); // Get the targets endian const endian = self.target.getCpuArch().endian(); // First write the number of files/headers std.debug.assert(self.files.len < std.math.maxInt(Usize)); try ramdisk.writer().writeInt(Usize, @truncate(Usize, self.files.len), endian); var current_offset: usize = 0; for (self.files) |file_path| { // Open, and read the file. 
Can get the size from this as well const file_content = try fs.cwd().readFileAlloc(self.builder.allocator, file_path, max_file_size); // Get the last occurrence of / for the file name, if there isn't one, then the file_path is the name const file_name_index = if (std.mem.lastIndexOf(u8, file_path, "/")) |index| index + 1 else 0; // Write the header and file content to the ramdisk // Name length std.debug.assert(file_path[file_name_index..].len < std.math.maxInt(Usize)); try ramdisk.writer().writeInt(Usize, @truncate(Usize, file_path[file_name_index..].len), endian); // Name try ramdisk.writer().writeAll(file_path[file_name_index..]); // Length std.debug.assert(file_content.len < std.math.maxInt(Usize)); try ramdisk.writer().writeInt(Usize, @truncate(Usize, file_content.len), endian); // File contest try ramdisk.writer().writeAll(file_content); // Increment the offset to the new location current_offset += @sizeOf(Usize) * 3 + file_path[file_name_index..].len + file_content.len; } } /// /// The make function that is called by the builder. This will switch on the target to get the /// correct usize length for the target. /// /// Arguments: /// IN step: *Step - The step of this step. /// /// Error: Error /// Errors for opening, reading and writing to and from files and for allocating memory. /// fn make(step: *Step) Error!void { const self = @fieldParentPtr(RamdiskStep, "step", step); switch (self.target.getCpuArch()) { .i386 => try writeRamdisk(u32, self), else => unreachable, } } /// /// Create a ramdisk step. /// /// Argument: /// IN builder: *Builder - The build builder. /// IN target: CrossTarget - The target for the build. /// IN files: []const []const u8 - The file names to be added to the ramdisk. /// IN out_file_path: []const u8 - The output file path. /// /// Return: *RamdiskStep /// The ramdisk step pointer to add to the build process. 
/// pub fn create(builder: *Builder, target: CrossTarget, files: []const []const u8, out_file_path: []const u8) *RamdiskStep { const ramdisk_step = builder.allocator.create(RamdiskStep) catch unreachable; ramdisk_step.* = .{ .step = Step.init(.custom, builder.fmt("Ramdisk", .{}), builder.allocator, make), .builder = builder, .target = target, .files = files, .out_file_path = out_file_path, }; return ramdisk_step; } };
0
repos
repos/pluto/makeiso.sh
#!/usr/bin/env bash

# Assemble the bootable GRUB ISO for pluto.
#
# Arguments:
#   $1 - directory to assemble the GRUB /boot tree in
#   $2 - directory for kernel modules (ramdisk, symbol map)
#   $3 - root of the ISO tree handed to grub-mkrescue
#   $4 - path to the built pluto.elf kernel binary
#   $5 - path to the initrd ramdisk image
#   $6 - output path of the finished ISO

BOOT_DIR=$1
MODULES_DIR=$2
ISO_DIR=$3
PLUTO_ELF=$4
RAMDISK=$5
OUTPUT_FILE=$6
MAP_FILE=$MODULES_DIR/'kernel.map'

# Fail with a clear message naming the missing tool. The tool name is passed
# explicitly instead of relying on "$_" (the last argument of the previous
# command), which is fragile, and it is printed as a printf argument rather
# than as the format string.
exit_missing() { printf '%s must be installed\n' "$1"; exit 1; }

# Check dependencies
which xorriso > /dev/null || exit_missing "xorriso"
which grub-mkrescue > /dev/null || exit_missing "grub-mkrescue"
which readelf > /dev/null || exit_missing "readelf"

# Quote all paths: the install prefix may contain spaces.
mkdir -p "$BOOT_DIR"
mkdir -p "$MODULES_DIR"

cp -r grub "$BOOT_DIR"
cp "$PLUTO_ELF" "$BOOT_DIR/pluto.elf"
cp "$RAMDISK" "$MODULES_DIR/initrd.ramdisk"

# Read the symbols from the binary, remove all the unnecessary columns with awk and emit to a map file
readelf -s --wide "$PLUTO_ELF" | grep -F "FUNC" | awk '{$1=$3=$4=$5=$6=$7=""; print $0}' | sort -k 1 > "$MAP_FILE"
echo "" >> "$MAP_FILE"

grub-mkrescue -o "$OUTPUT_FILE" "$ISO_DIR"
0
repos/pluto/src
repos/pluto/src/kernel/tty.zig
// Architecture-independent TTY layer: the architecture supplies a TTY vtable
// via arch.initTTY and this module routes formatted printing through it.
const std = @import("std");
const fmt = std.fmt;
const Allocator = std.mem.Allocator;
const log = std.log.scoped(.tty);
const build_options = @import("build_options");
const arch = @import("arch.zig").internals;
const panic = @import("panic.zig").panic;

/// The OutStream for the format function
const Writer = std.io.Writer(void, anyerror, printCallback);

/// The interface an architecture's TTY implementation must provide.
pub const TTY = struct {
    /// Print a already-formatted string
    print: fn ([]const u8) anyerror!void,

    /// Set the TTY cursor position to a row and column
    setCursor: fn (u8, u8) void,

    /// Clear the screen and set the cursor to top left. The default implementation will be used if null
    clear: ?fn () void,

    /// The number of character rows supported
    rows: u8,

    /// The number of character columns supported
    cols: u8,
};

/// The current tty stream
var tty: TTY = undefined;

// Allocator handed over in init(); used only by the default clear() path.
var allocator: Allocator = undefined;

///
/// A call back function for use in the formation of a string. This calls the architecture's print function.
///
/// Arguments:
///     IN ctx: void        - The context of the printing. This will be empty.
///     IN str: []const u8  - The string to print.
///
/// Return: usize
///     The number of characters printed
///
fn printCallback(ctx: void, str: []const u8) !usize {
    // Suppress unused var warning
    _ = ctx;
    // A failing TTY print is unrecoverable, so panic rather than propagate.
    tty.print(str) catch |e| panic(@errorReturnTrace(), "Failed to print to tty: {}\n", .{e});
    return str.len;
}

///
/// Print a formatted string to the terminal in the current colour. This used the standard zig
/// formatting.
///
/// Arguments:
///     IN comptime format: []const u8 - The format string to print
///     IN args: anytype               - The arguments to be used in the formatted string
///
pub fn print(comptime format: []const u8, args: anytype) void {
    // Printing can't error because of the scrolling, if it does, we have a big problem
    fmt.format(Writer{ .context = {} }, format, args) catch |e| {
        log.err("Error printing. Error: {}\n", .{e});
    };
}

///
/// Clear the screen by printing a space at each cursor position. Sets the cursor to the top left (0, 0)
///
pub fn clear() void {
    // Prefer the architecture's own clear implementation when it provides one.
    if (tty.clear) |clr| {
        clr();
    } else {
        // Try to allocate the number of spaces for a whole row to avoid calling print too many times
        // (+1 leaves room for the trailing newline).
        var spaces = allocator.alloc(u8, tty.cols + 1) catch |e| switch (e) {
            Allocator.Error.OutOfMemory => {
                var row: u8 = 0;
                // If we can't allocate the spaces then try the unoptimised way instead
                while (row < tty.rows) : (row += 1) {
                    var col: u8 = 0;
                    while (col < tty.cols) : (col += 1) {
                        print(" ", .{});
                    }
                    print("\n", .{});
                }
                tty.setCursor(0, 0);
                return;
            },
        };
        defer allocator.free(spaces);
        // Fill the row buffer with spaces and terminate it with a newline.
        var col: u8 = 0;
        while (col < tty.cols) : (col += 1) {
            spaces[col] = " "[0];
        }
        spaces[col] = "\n"[0];
        var row: u8 = 0;
        while (row < tty.rows) : (row += 1) {
            print("{s}", .{spaces});
        }
        tty.setCursor(0, 0);
    }
}

///
/// Initialise the TTY. The details of which are up to the architecture
///
/// Arguments:
///     IN alloc: Allocator               - The allocator to use when requiring memory
///     IN boot_payload: arch.BootPayload - The payload passed to the kernel on boot
///
pub fn init(alloc: Allocator, boot_payload: arch.BootPayload) void {
    log.info("Init\n", .{});
    defer log.info("Done\n", .{});
    tty = arch.initTTY(boot_payload);
    allocator = alloc;
}
0
repos/pluto/src
repos/pluto/src/kernel/task.zig
const std = @import("std"); const expectEqual = std.testing.expectEqual; const expectError = std.testing.expectError; const expect = std.testing.expect; const builtin = @import("builtin"); const is_test = builtin.is_test; const build_options = @import("build_options"); const arch = @import("arch.zig").internals; const panic = @import("panic.zig").panic; const vmm = @import("vmm.zig"); const pmm = @import("pmm.zig"); const mem = @import("mem.zig"); const elf = @import("elf.zig"); const bitmap = @import("bitmap.zig"); const vfs = @import("filesystem/vfs.zig"); const Allocator = std.mem.Allocator; const log = std.log.scoped(.task); /// The kernels main stack start as this is used to check for if the task being destroyed is this stack /// as we cannot deallocate this. extern var KERNEL_STACK_START: *u32; /// The number of vfs handles that a process can have pub const VFS_HANDLES_PER_PROCESS = std.math.maxInt(Handle); /// A vfs handle. 65k is probably a good limit for the number of files a task can have open at once so we use u16 as the type pub const Handle = u16; /// The function type for the entry point. pub const EntryPoint = usize; /// The bitmap type for the PIDs const PidBitmap = bitmap.Bitmap(1024, usize); /// The list of PIDs that have been allocated. var all_pids = PidBitmap.init(1024, null) catch unreachable; const FileHandleBitmap = bitmap.Bitmap(1024, usize); /// The default stack size of a task. Currently this is set to a page size. pub const STACK_SIZE: u32 = arch.MEMORY_BLOCK_SIZE / @sizeOf(u32); /// The task control block for storing all the information needed to save and restore a task. pub const Task = struct { pub const Error = error{ /// The supplied vfs handle hasn't been allocated VFSHandleNotSet, }; const Self = @This(); /// The unique task identifier pid: usize, /// Pointer to the kernel stack for the task. This will be allocated on initialisation. kernel_stack: []usize, /// Pointer to the user stack for the task. 
This will be allocated on initialisation and will be empty if it's a kernel task user_stack: []usize, /// The current stack pointer into the stack. stack_pointer: usize, /// Whether the process is a kernel process or not kernel: bool, /// The virtual memory manager belonging to the task vmm: *vmm.VirtualMemoryManager(arch.VmmPayload), /// The list of file handles for this process file_handles: FileHandleBitmap, /// The mapping between file handles and file nodes file_handle_mapping: std.hash_map.AutoHashMap(Handle, *vfs.Node), /// /// Create a task. This will allocate a PID and the stack. The stack will be set up as a /// kernel task. As this is a new task, the stack will need to be initialised with the CPU /// state as described in arch.CpuState struct. /// /// Arguments: /// IN entry_point: EntryPoint - The entry point into the task. This must be a function. /// IN kernel: bool - Whether the task has kernel or user privileges. /// IN task_vmm: *VirtualMemoryManager - The virtual memory manager associated with the task. /// IN allocator: Allocator - The allocator for allocating memory for a task. /// /// Return: *Task /// Pointer to an allocated task. This will then need to be added to the task queue. /// /// Error: Allocator.Error /// OutOfMemory - If there is no more memory to allocate. Any memory or PID allocated will /// be freed on return. 
/// pub fn create(entry_point: EntryPoint, kernel: bool, task_vmm: *vmm.VirtualMemoryManager(arch.VmmPayload), allocator: Allocator, alloc_kernel_stack: bool) Allocator.Error!*Task { var task = try allocator.create(Task); errdefer allocator.destroy(task); const pid = allocatePid(); errdefer freePid(pid) catch |e| panic(@errorReturnTrace(), "Failed to free task PID in errdefer ({}): {}\n", .{ pid, e }); var k_stack = if (alloc_kernel_stack) try allocator.alloc(usize, STACK_SIZE) else &[_]usize{}; errdefer if (alloc_kernel_stack) allocator.free(k_stack); var u_stack = if (kernel) &[_]usize{} else try allocator.alloc(usize, STACK_SIZE); errdefer if (!kernel) allocator.free(u_stack); task.* = .{ .pid = pid, .kernel_stack = k_stack, .user_stack = u_stack, .stack_pointer = if (!alloc_kernel_stack) 0 else @ptrToInt(&k_stack[STACK_SIZE - 1]), .kernel = kernel, .vmm = task_vmm, .file_handles = FileHandleBitmap.init(null, null) catch unreachable, .file_handle_mapping = std.hash_map.AutoHashMap(Handle, *vfs.Node).init(allocator), }; try arch.initTask(task, entry_point, allocator, alloc_kernel_stack); return task; } pub fn createFromElf(program_elf: elf.Elf, kernel: bool, task_vmm: *vmm.VirtualMemoryManager(arch.VmmPayload), allocator: Allocator) (bitmap.BitmapError || vmm.VmmError || Allocator.Error)!*Task { const task = try create(program_elf.header.entry_address, kernel, task_vmm, allocator, true); errdefer task.destroy(allocator); // Iterate over sections var i: usize = 0; errdefer { // Free the previously allocated addresses for (program_elf.section_headers) |header, j| { if (j >= i) break; if ((header.flags & elf.SECTION_ALLOCATABLE) != 0) task_vmm.free(header.virtual_address) catch |e| panic(null, "VMM failed to clean up a previously-allocated address after an error: {}\n", .{e}); } } while (i < program_elf.section_headers.len) : (i += 1) { const header = program_elf.section_headers[i]; if ((header.flags & elf.SECTION_ALLOCATABLE) == 0) { continue; } // If it is 
loadable then allocate it at its virtual address const attrs = vmm.Attributes{ .kernel = kernel, .writable = (header.flags & elf.SECTION_WRITABLE) != 0, .cachable = true }; const vmm_blocks = std.mem.alignForward(header.size, vmm.BLOCK_SIZE) / vmm.BLOCK_SIZE; const vaddr_opt = try task_vmm.alloc(vmm_blocks, header.virtual_address, attrs); const vaddr = vaddr_opt orelse return if (try task_vmm.isSet(header.virtual_address)) error.AlreadyAllocated else error.OutOfBounds; errdefer task_vmm.free(vaddr) catch |e| panic(@errorReturnTrace(), "Failed to free VMM memory in createFromElf: {}\n", .{e}); // Copy it into memory try vmm.kernel_vmm.copyData(task_vmm, true, program_elf.section_data[i].?, vaddr); } return task; } /// /// Destroy the task. This will release the allocated PID and free the stack and self. /// /// Arguments: /// IN/OUT self: *Self - The pointer to self. /// IN allocator: Allocator - The allocator used to create the task. /// pub fn destroy(self: *Self, allocator: Allocator) void { freePid(self.pid) catch |e| panic(@errorReturnTrace(), "Failed to free task's PID ({}): {}\n", .{ self.pid, e }); // We need to check that the the stack has been allocated as task 0 (init) won't have a // stack allocated as this in the linker script if (@ptrToInt(self.kernel_stack.ptr) != @ptrToInt(&KERNEL_STACK_START) and self.kernel_stack.len > 0) { allocator.free(self.kernel_stack); } if (!self.kernel) { allocator.free(self.user_stack); } self.file_handle_mapping.deinit(); allocator.destroy(self); } /// /// Get the VFS node associated with a VFS handle. /// /// Arguments: /// IN self: *Self - The pointer to self. /// IN handle: Handle - The handle to get the node for. Must have been returned from addVFSHandle. /// /// Return: *vfs.Node /// The node associated with the handle. /// /// Error: bitmap.BitmapError /// See Bitmap. 
/// pub fn getVFSHandle(self: Self, handle: Handle) bitmap.BitmapError!?*vfs.Node { return self.file_handle_mapping.get(handle); } /// /// Check if the task has free handles to allocate. /// /// Arguments: /// IN self: Self - The self. /// /// Return: bool /// True if there are free handles, else false. /// pub fn hasFreeVFSHandle(self: Self) bool { return self.file_handles.num_free_entries > 0; } /// /// Add a handle associated with a node. The node can later be retrieved with getVFSHandle. /// /// Arguments: /// IN self: *Self - The pointer to self. /// IN node: *vfs.Node - The node to associate with the returned handle. /// /// Return: Handle /// The handle now associated with the vfs node. /// /// Error: std.mem.Allocator.Error /// pub fn addVFSHandle(self: *Self, node: *vfs.Node) std.mem.Allocator.Error!?Handle { if (self.file_handles.setFirstFree()) |handle| { const real_handle = @intCast(Handle, handle); try self.file_handle_mapping.put(real_handle, node); return real_handle; } return null; } /// /// Check if the task has a certain handle registered. /// /// Arguments: /// IN self: Self - The self. /// IN handle: Handle - The handle to check. /// /// Return: bool /// True if the handle has been registered to this task, else false. /// /// Error: bitmap.BitmapError /// See Bitmap. /// pub fn hasVFSHandle(self: Self, handle: Handle) bitmap.BitmapError!bool { return self.file_handles.isSet(handle); } /// /// Clear a registered handle and de-associate the node from it. /// /// Arguments: /// IN self: *Self - The pointer to self. /// IN handle: Handle - The handle to clear. Must have been registered before. 
/// /// Error: bitmap.BitmapError || Error /// bitmap.BitmapError.* - See bitmap.BitmapError /// Error.VFSHandleNotSet - The handle has not previously been registered /// pub fn clearVFSHandle(self: *Self, handle: Handle) (bitmap.BitmapError || Error)!void { if (try self.hasVFSHandle(handle)) { try self.file_handles.clearEntry(handle); _ = self.file_handle_mapping.remove(handle); } else { return Error.VFSHandleNotSet; } } }; /// /// Allocate a process identifier. If out of PIDs, then will panic. Is this occurs, will need to /// increase the bitmap. /// /// Return: u32 /// A new PID. /// fn allocatePid() usize { return all_pids.setFirstFree() orelse panic(@errorReturnTrace(), "Out of PIDs\n", .{}); } /// /// Free an allocated PID. One must be allocated to be freed. If one wasn't allocated will panic. /// /// Arguments: /// IN pid: usize - The PID to free. /// /// Error: BitmapError. /// OutOfBounds: The index given is out of bounds. /// fn freePid(pid: usize) bitmap.BitmapError!void { if (!(try all_pids.isSet(pid))) { panic(@errorReturnTrace(), "PID {} not allocated\n", .{pid}); } try all_pids.clearEntry(pid); } // For testing the errdefer const FailingAllocator = std.testing.FailingAllocator; const testing_allocator = std.testing.base_allocator_instance.allocator(); fn test_fn1() void {} test "create out of memory for task" { // Set the global allocator var fa = FailingAllocator.init(testing_allocator, 0); try expectError(error.OutOfMemory, Task.create(@ptrToInt(test_fn1), true, undefined, fa.allocator(), true)); try expectError(error.OutOfMemory, Task.create(@ptrToInt(test_fn1), false, undefined, fa.allocator(), true)); // Make sure any memory allocated is freed try expectEqual(fa.allocated_bytes, fa.freed_bytes); // Make sure no PIDs were allocated for (all_pids.bitmaps) |bmp| { try expectEqual(bmp, 0); } } test "create out of memory for stack" { // Set the global allocator var fa = FailingAllocator.init(testing_allocator, 1); try expectError(error.OutOfMemory, 
Task.create(@ptrToInt(test_fn1), true, undefined, fa.allocator(), true)); try expectError(error.OutOfMemory, Task.create(@ptrToInt(test_fn1), false, undefined, fa.allocator(), true)); // Make sure any memory allocated is freed try expectEqual(fa.allocated_bytes, fa.freed_bytes); // Make sure no PIDs were allocated for (all_pids.bitmaps) |bmp| { try expectEqual(bmp, 0); } } test "create expected setup" { var task = try Task.create(@ptrToInt(test_fn1), true, undefined, std.testing.allocator, true); defer task.destroy(std.testing.allocator); // Will allocate the first PID 0 try expectEqual(task.pid, 0); try expectEqual(task.kernel_stack.len, STACK_SIZE); try expectEqual(task.user_stack.len, 0); var user_task = try Task.create(@ptrToInt(test_fn1), false, undefined, std.testing.allocator, true); defer user_task.destroy(std.testing.allocator); try expectEqual(user_task.pid, 1); try expectEqual(user_task.user_stack.len, STACK_SIZE); try expectEqual(user_task.kernel_stack.len, STACK_SIZE); } test "destroy cleans up" { // This used the leak detector allocator in testing // So if any alloc were not freed, this will fail the test var allocator = std.testing.allocator; var task = try Task.create(@ptrToInt(test_fn1), true, undefined, allocator, true); var user_task = try Task.create(@ptrToInt(test_fn1), false, undefined, allocator, true); task.destroy(allocator); user_task.destroy(allocator); // All PIDs were freed for (all_pids.bitmaps) |bmp| { try expectEqual(bmp, 0); } } test "Multiple create" { var task1 = try Task.create(@ptrToInt(test_fn1), true, undefined, std.testing.allocator, true); var task2 = try Task.create(@ptrToInt(test_fn1), true, undefined, std.testing.allocator, true); try expectEqual(task1.pid, 0); try expectEqual(task2.pid, 1); try expectEqual(all_pids.bitmaps[0], 3); for (all_pids.bitmaps) |bmp, i| { if (i > 0) try expectEqual(bmp, 0); } task1.destroy(std.testing.allocator); try expectEqual(all_pids.bitmaps[0], 2); for (all_pids.bitmaps) |bmp, i| { if (i > 
0) try expectEqual(bmp, 0); } var task3 = try Task.create(@ptrToInt(test_fn1), true, undefined, std.testing.allocator, true); try expectEqual(task3.pid, 0); try expectEqual(all_pids.bitmaps[0], 3); for (all_pids.bitmaps) |bmp, i| { if (i > 0) try expectEqual(bmp, 0); } task2.destroy(std.testing.allocator); task3.destroy(std.testing.allocator); var user_task = try Task.create(@ptrToInt(test_fn1), false, undefined, std.testing.allocator, true); try expectEqual(user_task.pid, 0); try expectEqual(all_pids.bitmaps[0], 1); for (all_pids.bitmaps) |bmp, i| { if (i > 0) try expectEqual(bmp, 0); } user_task.destroy(std.testing.allocator); for (all_pids.bitmaps) |bmp| { try expectEqual(bmp, 0); } } test "allocatePid and freePid" { for (all_pids.bitmaps) |bmp| { try expectEqual(bmp, 0); } var i: usize = 0; while (i < all_pids.num_entries) : (i += 1) { try expectEqual(i, allocatePid()); } for (all_pids.bitmaps) |bmp| { try expectEqual(bmp, PidBitmap.BITMAP_FULL); } i = 0; while (i < all_pids.num_entries) : (i += 1) { try freePid(i); } for (all_pids.bitmaps) |bmp| { try expectEqual(bmp, 0); } } test "createFromElf" { var allocator = std.testing.allocator; var master_vmm = try vmm.testInit(32); defer vmm.testDeinit(&master_vmm); const code_address = 0; const elf_data = try elf.testInitData(allocator, "abc123", "strings", .Executable, code_address, 0, elf.SECTION_ALLOCATABLE, 0, code_address, 0); defer allocator.free(elf_data); var the_elf = try elf.Elf.init(elf_data, builtin.cpu.arch, std.testing.allocator); defer the_elf.deinit(); var the_vmm = try vmm.VirtualMemoryManager(arch.VmmPayload).init(0, 10000, std.testing.allocator, arch.VMM_MAPPER, arch.KERNEL_VMM_PAYLOAD); defer the_vmm.deinit(); const task = try Task.createFromElf(the_elf, true, &the_vmm, std.testing.allocator); defer task.destroy(allocator); try std.testing.expectEqual(task.pid, 0); try std.testing.expectEqual(task.user_stack.len, 0); try std.testing.expectEqual(task.kernel_stack.len, STACK_SIZE); } test 
"createFromElf clean-up" { var allocator = std.testing.allocator; var master_vmm = try vmm.testInit(32); defer vmm.testDeinit(&master_vmm); const code_address = 0; const elf_data = try elf.testInitData(allocator, "abc123", "strings", .Executable, code_address, 0, elf.SECTION_ALLOCATABLE, 0, code_address, 0); defer allocator.free(elf_data); var the_elf = try elf.Elf.init(elf_data, builtin.cpu.arch, std.testing.allocator); defer the_elf.deinit(); var the_vmm = try vmm.VirtualMemoryManager(arch.VmmPayload).init(0, 10000, std.testing.allocator, arch.VMM_MAPPER, arch.KERNEL_VMM_PAYLOAD); defer the_vmm.deinit(); const task = try Task.createFromElf(the_elf, true, &the_vmm, std.testing.allocator); defer task.destroy(allocator); // Test clean-up // Test OutOfMemory var allocator2 = std.testing.FailingAllocator.init(allocator, 0).allocator(); try std.testing.expectError(std.mem.Allocator.Error.OutOfMemory, Task.createFromElf(the_elf, true, &the_vmm, allocator2)); try std.testing.expectEqual(all_pids.num_free_entries, all_pids.num_entries - 1); // Test AlreadyAllocated try std.testing.expectError(error.AlreadyAllocated, Task.createFromElf(the_elf, true, &the_vmm, allocator)); // Test OutOfBounds the_elf.section_headers[0].virtual_address = the_vmm.end + 1; try std.testing.expectError(error.OutOfBounds, Task.createFromElf(the_elf, true, &the_vmm, allocator)); // Test errdefer clean-up by fillng up all but one block in the VMM so allocating the last section fails // The allocation for the first section should be cleaned up in case of an error const available_address = (try the_vmm.alloc(1, null, .{ .writable = false, .kernel = false, .cachable = false })) orelse unreachable; the_elf.section_headers[0].virtual_address = available_address; _ = try the_vmm.alloc(the_vmm.bmp.num_free_entries, null, .{ .kernel = false, .writable = false, .cachable = false }); try the_vmm.free(available_address); // Make the strings section allocatable so createFromElf tries to allocate more than one 
the_elf.section_headers[1].flags |= elf.SECTION_ALLOCATABLE; try std.testing.expectError(error.AlreadyAllocated, Task.createFromElf(the_elf, true, &the_vmm, std.testing.allocator)); } test "create doesn't allocate kernel stack" { var allocator = std.testing.allocator; const task = try Task.create(@ptrToInt(test_fn1), true, undefined, allocator, false); defer task.destroy(allocator); try std.testing.expectEqualSlices(usize, task.kernel_stack, &[_]usize{}); try std.testing.expectEqual(task.stack_pointer, 0); } test "addVFSHandle" { var task = try Task.create(0, true, undefined, std.testing.allocator, false); defer task.destroy(std.testing.allocator); var node1 = vfs.Node{ .Dir = .{ .fs = undefined, .mount = null } }; var node2 = vfs.Node{ .File = .{ .fs = undefined } }; const handle1 = (try task.addVFSHandle(&node1)) orelse return error.FailedToAddVFSHandle; try expectEqual(handle1, 0); try expectEqual(&node1, task.file_handle_mapping.get(handle1).?); try expectEqual(true, try task.file_handles.isSet(handle1)); const handle2 = (try task.addVFSHandle(&node2)) orelse return error.FailedToAddVFSHandle; try expectEqual(handle2, 1); try expectEqual(&node2, task.file_handle_mapping.get(handle2).?); try expectEqual(true, try task.file_handles.isSet(handle2)); } test "hasFreeVFSHandle" { var task = try Task.create(0, true, undefined, std.testing.allocator, false); defer task.destroy(std.testing.allocator); var node1 = vfs.Node{ .Dir = .{ .fs = undefined, .mount = null } }; try expect(task.hasFreeVFSHandle()); _ = (try task.addVFSHandle(&node1)) orelse return error.FailedToAddVFSHandle; try expect(task.hasFreeVFSHandle()); var i: usize = 0; const free_entries = task.file_handles.num_free_entries; while (i < free_entries) : (i += 1) { try expect(task.hasFreeVFSHandle()); _ = task.file_handles.setFirstFree(); } try expect(!task.hasFreeVFSHandle()); } test "getVFSHandle" { var task = try Task.create(0, true, undefined, std.testing.allocator, false); defer 
task.destroy(std.testing.allocator); var node1 = vfs.Node{ .Dir = .{ .fs = undefined, .mount = null } }; var node2 = vfs.Node{ .File = .{ .fs = undefined } }; const handle1 = (try task.addVFSHandle(&node1)) orelse return error.FailedToAddVFSHandle; try expectEqual(&node1, (try task.getVFSHandle(handle1)).?); const handle2 = (try task.addVFSHandle(&node2)) orelse return error.FailedToAddVFSHandle; try expectEqual(&node2, (try task.getVFSHandle(handle2)).?); try expectEqual(&node1, (try task.getVFSHandle(handle1)).?); try expectEqual(task.getVFSHandle(handle2 + 1), null); } test "clearVFSHandle" { var task = try Task.create(0, true, undefined, std.testing.allocator, false); defer task.destroy(std.testing.allocator); var node1 = vfs.Node{ .Dir = .{ .fs = undefined, .mount = null } }; var node2 = vfs.Node{ .File = .{ .fs = undefined } }; const handle1 = (try task.addVFSHandle(&node1)) orelse return error.FailedToAddVFSHandle; const handle2 = (try task.addVFSHandle(&node2)) orelse return error.FailedToAddVFSHandle; try task.clearVFSHandle(handle1); try expectEqual(false, try task.hasVFSHandle(handle1)); try task.clearVFSHandle(handle2); try expectEqual(false, try task.hasVFSHandle(handle2)); try expectError(Task.Error.VFSHandleNotSet, task.clearVFSHandle(handle2 + 1)); try expectError(Task.Error.VFSHandleNotSet, task.clearVFSHandle(handle2)); try expectError(Task.Error.VFSHandleNotSet, task.clearVFSHandle(handle1)); }
0
repos/pluto/src
repos/pluto/src/kernel/keyboard.zig
const std = @import("std"); const testing = std.testing; const ArrayList = std.ArrayList; const Allocator = std.mem.Allocator; const arch = @import("arch.zig").internals; /// An arbitrary number of keys to remember before dropping any more that arrive. Is a power of two so we can use nice overflowing addition pub const QUEUE_SIZE = 32; /// The position of a key on a keyboard using the qwerty layout. This does not determine the key pressed, just the position on the keyboard. pub const KeyPosition = enum(u7) { ESC, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, PRINT_SCREEN, SCROLL_LOCK, PAUSE, BACKTICK, ONE, TWO, THREE, FOUR, FIVE, SIX, SEVEN, EIGHT, NINE, ZERO, HYPHEN, EQUALS, BACKSPACE, TAB, Q, W, E, R, T, Y, U, I, O, P, LEFT_BRACKET, RIGHT_BRACKET, ENTER, CAPS_LOCK, A, S, D, F, G, H, J, K, L, SEMICOLON, APOSTROPHE, HASH, LEFT_SHIFT, BACKSLASH, Z, X, C, V, B, N, M, COMMA, DOT, FORWARD_SLASH, RIGHT_SHIFT, LEFT_CTRL, SPECIAL, LEFT_ALT, SPACE, RIGHT_ALT, FN, SPECIAL2, RIGHT_CTRL, INSERT, HOME, PAGE_UP, DELETE, END, PAGE_DOWN, LEFT_ARROW, UP_ARROW, DOWN_ARROW, RIGHT_ARROW, NUM_LOCK, KEYPAD_SLASH, KEYPAD_ASTERISK, KEYPAD_MINUS, KEYPAD_7, KEYPAD_8, KEYPAD_9, KEYPAD_PLUS, KEYPAD_4, KEYPAD_5, KEYPAD_6, KEYPAD_1, KEYPAD_2, KEYPAD_3, KEYPAD_ENTER, KEYPAD_0, KEYPAD_DOT, }; /// A keyboard action, either a press or release pub const KeyAction = struct { /// The position of the key position: KeyPosition, /// Whether it was a release or press released: bool, }; /// The type used to index the keyboard queue const QueueIndex = std.meta.Int(.unsigned, std.math.log2(QUEUE_SIZE)); /// A keyboard buffer that stores keyboard actions. This corresponds to a single hardware keyboard pub const Keyboard = struct { /// A circular queue storing key presses until full queue: [QUEUE_SIZE]KeyAction, /// The front of the queue i.e. the next item to be dequeued queue_front: QueueIndex, /// The end of the queue i.e. 
where the next item is enqueued queue_end: QueueIndex, /// /// Initialise a keyboard with an empty key buffer /// /// Return: Keyboard /// They keyboard created /// pub fn init() Keyboard { return .{ .queue = [_]KeyAction{undefined} ** QUEUE_SIZE, .queue_front = 0, .queue_end = 0, }; } /// /// Check if the keyboard queue is empty /// /// Arguments: /// self: *const Keyboard - The keyboard to check /// /// Return: bool /// True if the keyboard queue is empty, else false /// pub fn isEmpty(self: *const Keyboard) bool { return self.queue_end == self.queue_front; } /// /// Check if the keyboard queue is full /// /// Arguments: /// self: *const Keyboard - The keyboard to check /// /// Return: bool /// True if the keyboard queue is full, else false /// pub fn isFull(self: *const Keyboard) bool { var end_plus_one: QueueIndex = undefined; // This is a circular queue so overflow is allowed _ = @addWithOverflow(QueueIndex, self.queue_end, 1, &end_plus_one); return end_plus_one == self.queue_front; } /// /// Add a keyboard action to the keyboard's queue /// /// Arguments: /// self: *Keyboard - The keyboard whose queue it should be added to /// key: KeyAction - The action to add /// /// Return: bool /// True if there was room for the key, else false /// pub fn writeKey(self: *Keyboard, key: KeyAction) bool { if (!self.isFull()) { self.queue[self.queue_end] = key; _ = @addWithOverflow(QueueIndex, self.queue_end, 1, &self.queue_end); return true; } return false; } /// /// Read and remove the next key from the keyboard's queue /// /// Arguments: /// self: *Keyboard - The keyboard to get and remove the key from /// /// Return: ?KeyAction /// The first keyboard action in the queue, else null if there were none /// pub fn readKey(self: *Keyboard) ?KeyAction { if (self.isEmpty()) return null; const key = self.queue[self.queue_front]; _ = @addWithOverflow(QueueIndex, self.queue_front, 1, &self.queue_front); return key; } test "init" { const keyboard = Keyboard.init(); try 
testing.expectEqual(keyboard.queue_front, 0); try testing.expectEqual(keyboard.queue_end, 0); } test "isEmpty" { var keyboard = Keyboard.init(); try testing.expect(keyboard.isEmpty()); keyboard.queue_end += 1; try testing.expect(!keyboard.isEmpty()); keyboard.queue_front += 1; try testing.expect(keyboard.isEmpty()); keyboard.queue_end = std.math.maxInt(QueueIndex); keyboard.queue_front = 0; try testing.expect(!keyboard.isEmpty()); keyboard.queue_front = std.math.maxInt(QueueIndex); try testing.expect(keyboard.isEmpty()); } test "isFull" { var keyboard = Keyboard.init(); try testing.expect(!keyboard.isFull()); keyboard.queue_end += 1; try testing.expect(!keyboard.isFull()); keyboard.queue_front += 1; try testing.expect(!keyboard.isFull()); keyboard.queue_end = 0; try testing.expect(keyboard.isFull()); keyboard.queue_front = 0; keyboard.queue_end = std.math.maxInt(QueueIndex); try testing.expect(keyboard.isFull()); } test "writeKey" { var keyboard = Keyboard.init(); comptime var i = 0; inline while (i < QUEUE_SIZE - 1) : (i += 1) { try testing.expectEqual(keyboard.writeKey(.{ .position = @intToEnum(KeyPosition, i), .released = false, }), true); try testing.expectEqual(keyboard.queue[i].position, @intToEnum(KeyPosition, i)); try testing.expectEqual(keyboard.queue_end, i + 1); try testing.expectEqual(keyboard.queue_front, 0); } try testing.expectEqual(keyboard.writeKey(.{ .position = @intToEnum(KeyPosition, 33), .released = false, }), false); try testing.expect(keyboard.isFull()); } test "readKey" { var keyboard = Keyboard.init(); comptime var i = 0; inline while (i < QUEUE_SIZE - 1) : (i += 1) { try testing.expectEqual(keyboard.writeKey(.{ .position = @intToEnum(KeyPosition, i), .released = false, }), true); } i = 0; inline while (i < QUEUE_SIZE - 1) : (i += 1) { try testing.expectEqual(keyboard.readKey().?.position, @intToEnum(KeyPosition, i)); try testing.expectEqual(keyboard.queue_end, QUEUE_SIZE - 1); try testing.expectEqual(keyboard.queue_front, i + 1); } try 
testing.expect(keyboard.isEmpty()); try testing.expectEqual(keyboard.readKey(), null); } }; /// The registered keyboards var keyboards: ArrayList(*Keyboard) = undefined; /// /// Get the keyboard associated with an ID /// /// Arguments: /// id: usize - The ID of the keyboard to get /// /// Return: ?*Keyboard /// The keyboard associated with the ID, or null if there isn't one /// pub fn getKeyboard(id: usize) ?*Keyboard { if (keyboards.items.len <= id) { return null; } return keyboards.items[id]; } /// /// Add a keyboard to list of known keyboards /// /// Arguments: /// kb: *Keyboard - The keyboard to add /// /// Error: std.mem.Allocator.Error /// OutOfMemory - Adding the keyboard to the list failed due to there not being enough memory free /// pub fn addKeyboard(kb: *Keyboard) Allocator.Error!void { try keyboards.append(kb); } /// /// Initialise the keyboard system and the architecture's keyboard /// /// Arguments: /// allocator: std.mem.Allocator - The allocator to initialise the keyboard list and architecture keyboard with /// /// Return: ?*Keyboard /// The architecture keyboard found, else null if one wasn't detected /// /// Error: std.mem.Allocator.Error /// OutOfMemory - There wasn't enough memory to initialise the keyboard list or the architecture keyboard /// pub fn init(allocator: Allocator) Allocator.Error!?*Keyboard { keyboards = ArrayList(*Keyboard).init(allocator); return arch.initKeyboard(allocator); } test "" { _ = Keyboard.init(); }
0
repos/pluto/src
repos/pluto/src/kernel/mem.zig
const std = @import("std"); const expectEqual = std.testing.expectEqual; pub const Module = struct { /// The region of memory occupied by the module region: Range, /// The module's name name: []const u8, }; pub const Map = struct { /// The virtual range to reserve virtual: Range, /// The physical range to map to, if any physical: ?Range, }; /// A range of memory pub const Range = struct { /// The start of the range, inclusive start: usize, /// The end of the range, exclusive end: usize, }; pub const MemProfile = struct { /// The virtual end address of the kernel code. vaddr_end: [*]u8, /// The virtual end address of the kernel code. vaddr_start: [*]u8, /// The physical end address of the kernel code. physaddr_end: [*]u8, /// The physical start address of the kernel code. physaddr_start: [*]u8, /// The amount of memory in the system, in kilobytes. mem_kb: usize, /// The modules loaded into memory at boot. modules: []Module, /// The virtual regions of reserved memory. These are reserved and mapped by the VMM virtual_reserved: []Map, /// The physical regions of reserved memory. These are reserved by the PMM physical_reserved: []Range, /// The allocator to use before a heap can be set up. fixed_allocator: std.heap.FixedBufferAllocator, }; /// The size of the fixed allocator used before the heap is set up. Set to 1MiB. pub var fixed_buffer: [1024 * 1024]u8 = undefined; /// The fixed allocator used before the heap is set up. pub var fixed_buffer_allocator: std.heap.FixedBufferAllocator = std.heap.FixedBufferAllocator.init(fixed_buffer[0..]); /// The kernel's virtual address offset. It's assigned in the init function and this file's tests. /// We can't just use KERNEL_ADDR_OFFSET since using externs in the virtToPhys test is broken in /// release-safe. This is a workaround until that is fixed. pub var ADDR_OFFSET: usize = undefined; /// /// Convert a virtual address to its physical counterpart by subtracting the kernel virtual offset from the virtual address. 
/// /// Arguments: /// IN virt: anytype - The virtual address to covert. Either an integer or pointer. /// /// Return: @TypeOf(virt) /// The physical address. /// pub fn virtToPhys(virt: anytype) @TypeOf(virt) { const T = @TypeOf(virt); return switch (@typeInfo(T)) { .Pointer => @intToPtr(T, @ptrToInt(virt) - ADDR_OFFSET), .Int => virt - ADDR_OFFSET, else => @compileError("Only pointers and integers are supported"), }; } /// /// Convert a physical address to its virtual counterpart by adding the kernel virtual offset to the physical address. /// /// Arguments: /// IN phys: anytype - The physical address to covert. Either an integer or pointer. /// /// Return: @TypeOf(virt) /// The virtual address. /// pub fn physToVirt(phys: anytype) @TypeOf(phys) { const T = @TypeOf(phys); return switch (@typeInfo(T)) { .Pointer => @intToPtr(T, @ptrToInt(phys) + ADDR_OFFSET), .Int => phys + ADDR_OFFSET, else => @compileError("Only pointers and integers are supported"), }; } test "physToVirt" { ADDR_OFFSET = 0xC0000000; const offset: usize = ADDR_OFFSET; try expectEqual(physToVirt(@as(usize, 0)), offset + 0); try expectEqual(physToVirt(@as(usize, 123)), offset + 123); try expectEqual(@ptrToInt(physToVirt(@intToPtr(*align(1) usize, 123))), offset + 123); } test "virtToPhys" { ADDR_OFFSET = 0xC0000000; const offset: usize = ADDR_OFFSET; try expectEqual(virtToPhys(offset + 0), 0); try expectEqual(virtToPhys(offset + 123), 123); try expectEqual(@ptrToInt(virtToPhys(@intToPtr(*align(1) usize, offset + 123))), 123); }
0
repos/pluto/src
repos/pluto/src/kernel/arch.zig
const std = @import("std"); const builtin = @import("builtin"); const is_test = builtin.is_test; const build_options = @import("build_options"); pub const internals = if (is_test) @import("../../test/mock/kernel/arch_mock.zig") else switch (builtin.cpu.arch) { .i386 => @import("arch/x86/arch.zig"), else => unreachable, }; test "" { _ = @import("arch/x86/arch.zig"); }
0
repos/pluto/src
repos/pluto/src/kernel/bitmap.zig
const std = @import("std"); const builtin = std.builtin; const testing = std.testing; const expectEqual = testing.expectEqual; const Allocator = std.mem.Allocator; /// The possible errors thrown by bitmap functions pub const BitmapError = error{ /// The address given was outside the region covered by a bitmap OutOfBounds, }; /// /// A bitmap that uses a specific type to store the entries. /// The bitmap can either be statically or dynamically allocated. /// Statically allocated bitmaps are allocated at compile time and therefore must know the number of entries at compile time. /// Dynamically allocated bitmaps do not need to know the number of entries until being initialised, but do need an allocator. /// /// Arguments: /// IN static: comptime bool - Whether this bitmap is statically or dynamically allocated /// IN BitmapType: type - The integer type to use to store entries. /// IN num_entries: usize or ?usize - The number of entries if static, else ignored (can be null) /// /// Return: type. /// The bitmap type created. /// pub fn Bitmap(comptime num_entries: ?usize, comptime BitmapType: type) type { return struct { const Self = @This(); const static = num_entries != null; /// The number of entries that one bitmap type can hold. Evaluates to the number of bits the type has pub const ENTRIES_PER_BITMAP: usize = std.meta.bitCount(BitmapType); /// The value that a full bitmap will have pub const BITMAP_FULL = std.math.maxInt(BitmapType); /// The type of an index into a bitmap entry. 
The smallest integer needed to represent all bit positions in the bitmap entry type pub const IndexType = std.meta.Int(.unsigned, std.math.log2(std.math.ceilPowerOfTwo(u16, std.meta.bitCount(BitmapType)) catch unreachable)); num_bitmaps: usize, num_entries: usize, bitmaps: if (static) [std.mem.alignForward(num_entries.?, ENTRIES_PER_BITMAP) / ENTRIES_PER_BITMAP]BitmapType else []BitmapType, num_free_entries: usize, allocator: if (static) ?Allocator else Allocator, /// /// Create an instance of this bitmap type. /// /// Arguments: /// IN num: ?usize or usize - The number of entries that the bitmap created will have if dynamically allocated, else ignored (can be null) /// The number of BitmapType required to store this many entries will be allocated (dynamically or statically) and each will be zeroed. /// IN allocator: ?Allocator or Allocator - The allocator to use when allocating the BitmapTypes required. Ignored if statically allocated. /// /// Return: Self. /// The bitmap instance. /// /// Error: Allocator.Error /// OutOfMemory: There isn't enough memory available to allocate the required number of BitmapType. A statically allocated bitmap will not throw this error. /// pub fn init(num: if (static) ?usize else usize, allocator: if (static) ?Allocator else Allocator) !Self { if (static) { const n = std.mem.alignForward(num_entries.?, ENTRIES_PER_BITMAP) / ENTRIES_PER_BITMAP; return Self{ .num_bitmaps = n, .bitmaps = [_]BitmapType{0} ** (std.mem.alignForward(num_entries.?, ENTRIES_PER_BITMAP) / ENTRIES_PER_BITMAP), .num_entries = num_entries.?, .num_free_entries = num_entries.?, .allocator = null, }; } else { const n = std.mem.alignForward(num, ENTRIES_PER_BITMAP) / ENTRIES_PER_BITMAP; const self = Self{ .num_bitmaps = n, .num_entries = num, .bitmaps = try allocator.alloc(BitmapType, n), .num_free_entries = num, .allocator = allocator, }; for (self.bitmaps) |*bmp| { bmp.* = 0; } return self; } } /// /// Clone this bitmap. 
/// /// Arguments: /// IN self: *Self - The bitmap to clone. /// /// Return: Self /// The cloned bitmap /// /// Error: Allocator.Error /// OutOfMemory: There isn't enough memory available to allocate the required number of BitmapType. /// pub fn clone(self: *const Self) Allocator.Error!Self { var copy = try init(self.num_entries, self.allocator); var i: usize = 0; while (i < copy.num_entries) : (i += 1) { if (self.isSet(i) catch unreachable) { copy.setEntry(i) catch unreachable; } } return copy; } /// /// Free the memory occupied by this bitmap's internal state. It will become unusable afterwards. /// Does nothing if the bitmap was statically allocated. /// /// Arguments: /// IN self: *Self - The bitmap that should be deinitialised /// pub fn deinit(self: *Self) void { if (!static) self.allocator.free(self.bitmaps); } /// /// Set an entry within a bitmap as occupied. /// /// Arguments: /// IN/OUT self: *Self - The bitmap to modify. /// IN idx: usize - The index within the bitmap to set. /// /// Error: BitmapError. /// OutOfBounds: The index given is out of bounds. /// pub fn setEntry(self: *Self, idx: usize) BitmapError!void { if (idx >= self.num_entries) { return BitmapError.OutOfBounds; } if (!try self.isSet(idx)) { const bit = indexToBit(idx); self.bitmaps[idx / ENTRIES_PER_BITMAP] |= bit; self.num_free_entries -= 1; } } /// /// Set an entry within a bitmap as unoccupied. /// /// Arguments: /// IN/OUT self: *Self - The bitmap to modify. /// IN idx: usize - The index within the bitmap to clear. /// /// Error: BitmapError. /// OutOfBounds: The index given is out of bounds. /// pub fn clearEntry(self: *Self, idx: usize) BitmapError!void { if (idx >= self.num_entries) { return BitmapError.OutOfBounds; } if (try self.isSet(idx)) { const bit = indexToBit(idx); self.bitmaps[idx / ENTRIES_PER_BITMAP] &= ~bit; self.num_free_entries += 1; } } /// /// Convert a global bitmap index into the bit corresponding to an entry within a single BitmapType. 
/// /// Arguments: /// IN self: *const Self - The bitmap to use. /// IN idx: usize - The index into all of the bitmaps entries. /// /// Return: BitmapType. /// The bit corresponding to that index but within a single BitmapType. /// fn indexToBit(idx: usize) BitmapType { return @as(BitmapType, 1) << @intCast(IndexType, idx % ENTRIES_PER_BITMAP); } /// /// Find a number of contiguous free entries and set them. /// /// Arguments: /// IN/OUT self: *Self - The bitmap to modify. /// IN num: usize - The number of entries to set. /// IN from: ?usize - The entry number to allocate from or null if it can start anywhere /// /// Return: ?usize /// The first entry set or null if there weren't enough contiguous entries. /// If `from` was not null and any entry between `from` and `from` + num is set then null is returned. /// pub fn setContiguous(self: *Self, num: usize, from: ?usize) ?usize { if (num > self.num_free_entries) { return null; } var count: usize = 0; var start: ?usize = from; var i: usize = if (from) |f| f / ENTRIES_PER_BITMAP else 0; var bit: IndexType = if (from) |f| @truncate(IndexType, f % ENTRIES_PER_BITMAP) else 0; while (i < self.bitmaps.len) : ({ i += 1; bit = 0; }) { var bmp = self.bitmaps[i]; while (true) { const entry = bit + i * ENTRIES_PER_BITMAP; if (entry >= self.num_entries) { return null; } if ((bmp & @as(BitmapType, 1) << bit) != 0) { // This is a one so clear the progress count = 0; start = null; // If the caller requested the allocation to start from // a specific entry and it failed then return null if (from) |_| { return null; } } else { // It's a zero so increment the count count += 1; if (start == null) { // Start of the contiguous zeroes start = entry; } if (count == num) { // Reached the desired number break; } } // Avoiding overflow by checking if bit is less than the max - 1 if (bit < ENTRIES_PER_BITMAP - 1) { bit += 1; } else { // Reached the end of the bitmap break; } } if (count == num) { break; } } if (count == num) { if (start) 
|start_entry| { var j: usize = 0; while (j < num) : (j += 1) { // Can't fail as the entry was found to be free self.setEntry(start_entry + j) catch unreachable; } return start_entry; } } return null; } /// /// Set the first free entry within the bitmaps as occupied. /// /// Return: ?usize. /// The index within all bitmaps that was set or null if there wasn't one free. /// 0 .. ENTRIES_PER_BITMAP - 1 if in the first bitmap, ENTRIES_PER_BITMAP .. ENTRIES_PER_BITMAP * 2 - 1 if in the second etc. /// pub fn setFirstFree(self: *Self) ?usize { if (self.num_free_entries == 0) { return null; } for (self.bitmaps) |*bmp, i| { if (bmp.* == BITMAP_FULL) { continue; } const bit = @truncate(IndexType, @ctz(BitmapType, ~bmp.*)); const idx = bit + i * ENTRIES_PER_BITMAP; // Failing here means that the index is outside of the bitmap, so there are no free entries self.setEntry(idx) catch return null; return idx; } return null; } /// /// Check if an entry is set. /// /// Arguments: /// IN self: *const Self - The bitmap to check. /// IN idx: usize - The entry to check. /// /// Return: bool. /// True if the entry is set, else false. /// /// Error: BitmapError. /// OutOfBounds: The index given is out of bounds. 
/// pub fn isSet(self: *const Self, idx: usize) BitmapError!bool { if (idx >= self.num_entries) { return BitmapError.OutOfBounds; } return (self.bitmaps[idx / ENTRIES_PER_BITMAP] & indexToBit(idx)) != 0; } }; } test "static setEntry" { const BmpTy = Bitmap(32, u32); var bmp = try BmpTy.init(null, null); try testing.expectEqual(@as(u32, 1), bmp.bitmaps.len); try testing.expectEqual(@as(u32, 32), bmp.num_free_entries); try bmp.setEntry(0); try testing.expectEqual(@as(u32, 1), bmp.bitmaps[0]); try testing.expectEqual(@as(u32, 31), bmp.num_free_entries); try bmp.setEntry(1); try testing.expectEqual(@as(u32, 3), bmp.bitmaps[0]); try testing.expectEqual(@as(u32, 30), bmp.num_free_entries); // Repeat setting entry 1 to make sure state doesn't change try bmp.setEntry(1); try testing.expectEqual(@as(u32, 3), bmp.bitmaps[0]); try testing.expectEqual(@as(u32, 30), bmp.num_free_entries); } test "static clearEntry" { const BmpTy = Bitmap(32, u32); var bmp = try BmpTy.init(null, null); try testing.expectEqual(@as(u32, 1), bmp.bitmaps.len); try testing.expectEqual(@as(u32, 32), bmp.num_free_entries); try bmp.setEntry(0); try testing.expectEqual(@as(u32, 31), bmp.num_free_entries); try bmp.setEntry(1); try testing.expectEqual(@as(u32, 30), bmp.num_free_entries); try testing.expectEqual(@as(u32, 3), bmp.bitmaps[0]); try bmp.clearEntry(0); try testing.expectEqual(@as(u32, 31), bmp.num_free_entries); try testing.expectEqual(@as(u32, 2), bmp.bitmaps[0]); // Repeat to make sure state doesn't change try bmp.clearEntry(0); try testing.expectEqual(@as(u32, 31), bmp.num_free_entries); try testing.expectEqual(@as(u32, 2), bmp.bitmaps[0]); // Try clearing an unset entry to make sure state doesn't change try bmp.clearEntry(2); try testing.expectEqual(@as(u32, 31), bmp.num_free_entries); try testing.expectEqual(@as(u32, 2), bmp.bitmaps[0]); } test "static setFirstFree" { const BmpTy = Bitmap(32, u32); var bmp = try BmpTy.init(null, null); try testing.expectEqual(@as(u32, 1), bmp.bitmaps.len); 
// Allocate the first entry try testing.expectEqual(bmp.setFirstFree() orelse unreachable, 0); try testing.expectEqual(bmp.bitmaps[0], 1); // Allocate the second entry try testing.expectEqual(bmp.setFirstFree() orelse unreachable, 1); try testing.expectEqual(bmp.bitmaps[0], 3); // Make all but the MSB occupied and try to allocate it for (bmp.bitmaps) |*b, i| { b.* = BmpTy.BITMAP_FULL; if (i <= bmp.num_bitmaps - 1) b.* &= ~(@as(usize, 1) << BmpTy.ENTRIES_PER_BITMAP - 1); } bmp.num_free_entries = 1; try testing.expectEqual(bmp.setFirstFree() orelse unreachable, bmp.num_entries - 1); for (bmp.bitmaps) |b| { try testing.expectEqual(b, BmpTy.BITMAP_FULL); } // We should no longer be able to allocate any entries try testing.expectEqual(bmp.setFirstFree(), null); for (bmp.bitmaps) |b| { try testing.expectEqual(b, BmpTy.BITMAP_FULL); } } test "static isSet" { const BmpTy = Bitmap(32, u32); var bmp = try BmpTy.init(null, null); try testing.expectEqual(@as(u32, 1), bmp.bitmaps.len); bmp.bitmaps[0] = 1; // Make sure that only the set entry is considered set try testing.expect(try bmp.isSet(0)); var i: usize = 1; while (i < bmp.num_entries) : (i += 1) { try testing.expect(!(try bmp.isSet(@truncate(BmpTy.IndexType, i)))); } bmp.bitmaps[0] = 3; try testing.expect(try bmp.isSet(0)); try testing.expect(try bmp.isSet(1)); i = 2; while (i < bmp.num_entries) : (i += 1) { try testing.expect(!(try bmp.isSet(@truncate(BmpTy.IndexType, i)))); } bmp.bitmaps[0] = 11; try testing.expect(try bmp.isSet(0)); try testing.expect(try bmp.isSet(1)); try testing.expect(!(try bmp.isSet(2))); try testing.expect(try bmp.isSet(3)); i = 4; while (i < bmp.num_entries) : (i += 1) { try testing.expect(!(try bmp.isSet(@truncate(BmpTy.IndexType, i)))); } } test "static indexToBit" { const Type = Bitmap(8, u8); try testing.expectEqual(Type.indexToBit(0), 1); try testing.expectEqual(Type.indexToBit(1), 2); try testing.expectEqual(Type.indexToBit(2), 4); try testing.expectEqual(Type.indexToBit(3), 8); try 
testing.expectEqual(Type.indexToBit(4), 16); try testing.expectEqual(Type.indexToBit(5), 32); try testing.expectEqual(Type.indexToBit(6), 64); try testing.expectEqual(Type.indexToBit(7), 128); } test "static setContiguous" { var bmp = try Bitmap(16, u16).init(null, null); try testing.expectEqual(@as(u32, 1), bmp.bitmaps.len); // Test trying to set more entries than the bitmap has try testing.expectEqual(bmp.setContiguous(bmp.num_free_entries + 1, null), null); try testing.expectEqual(bmp.setContiguous(bmp.num_free_entries + 1, 1), null); // All entries should still be free try testing.expectEqual(bmp.num_free_entries, 16); for (bmp.bitmaps) |b| { try expectEqual(b, 0); } try testing.expectEqual(bmp.setContiguous(3, 0) orelse unreachable, 0); try expectEqual(bmp.bitmaps[0], 0b0000000000000111); for (bmp.bitmaps) |b, i| { if (i > 0) try expectEqual(b, 0); } // Test setting from top try testing.expectEqual(bmp.setContiguous(2, 14) orelse unreachable, 14); try expectEqual(bmp.bitmaps[0], 0b1100000000000111); for (bmp.bitmaps) |b, i| { if (i > 0) try expectEqual(b, 0); } try testing.expectEqual(bmp.setContiguous(3, 12), null); try expectEqual(bmp.bitmaps[0], 0b1100000000000111); for (bmp.bitmaps) |b, i| { if (i > 0) try expectEqual(b, 0); } try testing.expectEqual(bmp.setContiguous(3, null) orelse unreachable, 3); try expectEqual(bmp.bitmaps[0], 0b1100000000111111); for (bmp.bitmaps) |b, i| { if (i > 0) try expectEqual(b, 0); } // Test setting beyond the what is available try testing.expectEqual(bmp.setContiguous(9, null), null); try expectEqual(bmp.bitmaps[0], 0b1100000000111111); for (bmp.bitmaps) |b, i| { if (i > 0) try expectEqual(b, 0); } try testing.expectEqual(bmp.setContiguous(8, null) orelse unreachable, 6); try expectEqual(bmp.bitmaps[0], 0b1111111111111111); for (bmp.bitmaps) |b, i| { if (i > 0) try expectEqual(b, 0); } // No more are possible try testing.expectEqual(bmp.setContiguous(1, null), null); try expectEqual(bmp.bitmaps[0], 0b1111111111111111); for 
(bmp.bitmaps) |b, i| { if (i > 0) try expectEqual(b, 0); } try testing.expectEqual(bmp.setContiguous(1, 0), null); try expectEqual(bmp.bitmaps[0], 0b1111111111111111); for (bmp.bitmaps) |b, i| { if (i > 0) try expectEqual(b, 0); } } test "setEntry" { var bmp = try Bitmap(null, u32).init(31, std.testing.allocator); try testing.expectEqual(@as(u32, 1), bmp.bitmaps.len); defer bmp.deinit(); try testing.expectEqual(@as(u32, 31), bmp.num_free_entries); try bmp.setEntry(0); try testing.expectEqual(@as(u32, 1), bmp.bitmaps[0]); try testing.expectEqual(@as(u32, 30), bmp.num_free_entries); try bmp.setEntry(1); try testing.expectEqual(@as(u32, 3), bmp.bitmaps[0]); try testing.expectEqual(@as(u32, 29), bmp.num_free_entries); // Repeat setting entry 1 to make sure state doesn't change try bmp.setEntry(1); try testing.expectEqual(@as(u32, 3), bmp.bitmaps[0]); try testing.expectEqual(@as(u32, 29), bmp.num_free_entries); try testing.expectError(BitmapError.OutOfBounds, bmp.setEntry(31)); try testing.expectEqual(@as(u32, 29), bmp.num_free_entries); } test "clearEntry" { var bmp = try Bitmap(null, u32).init(32, std.testing.allocator); try testing.expectEqual(@as(u32, 1), bmp.bitmaps.len); defer bmp.deinit(); try testing.expectEqual(@as(u32, 32), bmp.num_free_entries); try bmp.setEntry(0); try testing.expectEqual(@as(u32, 31), bmp.num_free_entries); try bmp.setEntry(1); try testing.expectEqual(@as(u32, 30), bmp.num_free_entries); try testing.expectEqual(@as(u32, 3), bmp.bitmaps[0]); try bmp.clearEntry(0); try testing.expectEqual(@as(u32, 31), bmp.num_free_entries); try testing.expectEqual(@as(u32, 2), bmp.bitmaps[0]); // Repeat to make sure state doesn't change try bmp.clearEntry(0); try testing.expectEqual(@as(u32, 31), bmp.num_free_entries); try testing.expectEqual(@as(u32, 2), bmp.bitmaps[0]); // Try clearing an unset entry to make sure state doesn't change try bmp.clearEntry(2); try testing.expectEqual(@as(u32, 31), bmp.num_free_entries); try testing.expectEqual(@as(u32, 2), 
bmp.bitmaps[0]); try testing.expectError(BitmapError.OutOfBounds, bmp.clearEntry(32)); } test "setFirstFree multiple bitmaps" { const BmpTy = Bitmap(null, u8); var bmp = try BmpTy.init(9, std.testing.allocator); try testing.expectEqual(@as(u32, 2), bmp.bitmaps.len); defer bmp.deinit(); // Allocate the first entry try testing.expectEqual(bmp.setFirstFree() orelse unreachable, 0); try testing.expectEqual(bmp.bitmaps[0], 1); // Allocate the second entry try testing.expectEqual(bmp.setFirstFree() orelse unreachable, 1); try testing.expectEqual(bmp.bitmaps[0], 3); // Allocate the entirety of the first bitmap var entry: u32 = 2; var expected: u8 = 7; while (entry < BmpTy.ENTRIES_PER_BITMAP) { try testing.expectEqual(bmp.setFirstFree() orelse unreachable, entry); try testing.expectEqual(bmp.bitmaps[0], expected); if (entry + 1 < BmpTy.ENTRIES_PER_BITMAP) { entry += 1; expected = expected * 2 + 1; } else { break; } } // Try allocating an entry in the next bitmap try testing.expectEqual(bmp.setFirstFree() orelse unreachable, BmpTy.ENTRIES_PER_BITMAP); try testing.expectEqual(bmp.bitmaps[0], BmpTy.BITMAP_FULL); try testing.expectEqual(bmp.bitmaps[1], 1); // We should no longer be able to allocate any entries try testing.expectEqual(bmp.setFirstFree(), null); try testing.expectEqual(bmp.bitmaps[0], BmpTy.BITMAP_FULL); try testing.expectEqual(bmp.bitmaps[1], 1); } test "setFirstFree" { const BmpTy = Bitmap(null, u32); var bmp = try BmpTy.init(32, std.testing.allocator); try testing.expectEqual(@as(u32, 1), bmp.bitmaps.len); defer bmp.deinit(); // Allocate the first entry try testing.expectEqual(bmp.setFirstFree() orelse unreachable, 0); try testing.expectEqual(bmp.bitmaps[0], 1); // Allocate the second entry try testing.expectEqual(bmp.setFirstFree() orelse unreachable, 1); try testing.expectEqual(bmp.bitmaps[0], 3); // Make all but the MSB occupied and try to allocate it bmp.bitmaps[0] = BmpTy.BITMAP_FULL & ~@as(u32, 1 << (BmpTy.ENTRIES_PER_BITMAP - 1)); try 
testing.expectEqual(bmp.setFirstFree() orelse unreachable, BmpTy.ENTRIES_PER_BITMAP - 1); try testing.expectEqual(bmp.bitmaps[0], BmpTy.BITMAP_FULL); // We should no longer be able to allocate any entries try testing.expectEqual(bmp.setFirstFree(), null); try testing.expectEqual(bmp.bitmaps[0], BmpTy.BITMAP_FULL); } test "isSet" { var bmp = try Bitmap(null, u32).init(32, std.testing.allocator); try testing.expectEqual(@as(u32, 1), bmp.bitmaps.len); defer bmp.deinit(); bmp.bitmaps[0] = 1; // Make sure that only the set entry is considered set try testing.expect(try bmp.isSet(0)); var i: u32 = 1; while (i < bmp.num_entries) : (i += 1) { try testing.expect(!try bmp.isSet(i)); } bmp.bitmaps[0] = 3; try testing.expect(try bmp.isSet(0)); try testing.expect(try bmp.isSet(1)); i = 2; while (i < bmp.num_entries) : (i += 1) { try testing.expect(!try bmp.isSet(i)); } bmp.bitmaps[0] = 11; try testing.expect(try bmp.isSet(0)); try testing.expect(try bmp.isSet(1)); try testing.expect(!try bmp.isSet(2)); try testing.expect(try bmp.isSet(3)); i = 4; while (i < bmp.num_entries) : (i += 1) { try testing.expect(!try bmp.isSet(i)); } try testing.expectError(BitmapError.OutOfBounds, bmp.isSet(33)); } test "indexToBit" { const Type = Bitmap(null, u8); var bmp = try Type.init(10, std.testing.allocator); try testing.expectEqual(@as(u32, 2), bmp.bitmaps.len); defer bmp.deinit(); try testing.expectEqual(Type.indexToBit(0), 1); try testing.expectEqual(Type.indexToBit(1), 2); try testing.expectEqual(Type.indexToBit(2), 4); try testing.expectEqual(Type.indexToBit(3), 8); try testing.expectEqual(Type.indexToBit(4), 16); try testing.expectEqual(Type.indexToBit(5), 32); try testing.expectEqual(Type.indexToBit(6), 64); try testing.expectEqual(Type.indexToBit(7), 128); try testing.expectEqual(Type.indexToBit(8), 1); try testing.expectEqual(Type.indexToBit(9), 2); } fn testCheckBitmaps(bmp: Bitmap(null, u4), b1: u4, b2: u4, b3: u4, b4: u4) !void { try testing.expectEqual(@as(u4, b1), 
bmp.bitmaps[0]); try testing.expectEqual(@as(u4, b2), bmp.bitmaps[1]); try testing.expectEqual(@as(u4, b3), bmp.bitmaps[2]); try testing.expectEqual(@as(u4, b4), bmp.bitmaps[3]); } test "setContiguous" { var bmp = try Bitmap(null, u4).init(16, std.testing.allocator); try testing.expectEqual(@as(u32, 4), bmp.bitmaps.len); defer bmp.deinit(); // Test trying to set more entries than the bitmap has try testing.expectEqual(bmp.setContiguous(bmp.num_entries + 1, null), null); try testing.expectEqual(bmp.setContiguous(bmp.num_entries + 1, 1), null); // All entries should still be free try testing.expectEqual(bmp.num_free_entries, bmp.num_entries); try testCheckBitmaps(bmp, 0, 0, 0, 0); try testing.expectEqual(bmp.setContiguous(3, 0) orelse unreachable, 0); try testCheckBitmaps(bmp, 0b0111, 0, 0, 0); // Test setting from top try testing.expectEqual(bmp.setContiguous(2, 14) orelse unreachable, 14); try testCheckBitmaps(bmp, 0b0111, 0, 0, 0b1100); try testing.expectEqual(bmp.setContiguous(3, 12), null); try testCheckBitmaps(bmp, 0b0111, 0, 0, 0b1100); try testing.expectEqual(bmp.setContiguous(3, null) orelse unreachable, 3); try testCheckBitmaps(bmp, 0b1111, 0b0011, 0, 0b1100); // Test setting beyond the what is available try testing.expectEqual(bmp.setContiguous(9, null), null); try testCheckBitmaps(bmp, 0b1111, 0b0011, 0, 0b1100); try testing.expectEqual(bmp.setContiguous(8, null) orelse unreachable, 6); try testCheckBitmaps(bmp, 0b1111, 0b1111, 0b1111, 0b1111); // No more are possible try testing.expectEqual(bmp.setContiguous(1, null), null); try testCheckBitmaps(bmp, 0b1111, 0b1111, 0b1111, 0b1111); try testing.expectEqual(bmp.setContiguous(1, 0), null); try testCheckBitmaps(bmp, 0b1111, 0b1111, 0b1111, 0b1111); }
0
repos/pluto/src
repos/pluto/src/kernel/serial.zig
const arch = @import("arch.zig").internals; const build_options = @import("build_options"); pub const Serial = struct { /// Function that writes a single byte to the serial stream pub const Write = fn (byte: u8) void; write: Write, /// /// Write a slice of bytes to the serial stream. /// /// Arguments: /// str: []const u8 - The bytes to send. /// pub fn writeBytes(self: *const @This(), bytes: []const u8) void { for (bytes) |byte| { self.write(byte); } } }; /// /// Initialise the serial interface. The details of how this is done depends on the architecture. /// /// Arguments: /// IN boot_payload: arch.BootPayload - The payload passed to the kernel at boot. How this is used depends on the architecture /// /// Return: Serial /// The serial interface constructed by the architecture /// pub fn init(boot_payload: arch.BootPayload) Serial { const serial = arch.initSerial(boot_payload); switch (build_options.test_mode) { .Initialisation => runtimeTests(serial), else => {}, } return serial; } /// /// Run all the runtime tests /// pub fn runtimeTests(serial: Serial) void { rt_writeByte(serial); rt_writeBytes(serial); } /// /// Test writing a byte and a new line separately /// fn rt_writeByte(serial: Serial) void { serial.write('c'); serial.write('\n'); } /// /// Test writing a series of bytes /// fn rt_writeBytes(serial: Serial) void { serial.writeBytes(&[_]u8{ '1', '2', '3', '\n' }); }
0
repos/pluto/src
repos/pluto/src/kernel/vmm.zig
const build_options = @import("build_options"); const mock_path = build_options.mock_path; const builtin = std.builtin; const is_test = builtin.is_test; const std = @import("std"); const log = std.log.scoped(.vmm); const bitmap = @import("bitmap.zig"); const pmm = @import("pmm.zig"); const mem = @import("mem.zig"); const tty = @import("tty.zig"); const panic = @import("panic.zig").panic; const arch = @import("arch.zig").internals; const Allocator = std.mem.Allocator; const assert = std.debug.assert; /// Attributes for a virtual memory allocation pub const Attributes = struct { /// Whether this memory belongs to the kernel and can therefore not be accessed in user mode kernel: bool, /// If this memory can be written to writable: bool, /// If this memory can be cached. Memory mapped to a device shouldn't, for example cachable: bool, }; /// All data that must be remembered for a virtual memory allocation const Allocation = struct { /// The physical blocks of memory associated with this allocation physical: std.ArrayList(usize), }; /// The size of each allocatable block, the same as the physical memory manager's block size pub const BLOCK_SIZE: usize = pmm.BLOCK_SIZE; pub const MapperError = error{ InvalidVirtualAddress, InvalidPhysicalAddress, AddressMismatch, MisalignedVirtualAddress, MisalignedPhysicalAddress, NotMapped, }; /// /// Returns a container that can map and unmap virtual memory to physical memory. /// The mapper can pass some payload data when mapping an unmapping, which is of type `Payload`. This can be anything that the underlying mapper needs to carry out the mapping process. /// For x86, it would be the page directory that is being mapped within. An architecture or other mapper can specify the data it needs when mapping by specifying this type. /// /// Arguments: /// IN comptime Payload: type - The type of the VMM-specific payload to pass when mapping and unmapping /// /// Return: type /// The Mapper type constructed. 
/// pub fn Mapper(comptime Payload: type) type { return struct { /// /// Map a region (can span more than one block) of virtual memory to physical memory. After a call to this function, the memory should be present the next time it is accessed. /// The attributes given must be obeyed when possible. /// /// Arguments: /// IN virtual_start: usize - The start of the virtual memory to map /// IN virtual_end: usize - The end of the virtual memory to map /// IN physical_start: usize - The start of the physical memory to map to /// IN physical_end: usize - The end of the physical memory to map to /// IN attrs: Attributes - The attributes to apply to this region of memory /// IN/OUT allocator: Allocator - The allocator to use when mapping, if required /// IN spec: Payload - The payload to pass to the mapper /// /// Error: AllocatorError || MapperError /// The causes depend on the mapper used /// mapFn: fn (virtual_start: usize, virtual_end: usize, physical_start: usize, physical_end: usize, attrs: Attributes, allocator: Allocator, spec: Payload) (Allocator.Error || MapperError)!void, /// /// Unmap a region (can span more than one block) of virtual memory from its physical memory. After a call to this function, the memory should not be accessible without error. 
/// /// Arguments: /// IN virtual_start: usize - The start of the virtual region to unmap /// IN virtual_end: usize - The end of the virtual region to unmap /// IN/OUT allocator: Allocator - The allocator to use to free the mapping /// IN spec: Payload - The payload to pass to the mapper /// /// Error: MapperError /// The causes depend on the mapper used /// unmapFn: fn (virtual_start: usize, virtual_end: usize, allocator: Allocator, spec: Payload) MapperError!void, }; } /// Errors that can be returned by VMM functions pub const VmmError = error{ /// A memory region expected to be allocated wasn't NotAllocated, /// A memory region expected to not be allocated was AlreadyAllocated, /// A physical memory region expected to not be allocated was PhysicalAlreadyAllocated, /// A physical region of memory isn't of the same size as a virtual region PhysicalVirtualMismatch, /// Virtual addresses are invalid InvalidVirtAddresses, /// Physical addresses are invalid InvalidPhysAddresses, /// Not enough virtual space in the VMM OutOfMemory, }; /// The boot-time offset that the virtual addresses are from the physical addresses /// This is the start of the memory owned by the kernel and so is where the kernel VMM starts extern var KERNEL_ADDR_OFFSET: *u32; /// The virtual memory manager associated with the kernel address space pub var kernel_vmm: VirtualMemoryManager(arch.VmmPayload) = undefined; /// /// Construct a virtual memory manager to keep track of allocated and free virtual memory regions within a certain space /// /// Arguments: /// IN comptime Payload: type - The type of the payload to pass to the mapper /// /// Return: type /// The constructed type /// pub fn VirtualMemoryManager(comptime Payload: type) type { return struct { /// The bitmap that keeps track of allocated and free regions bmp: bitmap.Bitmap(null, usize), /// The start of the memory to be tracked start: usize, /// The end of the memory to be tracked end: usize, /// The allocator to use when allocating and 
freeing regions allocator: Allocator, /// All allocations that have been made with this manager allocations: std.hash_map.AutoHashMap(usize, Allocation), /// The mapper to use when allocating and freeing regions mapper: Mapper(Payload), /// The payload to pass to the mapper functions payload: Payload, const Self = @This(); /// /// Initialise a virtual memory manager /// /// Arguments: /// IN start: usize - The start of the memory region to manage /// IN end: usize - The end of the memory region to manage. Must be greater than the start /// IN/OUT allocator: Allocator - The allocator to use when allocating and freeing regions /// IN mapper: Mapper - The mapper to use when allocating and freeing regions /// IN payload: Payload - The payload data to be passed to the mapper /// /// Return: Self /// The manager constructed /// /// Error: Allocator.Error /// error.OutOfMemory - The allocator cannot allocate the memory required /// pub fn init(start: usize, end: usize, allocator: Allocator, mapper: Mapper(Payload), payload: Payload) Allocator.Error!Self { const size = end - start; var bmp = try bitmap.Bitmap(null, usize).init(std.mem.alignForward(size, pmm.BLOCK_SIZE) / pmm.BLOCK_SIZE, allocator); return Self{ .bmp = bmp, .start = start, .end = end, .allocator = allocator, .allocations = std.hash_map.AutoHashMap(usize, Allocation).init(allocator), .mapper = mapper, .payload = payload, }; } /// /// Copy this VMM. 
Changes to one copy will not affect the other /// /// Arguments: /// IN self: *Self - The VMM to copy /// /// Error: Allocator.Error /// OutOfMemory - There wasn't enough memory for copying /// /// Return: Self /// The copy /// pub fn copy(self: *const Self) Allocator.Error!Self { var clone = Self{ .bmp = try self.bmp.clone(), .start = self.start, .end = self.end, .allocator = self.allocator, .allocations = std.hash_map.AutoHashMap(usize, Allocation).init(self.allocator), .mapper = self.mapper, .payload = self.payload, }; var it = self.allocations.iterator(); while (it.next()) |entry| { var list = std.ArrayList(usize).init(self.allocator); for (entry.value_ptr.physical.items) |block| { _ = try list.append(block); } _ = try clone.allocations.put(entry.key_ptr.*, Allocation{ .physical = list }); } return clone; } /// /// Free the internal state of the VMM. It is unusable afterwards /// /// Arguments: /// IN self: *Self - The VMM to deinitialise /// pub fn deinit(self: *Self) void { self.bmp.deinit(); var it = self.allocations.iterator(); while (it.next()) |entry| { entry.value_ptr.physical.deinit(); } self.allocations.deinit(); } /// /// Find the physical address that a given virtual address is mapped to. 
/// /// Arguments: /// IN self: *const Self - The VMM to check for mappings in /// IN virt: usize - The virtual address to find the physical address for /// /// Return: usize /// The physical address that the virtual address is mapped to /// /// Error: VmmError /// VmmError.NotAllocated - The virtual address hasn't been mapped within the VMM /// pub fn virtToPhys(self: *const Self, virt: usize) VmmError!usize { var it = self.allocations.iterator(); while (it.next()) |entry| { const vaddr = entry.key_ptr.*; const allocation = entry.value_ptr.*; // If this allocation range covers the virtual address then figure out the corresponding physical block if (vaddr <= virt and vaddr + (allocation.physical.items.len * BLOCK_SIZE) > virt) { const block_number = (virt - vaddr) / BLOCK_SIZE; const block_offset = (virt - vaddr) % BLOCK_SIZE; return allocation.physical.items[block_number] + block_offset; } } return VmmError.NotAllocated; } /// /// Find the virtual address that a given physical address is mapped to. 
/// /// Arguments: /// IN self: *const Self - The VMM to check for mappings in /// IN phys: usize - The physical address to find the virtual address for /// /// Return: usize /// The virtual address that the physical address is mapped to /// /// Error: VmmError /// VmmError.NotAllocated - The physical address hasn't been mapped within the VMM /// pub fn physToVirt(self: *const Self, phys: usize) VmmError!usize { var it = self.allocations.iterator(); while (it.next()) |entry| { const vaddr = entry.key_ptr.*; const allocation = entry.value_ptr.*; for (allocation.physical.items) |block, i| { if (block <= phys and block + BLOCK_SIZE > phys) { const block_addr = vaddr + i * BLOCK_SIZE; const block_offset = phys % BLOCK_SIZE; return block_addr + block_offset; } } } return VmmError.NotAllocated; } /// /// Check if a virtual memory address has been set /// /// Arguments: /// IN self: *Self - The manager to check /// IN virt: usize - The virtual memory address to check /// /// Return: bool /// Whether the address is set /// /// Error: pmm.PmmError /// Bitmap(u32).Error.OutOfBounds - The address given is outside of the memory managed /// pub fn isSet(self: *const Self, virt: usize) bitmap.BitmapError!bool { if (virt < self.start) { return bitmap.BitmapError.OutOfBounds; } return self.bmp.isSet((virt - self.start) / BLOCK_SIZE); } /// /// Map a region (can span more than one block) of virtual memory to a specific region of memory /// /// Arguments: /// IN/OUT self: *Self - The manager to modify /// IN virtual: mem.Range - The virtual region to set /// IN physical: ?mem.Range - The physical region to map to or null if only the virtual region is to be set /// IN attrs: Attributes - The attributes to apply to the memory regions /// /// Error: VmmError || Bitmap(u32).BitmapError || Allocator.Error || MapperError /// VmmError.AlreadyAllocated - The virtual address has already been allocated /// VmmError.PhysicalAlreadyAllocated - The physical address has already been allocated /// 
VmmError.PhysicalVirtualMismatch - The physical region and virtual region are of different sizes /// VmmError.InvalidVirtAddresses - The start virtual address is greater than the end address /// VmmError.InvalidPhysicalAddresses - The start physical address is greater than the end address /// Bitmap.BitmapError.OutOfBounds - The physical or virtual addresses are out of bounds /// Allocator.Error.OutOfMemory - Allocating the required memory failed /// MapperError.* - The causes depend on the mapper used /// pub fn set(self: *Self, virtual: mem.Range, physical: ?mem.Range, attrs: Attributes) (VmmError || bitmap.BitmapError || Allocator.Error || MapperError)!void { var virt = virtual.start; while (virt < virtual.end) : (virt += BLOCK_SIZE) { if (try self.isSet(virt)) { return VmmError.AlreadyAllocated; } } if (virtual.start > virtual.end) { return VmmError.InvalidVirtAddresses; } if (physical) |p| { if (virtual.end - virtual.start != p.end - p.start) { return VmmError.PhysicalVirtualMismatch; } if (p.start > p.end) { return VmmError.InvalidPhysAddresses; } var phys = p.start; while (phys < p.end) : (phys += BLOCK_SIZE) { if (try pmm.isSet(phys)) { return VmmError.PhysicalAlreadyAllocated; } } } var phys_list = std.ArrayList(usize).init(self.allocator); virt = virtual.start; while (virt < virtual.end) : (virt += BLOCK_SIZE) { try self.bmp.setEntry((virt - self.start) / BLOCK_SIZE); } if (physical) |p| { var phys = p.start; while (phys < p.end) : (phys += BLOCK_SIZE) { try pmm.setAddr(phys); try phys_list.append(phys); } } // Do this before mapping as the mapper may depend on the allocation being tracked _ = try self.allocations.put(virtual.start, Allocation{ .physical = phys_list }); if (physical) |p| { try self.mapper.mapFn(virtual.start, virtual.end, p.start, p.end, attrs, self.allocator, self.payload); } } /// /// Allocate a number of contiguous blocks of virtual memory /// /// Arguments: /// IN/OUT self: *Self - The manager to allocate for /// IN num: usize - The 
number of blocks to allocate /// IN virtual_addr: ?usize - The virtual address to allocate to or null if any address is acceptable /// IN attrs: Attributes - The attributes to apply to the mapped memory /// /// Return: ?usize /// The address at the start of the allocated region, or null if no region could be allocated due to a lack of contiguous blocks. /// /// Error: Allocator.Error /// error.OutOfMemory: The required amount of memory couldn't be allocated /// pub fn alloc(self: *Self, num: usize, virtual_addr: ?usize, attrs: Attributes) Allocator.Error!?usize { if (num == 0) { return null; } // Ensure that there is both enough physical and virtual address space free if (pmm.blocksFree() >= num and self.bmp.num_free_entries >= num) { // The virtual address space must be contiguous // Allocate from a specific entry if the caller requested it if (self.bmp.setContiguous(num, if (virtual_addr) |a| (a - self.start) / BLOCK_SIZE else null)) |entry| { var block_list = std.ArrayList(usize).init(self.allocator); try block_list.ensureUnusedCapacity(num); var i: usize = 0; const vaddr_start = self.start + entry * BLOCK_SIZE; var vaddr = vaddr_start; // Map the blocks to physical memory while (i < num) : (i += 1) { const addr = pmm.alloc() orelse unreachable; try block_list.append(addr); // The map function failing isn't the caller's responsibility so panic as it shouldn't happen self.mapper.mapFn(vaddr, vaddr + BLOCK_SIZE, addr, addr + BLOCK_SIZE, attrs, self.allocator, self.payload) catch |e| { panic(@errorReturnTrace(), "Failed to map virtual memory: {X}\n", .{e}); }; vaddr += BLOCK_SIZE; } _ = try self.allocations.put(vaddr_start, Allocation{ .physical = block_list }); return vaddr_start; } } return null; } /// /// Copy data from an address in a virtual memory manager to an address in another virtual memory manager /// /// Arguments: /// IN self: *Self - One of the VMMs to copy between. 
This should be the currently active VMM /// IN other: *Self - The second of the VMMs to copy between /// IN from: bool - Whether the data should be copied from `self` to `other`, or the other way around /// IN data: if (from) []const u8 else []u8 - The being copied from or written to (depending on `from`). Must be mapped within the VMM being copied from/to /// IN address: usize - The address within `other` that is to be copied from or to /// /// Error: VmmError || pmm.PmmError || Allocator.Error /// VmmError.NotAllocated - Some or all of the destination isn't mapped /// VmmError.OutOfMemory - There wasn't enough space in the VMM to use for temporary mapping /// Bitmap(u32).Error.OutOfBounds - The address given is outside of the memory managed /// Allocator.Error.OutOfMemory - There wasn't enough memory available to fulfill the request /// pub fn copyData(self: *Self, other: *const Self, comptime from: bool, data: if (from) []const u8 else []u8, address: usize) (bitmap.BitmapError || VmmError || Allocator.Error)!void { if (data.len == 0) { return; } const start_addr = std.mem.alignBackward(address, BLOCK_SIZE); const end_addr = std.mem.alignForward(address + data.len, BLOCK_SIZE); if (end_addr >= other.end or start_addr < other.start) return bitmap.BitmapError.OutOfBounds; // Find physical blocks for the address var blocks = std.ArrayList(usize).init(self.allocator); defer blocks.deinit(); var it = other.allocations.iterator(); while (it.next()) |allocation| { const virtual = allocation.key_ptr.*; const physical = allocation.value_ptr.*.physical.items; if (start_addr >= virtual and virtual + physical.len * BLOCK_SIZE >= end_addr) { const first_block_idx = (start_addr - virtual) / BLOCK_SIZE; const last_block_idx = (end_addr - virtual) / BLOCK_SIZE; try blocks.appendSlice(physical[first_block_idx..last_block_idx]); } } // Make sure the address is actually mapped in the destination VMM if (blocks.items.len != std.mem.alignForward(data.len, BLOCK_SIZE) / BLOCK_SIZE) { 
return VmmError.NotAllocated; } // Map them into self for some vaddr so they can be accessed from this VMM if (self.bmp.setContiguous(blocks.items.len, null)) |entry| { const v_start = entry * BLOCK_SIZE + self.start; for (blocks.items) |block, i| { const v = v_start + i * BLOCK_SIZE; const v_end = v + BLOCK_SIZE; const p = block; const p_end = p + BLOCK_SIZE; self.mapper.mapFn(v, v_end, p, p_end, .{ .kernel = true, .writable = true, .cachable = false }, self.allocator, self.payload) catch |e| { // If we fail to map one of the blocks then attempt to free all previously mapped if (i > 0) { self.mapper.unmapFn(v_start, v_end, self.allocator, self.payload) catch |e2| { // If we can't unmap then just panic panic(@errorReturnTrace(), "Failed to unmap virtual region 0x{X} -> 0x{X}: {}\n", .{ v_start, v_end, e2 }); }; } panic(@errorReturnTrace(), "Failed to map virtual region 0x{X} -> 0x{X} to 0x{X} -> 0x{X}: {}\n", .{ v, v_end, p, p_end, e }); }; } // Copy to vaddr from above const align_offset = address - start_addr; var data_copy = @intToPtr([*]u8, v_start + align_offset)[0..data.len]; if (from) { std.mem.copy(u8, data_copy, data); } else { std.mem.copy(u8, data, data_copy); } // TODO Unmap and freee virtual blocks from self so they can be used in the future } else { return VmmError.OutOfMemory; } } /// /// Free a previous allocation /// /// Arguments: /// IN/OUT self: *Self - The manager to free within /// IN vaddr: usize - The start of the allocation to free. 
This should be the address returned from a prior `alloc` call /// /// Error: Bitmap.BitmapError || VmmError /// VmmError.NotAllocated - This address hasn't been allocated yet /// Bitmap.BitmapError.OutOfBounds - The address is out of the manager's bounds /// pub fn free(self: *Self, vaddr: usize) (bitmap.BitmapError || VmmError)!void { const entry = (vaddr - self.start) / BLOCK_SIZE; if (try self.bmp.isSet(entry)) { // There will be an allocation associated with this virtual address const allocation = self.allocations.get(vaddr).?; const physical = allocation.physical; defer physical.deinit(); const num_physical_allocations = physical.items.len; for (physical.items) |block, i| { // Clear the address space entry and free the physical memory try self.bmp.clearEntry(entry + i); pmm.free(block) catch |e| { panic(@errorReturnTrace(), "Failed to free PMM reserved memory at 0x{X}: {}\n", .{ block * BLOCK_SIZE, e }); }; } // Unmap the entire range const region_start = vaddr; const region_end = vaddr + (num_physical_allocations * BLOCK_SIZE); self.mapper.unmapFn(region_start, region_end, self.allocator, self.payload) catch |e| { panic(@errorReturnTrace(), "Failed to unmap VMM reserved memory from 0x{X} to 0x{X}: {}\n", .{ region_start, region_end, e }); }; // The allocation is freed so remove from the map assert(self.allocations.remove(vaddr)); } else { return VmmError.NotAllocated; } } }; } /// /// Initialise the main system virtual memory manager covering 4GB. Maps in the kernel code and reserved virtual memory /// /// Arguments: /// IN mem_profile: *const mem.MemProfile - The system's memory profile. 
/// This is used to find the kernel code region and boot modules
///     IN/OUT allocator: Allocator - The allocator to use when needing to allocate memory
///
/// Return: VirtualMemoryManager
///     The virtual memory manager created with all reserved virtual regions allocated
///
/// Error: Allocator.Error
///     error.OutOfMemory - The allocator cannot allocate the memory required
///
pub fn init(mem_profile: *const mem.MemProfile, allocator: Allocator) Allocator.Error!*VirtualMemoryManager(arch.VmmPayload) {
    log.info("Init\n", .{});
    defer log.info("Done\n", .{});

    kernel_vmm = try VirtualMemoryManager(arch.VmmPayload).init(@ptrToInt(&KERNEL_ADDR_OFFSET), 0xFFFFFFFF, allocator, arch.VMM_MAPPER, arch.KERNEL_VMM_PAYLOAD);

    // Map all the reserved virtual addresses.
    for (mem_profile.virtual_reserved) |entry| {
        // Round each region out to whole blocks before reserving it
        const virtual = mem.Range{
            .start = std.mem.alignBackward(entry.virtual.start, BLOCK_SIZE),
            .end = std.mem.alignForward(entry.virtual.end, BLOCK_SIZE),
        };
        const physical: ?mem.Range = if (entry.physical) |phys|
            mem.Range{
                .start = std.mem.alignBackward(phys.start, BLOCK_SIZE),
                .end = std.mem.alignForward(phys.end, BLOCK_SIZE),
            }
        else
            null;
        // Overlapping reserved regions are tolerated; anything else is fatal at boot
        kernel_vmm.set(virtual, physical, .{ .kernel = true, .writable = true, .cachable = true }) catch |e| switch (e) {
            VmmError.AlreadyAllocated => {},
            else => panic(@errorReturnTrace(), "Failed mapping region in VMM {X}: {}\n", .{ entry, e }),
        };
    }
    return &kernel_vmm;
}

test "virtToPhys" {
    const num_entries = 512;
    var vmm = try testInit(num_entries);
    defer testDeinit(&vmm);

    const vstart = vmm.start + BLOCK_SIZE;
    const vend = vstart + BLOCK_SIZE * 3;
    const pstart = BLOCK_SIZE * 20;
    const pend = BLOCK_SIZE * 23;

    // Set the physical and virtual back to front to complicate the mappings a bit
    try vmm.set(.{ .start = vstart, .end = vstart + BLOCK_SIZE }, mem.Range{ .start = pstart + BLOCK_SIZE * 2, .end = pend }, .{ .kernel = true, .writable = true, .cachable = true });
    try vmm.set(.{ .start = vstart + BLOCK_SIZE, .end = vend }, mem.Range{ .start = pstart, .end = pstart + BLOCK_SIZE * 2 }, .{ .kernel = true, .writable = true, .cachable = true });

    try std.testing.expectEqual(pstart + BLOCK_SIZE * 2, try vmm.virtToPhys(vstart));
    try std.testing.expectEqual(pstart + BLOCK_SIZE * 2 + 29, (try vmm.virtToPhys(vstart + 29)));
    try std.testing.expectEqual(pstart + 29, (try vmm.virtToPhys(vstart + BLOCK_SIZE + 29)));

    try std.testing.expectError(VmmError.NotAllocated, vmm.virtToPhys(vstart - 1));
    try std.testing.expectError(VmmError.NotAllocated, vmm.virtToPhys(vend));
    try std.testing.expectError(VmmError.NotAllocated, vmm.virtToPhys(vend + 1));
}

test "physToVirt" {
    const num_entries = 512;
    var vmm = try testInit(num_entries);
    defer testDeinit(&vmm);

    const vstart = vmm.start + BLOCK_SIZE;
    const vend = vstart + BLOCK_SIZE * 3;
    const pstart = BLOCK_SIZE * 20;
    const pend = BLOCK_SIZE * 23;

    // Set the physical and virtual back to front to complicate the mappings a bit
    try vmm.set(.{ .start = vstart, .end = vstart + BLOCK_SIZE }, mem.Range{ .start = pstart + BLOCK_SIZE * 2, .end = pend }, .{ .kernel = true, .writable = true, .cachable = true });
    try vmm.set(.{ .start = vstart + BLOCK_SIZE, .end = vend }, mem.Range{ .start = pstart, .end = pstart + BLOCK_SIZE * 2 }, .{ .kernel = true, .writable = true, .cachable = true });

    try std.testing.expectEqual(vstart, try vmm.physToVirt(pstart + BLOCK_SIZE * 2));
    try std.testing.expectEqual(vstart + 29, (try vmm.physToVirt(pstart + BLOCK_SIZE * 2 + 29)));
    try std.testing.expectEqual(vstart + BLOCK_SIZE + 29, (try vmm.physToVirt(pstart + 29)));

    try std.testing.expectError(VmmError.NotAllocated, vmm.physToVirt(pstart - 1));
    try std.testing.expectError(VmmError.NotAllocated, vmm.physToVirt(pend));
    try std.testing.expectError(VmmError.NotAllocated, vmm.physToVirt(pend + 1));
}

test "alloc and free" {
    const num_entries = 512;
    var vmm = try testInit(num_entries);
    defer testDeinit(&vmm);
    var allocations = test_allocations.?;
    var virtual_allocations = std.ArrayList(usize).init(std.testing.allocator);
    defer virtual_allocations.deinit();

    var entry: u32 = 0;
    while (entry < num_entries) {
        // Test allocating various numbers of blocks all at once
        // Rather than using a random number generator, just set the number of blocks to allocate based on how many entries have been done so far
        var num_to_alloc: u32 = if (entry > 400) @as(u32, 8) else if (entry > 320) @as(u32, 14) else if (entry > 270) @as(u32, 9) else if (entry > 150) @as(u32, 26) else @as(u32, 1);
        const result = try vmm.alloc(num_to_alloc, null, .{ .kernel = true, .writable = true, .cachable = true });

        var should_be_set = true;
        if (entry + num_to_alloc > num_entries) {
            // If the number to allocate exceeded the number of entries, then allocation should have failed
            try std.testing.expectEqual(@as(?usize, null), result);
            should_be_set = false;
        } else {
            // Else it should have succeeded and allocated the correct address
            try std.testing.expectEqual(@as(?usize, vmm.start + entry * BLOCK_SIZE), result);
            try virtual_allocations.append(result.?);
        }

        // Make sure that the entries are set or not depending on the allocation success
        var vaddr = vmm.start + entry * BLOCK_SIZE;
        // NOTE(review): this bound omits vmm.start while vaddr includes it — it looks like it
        // should be vmm.start + (entry + num_to_alloc) * BLOCK_SIZE; confirm against testInit's
        // choice of vmm.start before changing.
        while (vaddr < (entry + num_to_alloc) * BLOCK_SIZE) : (vaddr += BLOCK_SIZE) {
            if (should_be_set) {
                // Allocation succeeded so this address should be set
                try std.testing.expect(try vmm.isSet(vaddr));
                // The test mapper should have received this address
                try std.testing.expect(try allocations.isSet(vaddr / BLOCK_SIZE));
            } else {
                // Allocation failed as there weren't enough free entries
                if (vaddr >= num_entries * BLOCK_SIZE) {
                    // If this address is beyond the VMM's end address, it should be out of bounds
                    try std.testing.expectError(bitmap.BitmapError.OutOfBounds, vmm.isSet(vaddr));
                    try std.testing.expectError(bitmap.BitmapError.OutOfBounds, allocations.isSet(vaddr / BLOCK_SIZE));
                } else {
                    // Else it should not be set
                    try std.testing.expect(!(try vmm.isSet(vaddr)));
                    // The test mapper should not have received this address
                    try std.testing.expect(!(try allocations.isSet(vaddr / BLOCK_SIZE)));
                }
            }
        }
        entry += num_to_alloc;

        // All later entries should not be set
        var later_entry = entry;
        while (later_entry < num_entries) : (later_entry += 1) {
            try std.testing.expect(!(try vmm.isSet(vmm.start + later_entry * BLOCK_SIZE)));
            try std.testing.expect(!(try pmm.isSet(later_entry * BLOCK_SIZE)));
        }
    }

    // Out of bounds entries should cause an error
    try std.testing.expectError(bitmap.BitmapError.OutOfBounds, vmm.isSet((entry + 1) * BLOCK_SIZE));

    // Try freeing all allocations
    for (virtual_allocations.items) |alloc| {
        const alloc_group = vmm.allocations.get(alloc);
        try std.testing.expect(alloc_group != null);
        const physical = alloc_group.?.physical;
        // We need to create a copy of the physical allocations since the free call deinits them
        var physical_copy = std.ArrayList(usize).init(std.testing.allocator);
        defer physical_copy.deinit();
        // Make sure they are all reserved in the PMM
        for (physical.items) |phys| {
            try std.testing.expect(try pmm.isSet(phys));
            try physical_copy.append(phys);
        }
        vmm.free(alloc) catch unreachable;
        // This virtual allocation should no longer be in the hashmap
        try std.testing.expectEqual(vmm.allocations.get(alloc), null);
        try std.testing.expect(!try vmm.isSet(alloc));
        // And all its physical blocks should now be free
        for (physical_copy.items) |phys| {
            try std.testing.expect(!try pmm.isSet(phys));
        }
    }
}

test "alloc at a specific address" {
    const num_entries = 100;
    var vmm = try testInit(num_entries);
    defer testDeinit(&vmm);

    const attrs = Attributes{ .writable = true, .cachable = true, .kernel = true };
    // Try allocating at the start
    try std.testing.expectEqual(vmm.alloc(10, vmm.start, attrs), vmm.start);
    // Try that again
    try std.testing.expectEqual(vmm.alloc(5, vmm.start, attrs), null);
    const middle = vmm.start + (vmm.end - vmm.start) / 2;
    // Try allocating at the middle
    try std.testing.expectEqual(vmm.alloc(num_entries / 2, middle, attrs), middle);
    // Allocating
after the start and colliding with the middle should be impossible try std.testing.expectEqual(vmm.alloc(num_entries / 2, vmm.start + 10 * BLOCK_SIZE, attrs), null); // Allocating within the last half should be impossible try std.testing.expectEqual(vmm.alloc(num_entries / 4, middle + BLOCK_SIZE, attrs), null); // It should still be possible to allocate between the start and middle try std.testing.expectEqual(vmm.alloc(num_entries / 2 - 10, vmm.start + 10 * BLOCK_SIZE, attrs), vmm.start + 10 * BLOCK_SIZE); // It should now be full try std.testing.expectEqual(vmm.bmp.num_free_entries, 0); // Allocating at the end and before the start should fail try std.testing.expectEqual(vmm.alloc(1, vmm.end, attrs), null); try std.testing.expectEqual(vmm.alloc(1, vmm.start - BLOCK_SIZE, attrs), null); } test "set" { const num_entries = 512; var vmm = try testInit(num_entries); defer testDeinit(&vmm); const vstart = vmm.start + BLOCK_SIZE * 37; const vend = vmm.start + BLOCK_SIZE * 46; const pstart = BLOCK_SIZE * 37 + 123; const pend = BLOCK_SIZE * 46 + 123; const attrs = Attributes{ .kernel = true, .writable = true, .cachable = true }; try vmm.set(.{ .start = vstart, .end = vend }, mem.Range{ .start = pstart, .end = pend }, attrs); // Make sure it put the correct address in the map try std.testing.expect(vmm.allocations.get(vstart) != null); var allocations = test_allocations.?; // The entries before the virtual start shouldn't be set var vaddr = vmm.start; while (vaddr < vstart) : (vaddr += BLOCK_SIZE) { try std.testing.expect(!(try allocations.isSet((vaddr - vmm.start) / BLOCK_SIZE))); } // The entries up until the virtual end should be set while (vaddr < vend) : (vaddr += BLOCK_SIZE) { try std.testing.expect(try allocations.isSet((vaddr - vmm.start) / BLOCK_SIZE)); } // The entries after the virtual end should not be set while (vaddr < vmm.end) : (vaddr += BLOCK_SIZE) { try std.testing.expect(!(try allocations.isSet((vaddr - vmm.start) / BLOCK_SIZE))); } } test "copy" { const 
num_entries = 512; var vmm = try testInit(num_entries); defer testDeinit(&vmm); const attrs = .{ .kernel = true, .cachable = true, .writable = true }; _ = (try vmm.alloc(24, null, attrs)).?; var mirrored = try vmm.copy(); defer mirrored.deinit(); try std.testing.expectEqual(vmm.bmp.num_free_entries, mirrored.bmp.num_free_entries); try std.testing.expectEqual(vmm.start, mirrored.start); try std.testing.expectEqual(vmm.end, mirrored.end); try std.testing.expectEqual(vmm.allocations.count(), mirrored.allocations.count()); var it = vmm.allocations.iterator(); while (it.next()) |next| { for (mirrored.allocations.get(next.key_ptr.*).?.physical.items) |block, i| { try std.testing.expectEqual(block, vmm.allocations.get(next.key_ptr.*).?.physical.items[i]); } } try std.testing.expectEqual(vmm.mapper, mirrored.mapper); try std.testing.expectEqual(vmm.payload, mirrored.payload); // Allocating in the new VMM shouldn't allocate in the mirrored one const alloc1 = (try mirrored.alloc(3, null, attrs)).?; try std.testing.expectEqual(vmm.allocations.count() + 1, mirrored.allocations.count()); try std.testing.expectEqual(vmm.bmp.num_free_entries - 3, mirrored.bmp.num_free_entries); try std.testing.expectError(VmmError.NotAllocated, vmm.virtToPhys(alloc1)); // And vice-versa _ = (try vmm.alloc(3, null, attrs)).?; const alloc3 = (try vmm.alloc(1, null, attrs)).?; const alloc4 = (try vmm.alloc(1, null, attrs)).?; try std.testing.expectEqual(vmm.allocations.count() - 2, mirrored.allocations.count()); try std.testing.expectEqual(vmm.bmp.num_free_entries + 2, mirrored.bmp.num_free_entries); try std.testing.expectError(VmmError.NotAllocated, mirrored.virtToPhys(alloc3)); try std.testing.expectError(VmmError.NotAllocated, mirrored.virtToPhys(alloc4)); } test "copyData from" { var vmm = try testInit(100); defer testDeinit(&vmm); const alloc1_blocks = 1; const alloc = (try vmm.alloc(alloc1_blocks, null, .{ .kernel = true, .writable = true, .cachable = true })) orelse unreachable; var vmm2 = 
try VirtualMemoryManager(arch.VmmPayload).init(vmm.start, vmm.end, std.testing.allocator, test_mapper, arch.KERNEL_VMM_PAYLOAD); defer vmm2.deinit(); var vmm_free_entries = vmm.bmp.num_free_entries; var vmm2_free_entries = vmm2.bmp.num_free_entries; var buff: [4]u8 = [4]u8{ 10, 11, 12, 13 }; try vmm2.copyData(&vmm, true, buff[0..buff.len], alloc); // Make sure they are the same var buff2 = @intToPtr([*]u8, alloc)[0..buff.len]; try std.testing.expectEqualSlices(u8, buff[0..buff.len], buff2); try std.testing.expectEqual(vmm_free_entries, vmm.bmp.num_free_entries); // TODO Remove the subtraction by one once we are able to free the temp space in copyData try std.testing.expectEqual(vmm2_free_entries - 1, vmm2.bmp.num_free_entries); // Test NotAllocated try std.testing.expectError(VmmError.NotAllocated, vmm2.copyData(&vmm, true, buff[0..buff.len], alloc + alloc1_blocks * BLOCK_SIZE)); try std.testing.expectEqual(vmm_free_entries, vmm.bmp.num_free_entries); try std.testing.expectEqual(vmm2_free_entries - 1, vmm2.bmp.num_free_entries); // Test Bitmap.Error.OutOfBounds try std.testing.expectError(bitmap.BitmapError.OutOfBounds, vmm2.copyData(&vmm, true, buff[0..buff.len], vmm.end)); try std.testing.expectError(bitmap.BitmapError.OutOfBounds, vmm.copyData(&vmm2, true, buff[0..buff.len], vmm2.end)); try std.testing.expectEqual(vmm_free_entries, vmm.bmp.num_free_entries); try std.testing.expectEqual(vmm2_free_entries - 1, vmm2.bmp.num_free_entries); } test "copyDaya to" { var vmm = try testInit(100); defer testDeinit(&vmm); const alloc1_blocks = 1; const alloc = (try vmm.alloc(alloc1_blocks, null, .{ .kernel = true, .writable = true, .cachable = true })) orelse unreachable; var vmm2 = try VirtualMemoryManager(arch.VmmPayload).init(vmm.start, vmm.end, std.testing.allocator, test_mapper, arch.KERNEL_VMM_PAYLOAD); defer vmm2.deinit(); var vmm_free_entries = vmm.bmp.num_free_entries; var vmm2_free_entries = vmm2.bmp.num_free_entries; var buff: [4]u8 = [4]u8{ 10, 11, 12, 13 }; var 
buff2 = @intToPtr([*]u8, alloc)[0..buff.len]; try vmm2.copyData(&vmm, false, buff[0..], alloc); try std.testing.expectEqualSlices(u8, buff[0..buff.len], buff2); try std.testing.expectEqual(vmm_free_entries, vmm.bmp.num_free_entries); try std.testing.expectEqual(vmm2_free_entries - 1, vmm2.bmp.num_free_entries); } var test_allocations: ?*bitmap.Bitmap(null, u64) = null; var test_mapper = Mapper(arch.VmmPayload){ .mapFn = testMap, .unmapFn = testUnmap }; /// /// Initialise a virtual memory manager used for testing /// /// Arguments: /// IN num_entries: u32 - The number of entries the VMM should track /// /// Return: VirtualMemoryManager(u8) /// The VMM constructed /// /// Error: Allocator.Error /// OutOfMemory: The allocator couldn't allocate the structures needed /// pub fn testInit(num_entries: u32) Allocator.Error!VirtualMemoryManager(arch.VmmPayload) { if (test_allocations == null) { test_allocations = try std.testing.allocator.create(bitmap.Bitmap(null, u64)); test_allocations.?.* = try bitmap.Bitmap(null, u64).init(num_entries, std.testing.allocator); } else |allocations| { var entry: u32 = 0; while (entry < allocations.num_entries) : (entry += 1) { allocations.clearEntry(entry) catch unreachable; } } const mem_profile = mem.MemProfile{ .vaddr_end = undefined, .vaddr_start = undefined, .physaddr_start = undefined, .physaddr_end = undefined, .mem_kb = num_entries * BLOCK_SIZE / 1024, .fixed_allocator = undefined, .virtual_reserved = &[_]mem.Map{}, .physical_reserved = &[_]mem.Range{}, .modules = &[_]mem.Module{}, }; pmm.init(&mem_profile, std.testing.allocator); const test_vaddr_start = @ptrToInt(&(try std.testing.allocator.alloc(u8, num_entries * BLOCK_SIZE))[0]); kernel_vmm = try VirtualMemoryManager(arch.VmmPayload).init(test_vaddr_start, test_vaddr_start + num_entries * BLOCK_SIZE, std.testing.allocator, test_mapper, arch.KERNEL_VMM_PAYLOAD); return kernel_vmm; } pub fn testDeinit(vmm: *VirtualMemoryManager(arch.VmmPayload)) void { vmm.deinit(); const space 
= @intToPtr([*]u8, vmm.start)[0 .. vmm.end - vmm.start]; vmm.allocator.free(space); if (test_allocations) |allocs| { allocs.deinit(); std.testing.allocator.destroy(allocs); test_allocations = null; } pmm.deinit(); } /// /// A mapping function used when doing unit tests /// /// Arguments: /// IN vstart: usize - The start of the virtual region to map /// IN vend: usize - The end of the virtual region to map /// IN pstart: usize - The start of the physical region to map /// IN pend: usize - The end of the physical region to map /// IN attrs: Attributes - The attributes to map with /// IN/OUT allocator: Allocator - The allocator to use. Ignored /// IN payload: arch.VmmPayload - The payload value. Expected to be arch.KERNEL_VMM_PAYLOAD /// fn testMap(vstart: usize, vend: usize, pstart: usize, pend: usize, attrs: Attributes, allocator: Allocator, payload: arch.VmmPayload) MapperError!void { // Suppress unused var warning _ = attrs; _ = allocator; if (vend - vstart != pend - pstart) return MapperError.AddressMismatch; std.testing.expectEqual(arch.KERNEL_VMM_PAYLOAD, payload) catch unreachable; var vaddr = vstart; var allocations = test_allocations.?; while (vaddr < vend) : (vaddr += BLOCK_SIZE) { allocations.setEntry((vaddr - kernel_vmm.start) / BLOCK_SIZE) catch unreachable; } } /// /// An unmapping function used when doing unit tests /// /// Arguments: /// IN vstart: usize - The start of the virtual region to unmap /// IN vend: usize - The end of the virtual region to unmap /// IN payload: arch.VmmPayload - The payload value. 
Expected to be arch.KERNEL_VMM_PAYLOAD /// fn testUnmap(vstart: usize, vend: usize, allocator: Allocator, payload: arch.VmmPayload) MapperError!void { // Suppress unused var warning _ = allocator; std.testing.expectEqual(arch.KERNEL_VMM_PAYLOAD, payload) catch unreachable; var vaddr = vstart; var allocations = test_allocations.?; while (vaddr < vend) : (vaddr += BLOCK_SIZE) { if (allocations.isSet((vaddr - kernel_vmm.start) / BLOCK_SIZE) catch unreachable) { allocations.clearEntry((vaddr - kernel_vmm.start) / BLOCK_SIZE) catch unreachable; } else { return MapperError.NotMapped; } } } /// /// Run the runtime tests. /// /// Arguments: /// IN comptime Payload: type - The type of the payload passed to the mapper /// IN vmm: VirtualMemoryManager(Payload) - The virtual memory manager to test /// IN mem_profile: *const mem.MemProfile - The mem profile with details about all the memory regions that should be reserved /// IN mb_info: *multiboot.multiboot_info_t - The multiboot info struct that should also be reserved /// pub fn runtimeTests(comptime Payload: type, vmm: *VirtualMemoryManager(Payload), mem_profile: *const mem.MemProfile) void { rt_correctMapping(Payload, vmm, mem_profile); rt_copyData(vmm); } /// /// Test that the correct mappings have been made in the VMM /// /// Arguments: /// IN vmm: VirtualMemoryManager(Payload) - The virtual memory manager to test /// IN mem_profile: *const mem.MemProfile - The mem profile with details about all the memory regions that should be reserved /// fn rt_correctMapping(comptime Payload: type, vmm: *VirtualMemoryManager(Payload), mem_profile: *const mem.MemProfile) void { const v_start = std.mem.alignBackward(@ptrToInt(mem_profile.vaddr_start), BLOCK_SIZE); const v_end = std.mem.alignForward(@ptrToInt(mem_profile.vaddr_end), BLOCK_SIZE); var vaddr = vmm.start; while (vaddr < vmm.end - BLOCK_SIZE) : (vaddr += BLOCK_SIZE) { const set = vmm.isSet(vaddr) catch unreachable; var should_be_set = false; if (vaddr < v_end and vaddr >= 
v_start) { should_be_set = true; } else { for (mem_profile.virtual_reserved) |entry| { if (vaddr >= std.mem.alignBackward(entry.virtual.start, BLOCK_SIZE) and vaddr < std.mem.alignForward(entry.virtual.end, BLOCK_SIZE)) { if (entry.physical) |phys| { const expected_phys = phys.start + (vaddr - entry.virtual.start); if (vmm.virtToPhys(vaddr) catch unreachable != expected_phys) { panic(@errorReturnTrace(), "virtToPhys didn't return the correct physical address for 0x{X} (0x{X})\n", .{ vaddr, vmm.virtToPhys(vaddr) }); } if (vmm.physToVirt(expected_phys) catch unreachable != vaddr) { panic(@errorReturnTrace(), "physToVirt didn't return the correct virtual address for 0x{X} (0x{X})\n", .{ expected_phys, vaddr }); } } should_be_set = true; break; } } } if (set and !should_be_set) { panic(@errorReturnTrace(), "An address was set in the VMM when it shouldn't have been: 0x{x}\n", .{vaddr}); } else if (!set and should_be_set) { panic(@errorReturnTrace(), "An address was not set in the VMM when it should have been: 0x{x}\n", .{vaddr}); } } log.info("Tested allocations\n", .{}); } /// /// Test copying data to and from another VMM /// /// Arguments: /// IN vmm: *VirtualMemoryManager() - The active VMM to test /// fn rt_copyData(vmm: *VirtualMemoryManager(arch.VmmPayload)) void { const expected_free_entries = vmm.bmp.num_free_entries - 1; // Mirror the VMM var vmm2 = vmm.copy() catch |e| { panic(@errorReturnTrace(), "Failed to mirror VMM: {}\n", .{e}); }; // Allocate within secondary VMM const addr = vmm2.alloc(1, null, .{ .kernel = true, .cachable = true, .writable = true }) catch |e| { panic(@errorReturnTrace(), "Failed to allocate within the secondary VMM in rt_copyData: {}\n", .{e}); } orelse panic(@errorReturnTrace(), "Failed to get an allocation within the secondary VMM in rt_copyData\n", .{}); defer vmm2.free(addr) catch |e| { panic(@errorReturnTrace(), "Failed to free the allocation in secondary VMM: {}\n", .{e}); }; const expected_free_entries2 = 
vmm2.bmp.num_free_entries; const expected_free_pmm_entries = pmm.blocksFree(); // Try copying to vmm2 var buff: [6]u8 = [_]u8{ 4, 5, 9, 123, 90, 67 }; vmm.copyData(&vmm2, true, buff[0..buff.len], addr) catch |e| { panic(@errorReturnTrace(), "Failed to copy data to secondary VMM in rt_copyData: {}\n", .{e}); }; // Make sure the function cleaned up if (vmm.bmp.num_free_entries != expected_free_entries) { panic(@errorReturnTrace(), "Expected {} free entries in VMM, but there were {}\n", .{ expected_free_entries, vmm.bmp.num_free_entries }); } if (vmm2.bmp.num_free_entries != expected_free_entries2) { panic(@errorReturnTrace(), "Expected {} free entries in the secondary VMM, but there were {}\n", .{ expected_free_entries2, vmm2.bmp.num_free_entries }); } if (pmm.blocksFree() != expected_free_pmm_entries) { panic(@errorReturnTrace(), "Expected {} free entries in PMM, but there were {}\n", .{ expected_free_pmm_entries, pmm.blocksFree() }); } // Make sure that the data at the allocated address is correct // Since vmm2 is a mirror of vmm, this address should be mapped by the CPU's MMU const dest_buff = @intToPtr([*]u8, addr)[0..buff.len]; if (!std.mem.eql(u8, buff[0..buff.len], dest_buff)) { panic(@errorReturnTrace(), "Data copied to vmm2 doesn't have the expected values\n", .{}); } // Now try copying the same buffer from vmm2 var buff2 = vmm.allocator.alloc(u8, buff.len) catch |e| { panic(@errorReturnTrace(), "Failed to allocate a test buffer in rt_copyData: {}\n", .{e}); }; vmm.copyData(&vmm2, false, buff2, addr) catch |e| { panic(@errorReturnTrace(), "Failed to copy data from secondary VMM in rt_copyData: {}\n", .{e}); }; if (!std.mem.eql(u8, buff[0..buff.len], buff2)) { panic(@errorReturnTrace(), "Data copied from vmm2 doesn't have the expected values\n", .{}); } // Make sure that a second copy will succeed const addr2 = vmm2.alloc(1, null, .{ .kernel = true, .cachable = true, .writable = true }) catch |e| { panic(@errorReturnTrace(), "Failed to allocate within the 
secondary VMM in rt_copyData: {}\n", .{e}); } orelse panic(@errorReturnTrace(), "Failed to get an allocation within the secondary VMM in rt_copyData\n", .{}); defer vmm2.free(addr2) catch |e| { panic(@errorReturnTrace(), "Failed to free the allocation in secondary VMM: {}\n", .{e}); }; const expected_free_entries3 = vmm2.bmp.num_free_entries; const expected_free_pmm_entries3 = pmm.blocksFree(); // Try copying to vmm2 var buff3: [6]u8 = [_]u8{ 3, 9, 0, 12, 50, 7 }; vmm.copyData(&vmm2, true, buff3[0..buff3.len], addr) catch |e| { panic(@errorReturnTrace(), "Failed to copy third lot of data to secondary VMM in rt_copyData: {}\n", .{e}); }; // Make sure the function cleaned up if (vmm.bmp.num_free_entries != expected_free_entries - 2) { panic(@errorReturnTrace(), "Expected {} free entries in VMM after third copy, but there were {}\n", .{ expected_free_entries - 2, vmm.bmp.num_free_entries }); } if (vmm2.bmp.num_free_entries != expected_free_entries3) { panic(@errorReturnTrace(), "Expected {} free entries in the secondary VMM after third copy, but there were {}\n", .{ expected_free_entries2, vmm2.bmp.num_free_entries }); } if (pmm.blocksFree() != expected_free_pmm_entries3) { panic(@errorReturnTrace(), "Expected {} free entries in PMM after third copy, but there were {}\n", .{ expected_free_pmm_entries, pmm.blocksFree() }); } // Make sure that the data at the allocated address is correct // Since vmm2 is a mirror of vmm, this address should be mapped by the CPU's MMU if (!std.mem.eql(u8, buff3[0..buff3.len], dest_buff)) { panic(@errorReturnTrace(), "Third lot of data copied doesn't have the expected values\n", .{}); } }
0
repos/pluto/src
repos/pluto/src/kernel/elf.zig
const std = @import("std");
const builtin = @import("builtin");
const Arch = std.Target.Cpu.Arch;
const Endian = std.builtin.Endian;
const log = std.log.scoped(.elf);
const testing = std.testing;

/// The data sizes that ELF files support. The int value corresponds to the value used in the file
pub const DataSize = enum(u8) {
    /// 32-bit
    ThirtyTwoBit = 1,
    /// 64-bit
    SixtyFourBit = 2,

    ///
    /// Get the number of bits taken by the data size
    ///
    /// Arguments:
    ///     IN self: DataSize - The data size to get the number of bits for
    ///
    /// Return: usize
    ///     The number of bits
    ///
    pub fn toNumBits(self: @This()) usize {
        return switch (self) {
            .ThirtyTwoBit => 32,
            .SixtyFourBit => 64,
        };
    }
};

/// The endiannesses supported by Elf files. The int value corresponds to the value used in the file
pub const Endianness = enum(u8) {
    /// Little-endian
    Little = 1,
    /// Big-endian
    Big = 2,

    ///
    /// Translate into the corresponding std lib Endian
    ///
    /// Arguments:
    ///     IN self: Endianness - The endianness to translate
    ///
    /// Return: Endian
    ///     The corresponding std lib Endian value
    ///
    pub fn toEndian(self: @This()) Endian {
        return switch (self) {
            .Big => .Big,
            .Little => .Little,
        };
    }
};

/// The type of the elf file. The int value corresponds to the value used in the file
pub const Type = enum(u16) {
    /// Unused
    None = 0,
    /// Relocatable
    Rel = 1,
    /// Executable
    Executable = 2,
    /// Dynamic linking
    Dynamic = 3,
    /// Core dump
    Core = 4,
    /// OS-specific
    LowOS = 0xFE00,
    /// OS-specific
    HighOS = 0xFEFF,
    /// CPU-specific
    LowCPU = 0xFF00,
    /// CPU-specific
    HighCPU = 0xFFFF,
};

/// The architectures supported by ELF
pub const Architecture = enum(u16) {
    /// No specific instruction set
    None = 0,
    WE32100 = 1,
    Sparc = 2,
    x86 = 3,
    Motoroloa_68k = 4,
    Motorola_88k = 5,
    Intel_MCU = 6,
    Intel_80860 = 7,
    MIPS = 8,
    IBM_370 = 9,
    MIPS_RS3000_LE = 10,
    Reserved1 = 11,
    Reserved2 = 12,
    Reserved3 = 13,
    HP_PA_RISC = 14,
    Reserbed4 = 15,
    Intel_80960 = 19,
    PowerPC = 20,
    PowerPC_64 = 21,
    S390 = 22,
    ARM = 0x28,
    SuperH = 0x2A,
    IA_64 = 0x32,
    AMD_64 = 0x3E,
    TMS = 0x8C,
    Aarch64 = 0xB7,
    RISC_V = 0xF3,
    WDC_65C816 = 0x101,

    ///
    /// Translate to a std lib Arch
    ///
    /// Arguments:
    ///     IN self: Architecture - The architecture to translate
    ///
    /// Return: Arch
    ///     The corresponding std lib Arch
    ///
    /// Error: Error
    ///     Error.UnknownArchitecture - The architecture has no std lib Arch equivalent
    ///
    pub fn toArch(self: @This()) Error!Arch {
        return switch (self) {
            // Architectures with no (or unsupported) std lib mapping
            .None, .TMS, .WDC_65C816, .SuperH, .IA_64, .S390, .IBM_370, .Reserved1, .Reserved2, .WE32100, .Motorola_88k, .Motoroloa_68k, .Intel_MCU, .Intel_80860, .Intel_80960, .MIPS_RS3000_LE, .HP_PA_RISC, .Reserved3, .Reserbed4 => Error.UnknownArchitecture,
            .Sparc => .sparc,
            .x86 => .i386,
            .MIPS => .mips,
            .PowerPC => .powerpc,
            .PowerPC_64 => .powerpc64,
            .ARM => .arm,
            .AMD_64 => .x86_64,
            .Aarch64 => .aarch64,
            .RISC_V => .riscv32,
        };
    }
};

/// The header describing the entire ELF file
/// NOTE(review): this relies on packed-struct field order matching the on-disk ELF layout,
/// with usize-width fields selecting the 32/64-bit variant — confirm on each Zig upgrade.
pub const Header = packed struct {
    /// Should be 0x7f | 0x45 | 0x4C | 0x46
    magic_number: u32,
    /// The size of the fields in the header after file_type. Should be the size/s compatibile with the machine
    data_size: DataSize,
    /// The endianness of the fields in the header after file_type. Should be the endianness compatible with the machine
    endianness: Endianness,
    /// ELF version. Set to 1
    version: u8,
    /// The target OS' ABI. Normally set to 0 no matter the platform, so we ignore it
    abi: u8,
    /// The version of the above ABI. Mostly ignored but some toolchains put expected linker features here
    abi_version: u8,
    /// All zeroes
    padding: u32,
    padding2: u16,
    padding3: u8,
    /// The type of elf file
    file_type: Type,
    /// The target architecture. Should be compatible with the machine
    architecture: Architecture,
    /// Same as above version field
    version2: u32,
    /// Execution entry point
    entry_address: usize,
    /// Offset of the program header form the start of the elf file
    program_header_offset: usize,
    /// offset of the section header from the start of the elf file
    section_header_offset: usize,
    /// Architecture-dependent flags
    flags: u32,
    /// The size of this elf header in bytes. 64 for the 64-bit format and 52 for the 32-bit format
    elf_header_size: u16,
    /// The size of a program header table entry
    program_header_entry_size: u16,
    /// The number of entries in the program header table
    program_header_entries: u16,
    /// The size of a section header table entry
    section_header_entry_size: u16,
    /// The number of entries in the section header table
    section_header_entries: u16,
    /// The index into the section header table that contains the section names
    section_name_index: u16,
};

/// The type of a program header entry
pub const ProgramEntryType = enum(u32) {
    Unused = 0,
    /// Should be loaded into memory
    Loadable = 1,
    /// Dynamic linking info
    Dynamic = 2,
    /// Interpreter info
    InterpreterInfo = 3,
    /// Extra information used depending on the elf type
    Auxiliary = 4,
    Reserved = 5,
    /// Entry containing the program header table
    ProgramHeader = 6,
    /// Info for thread-local storage
    ThreadLocalStorage = 7,
    /// Gnu toolchain-specific information
    GnuStack = 0x6474E551,
    /// OS-specific info
    LowOS = 0x60000000,
    /// OS-specific info
    HighOS = 0x6FFFFFFF,
    /// CPU-specific info
    LowCPU = 0x70000000,
    /// CPU-specific info
    HighCPU = 0x7FFFFFFF,
};

/// The header desribing the program entries
pub const ProgramHeader = packed struct {
    /// The type of the entry
    entry_type: ProgramEntryType,
    /// Entry type-specific flags for 64-bit ELF files (zero-width on 32-bit targets)
    flags_64bit: if (@bitSizeOf(usize) == 32) u0 else u32,
    /// Offset of the entry within the ELF file
    offset: usize,
    /// The virtual address associated with the entry
    virtual_address: usize,
    /// The physical address associated with the entry, if applicable
    physical_address: usize,
    /// Size of the entry in the file
    file_size: usize,
    /// Size of the entry in memory
    mem_size: usize,
    /// Entry type-specific flags for 32-bit ELF files (zero-width on 64-bit targets)
    flags_32bit: if (@bitSizeOf(usize) == 64) u0 else u32,
    /// Alignment of the entry
    alignment: usize,
};

/// The type of section
pub const SectionType = enum(u32) {
    Unused = 0,
    /// Executable code
    ProgramData = 1,
    /// The symbol table
    SymbolTable = 2,
    /// The table containing all strings used by other sections
    StringTable = 3,
    /// Relocation data with addends
    RelocationWithAddends = 4,
    /// The symbol hash table
    SymbolHashTable = 5,
    /// Dynamic linking info
    Dynamic = 6,
    /// Extra information
    Auxiliary = 7,
    /// Space within the program, normally used to store data
    ProgramSpace = 8,
    /// Relocation data without addends
    RelocationWithoutAddends = 9,
    Reserved = 10,
    /// The dynamic linker symbol table
    DynamicSymbolTable = 11,
    /// List of constructors
    Constructors = 14,
    /// List of destructors
    Destructors = 15,
    /// List of pre-contructors
    PreConstructors = 16,
    /// A group of sections
    SectionGroup = 17,
    /// Extended section indices
    ExtendedSectionIndices = 18,
    /// The number of defined types
    NumberDefinedType = 19,
    /// OS-specific
    LowOS = 0x60000000,

    ///
    /// Check if the section has an associated chunk of data in the ELF file
    ///
    /// Arguments:
    ///     IN self: SectionType - The section type
    ///
    /// Return: bool
    ///     Whether the section type has associated data
    ///
    pub fn hasData(self: @This()) bool {
        return switch (self) {
            .Unused, .ProgramSpace, .Reserved => false,
            else => true,
        };
    }
};

/// The section is writable
pub const SECTION_WRITABLE = 1;
/// The section occupies memory during execution
pub const SECTION_ALLOCATABLE = 2;
/// The section is executable
pub const SECTION_EXECUTABLE = 4;
/// The section may be merged
pub const SECTION_MERGED = 16;
/// The section contains strings
pub const SECTION_HAS_STRINGS = 32;
/// Contains a SHT index
pub const SECTION_INFO_LINK = 64;
/// Preserve the section order after combining
pub const SECTION_PRESERVE_ORDER = 128;
/// Non-standard OS-specific handling is required
pub const SECTION_OS_NON_STANDARD = 256;
/// Member of a group
pub const SECTION_GROUP = 512;
/// The section contains thread-local data
pub const SECTION_THREAD_LOCAL_DATA = 1024;
/// OS-specific
pub const SECTION_OS_MASK = 0x0FF00000;
/// CPU-specific
pub const SECTION_CPU_MASK = 0xF0000000;

/// The header for an ELF section
pub const SectionHeader = packed struct {
    /// Offset into the string table of the section's name
    name_offset: u32,
    /// The section's type
    section_type: SectionType,
    /// Flags for this section (SECTION_* values above)
    flags: usize,
    /// The virtual address at which this section should be loaded (if it is loadable)
    virtual_address: usize,
    /// Offset of the section's data into the file
    offset: usize,
    /// The size of the section's data
    size: usize,
    /// An associated section. Usage depends on the section type
    linked_section_idx: u32,
    /// Extra info. Usage depends on the section type
    info: u32,
    /// The section's alignment
    alignment: usize,
    /// The size of each entry within this section, for sections that contain sub-entries
    entry_size: usize,

    const Self = @This();

    ///
    /// Find the name of this section from the ELF's string table.
/// /// Arguments: /// IN self: SectionHeader - The header to get the name for /// IN elf: Elf - The elf file /// /// Return: []const u8 /// The name of the section /// pub fn getName(self: Self, elf: Elf) []const u8 { // section_name_index has already been checked so will exist const string_table = elf.section_data[elf.header.section_name_index] orelse unreachable; const str = @ptrCast([*]const u8, string_table.ptr + self.name_offset); var len: usize = 0; while (str[len] != 0) : (len += 1) {} const name = str[0..len]; return name; } }; /// A loaded ELF file pub const Elf = struct { /// The ELF header that describes the entire file header: Header, /// The program entry headers program_headers: []ProgramHeader, /// The section headers section_headers: []SectionHeader, /// The data associated with each section, or null if a section doesn't have a data area section_data: []?[]const u8, /// The allocator used allocator: std.mem.Allocator, const Self = @This(); /// /// Load and initialise from a data stream for a specific architecture /// /// Arguments: /// IN elf_data: []const u8 - The data stream to load the elf information from /// IN arch: Arch - The intended architecture to load for /// IN allocator: Allocator - The allocator to use when needing memory /// /// Return: Elf /// The loaded ELF file /// /// Error: Allocator.Error || Error /// Allocator.Error - There wasn't enough memory free to allocate the required state /// Error.InvalidMagicNumber - The ELF file magic number wasn't as expected /// Error.InvalidArchitecture - The ELF file wasn't built for the expected architecture /// Error.InvalidDataSize - The ELF file wasn't built for the data size supported by the given architecture /// Error.InvalidEndianness - The ELF file wasn't built with the endianness supported by the given architecture /// Error.WrongStringTableIndex - The string table index in the header does not point to a StringTable section /// pub fn init(elf_data: []const u8, arch: Arch, allocator: 
std.mem.Allocator) (std.mem.Allocator.Error || Error)!Self { const header = std.mem.bytesToValue(Header, elf_data[0..@sizeOf(Header)]); if (header.magic_number != 0x464C457F) { return Error.InvalidMagicNumber; } if ((try header.architecture.toArch()) != arch) { return Error.InvalidArchitecture; } if (header.data_size.toNumBits() != @bitSizeOf(usize)) { return Error.InvalidDataSize; } if (header.endianness.toEndian() != arch.endian()) { return Error.InvalidEndianness; } if (header.section_name_index >= header.section_header_entries) return Error.WrongStringTableIndex; var program_segments = try allocator.alloc(ProgramHeader, header.program_header_entries); errdefer allocator.free(program_segments); var seg_offset = header.program_header_offset; for (program_segments) |*segment| { segment.* = @ptrCast(*const ProgramHeader, elf_data.ptr + seg_offset).*; seg_offset += header.program_header_entry_size; } var section_headers = try allocator.alloc(SectionHeader, header.section_header_entries); errdefer allocator.free(section_headers); var section_data = try allocator.alloc(?[]const u8, header.section_header_entries); errdefer allocator.free(section_data); var sec_offset = header.section_header_offset; for (section_headers) |*section, i| { section.* = std.mem.bytesToValue(SectionHeader, (elf_data.ptr + sec_offset)[0..@sizeOf(SectionHeader)]); section_data[i] = if (section.section_type.hasData()) elf_data[section.offset .. 
section.offset + section.size] else null; sec_offset += header.section_header_entry_size; } if (section_headers[header.section_name_index].section_type != .StringTable) { return Error.WrongStringTableIndex; } return Elf{ .header = header, .program_headers = program_segments, .section_headers = section_headers, .section_data = section_data, .allocator = allocator, }; } pub fn deinit(self: *const Self) void { self.allocator.free(self.section_data); self.allocator.free(self.section_headers); self.allocator.free(self.program_headers); } }; pub const Error = error{ UnknownArchitecture, InvalidArchitecture, InvalidDataSize, InvalidMagicNumber, InvalidEndianness, WrongStringTableIndex, }; fn testSetHeader(data: []u8, header: Header) void { std.mem.copy(u8, data, @ptrCast([*]const u8, &header)[0..@sizeOf(Header)]); } fn testSetSection(data: []u8, header: SectionHeader, idx: usize) void { const offset = @sizeOf(Header) + @sizeOf(SectionHeader) * idx; var dest = data[offset .. offset + @sizeOf(SectionHeader)]; std.mem.copy(u8, dest, @ptrCast([*]const u8, &header)[0..@sizeOf(SectionHeader)]); } pub fn testInitData(allocator: std.mem.Allocator, section_name: []const u8, string_section_name: []const u8, file_type: Type, entry_address: usize, flags: u32, section_flags: u32, strings_flags: u32, section_address: usize, strings_address: usize) ![]u8 { const is_32_bit = @bitSizeOf(usize) == 32; const header_size = if (is_32_bit) 0x34 else 0x40; const p_header_size = if (is_32_bit) 0x20 else 0x38; const s_header_size = if (is_32_bit) 0x28 else 0x40; const section_size = 1024; const data_size = header_size + s_header_size + s_header_size + section_name.len + 1 + string_section_name.len + 1 + section_size; var data = try allocator.alloc(u8, data_size); var header = Header{ .magic_number = 0x464C457F, .data_size = switch (@bitSizeOf(usize)) { 32 => .ThirtyTwoBit, 64 => .SixtyFourBit, else => unreachable, }, .endianness = switch (builtin.cpu.arch.endian()) { .Big => .Big, .Little => 
.Little, }, .version = 1, .abi = 0, .abi_version = 0, .padding = 0, .padding2 = 0, .padding3 = 0, .file_type = file_type, .architecture = switch (builtin.cpu.arch) { .i386 => .x86, .x86_64 => .AMD_64, else => unreachable, }, .version2 = 1, .entry_address = entry_address, .program_header_offset = undefined, .section_header_offset = header_size, .flags = flags, .elf_header_size = header_size, .program_header_entry_size = p_header_size, .program_header_entries = 0, .section_header_entry_size = s_header_size, .section_header_entries = 2, .section_name_index = 1, }; var data_offset: usize = 0; testSetHeader(data, header); data_offset += header_size; var section_header = SectionHeader{ .name_offset = 0, .section_type = .ProgramData, .flags = section_flags, .virtual_address = section_address, .offset = data_offset + s_header_size + s_header_size, .size = section_size, .linked_section_idx = undefined, .info = undefined, .alignment = 1, .entry_size = undefined, }; testSetSection(data, section_header, 0); data_offset += s_header_size; var string_section_header = SectionHeader{ .name_offset = @intCast(u32, section_name.len) + 1, .section_type = .StringTable, .flags = strings_flags, .virtual_address = strings_address, .offset = data_offset + s_header_size + section_size, .size = section_name.len + 1 + string_section_name.len + 1, .linked_section_idx = undefined, .info = undefined, .alignment = 1, .entry_size = undefined, }; testSetSection(data, string_section_header, 1); data_offset += s_header_size; std.mem.set(u8, data[data_offset .. data_offset + section_size], 0); data_offset += section_size; std.mem.copy(u8, data[data_offset .. data_offset + section_name.len], section_name); data_offset += section_name.len; data[data_offset] = 0; data_offset += 1; std.mem.copy(u8, data[data_offset .. 
data_offset + string_section_name.len], string_section_name); data_offset += string_section_name.len; data[data_offset] = 0; data_offset += 1; return data; } test "init" { const section_name = "some_section"; const string_section_name = "strings"; const is_32_bit = @bitSizeOf(usize) == 32; var data = try testInitData(testing.allocator, section_name, string_section_name, .Executable, 0, 0, 123, 789, 456, 012); defer testing.allocator.free(data); const elf = try Elf.init(data, builtin.cpu.arch, testing.allocator); defer elf.deinit(); try testing.expectEqual(elf.header.data_size, if (is_32_bit) .ThirtyTwoBit else .SixtyFourBit); try testing.expectEqual(elf.header.file_type, .Executable); try testing.expectEqual(elf.header.architecture, switch (builtin.cpu.arch) { .i386 => .x86, .x86_64 => .AMD_64, else => unreachable, }); try testing.expectEqual(elf.header.entry_address, 0); try testing.expectEqual(elf.header.flags, 0); try testing.expectEqual(elf.header.section_name_index, 1); try testing.expectEqual(elf.program_headers.len, 0); try testing.expectEqual(elf.section_headers.len, 2); const section_one = elf.section_headers[0]; try testing.expectEqual(@as(u32, 0), section_one.name_offset); try testing.expectEqual(SectionType.ProgramData, section_one.section_type); try testing.expectEqual(@as(usize, 123), section_one.flags); try testing.expectEqual(@as(usize, 456), section_one.virtual_address); const section_two = elf.section_headers[1]; try testing.expectEqual(section_name.len + 1, section_two.name_offset); try testing.expectEqual(SectionType.StringTable, section_two.section_type); try testing.expectEqual(@as(usize, 789), section_two.flags); try testing.expectEqual(@as(usize, 012), section_two.virtual_address); try testing.expectEqual(@as(usize, 2), elf.section_data.len); try testing.expectEqual(elf.section_headers[0].size, elf.section_data[0].?.len); for ("some_section" ++ [_]u8{0} ++ "strings" ++ [_]u8{0}) |char, i| { try testing.expectEqual(char, 
elf.section_data[1].?[i]); } // Test the string section having the wrong type var section_header = elf.section_headers[1]; section_header.section_type = .ProgramData; testSetSection(data, section_header, 1); try testing.expectError(Error.WrongStringTableIndex, Elf.init(data, builtin.cpu.arch, testing.allocator)); testSetSection(data, elf.section_headers[1], 1); // Test the section_name_index being out of bounds var header = elf.header; header.section_name_index = 3; testSetHeader(data, header); try testing.expectError(Error.WrongStringTableIndex, Elf.init(data, builtin.cpu.arch, testing.allocator)); // Test incorrect endianness header = elf.header; header.endianness = switch (builtin.cpu.arch.endian()) { .Big => .Little, .Little => .Big, }; testSetHeader(data, header); try testing.expectError(Error.InvalidEndianness, Elf.init(data, builtin.cpu.arch, testing.allocator)); // Test invalid data size header.data_size = switch (@bitSizeOf(usize)) { 32 => .SixtyFourBit, else => .ThirtyTwoBit, }; testSetHeader(data, header); try testing.expectError(Error.InvalidDataSize, Elf.init(data, builtin.cpu.arch, testing.allocator)); // Test invalid architecture header.architecture = switch (builtin.cpu.arch) { .x86_64 => .Aarch64, else => .AMD_64, }; testSetHeader(data, header); try testing.expectError(Error.InvalidArchitecture, Elf.init(data, builtin.cpu.arch, testing.allocator)); // Test incorrect magic number header.magic_number = 123; testSetHeader(data, header); try testing.expectError(Error.InvalidMagicNumber, Elf.init(data, builtin.cpu.arch, testing.allocator)); } test "getName" { // The entire ELF test data. 
The header, program header, two section headers and the section name (with the null terminator) var section_name = "some_section"; var string_section_name = "strings"; const data = try testInitData(testing.allocator, section_name, string_section_name, .Executable, 0, undefined, undefined, undefined, undefined, undefined); defer testing.allocator.free(data); const elf = try Elf.init(data, builtin.cpu.arch, testing.allocator); defer elf.deinit(); try testing.expectEqualSlices(u8, elf.section_headers[0].getName(elf), section_name); try testing.expectEqualSlices(u8, elf.section_headers[1].getName(elf), string_section_name); } test "toNumBits" { try testing.expectEqual(DataSize.ThirtyTwoBit.toNumBits(), 32); try testing.expectEqual(DataSize.SixtyFourBit.toNumBits(), 64); } test "toEndian" { try testing.expectEqual(Endianness.Little.toEndian(), Endian.Little); try testing.expectEqual(Endianness.Big.toEndian(), Endian.Big); } test "toArch" { inline for (@typeInfo(Architecture).Enum.fields) |field| { const architecture = @field(Architecture, field.name); const is_known = switch (architecture) { .Sparc, .x86, .MIPS, .PowerPC, .PowerPC_64, .ARM, .AMD_64, .Aarch64, .RISC_V => true, else => false, }; if (!is_known) { try testing.expectError(Error.UnknownArchitecture, architecture.toArch()); } else { try testing.expectEqual(architecture.toArch(), switch (architecture) { .Sparc => .sparc, .x86 => .i386, .MIPS => .mips, .PowerPC => .powerpc, .PowerPC_64 => .powerpc64, .ARM => .arm, .AMD_64 => .x86_64, .Aarch64 => .aarch64, .RISC_V => .riscv32, else => unreachable, }); } } } test "hasData" { inline for (@typeInfo(SectionType).Enum.fields) |field| { const sec_type = @field(SectionType, field.name); const should_not_have_data = sec_type == .Unused or sec_type == .ProgramSpace or sec_type == .Reserved; try testing.expectEqual(should_not_have_data, !sec_type.hasData()); } }
0
repos/pluto/src
repos/pluto/src/kernel/pmm.zig
const is_test = @import("builtin").is_test;
const std = @import("std");
const log = std.log.scoped(.pmm);
const build_options = @import("build_options");
const arch = @import("arch.zig").internals;
const MemProfile = @import("mem.zig").MemProfile;
const testing = std.testing;
const panic = @import("panic.zig").panic;
const bitmap = @import("bitmap.zig");
const Bitmap = bitmap.Bitmap;
const Allocator = std.mem.Allocator;

/// One bit per physical memory block, stored in u32 words
const PmmBitmap = Bitmap(null, u32);

/// The possible errors thrown by bitmap functions
const PmmError = error{
    /// The address given hasn't been allocated
    NotAllocated,
};

/// The size of memory associated with each bitmap entry
pub const BLOCK_SIZE: usize = arch.MEMORY_BLOCK_SIZE;

/// The single global bitmap tracking which physical blocks are occupied
var the_bitmap: PmmBitmap = undefined;

///
/// Set the bitmap entry for an address as occupied
///
/// Arguments:
///     IN addr: usize - The address.
///
/// Error: PmmBitmap.BitmapError.
///     *: See PmmBitmap.setEntry. Could occur if the address is out of bounds.
///
pub fn setAddr(addr: usize) bitmap.BitmapError!void {
    try the_bitmap.setEntry(@intCast(u32, addr / BLOCK_SIZE));
}

///
/// Check if an address is set as occupied.
///
/// Arguments:
///     IN addr: usize - The address to check.
///
/// Return: True if occupied, else false.
///
/// Error: PmmBitmap.BitmapError.
///     *: See PmmBitmap.setEntry. Could occur if the address is out of bounds.
///
pub fn isSet(addr: usize) bitmap.BitmapError!bool {
    return the_bitmap.isSet(@intCast(u32, addr / BLOCK_SIZE));
}

///
/// Find the next free memory block, set it as occupied and return it. The region allocated will be of size BLOCK_SIZE.
///
/// Return: The address that was allocated, or null if no blocks are free.
///
pub fn alloc() ?usize {
    if (the_bitmap.setFirstFree()) |entry| {
        return entry * BLOCK_SIZE;
    }
    return null;
}

///
/// Set the address as free so it can be allocated in the future. This will free a block of size BLOCK_SIZE.
///
/// Arguments:
///     IN addr: usize - The previously allocated address to free. Will be aligned down to the nearest multiple of BLOCK_SIZE.
///
/// Error: PmmError || PmmBitmap.BitmapError.
///     PmmError.NotAllocated: The address wasn't allocated.
///     PmmBitmap.BitmapError.OutOfBounds: The address given was out of bounds.
///
pub fn free(addr: usize) (bitmap.BitmapError || PmmError)!void {
    const idx = @intCast(u32, addr / BLOCK_SIZE);
    // Reject double-frees / frees of never-allocated blocks explicitly
    if (try the_bitmap.isSet(idx)) {
        try the_bitmap.clearEntry(idx);
    } else {
        return PmmError.NotAllocated;
    }
}

///
/// Get the number of unallocated blocks of memory.
///
/// Return: usize.
///     The number of unallocated blocks of memory
///
pub fn blocksFree() usize {
    return the_bitmap.num_free_entries;
}

/// Initialise the physical memory manager and set all unavailable regions as occupied (those from the memory map and those from the linker symbols).
///
/// Arguments:
///     IN mem: *const MemProfile - The system's memory profile.
///     IN allocator: Allocator - The allocator to use to allocate the bitmaps.
///
pub fn init(mem_profile: *const MemProfile, allocator: Allocator) void {
    log.info("Init\n", .{});
    defer log.info("Done\n", .{});

    the_bitmap = PmmBitmap.init(mem_profile.mem_kb * 1024 / BLOCK_SIZE, allocator) catch |e| {
        panic(@errorReturnTrace(), "Bitmap allocation failed: {}\n", .{e});
    };

    // Occupy the regions of memory that the memory map describes as reserved
    for (mem_profile.physical_reserved) |entry| {
        var addr = std.mem.alignBackward(entry.start, BLOCK_SIZE);
        var end = entry.end - 1;
        // If the end address can be aligned without overflowing then align it.
        // Otherwise leave it unaligned so `alignForward` cannot wrap past maxInt(usize).
        if (end <= std.math.maxInt(usize) - BLOCK_SIZE) {
            end = std.mem.alignForward(end, BLOCK_SIZE);
        }
        while (addr < end) : (addr += BLOCK_SIZE) {
            setAddr(addr) catch |e| switch (e) {
                // We can ignore out of bounds errors as the memory won't be available anyway
                bitmap.BitmapError.OutOfBounds => break,
                else => panic(@errorReturnTrace(), "Failed setting address 0x{x} from memory map as occupied: {}", .{ addr, e }),
            };
        }
    }
    switch (build_options.test_mode) {
        .Initialisation => runtimeTests(mem_profile, allocator),
        else => {},
    }
}

///
/// Free the internal state of the PMM. Is unusable afterwards unless re-initialised
///
pub fn deinit() void {
    the_bitmap.deinit();
}

test "alloc" {
    the_bitmap = try Bitmap(null, u32).init(32, testing.allocator);
    defer the_bitmap.deinit();
    comptime var addr = 0;
    comptime var i = 0;
    // Allocate all entries, making sure they succeed and return the correct addresses
    inline while (i < 32) : ({
        i += 1;
        addr += BLOCK_SIZE;
    }) {
        try testing.expect(!(try isSet(addr)));
        try testing.expect(alloc().? == addr);
        try testing.expect(try isSet(addr));
        try testing.expectEqual(blocksFree(), 31 - i);
    }
    // Allocation should now fail
    try testing.expect(alloc() == null);
}

test "free" {
    the_bitmap = try Bitmap(null, u32).init(32, testing.allocator);
    defer the_bitmap.deinit();
    comptime var i = 0;
    // Allocate and free all entries
    inline while (i < 32) : (i += 1) {
        const addr = alloc().?;
        try testing.expect(try isSet(addr));
        try testing.expectEqual(blocksFree(), 31);
        try free(addr);
        try testing.expectEqual(blocksFree(), 32);
        try testing.expect(!(try isSet(addr)));
        // Double frees should be caught
        try testing.expectError(PmmError.NotAllocated, free(addr));
    }
}

test "setAddr and isSet" {
    const num_entries: u32 = 32;
    the_bitmap = try Bitmap(null, u32).init(num_entries, testing.allocator);
    defer the_bitmap.deinit();
    var addr: u32 = 0;
    var i: u32 = 0;
    while (i < num_entries) : ({
        i += 1;
        addr += BLOCK_SIZE;
    }) {
        // Ensure all previous blocks are still set
        var h: u32 = 0;
        var addr2: u32 = 0;
        while (h < i) : ({
            h += 1;
            addr2 += BLOCK_SIZE;
        }) {
            try testing.expect(try isSet(addr2));
        }

        try testing.expectEqual(blocksFree(), num_entries - i);
        // Set the current block
        try setAddr(addr);
        try testing.expect(try isSet(addr));
        try testing.expectEqual(blocksFree(), num_entries - i - 1);

        // Ensure all successive entries are not set
        var j: u32 = i + 1;
        var addr3: u32 = addr + BLOCK_SIZE;
        while (j < num_entries) : ({
            j += 1;
            addr3 += BLOCK_SIZE;
        }) {
            try testing.expect(!try isSet(addr3));
        }
    }
}

///
/// Allocate all blocks and make sure they don't overlap with any reserved addresses.
///
/// Arguments:
///     IN mem_profile: *const MemProfile - The memory profile to check for reserved memory regions.
///     IN/OUT allocator: Allocator - The allocator to use when needing to create intermediate structures used for testing
///
fn runtimeTests(mem_profile: *const MemProfile, allocator: Allocator) void {
    // Make sure that occupied memory can't be allocated
    var prev_alloc: usize = std.math.maxInt(usize);
    var alloc_list = std.ArrayList(usize).init(allocator);
    defer alloc_list.deinit();
    while (alloc()) |alloced| {
        if (prev_alloc == alloced) {
            panic(null, "FAILURE: PMM allocated the same address twice: 0x{x}", .{alloced});
        }
        prev_alloc = alloced;
        for (mem_profile.physical_reserved) |entry| {
            var addr = std.mem.alignBackward(@intCast(usize, entry.start), BLOCK_SIZE);
            if (addr == alloced) {
                panic(null, "FAILURE: PMM allocated an address that should be reserved by the memory map: 0x{x}", .{addr});
            }
        }
        alloc_list.append(alloced) catch |e| {
            panic(@errorReturnTrace(), "FAILURE: Failed to add PMM allocation to list: {}", .{e});
        };
    }
    // Clean up
    for (alloc_list.items) |alloced| {
        free(alloced) catch |e| {
            panic(@errorReturnTrace(), "FAILURE: Failed freeing allocation in PMM rt test: {}", .{e});
        };
    }
    log.info("Tested allocation\n", .{});
}
0
repos/pluto/src
repos/pluto/src/kernel/scheduler.zig
const std = @import("std");
const expectEqual = std.testing.expectEqual;
const expectError = std.testing.expectError;
const assert = std.debug.assert;
const log = std.log.scoped(.scheduler);
const builtin = @import("builtin");
const is_test = builtin.is_test;
const build_options = @import("build_options");
const arch = @import("arch.zig").internals;
const panic = @import("panic.zig").panic;
const task = @import("task.zig");
const vmm = @import("vmm.zig");
const mem = @import("mem.zig");
const fs = @import("filesystem/vfs.zig");
const elf = @import("elf.zig");
const pmm = @import("pmm.zig");
const Task = task.Task;
const EntryPoint = task.EntryPoint;
const Allocator = std.mem.Allocator;
const TailQueue = std.TailQueue;

/// The default stack size of a task, expressed as a number of usize-sized words.
/// Currently this is one memory block (page) worth of words.
const STACK_SIZE: u32 = arch.MEMORY_BLOCK_SIZE / @sizeOf(usize);

/// Pointer to the start of the main kernel stack
extern var KERNEL_STACK_START: []u32;
extern var KERNEL_STACK_END: []u32;

/// The current task running
pub var current_task: *Task = undefined;

/// Queue of all runnable tasks (round-robin order)
var tasks: TailQueue(*Task) = undefined;

/// Whether the scheduler is allowed to switch tasks.
var can_switch: bool = true;

///
/// The idle task that just halts the CPU but the CPU can still handle interrupts.
///
fn idle() noreturn {
    arch.spinWait();
}

///
/// Enable or disable task switching in pickNextTask.
///
/// Arguments:
///     IN enabled: bool - Whether the scheduler may switch tasks.
///
pub fn taskSwitching(enabled: bool) void {
    can_switch = enabled;
}

///
/// Round robin. This will first save the the current tasks stack pointer, then will pick the next
/// task to be run from the queue. It will re-insert the current task into the queue and take the
/// next task out as set this as the current task. Then will return the stack pointer
/// of the next task to be loaded into the stack register to load the next task stack to pop off
/// its state. Interrupts are assumed disabled.
///
/// Argument:
///     IN ctx: *arch.CpuState - Pointer to the exception context containing the contents
///                              of the registers at the time of a exception.
///
/// Return: usize
///     The new stack pointer to the next stack of the next task.
///
pub fn pickNextTask(ctx: *arch.CpuState) usize {
    switch (build_options.test_mode) {
        .Scheduler => if (!current_task.kernel) {
            if (!arch.runtimeTestCheckUserTaskState(ctx)) {
                panic(null, "User task state check failed\n", .{});
            }
        },
        else => {},
    }
    // Save the stack pointer from old task
    current_task.stack_pointer = @ptrToInt(ctx);

    // If we can't switch, then continue with the current task
    if (!can_switch) {
        return current_task.stack_pointer;
    }

    // Pick the next task from the back of the queue.
    // If there isn't one, then just return the same task
    if (tasks.pop()) |new_task_node| {
        // Get the next task
        const next_task = new_task_node.data;

        // Reuse the popped node for the outgoing task so no allocation is needed,
        // which speeds things up
        new_task_node.data = current_task;
        new_task_node.prev = null;
        new_task_node.next = null;

        // Re-insert the 'current_task' node at the front of the queue; since tasks are
        // popped from the back, it becomes the last to run again
        tasks.prepend(new_task_node);

        current_task = next_task;
    }

    // Context switch in the interrupt stub handler which will pop the next task state off the
    // stack
    return current_task.stack_pointer;
}

///
/// Add an already-created task to the scheduling queue. No locking.
///
/// Arguments:
///     IN new_task: *Task       - The task to schedule.
///     IN allocator: Allocator  - The allocator to use for the queue node.
///
/// Error: Allocator.Error
///     OutOfMemory - If there isn't enough memory for the queue node. Any memory allocated will
///                   be freed on return.
///
pub fn scheduleTask(new_task: *Task, allocator: Allocator) Allocator.Error!void {
    var task_node = try allocator.create(TailQueue(*Task).Node);
    task_node.* = .{ .data = new_task };
    tasks.prepend(task_node);
}

///
/// Initialise the scheduler. This will set up the current task to the code that is currently
/// running. So if there is a task switch before kmain can finish, can continue when switched back.
/// This will set the stack to KERNEL_STACK_START from the linker script. This will also create the
/// idle task for when there is no more tasks to run.
///
/// Arguments:
///     IN allocator: Allocator                - The allocator to use when needing to allocate memory.
///     IN mem_profile: *const mem.MemProfile  - The system's memory profile used for runtime testing.
///
/// Error: Allocator.Error
///     OutOfMemory - There is no more memory. Any memory allocated will be freed on return.
///
pub fn init(allocator: Allocator, mem_profile: *const mem.MemProfile) Allocator.Error!void {
    // TODO: Maybe move the task init here?
    log.info("Init\n", .{});
    defer log.info("Done\n", .{});

    // Init the task list for round robin
    tasks = TailQueue(*Task){};

    // Set up the init task to continue execution.
    // The kernel stack will point to the stack section rather than the heap
    current_task = try Task.create(0, true, &vmm.kernel_vmm, allocator, false);
    errdefer allocator.destroy(current_task);

    const kernel_stack_size = @ptrToInt(&KERNEL_STACK_END) - @ptrToInt(&KERNEL_STACK_START);
    current_task.kernel_stack = @intToPtr([*]u32, @ptrToInt(&KERNEL_STACK_START))[0..kernel_stack_size];
    // ESP will be saved on next schedule

    // Run the runtime tests here
    switch (build_options.test_mode) {
        .Scheduler => runtimeTests(allocator, mem_profile),
        else => {},
    }

    // Create the idle task when there are no more tasks left
    var idle_task = try Task.create(@ptrToInt(idle), true, &vmm.kernel_vmm, allocator, true);
    errdefer idle_task.destroy(allocator);
    try scheduleTask(idle_task, allocator);
}

// For testing the errdefer
const FailingAllocator = std.testing.FailingAllocator;
const testing_allocator = &std.testing.base_allocator_instance.allocator;

fn test_fn1() void {}
fn test_fn2() void {}

// PID counter used by createTestTask so each test task gets a unique PID
var test_pid_counter: u7 = 1;

/// Test helper: build a minimal Task with a unique PID and a tiny heap-allocated stack
fn createTestTask(allocator: Allocator) Allocator.Error!*Task {
    var t = try allocator.create(Task);
    errdefer allocator.destroy(t);
    t.pid = test_pid_counter;
    // Just alloc something
    t.kernel_stack = try allocator.alloc(u32, 1);
    t.stack_pointer = 0;
    test_pid_counter += 1;
    return t;
}

/// Test helper: free a task created by createTestTask. The main kernel stack is not
/// heap-allocated, so it must not be freed.
fn destroyTestTask(self: *Task, allocator: Allocator) void {
    if (@ptrToInt(self.kernel_stack.ptr) != @ptrToInt(&KERNEL_STACK_START)) {
        allocator.free(self.kernel_stack);
    }
    allocator.destroy(self);
}

test "pickNextTask" {
    var ctx: arch.CpuState = std.mem.zeroes(arch.CpuState);
    var allocator = std.testing.allocator;
    tasks = TailQueue(*Task){};
    // Set up a current task
    var first = try Task.create(0, true, &vmm.kernel_vmm, allocator, false);
    // We use an intermediary variable to avoid a double-free.
    // Deferring freeing current_task will free whatever current_task points to at the end
    defer first.destroy(allocator);
    current_task = first;
    current_task.pid = 0;
    current_task.kernel_stack = @intToPtr([*]u32, @ptrToInt(&KERNEL_STACK_START))[0..4096];
    current_task.stack_pointer = @ptrToInt(&KERNEL_STACK_START);

    // Create two tasks and schedule them
    var test_fn1_task = try Task.create(@ptrToInt(test_fn1), true, undefined, allocator, true);
    defer test_fn1_task.destroy(allocator);
    try scheduleTask(test_fn1_task, allocator);

    var test_fn2_task = try Task.create(@ptrToInt(test_fn2), true, undefined, allocator, true);
    defer test_fn2_task.destroy(allocator);
    try scheduleTask(test_fn2_task, allocator);

    // Get the stack pointers of the created tasks
    const fn1_stack_pointer = test_fn1_task.stack_pointer;
    const fn2_stack_pointer = test_fn2_task.stack_pointer;

    try expectEqual(pickNextTask(&ctx), fn1_stack_pointer);
    // The stack pointer of the re-added task should point to the context
    try expectEqual(tasks.first.?.data.stack_pointer, @ptrToInt(&ctx));
    // Should be the PID of the next task
    try expectEqual(current_task.pid, 1);

    try expectEqual(pickNextTask(&ctx), fn2_stack_pointer);
    // The stack pointer of the re-added task should point to the context
    try expectEqual(tasks.first.?.data.stack_pointer, @ptrToInt(&ctx));
    // Should be the PID of the next task
    try expectEqual(current_task.pid, 2);

    try expectEqual(pickNextTask(&ctx), @ptrToInt(&ctx));
    // The stack pointer of the re-added task should point to the context
    try expectEqual(tasks.first.?.data.stack_pointer, @ptrToInt(&ctx));
    // Should be back to the beginning
    try expectEqual(current_task.pid, 0);

    // Reset the test pid
    test_pid_counter = 1;

    // Free the queue
    while (tasks.pop()) |elem| {
        allocator.destroy(elem);
    }
}

test "createNewTask add new task" {
    // Set the global allocator
    var allocator = std.testing.allocator;

    // Init the task list
    tasks = TailQueue(*Task){};

    var test_fn1_task = try Task.create(@ptrToInt(test_fn1), true, undefined, allocator, true);
    defer test_fn1_task.destroy(allocator);
    try scheduleTask(test_fn1_task, allocator);

    try expectEqual(tasks.len, 1);

    // Free the memory
    allocator.destroy(tasks.first.?);
}

test "init" {
    var allocator = std.testing.allocator;

    try init(allocator, undefined);

    try expectEqual(current_task.pid, 0);
    try expectEqual(@ptrToInt(current_task.kernel_stack.ptr), @ptrToInt(&KERNEL_STACK_START));
    try expectEqual(current_task.kernel_stack.len, @ptrToInt(&KERNEL_STACK_END) - @ptrToInt(&KERNEL_STACK_START));

    try expectEqual(tasks.len, 1);

    // Free the tasks created
    current_task.destroy(allocator);
    while (tasks.pop()) |elem| {
        elem.data.destroy(allocator);
        allocator.destroy(elem);
    }
}

/// A volatile pointer used to control a loop outside the task. This is so to ensure a task switch
/// ocurred.
var is_set: *volatile bool = undefined;

///
/// The test task function.
///
fn task_function() noreturn {
    log.info("Switched\n", .{});
    is_set.* = false;
    while (true) {}
}

///
/// This tests that variables in registers and on the stack are preserved when a task switch
/// occurs. Also tests that a global volatile can be test in one task and be reacted to in another.
///
/// Arguments:
///     IN allocator: Allocator - The allocator to use when needing to allocate memory.
/// fn rt_variable_preserved(allocator: Allocator) void { // Create the memory for the boolean is_set = allocator.create(bool) catch unreachable; defer allocator.destroy(is_set); is_set.* = true; var test_task = Task.create(@ptrToInt(task_function), true, &vmm.kernel_vmm, allocator, true) catch |e| panic(@errorReturnTrace(), "Failed to create task in rt_variable_preserved: {}\n", .{e}); scheduleTask(test_task, allocator) catch |e| panic(@errorReturnTrace(), "Failed to schedule a task in rt_variable_preserved: {}\n", .{e}); // TODO: Need to add the ability to remove tasks var w: u32 = 0; var x: u32 = 1; var y: u32 = 2; var z: u32 = 3; while (is_set.*) { if (w != 0) { panic(@errorReturnTrace(), "FAILED: w not 0, but: {}\n", .{w}); } if (x != 1) { panic(@errorReturnTrace(), "FAILED: x not 1, but: {}\n", .{x}); } if (y != 2) { panic(@errorReturnTrace(), "FAILED: y not 2, but: {}\n", .{y}); } if (z != 3) { panic(@errorReturnTrace(), "FAILED: z not 3, but: {}\n", .{z}); } } // Make sure these are the same values if (w != 0) { panic(@errorReturnTrace(), "FAILED: w not 0, but: {}\n", .{w}); } if (x != 1) { panic(@errorReturnTrace(), "FAILED: x not 1, but: {}\n", .{x}); } if (y != 2) { panic(@errorReturnTrace(), "FAILED: y not 2, but: {}\n", .{y}); } if (z != 3) { panic(@errorReturnTrace(), "FAILED: z not 3, but: {}\n", .{z}); } log.info("SUCCESS: Scheduler variables preserved\n", .{}); } /// /// Test the initialisation and running of a task running in user mode /// /// Arguments: /// IN allocator: *std.mem.Allocator - The allocator to use when intialising the task /// IN mem_profile: mem.MemProfile - The system's memory profile. Determines the end address of the user task's VMM. /// fn rt_user_task(allocator: Allocator, mem_profile: *const mem.MemProfile) void { for (&[_][]const u8{ "/user_program_data.elf", "/user_program.elf" }) |user_program| { // 1. 
Create user VMM var task_vmm = allocator.create(vmm.VirtualMemoryManager(arch.VmmPayload)) catch |e| { panic(@errorReturnTrace(), "Failed to allocate VMM for {s}: {}\n", .{ user_program, e }); }; task_vmm.* = vmm.VirtualMemoryManager(arch.VmmPayload).init(0, @ptrToInt(mem_profile.vaddr_start), allocator, arch.VMM_MAPPER, undefined) catch |e| panic(@errorReturnTrace(), "Failed to create the vmm for {s}: {}\n", .{ user_program, e }); const user_program_file = fs.openFile(user_program, .NO_CREATION) catch |e| { panic(@errorReturnTrace(), "Failed to open {s}: {}\n", .{ user_program, e }); }; defer user_program_file.close(); var code: [1024 * 9]u8 = undefined; const code_len = user_program_file.read(code[0..code.len]) catch |e| { panic(@errorReturnTrace(), "Failed to read {s}: {}\n", .{ user_program, e }); }; const program_elf = elf.Elf.init(code[0..code_len], builtin.cpu.arch, allocator) catch |e| panic(@errorReturnTrace(), "Failed to load {s}: {}\n", .{ user_program, e }); defer program_elf.deinit(); var user_task = task.Task.createFromElf(program_elf, false, task_vmm, allocator) catch |e| { panic(@errorReturnTrace(), "Failed to create task for {s}: {}\n", .{ user_program, e }); }; scheduleTask(user_task, allocator) catch |e| { panic(@errorReturnTrace(), "Failed to schedule the task for {s}: {}\n", .{ user_program, e }); }; var num_allocatable_sections: usize = 0; var size_allocatable_sections: usize = 0; for (program_elf.section_headers) |section| { if (section.flags & elf.SECTION_ALLOCATABLE != 0) { num_allocatable_sections += 1; size_allocatable_sections += std.mem.alignForward(section.size, vmm.BLOCK_SIZE); } } // Only a certain number of elf section are expected to have been allocated in the vmm if (task_vmm.allocations.count() != num_allocatable_sections) { panic(@errorReturnTrace(), "VMM allocated wrong number of virtual regions for {s}. 
Expected {} but found {}\n", .{ user_program, num_allocatable_sections, task_vmm.allocations.count() }); } const allocated_size = (task_vmm.bmp.num_entries - task_vmm.bmp.num_free_entries) * vmm.BLOCK_SIZE; if (size_allocatable_sections != allocated_size) { panic(@errorReturnTrace(), "VMM allocated wrong amount of memory for {s}. Expected {} but found {}\n", .{ user_program, size_allocatable_sections, allocated_size }); } } } /// /// The scheduler runtime tests that will test the scheduling functionality. /// /// Arguments: /// IN allocator: Allocator - The allocator to use when needing to allocate memory. /// IN mem_profile: *const mem.MemProfile - The system's memory profile. Used to set up user task VMMs. /// fn runtimeTests(allocator: Allocator, mem_profile: *const mem.MemProfile) void { arch.enableInterrupts(); rt_user_task(allocator, mem_profile); rt_variable_preserved(allocator); while (true) {} }
0
repos/pluto/src
repos/pluto/src/kernel/panic.zig
const std = @import("std"); const builtin = std.builtin; const arch = @import("arch.zig").internals; const mem = @import("mem.zig"); const build_options = @import("build_options"); const ArrayList = std.ArrayList; const Allocator = std.mem.Allocator; const testing = std.testing; const log = std.log.scoped(.panic); /// The possible errors from panic code const PanicError = error{ /// The symbol file is of an invalid format. /// This could be because it lacks whitespace, a column or required newline characters. InvalidSymbolFile, }; /// An entry within a symbol map. Corresponds to one entry in a symbol file const MapEntry = struct { /// The address that the entry corresponds to addr: usize, /// The name of the function that starts at the address func_name: []const u8, }; const SymbolMap = struct { symbols: ArrayList(MapEntry), /// /// Initialise an empty symbol map. /// /// Arguments: /// IN allocator: Allocator - The allocator to use to initialise the array list. /// /// Return: SymbolMap /// The symbol map. /// pub fn init(allocator: Allocator) SymbolMap { return SymbolMap{ .symbols = ArrayList(MapEntry).init(allocator), }; } /// /// Deinitialise the symbol map, freeing all memory used. /// pub fn deinit(self: *SymbolMap) void { self.symbols.deinit(); } /// /// Add a symbol map entry with a name and address. /// /// Arguments: /// IN name: []const u8 - The name of the entry. /// IN addr: usize - The address for the entry. /// /// Error: Allocator.Error /// error.OutOfMemory - If there isn't enough memory to append a map entry. /// pub fn add(self: *SymbolMap, name: []const u8, addr: usize) Allocator.Error!void { try self.addEntry(MapEntry{ .addr = addr, .func_name = name }); } /// /// Add a symbol map entry. /// /// Arguments: /// IN entry: MapEntry - The entry. /// /// Error: Allocator.Error /// error.OutOfMemory - If there isn't enough memory to append a map entry. 
/// pub fn addEntry(self: *SymbolMap, entry: MapEntry) Allocator.Error!void { try self.symbols.append(entry); } /// /// Search for the function name associated with the address. /// /// Arguments: /// IN addr: usize - The address to search for. /// /// Return: ?[]const u8 /// The function name associated with that program address, or null if one wasn't found. /// pub fn search(self: *const SymbolMap, addr: usize) ?[]const u8 { if (self.symbols.items.len == 0) return null; // Find the first element whose address is greater than addr var previous_name: ?[]const u8 = null; for (self.symbols.items) |entry| { if (entry.addr > addr) return previous_name; previous_name = entry.func_name; } return previous_name; } }; var symbol_map: ?SymbolMap = null; /// /// Log a stacktrace address. Logs "(no symbols are available)" if no symbols are available, /// "?????" if the address wasn't found in the symbol map, else logs the function name. /// /// Arguments: /// IN addr: usize - The address to log. /// fn logTraceAddress(addr: usize) void { const str = if (symbol_map) |syms| syms.search(addr) orelse "?????" else "(no symbols available)"; log.err("{x}: {s}\n", .{ addr, str }); } /// /// Parse a hexadecimal address from the pointer up until the end pointer. Must be terminated by a /// whitespace character. /// /// Arguments: /// IN/OUT ptr: *[*]const u8 - The address at which to start looking, updated after all /// characters have been consumed. /// IN end: *const u8 - The end address at which to start looking. A whitespace character must /// be found before this. /// /// Return: usize /// The address parsed. /// /// Error: PanicError || std.fmt.ParseIntError /// PanicError.InvalidSymbolFile - A terminating whitespace wasn't found before the end address. 
/// std.fmt.ParseIntError - See std.fmt.parseInt /// fn parseAddr(ptr: *[*]const u8, end: *const u8) (PanicError || std.fmt.ParseIntError)!usize { const addr_start = ptr.*; ptr.* = try parseNonWhitespace(ptr.*, end); const len = @ptrToInt(ptr.*) - @ptrToInt(addr_start); const addr_str = addr_start[0..len]; return std.fmt.parseInt(usize, addr_str, 16); } /// /// Parse a single character. The address given cannot be greater than or equal to the end address /// given. /// /// Arguments: /// IN ptr: [*]const u8 - The address at which to get the character from. /// IN end: *const u8 - The end address at which to start looking. ptr cannot be greater than or /// equal to this. /// /// Return: u8 /// The character parsed. /// /// Error: PanicError /// PanicError.InvalidSymbolFile - The address given is greater than or equal to the end address. /// fn parseChar(ptr: [*]const u8, end: *const u8) PanicError!u8 { if (@ptrToInt(ptr) >= @ptrToInt(end)) { return PanicError.InvalidSymbolFile; } return ptr[0]; } /// /// Parse until a non-whitespace character. Must be terminated by a non-whitespace character before /// the end address. /// /// Arguments: /// IN ptr: [*]const u8 - The address at which to start looking. /// IN end: *const u8 - The end address at which to start looking. A non-whitespace character /// must be found before this. /// /// Return: [*]const u8 /// ptr plus the number of whitespace characters consumed. /// /// Error: PanicError /// PanicError.InvalidSymbolFile - A terminating non-whitespace character wasn't found before /// the end address. /// fn parseWhitespace(ptr: [*]const u8, end: *const u8) PanicError![*]const u8 { var i: u32 = 0; while (std.ascii.isSpace(try parseChar(ptr + i, end))) : (i += 1) {} return ptr + i; } /// /// Parse until a whitespace character. Must be terminated by a whitespace character before the end /// address. /// /// Arguments: /// IN ptr: [*]const u8 - The address at which to start looking. 
/// IN end: *const u8 - The end address at which to start looking. A whitespace character must /// be found before this. /// /// Return: [*]const u8 /// ptr plus the number of non-whitespace characters consumed. /// /// Error: PanicError /// PanicError.InvalidSymbolFile - A terminating whitespace character wasn't found before the /// end address. /// fn parseNonWhitespace(ptr: [*]const u8, end: *const u8) PanicError![*]const u8 { var i: u32 = 0; while (!std.ascii.isSpace(try parseChar(ptr + i, end))) : (i += 1) {} return ptr + i; } /// /// Parse until a newline character. Must be terminated by a newline character before the end /// address. /// /// Arguments: /// IN ptr: [*]const u8 - The address at which to start looking. /// IN end: *const u8 - The end address at which to start looking. A newline character must /// be found before this. /// /// Return: [*]const u8 /// ptr plus the number of non-newline characters consumed. /// /// Error: PanicError /// PanicError.InvalidSymbolFile - A terminating newline character wasn't found before the /// end address. /// fn parseNonNewLine(ptr: [*]const u8, end: *const u8) PanicError![*]const u8 { var i: u32 = 0; while ((try parseChar(ptr + i, end)) != '\n') : (i += 1) {} return ptr + i; } /// /// Parse a name from the pointer up until the end pointer. Must be terminated by a whitespace /// character. /// /// Arguments: /// IN/OUT ptr: *[*]const u8 - The address at which to start looking, updated after all /// characters have been consumed. /// IN end: *const u8 - The end address at which to start looking. A whitespace character must /// be found before this. /// /// Return: []const u8 /// The name parsed. /// /// Error: PanicError /// PanicError.InvalidSymbolFile - A terminating whitespace wasn't found before the end address. 
/// fn parseName(ptr: *[*]const u8, end: *const u8) PanicError![]const u8 { const name_start = ptr.*; ptr.* = try parseNonNewLine(ptr.*, end); const len = @ptrToInt(ptr.*) - @ptrToInt(name_start); return name_start[0..len]; } /// /// Parse a symbol map entry from the pointer up until the end pointer, /// in the format of '\d+\w+[a-zA-Z0-9]+'. Must be terminated by a whitespace character. /// /// Arguments: /// IN/OUT ptr: *[*]const u8 - The address at which to start looking, updated once after the /// address has been consumed and once again after the name has been consumed. /// IN end: *const u8 - The end address at which to start looking. A whitespace character must /// be found before this. /// /// Return: MapEntry /// The entry parsed. /// /// Error: PanicError || std.fmt.ParseIntError /// PanicError.InvalidSymbolFile - A terminating whitespace wasn't found before the end address. /// std.fmt.ParseIntError - See parseAddr. /// fn parseMapEntry(start: *[*]const u8, end: *const u8) (PanicError || std.fmt.ParseIntError)!MapEntry { var ptr = try parseWhitespace(start.*, end); defer start.* = ptr; const addr = try parseAddr(&ptr, end); ptr = try parseWhitespace(ptr, end); const name = try parseName(&ptr, end); return MapEntry{ .addr = addr, .func_name = name }; } pub fn panic(trace: ?*builtin.StackTrace, comptime format: []const u8, args: anytype) noreturn { @setCold(true); log.err("Kernel panic: " ++ format ++ "\n", args); if (trace) |trc| { var last_addr: u64 = 0; for (trc.instruction_addresses) |ret_addr| { if (ret_addr != last_addr) logTraceAddress(ret_addr); last_addr = ret_addr; } } else { const first_ret_addr = @returnAddress(); var last_addr: u64 = 0; var it = std.debug.StackIterator.init(first_ret_addr, null); while (it.next()) |ret_addr| { if (ret_addr != last_addr) logTraceAddress(ret_addr); last_addr = ret_addr; } } arch.haltNoInterrupts(); } /// /// Initialise the symbol table used by the panic subsystem by looking for a boot module called "kernel.map" 
and loading the /// symbol entries from it. Exits early if no such module was found. /// /// Arguments: /// IN mem_profile: *const mem.MemProfile - The memory profile from which to get the loaded boot /// modules. /// IN allocator: Allocator - The allocator to use to store the symbol map. /// /// Error: PanicError || Allocator.Error || std.fmt.ParseIntError /// PanicError.InvalidSymbolFile - A terminating whitespace wasn't found before the end address. /// Allocator.Error.OutOfMemory - If there wasn't enough memory. /// std.fmt.ParseIntError - See parseMapEntry. /// pub fn initSymbols(mem_profile: *const mem.MemProfile, allocator: Allocator) (PanicError || Allocator.Error || std.fmt.ParseIntError)!void { log.info("Init\n", .{}); defer log.info("Done\n", .{}); // Exit if we haven't loaded all debug modules if (mem_profile.modules.len < 1) { return; } var kmap_start: usize = 0; var kmap_end: usize = 0; for (mem_profile.modules) |module| { const mod_start = module.region.start; const mod_end = module.region.end - 1; if (std.mem.eql(u8, module.name, "kernel.map")) { kmap_start = mod_start; kmap_end = mod_end; break; } } // Don't try to load the symbols if there was no symbol map file. 
This is a valid state so just // exit early if (kmap_start == 0 or kmap_end == 0) { return; } var syms = SymbolMap.init(allocator); errdefer syms.deinit(); var kmap_ptr = @intToPtr([*]u8, kmap_start); while (@ptrToInt(kmap_ptr) < kmap_end - 1) { const entry = try parseMapEntry(&kmap_ptr, @intToPtr(*const u8, kmap_end)); try syms.addEntry(entry); } symbol_map = syms; switch (build_options.test_mode) { .Panic => runtimeTests(), else => {}, } } test "parseChar" { const str: []const u8 = "plutoisthebest"; const end = @ptrCast(*const u8, str.ptr + str.len); var char = try parseChar(str.ptr, end); try testing.expectEqual(char, 'p'); char = try parseChar(str.ptr + 1, end); try testing.expectEqual(char, 'l'); try testing.expectError(PanicError.InvalidSymbolFile, parseChar(str.ptr + str.len, end)); } test "parseWhitespace" { const str: []const u8 = " a"; const end = @ptrCast(*const u8, str.ptr + str.len); var ptr = try parseWhitespace(str.ptr, end); try testing.expectEqual(@ptrToInt(str.ptr) + 4, @ptrToInt(ptr)); } test "parseWhitespace fails without a terminating whitespace" { const str: []const u8 = " "; const end = @ptrCast(*const u8, str.ptr + str.len); try testing.expectError(PanicError.InvalidSymbolFile, parseWhitespace(str.ptr, end)); } test "parseNonWhitespace" { const str: []const u8 = "ab "; const end = @ptrCast(*const u8, str.ptr + str.len); var ptr = try parseNonWhitespace(str.ptr, end); try testing.expectEqual(@ptrToInt(str.ptr) + 2, @ptrToInt(ptr)); } test "parseNonWhitespace fails without a terminating whitespace" { const str: []const u8 = "abc"; const end = @ptrCast(*const u8, str.ptr + str.len); try testing.expectError(PanicError.InvalidSymbolFile, parseNonWhitespace(str.ptr, end)); } test "parseNonNewLine" { const str: []const u8 = "ab\n"; const end = @ptrCast(*const u8, str.ptr + str.len); var ptr = try parseNonNewLine(str.ptr, end); try testing.expectEqual(@ptrToInt(str.ptr) + 2, @ptrToInt(ptr)); } test "parseNonNewLine fails without a terminating 
newline" { const str: []const u8 = "abc"; const end = @ptrCast(*const u8, str.ptr + str.len); try testing.expectError(PanicError.InvalidSymbolFile, parseNonNewLine(str.ptr, end)); } test "parseAddr" { const str: []const u8 = "1a2b3c4d "; const end = @ptrCast(*const u8, str.ptr + str.len); var ptr = str.ptr; try testing.expectEqual(try parseAddr(&ptr, end), 0x1a2b3c4d); } test "parseAddr fails without a terminating whitespace" { const str: []const u8 = "1a2b3c4d"; const end = @ptrCast(*const u8, str.ptr + str.len); var ptr = str.ptr; try testing.expectError(PanicError.InvalidSymbolFile, parseAddr(&ptr, end)); } test "parseAddr fails with an invalid integer" { const str: []const u8 = "1g2t "; const end = @ptrCast(*const u8, str.ptr + str.len); var ptr = str.ptr; try testing.expectError(error.InvalidCharacter, parseAddr(&ptr, end)); } test "parseName" { const str: []const u8 = "func_name\n"; const end = @ptrCast(*const u8, str.ptr + str.len); var ptr = str.ptr; try testing.expectEqualSlices(u8, try parseName(&ptr, end), "func_name"); } test "parseName with spaces" { const str: []const u8 = "func_name(*const type )\n"; const end = @ptrCast(*const u8, str.ptr + str.len); var ptr = str.ptr; try testing.expectEqualSlices(u8, try parseName(&ptr, end), "func_name(*const type )"); } test "parseName fails without a terminating newline" { const str: []const u8 = "func_name"; const end = @ptrCast(*const u8, str.ptr + str.len); var ptr = str.ptr; try testing.expectError(PanicError.InvalidSymbolFile, parseName(&ptr, end)); } test "parseMapEntry" { const str: []const u8 = "1a2b3c4d func_name\n5e6f7a8b func_name2\n"; const end = @ptrCast(*const u8, str.ptr + str.len); var ptr = str.ptr; var actual = try parseMapEntry(&ptr, end); var expected = MapEntry{ .addr = 0x1a2b3c4d, .func_name = "func_name" }; try testing.expectEqual(actual.addr, expected.addr); try testing.expectEqualSlices(u8, actual.func_name, expected.func_name); actual = try parseMapEntry(&ptr, end); expected = 
MapEntry{ .addr = 0x5e6f7a8b, .func_name = "func_name2" }; try testing.expectEqual(actual.addr, expected.addr); try testing.expectEqualSlices(u8, actual.func_name, expected.func_name); } test "parseMapEntry fails without a terminating newline" { const str: []const u8 = "1a2b3c4d func_name"; var ptr = str.ptr; try testing.expectError(PanicError.InvalidSymbolFile, parseMapEntry(&ptr, @ptrCast(*const u8, str.ptr + 18))); } test "parseMapEntry fails without any characters" { const str: []const u8 = " "; var ptr = str.ptr; try testing.expectError(PanicError.InvalidSymbolFile, parseMapEntry(&ptr, @ptrCast(*const u8, str.ptr))); } test "parseMapEntry fails with an invalid address" { const str: []const u8 = "xyz func_name"; var ptr = str.ptr; try testing.expectError(error.InvalidCharacter, parseMapEntry(&ptr, @ptrCast(*const u8, str.ptr + 13))); } test "parseMapEntry fails without a name" { const str: []const u8 = "123 "; var ptr = str.ptr; try testing.expectError(PanicError.InvalidSymbolFile, parseMapEntry(&ptr, @ptrCast(*const u8, str.ptr + 4))); } test "SymbolMap" { var allocator = std.testing.allocator; var map = SymbolMap.init(allocator); defer map.deinit(); try map.add("abc"[0..], 123); try map.addEntry(MapEntry{ .func_name = "def"[0..], .addr = 456 }); try map.add("ghi"[0..], 789); try map.addEntry(MapEntry{ .func_name = "jkl"[0..], .addr = 1010 }); try testing.expectEqual(map.search(54), null); try testing.expectEqual(map.search(122), null); try testing.expectEqual(map.search(123), "abc"); try testing.expectEqual(map.search(234), "abc"); try testing.expectEqual(map.search(455), "abc"); try testing.expectEqual(map.search(456), "def"); try testing.expectEqual(map.search(678), "def"); try testing.expectEqual(map.search(788), "def"); try testing.expectEqual(map.search(789), "ghi"); try testing.expectEqual(map.search(1009), "ghi"); try testing.expectEqual(map.search(1010), "jkl"); try testing.expectEqual(map.search(2345), "jkl"); } /// /// Runtime test for panic. 
This will trigger a integer overflow. /// pub fn runtimeTests() void { @setRuntimeSafety(true); var x: u8 = 255; x += 1; // If we get here, then a panic was not triggered so fail panic(@errorReturnTrace(), "FAILURE: No integer overflow\n", .{}); }
0
repos/pluto/src
repos/pluto/src/kernel/heap.zig
const std = @import("std"); const Allocator = std.mem.Allocator; const testing = std.testing; const log = std.log.scoped(.heap); const builtin = std.builtin; const is_test = builtin.is_test; const build_options = @import("build_options"); const vmm = @import("vmm.zig"); const panic = @import("panic.zig").panic; pub const FreeListAllocator = struct { const Error = error{TooSmall}; const Header = struct { size: usize, next_free: ?*Header, const Self = @This(); /// /// Initialise the header for a free allocation node /// /// Arguments: /// IN size: usize - The node's size, not including the size of the header itself /// IN next_free: ?*Header - A pointer to the next free node /// /// Return: Header /// The header constructed fn init(size: usize, next_free: ?*Header) Header { return .{ .size = size, .next_free = next_free, }; } }; const Self = @This(); first_free: ?*Header, /// /// Initialise an empty and free FreeListAllocator /// /// Arguments: /// IN start: usize - The starting address for all allocations /// IN size: usize - The size of the region of memory to allocate within. 
Must be greater than @sizeOf(Header) /// /// Return: FreeListAllocator /// The FreeListAllocator constructed /// /// Error: Error /// Error.TooSmall - If size <= @sizeOf(Header) /// pub fn init(start: usize, size: usize) Error!FreeListAllocator { if (size <= @sizeOf(Header)) return Error.TooSmall; return FreeListAllocator{ .first_free = insertFreeHeader(start, size - @sizeOf(Header), null), }; } pub fn allocator(self: *Self) Allocator { return Allocator.init(self, alloc, resize, free); } /// /// Create a free header at a specific location /// /// Arguments: /// IN at: usize - The address to create it at /// IN size: usize - The node's size, excluding the size of the header itself /// IN next_free: ?*Header - The next free header in the allocator, or null if there isn't one /// /// Return *Header /// The pointer to the header created /// fn insertFreeHeader(at: usize, size: usize, next_free: ?*Header) *Header { var node = @intToPtr(*Header, at); node.* = Header.init(size, next_free); return node; } /// /// Update the free header pointers that should point to the provided header /// /// Arguments: /// IN self: *FreeListAllocator - The FreeListAllocator to modify /// IN previous: ?*Header - The previous free node or null if there wasn't one. If null, self.first_free will be set to header, else previous.next_free will be set to header /// IN header: ?*Header - The header being pointed to. This will be the new value of self.first_free or previous.next_free /// fn registerFreeHeader(self: *Self, previous: ?*Header, header: ?*Header) void { if (previous) |p| { p.next_free = header; } else { self.first_free = header; } } /// /// Free an allocation /// /// Arguments: /// IN self: *FreeListAllocator - The allocator being freed within /// IN mem: []u8 - The memory to free /// IN alignment: u29 - The alignment used to allocate the memory /// IN ret_addr: usize - The return address passed by the high-level Allocator API. This is ignored. 
/// fn free(self: *Self, mem: []u8, alignment: u29, ret_addr: usize) void { _ = alignment; _ = ret_addr; const size = std.math.max(mem.len, @sizeOf(Header)); const addr = @ptrToInt(mem.ptr); var header = insertFreeHeader(addr, size - @sizeOf(Header), null); if (self.first_free) |first| { var prev: ?*Header = null; // Find the previous free node if (@ptrToInt(first) < addr) { prev = first; while (prev.?.next_free) |next| { if (@ptrToInt(next) > addr) break; prev = next; } } // Make the freed header point to the next one, which is the one after the previous or the first if there was no previous header.next_free = if (prev) |p| p.next_free else first; self.registerFreeHeader(prev, header); // Join with the next one until the next isn't a neighbour if (header.next_free) |next| { if (@ptrToInt(next) == @ptrToInt(header) + header.size + @sizeOf(Header)) { header.size += next.size + @sizeOf(Header); header.next_free = next.next_free; } } // Try joining with the previous one if (prev) |p| { p.size += header.size + @sizeOf(Header); p.next_free = header.next_free; } } else { self.first_free = header; } } /// /// Attempt to resize an allocation. This should only be called via the Allocator interface. /// /// When the new size requested is 0, a free happens. See the free function for details. /// /// When the new size is greater than the old buffer's size, we attempt to steal some space from the neighbouring node. /// This can only be done if the neighbouring node is free and the remaining space after taking what is needed to resize is enough to create a new Header. This is because we don't want to leave any dangling memory that isn't tracked by a header. 
/// /// | <----- new_size -----> /// |---------|--------\----------------| /// | | \ | /// | old_mem | header \ header's space | /// | | \ | /// |---------|--------\----------------| /// /// After expanding to new_size, it will look like /// |-----------------------|--------\--| /// | | \ | /// | old_mem | header \ | /// | | \ | /// |-----------------------|--------\--| /// The free node before old_mem needs to then point to the new header rather than the old one and the new header needs to point to the free node after the old one. If there was no previous free node then the new one becomes the first free node. /// /// When the new size is smaller than the old_buffer's size, we attempt to shrink it and create a new header to the right. /// This can only be done if the space left by the shrinking is enough to create a new header, since we don't want to leave any dangling untracked memory. /// | <--- new_size ---> /// |-----------------------------------| /// | | /// | old_mem | /// | | /// |-----------------------------------| /// /// After shrinking to new_size, it will look like /// | <--- new_size ---> /// |-------------------|--------\-- ---| /// | | \ | /// | old_mem | header \ | /// | | \ | /// |-------------------|--------\------| /// We then attempt to join with neighbouring free nodes. /// The node before old_mem needs to then point to the new header and the new header needs to point to the next free node. /// /// Arguments: /// IN self: *FreeListAllocator - The allocator to resize within. /// IN old_mem: []u8 - The buffer to resize. /// IN old_align: u29 - The original alignment for old_mem. /// IN new_size: usize - What to resize to. /// IN size_alignment: u29 - The alignment that the size should have. /// IN ret_addr: usize - The return address passed by the high-level Allocator API. This is ignored /// /// Return: ?usize /// The new size of the buffer, which will be new_size if the operation was successfull, or null if the operation wasn't successful. 
/// fn resize(self: *Self, old_mem: []u8, old_align: u29, new_size: usize, size_alignment: u29, ret_addr: usize) ?usize { // Suppress unused var warning _ = old_align; _ = ret_addr; if (new_size == 0) { self.free(old_mem, old_align, ret_addr); return 0; } if (new_size == old_mem.len) return new_size; const end = @ptrToInt(old_mem.ptr) + old_mem.len; var real_size = if (size_alignment > 1) std.mem.alignAllocLen(old_mem.len, new_size, size_alignment) else new_size; // Try to find the buffer's neighbour (if it's free) and the previous free node // We'll be stealing some of the free neighbour's space when expanding or joining up with it when shrinking var free_node = self.first_free; var next: ?*Header = null; var prev: ?*Header = null; while (free_node) |f| { if (@ptrToInt(f) == end) { // This free node is right next to the node being freed so is its neighbour next = f; break; } else if (@ptrToInt(f) > end) { // We've found a node past the node being freed so end early break; } prev = f; free_node = f.next_free; } // If we're expanding the buffer if (real_size > old_mem.len) { if (next) |n| { // If the free neighbour isn't big enough then fail if (old_mem.len + n.size + @sizeOf(Header) < real_size) return null; const size_diff = real_size - old_mem.len; const consumes_whole_neighbour = size_diff == n.size + @sizeOf(Header); // If the space left over in the free neighbour from the resize isn't enough to fit a new node, then fail if (!consumes_whole_neighbour and n.size + @sizeOf(Header) - size_diff < @sizeOf(Header)) return null; var new_next: ?*Header = n.next_free; // We don't do any splitting when consuming the whole neighbour if (!consumes_whole_neighbour) { // Create the new header. 
It starts at the end of the buffer plus the stolen space // The size will be the previous size minus what we stole new_next = insertFreeHeader(end + size_diff, n.size - size_diff, n.next_free); } self.registerFreeHeader(prev, new_next); return real_size; } // The neighbour isn't free so we can't expand into it return null; } else { // Shrinking var size_diff = old_mem.len - real_size; // If shrinking would leave less space than required for a new header, // or if shrinking would make the buffer too small, don't shrink if (size_diff < @sizeOf(Header)) { return old_mem.len; } // Make sure the we have enough space for a header if (real_size < @sizeOf(Header)) { real_size = @sizeOf(Header); } // Create a new header for the space gained from shrinking var new_next = insertFreeHeader(@ptrToInt(old_mem.ptr) + real_size, size_diff - @sizeOf(Header), if (prev) |p| p.next_free else self.first_free); self.registerFreeHeader(prev, new_next); // Join with the neighbour if (next) |n| { new_next.size += n.size + @sizeOf(Header); new_next.next_free = n.next_free; } return real_size; } } /// /// Allocate a portion of memory. This should only be called via the Allocator interface. /// /// This will find the first free node within the heap that can fit the size requested. If the size of the node is larger than the requested size but any space left over isn't enough to create a new Header, the next node is tried. If the node would require some padding to reach the desired alignment and that padding wouldn't fit a new Header, the next node is tried (however this node is kept as a backup in case no future nodes can fit the request). 
/// /// |--------------\---------------------| /// | \ | /// | free header \ free space | /// | \ | /// |--------------\---------------------| /// /// When the alignment padding is large enough for a new Header, the node found is split on the left, like so /// <---- padding ----> /// |------------\-----|-------------\---| /// | \ | \ | /// | new header \ | free header \ | /// | \ | \ | /// |------------\-----|-------------\---| /// The previous free node should then point to the left split. The left split should point to the free node after the one that was found /// /// When the space left over in the free node is more than required for the allocation, it is split on the right /// |--------------\-------|------------\--| /// | \ | \ | /// | free header \ space | new header \ | /// | \ | \ | /// |--------------\-------|------------\--| /// The previous free node should then point to the new node on the left and the new node should point to the next free node /// /// Splitting on the left and right can both happen in one allocation /// /// Arguments: /// IN self: *FreeListAllocator - The allocator to use /// IN size: usize - The amount of memory requested /// IN alignment: u29 - The alignment that the address of the allocated memory should have /// IN size_alignment: u29 - The alignment that the length of the allocated memory should have /// IN ret_addr: usize - The return address passed by the high-level Allocator API. 
This is ignored /// /// Return: []u8 /// The allocated memory /// /// Error: std.Allocator.Error /// std.Allocator.Error.OutOfMemory - There wasn't enough memory left to fulfill the request /// pub fn alloc(self: *Self, size: usize, alignment: u29, size_alignment: u29, ret_addr: usize) Allocator.Error![]u8 { // Suppress unused var warning _ = ret_addr; if (self.first_free == null) return Allocator.Error.OutOfMemory; // Get the real size being allocated, which is the aligned size or the size of a header (whichever is largest) // The size must be at least the size of a header so that it can be freed properly const real_size = std.math.max(if (size_alignment > 1) std.mem.alignAllocLen(size, size, size_alignment) else size, @sizeOf(Header)); var free_header = self.first_free; var prev: ?*Header = null; var backup: ?*Header = null; var backup_prev: ?*Header = null; // Search for the first node that can fit the request const alloc_to = find: while (free_header) |h| : ({ prev = h; free_header = h.next_free; }) { if (h.size + @sizeOf(Header) < real_size) { continue; } // The address at which to allocate. This will clobber the header. 
const addr = @ptrToInt(h); var alignment_padding: usize = 0; if ((alignment > 1 and !std.mem.isAligned(addr, alignment)) or !std.mem.isAligned(addr, @alignOf(Header))) { alignment_padding = alignment - (addr % alignment); // If the size can't fit the alignment padding then try the next one if (h.size + @sizeOf(Header) < real_size + alignment_padding) { continue; } // If a new node couldn't be created from the space left by alignment padding then try the next one // This check is necessary as otherwise we'd have wasted space that could never be allocated // We do however set the backup variable to this node so that in the unfortunate case that no other nodes can take the allocation, we allocate it here and sacrifice the wasted space if (alignment_padding < @sizeOf(Header)) { backup = h; backup_prev = prev; continue; } } // If we wouldn't be able to create a node with any unused space, try the next one // This check is necessary as otherwise we'd have wasted space that could never be allocated // Much like with the alignment padding, we set this node as a backup if (@sizeOf(Header) + h.size - alignment_padding - real_size < @sizeOf(Header)) { backup = h; backup_prev = prev; continue; } break :find h; } else backup; if (alloc_to == backup) { prev = backup_prev; } if (alloc_to) |x| { var header = x; var addr = @ptrToInt(header); // Allocate to this node var alignment_padding: usize = 0; if (alignment > 1 and !std.mem.isAligned(addr, alignment)) { alignment_padding = alignment - (addr % alignment); } // If there is enough unused space to the right of this node, need to align that pointer to the alignment of the header if (header.size > real_size + alignment_padding) { const at = @ptrToInt(header) + real_size + alignment_padding; if (!std.mem.isAligned(at, @alignOf(Header))) { alignment_padding += @alignOf(Header) - (at % @alignOf(Header)); } } // If we were going to use alignment padding and it's big enough to fit a new node, create a node to the left using the unused 
space if (alignment_padding >= @sizeOf(Header)) { // Since the header's address is going to be reused for the smaller one being created, backup the header to its new position header = insertFreeHeader(addr + alignment_padding, header.size - alignment_padding, header.next_free); var left = insertFreeHeader(addr, alignment_padding - @sizeOf(Header), header.next_free); // The previous should link to the new one instead self.registerFreeHeader(prev, left); prev = left; alignment_padding = 0; } // If there is enough unused space to the right of this node then create a smaller node if (header.size > real_size + alignment_padding) { header.next_free = insertFreeHeader(@ptrToInt(header) + real_size + alignment_padding, header.size - real_size - alignment_padding, header.next_free); } self.registerFreeHeader(prev, header.next_free); return @intToPtr([*]u8, @ptrToInt(header))[0..std.mem.alignAllocLen(size, size, size_alignment)]; } return Allocator.Error.OutOfMemory; } test "init" { const size = 1024; var region = try testing.allocator.alloc(u8, size); defer testing.allocator.free(region); var free_list = &(try FreeListAllocator.init(@ptrToInt(region.ptr), size)); var header = @intToPtr(*FreeListAllocator.Header, @ptrToInt(region.ptr)); try testing.expectEqual(header, free_list.first_free.?); try testing.expectEqual(header.next_free, null); try testing.expectEqual(header.size, size - @sizeOf(Header)); try testing.expectError(Error.TooSmall, FreeListAllocator.init(0, @sizeOf(Header) - 1)); } test "alloc" { const size = 1024; var region = try testing.allocator.alloc(u8, size); defer testing.allocator.free(region); const start = @ptrToInt(region.ptr); var free_list = &(try FreeListAllocator.init(start, size)); std.debug.print("", .{}); const alloc0 = try free_list.alloc(64, 0, 0, @returnAddress()); const alloc0_addr = @ptrToInt(alloc0.ptr); // Should be at the start of the heap try testing.expectEqual(alloc0_addr, start); // The allocation should have produced a node on the 
right of the allocation var header = @intToPtr(*Header, start + 64); try testing.expectEqual(header.size, size - 64 - @sizeOf(Header)); try testing.expectEqual(header.next_free, null); try testing.expectEqual(free_list.first_free, header); std.debug.print("", .{}); // 64 bytes aligned to 4 bytes const alloc1 = try free_list.alloc(64, 4, 0, @returnAddress()); const alloc1_addr = @ptrToInt(alloc1.ptr); const alloc1_end = alloc1_addr + alloc1.len; // Should be to the right of the first allocation, with some alignment padding in between const alloc0_end = alloc0_addr + alloc0.len; try testing.expect(alloc0_end <= alloc1_addr); try testing.expectEqual(std.mem.alignForward(alloc0_end, 4), alloc1_addr); // It should have produced a node on the right header = @intToPtr(*Header, alloc1_end); try testing.expectEqual(header.size, size - (alloc1_end - start) - @sizeOf(Header)); try testing.expectEqual(header.next_free, null); try testing.expectEqual(free_list.first_free, header); const alloc2 = try free_list.alloc(64, 256, 0, @returnAddress()); const alloc2_addr = @ptrToInt(alloc2.ptr); const alloc2_end = alloc2_addr + alloc2.len; try testing.expect(alloc1_end < alloc2_addr); // There should be a free node to the right of alloc2 const second_header = @intToPtr(*Header, alloc2_end); try testing.expectEqual(second_header.size, size - (alloc2_end - start) - @sizeOf(Header)); try testing.expectEqual(second_header.next_free, null); // There should be a free node in between alloc1 and alloc2 due to the large alignment padding (depends on the allocation by the testing allocator, hence the check) if (alloc2_addr - alloc1_end >= @sizeOf(Header)) { header = @intToPtr(*Header, alloc1_end); try testing.expectEqual(free_list.first_free, header); try testing.expectEqual(header.next_free, second_header); } // Try allocating something smaller than @sizeOf(Header). 
This should scale up to @sizeOf(Header) var alloc3 = try free_list.alloc(1, 0, 0, @returnAddress()); const alloc3_addr = @ptrToInt(alloc3.ptr); const alloc3_end = alloc3_addr + @sizeOf(Header); const header2 = @intToPtr(*Header, alloc3_end); // The new free node on the right should be the first one free try testing.expectEqual(free_list.first_free, header2); // And it should point to the free node on the right of alloc2 try testing.expectEqual(header2.next_free, second_header); // Attempting to allocate more than the size of the largest free node should fail const remaining_size = second_header.size + @sizeOf(Header); try testing.expectError(Allocator.Error.OutOfMemory, free_list.alloc(remaining_size + 1, 0, 0, @returnAddress())); // Alloc a non aligned to header var alloc4 = try free_list.alloc(13, 1, 0, @returnAddress()); const alloc4_addr = @ptrToInt(alloc4.ptr); const alloc4_end = alloc4_addr + std.mem.alignForward(13, @alignOf(Header)); const header3 = @intToPtr(*Header, alloc4_end); // We should still have a length of 13 try testing.expectEqual(alloc4.len, 13); // But this should be aligned to Header (4) try testing.expectEqual(alloc4_end - alloc4_addr, 16); // Previous header should now point to the next header try testing.expectEqual(header2.next_free, header3); } test "free" { const size = 1024; var region = try testing.allocator.alloc(u8, size); defer testing.allocator.free(region); const start = @ptrToInt(region.ptr); var free_list = &(try FreeListAllocator.init(start, size)); var alloc0 = try free_list.alloc(128, 0, 0, @returnAddress()); var alloc1 = try free_list.alloc(256, 0, 0, @returnAddress()); var alloc2 = try free_list.alloc(64, 0, 0, @returnAddress()); // There should be a single free node after alloc2 const free_node3 = @intToPtr(*Header, @ptrToInt(alloc2.ptr) + alloc2.len); try testing.expectEqual(free_list.first_free, free_node3); try testing.expectEqual(free_node3.size, size - alloc0.len - alloc1.len - alloc2.len - @sizeOf(Header)); try 
testing.expectEqual(free_node3.next_free, null); free_list.free(alloc0, 0, 0); // There should now be two free nodes. One where alloc0 was and another after alloc2 const free_node0 = @intToPtr(*Header, start); try testing.expectEqual(free_list.first_free, free_node0); try testing.expectEqual(free_node0.size, alloc0.len - @sizeOf(Header)); try testing.expectEqual(free_node0.next_free, free_node3); // Freeing alloc1 should join it with free_node0 free_list.free(alloc1, 0, 0); try testing.expectEqual(free_list.first_free, free_node0); try testing.expectEqual(free_node0.size, alloc0.len - @sizeOf(Header) + alloc1.len); try testing.expectEqual(free_node0.next_free, free_node3); // Freeing alloc2 should then join them all together into one big free node free_list.free(alloc2, 0, 0); try testing.expectEqual(free_list.first_free, free_node0); try testing.expectEqual(free_node0.size, size - @sizeOf(Header)); try testing.expectEqual(free_node0.next_free, null); } test "resize" { std.debug.print("", .{}); const size = 1024; var region = try testing.allocator.alloc(u8, size); defer testing.allocator.free(region); const start = @ptrToInt(region.ptr); var free_list = &(try FreeListAllocator.init(start, size)); var alloc0 = try free_list.alloc(128, 0, 0, @returnAddress()); var alloc1 = try free_list.alloc(256, 0, 0, @returnAddress()); // Expanding alloc0 should fail as alloc1 is right next to it try testing.expectEqual(free_list.resize(alloc0, 0, 136, 0, @returnAddress()), null); // Expanding alloc1 should succeed try testing.expectEqual(free_list.resize(alloc1, 0, 512, 0, @returnAddress()), 512); alloc1 = alloc1.ptr[0..512]; // And there should be a free node on the right of it var header = @intToPtr(*Header, @ptrToInt(alloc1.ptr) + 512); try testing.expectEqual(header.size, size - 128 - 512 - @sizeOf(Header)); try testing.expectEqual(header.next_free, null); try testing.expectEqual(free_list.first_free, header); // Shrinking alloc1 should produce a big free node on the right 
try testing.expectEqual(free_list.resize(alloc1, 0, 128, 0, @returnAddress()), 128); alloc1 = alloc1.ptr[0..128]; header = @intToPtr(*Header, @ptrToInt(alloc1.ptr) + 128); try testing.expectEqual(header.size, size - 128 - 128 - @sizeOf(Header)); try testing.expectEqual(header.next_free, null); try testing.expectEqual(free_list.first_free, header); // Shrinking by less space than would allow for a new Header shouldn't work try testing.expectEqual(free_list.resize(alloc1, 0, alloc1.len - @sizeOf(Header) / 2, 0, @returnAddress()), 128); // Shrinking to less space than would allow for a new Header shouldn't work try testing.expectEqual(free_list.resize(alloc1, 0, @sizeOf(Header) / 2, 0, @returnAddress()), @sizeOf(Header)); } }; /// /// Initialise the kernel heap with a chosen allocator /// /// Arguments: /// IN vmm_payload: type - The payload passed around by the VMM. Decided by the architecture /// IN heap_vmm: *vmm.VirtualMemoryManager - The VMM associated with the kernel /// IN attributes: vmm.Attributes - The attributes to associate with the memory allocated for the heap /// IN heap_size: usize - The desired size of the heap, in bytes. 
Must be greater than @sizeOf(FreeListAllocator.Header) /// /// Return: FreeListAllocator /// The FreeListAllocator created to keep track of the kernel heap /// /// Error: FreeListAllocator.Error || Allocator.Error /// FreeListAllocator.Error.TooSmall - heap_size is too small /// Allocator.Error.OutOfMemory - heap_vmm's allocator didn't have enough memory available to fulfill the request /// pub fn init(comptime vmm_payload: type, heap_vmm: *vmm.VirtualMemoryManager(vmm_payload), attributes: vmm.Attributes, heap_size: usize) (FreeListAllocator.Error || Allocator.Error)!FreeListAllocator { log.info("Init\n", .{}); defer log.info("Done\n", .{}); var heap_start = (try heap_vmm.alloc(heap_size / vmm.BLOCK_SIZE, null, attributes)) orelse panic(null, "Not enough contiguous virtual memory blocks to allocate to kernel heap\n", .{}); // This free call cannot error as it is guaranteed to have been allocated above errdefer heap_vmm.free(heap_start) catch unreachable; return try FreeListAllocator.init(heap_start, heap_size); }
0
repos/pluto/src
repos/pluto/src/kernel/syscalls.zig
const std = @import("std"); const testing = std.testing; const is_test = @import("builtin").is_test; const scheduler = @import("scheduler.zig"); const panic = @import("panic.zig").panic; const log = std.log.scoped(.syscalls); const arch = @import("arch.zig").internals; const vfs = @import("filesystem/vfs.zig"); const task = @import("task.zig"); const vmm = @import("vmm.zig"); const mem = @import("mem.zig"); const pmm = @import("pmm.zig"); const bitmap = @import("bitmap.zig"); var allocator: std.mem.Allocator = undefined; /// The maximum amount of data to allocate when copying user memory into kernel memory pub const USER_MAX_DATA_LEN = 16 * 1024; pub const Error = error{ NoMoreFSHandles, TooBig, NotAFile }; /// All implemented syscalls pub const Syscall = enum { /// Open a new vfs node /// /// Arguments: /// path_ptr: usize - The user/kernel pointer to the file path to open /// path_len: usize - The length of the file path /// flags: usize - The flag specifying what to do with the opened node. Use the integer value of vfs.OpenFlags /// args: usize - The user/kernel pointer to the structure holding the vfs.OpenArgs /// ignored: usize - Ignored /// /// Return: usize /// The handle for the opened vfs node /// /// Error: /// NoMoreFSHandles - The task has reached the maximum number of allowed vfs handles /// OutOfMemory - There wasn't enough kernel (heap or VMM) memory left to fulfill the request. /// TooBig - The path length is greater than allowed /// InvalidAddress - A pointer that the user task passed is invalid (not mapped, out of bounds etc.) 
/// InvalidFlags - The flags provided don't correspond to a vfs.OpenFlags value /// Refer to vfs.Error for details on what causes vfs errors /// Open, /// Read data from an open vfs file /// /// Arguments: /// node_handle: usize - The file handle returned from the open syscall /// buff_ptr: usize ` - The user/kernel address of the buffer to put the read data in /// buff_len: usize - The size of the buffer /// ignored1: usize - Ignored /// ignored2: usize - Ignored /// /// Return: usize /// The number of bytes read and put into the buffer /// /// Error: /// OutOfBounds - The node handle is outside of the maximum per process /// TooBig - The buffer is bigger than what a user process is allowed to give the kernel /// NotAFile - The handle does not correspond to a file /// Refer to vfs.FileNode.read and vmm.VirtualMemoryManager.copyData for details on what causes other errors /// Read, /// Write data from to open vfs file /// /// Arguments: /// node_handle: usize - The file handle returned from the open syscall /// buff_ptr: usize ` - The user/kernel address of the buffer containing the data to write /// buff_len: usize - The size of the buffer /// ignored1: usize - Ignored /// ignored2: usize - Ignored /// /// Return: usize /// The number of bytes written /// /// Error: /// OutOfBounds - The node handle is outside of the maximum per process /// TooBig - The buffer is bigger than what a user process is allowed to give the kernel /// NotAFile - The handle does not correspond to a file /// Refer to vfs.FileNode.read and vmm.VirtualMemoryManager.copyData for details on what causes other errors /// Write, /// /// Close an open vfs node. 
What it means to "close" depends on the underlying file system, but often it will cause the file to be committed to disk or for a network socket to be closed /// /// Arguments: /// node_handle: usize - The handle to close /// ignored1..4: usize - Ignored /// /// Return: void /// /// Error: /// OutOfBounds - The node handle is outside of the maximum per process /// NotOpened - The node handle hasn't been opened Close, Test1, Test2, Test3, /// /// Get the handler associated with the syscall /// /// Arguments: /// IN self: Syscall - The syscall to get the handler for /// /// Return: Handler /// The handler that takes care of this syscall /// fn getHandler(self: @This()) Handler { return switch (self) { .Open => handleOpen, .Read => handleRead, .Write => handleWrite, .Close => handleClose, .Test1 => handleTest1, .Test2 => handleTest2, .Test3 => handleTest3, }; } /// /// Check if the syscall is just used for testing, and therefore shouldn't be exposed at runtime /// /// Arguments: /// IN self: Syscall - The syscall to check /// /// Return: bool /// true if the syscall is only to be used for testing, else false /// pub fn isTest(self: @This()) bool { return switch (self) { .Test1, .Test2, .Test3 => true, else => false, }; } }; /// A function that can handle a syscall and return a result or an error pub const Handler = fn (ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) anyerror!usize; pub fn init(alloc: std.mem.Allocator) void { allocator = alloc; } /// /// Convert an error code to an instance of anyerror. 
The conversion must be synchronised with toErrorCode /// Passing an error code that does not correspond to an error results in safety-protected undefined behaviour /// /// Arguments: /// IN code: u16 - The erorr code to convert /// /// Return: anyerror /// The error corresponding to the error code /// pub fn fromErrorCode(code: u16) anyerror { return @intToError(code); } /// /// Convert an instance of anyerror to an error code. The conversion must be synchronised with fromErrorCode /// /// Arguments: /// IN err: anyerror - The erorr to convert /// /// Return: u16 /// The error code corresponding to the error /// pub fn toErrorCode(err: anyerror) u16 { return @errorToInt(err); } /// /// Handle a syscall and return a result or error /// /// Arguments: /// IN syscall: Syscall - The syscall to handle /// IN argX: usize - The xth argument that was passed to the syscall /// /// Return: usize /// The syscall result /// /// Error: anyerror /// The error raised by the handler /// pub fn handle(syscall: Syscall, ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) anyerror!usize { return try syscall.getHandler()(ctx, arg1, arg2, arg3, arg4, arg5); } /// /// Get a slice containing the data at an address and length. If the current task is a kernel task then a simple pointer to slice conversion is performed, /// otherwise the slice is allocated on the heap and the data is copied in from user space. /// /// Arguments: /// IN ptr: usize - The slice's address /// IN len: usize - The number of bytes /// /// Error: Error || Allocator.Error || VmmError || BitmapError /// OutOfMemory - There wasn't enough kernel (heap or VMM) memory left to fulfill the request. /// TooBig - The user task requested to have too much data copied /// NotAllocated - The pointer hasn't been mapped by the task /// OutOfBounds - The pointer and length is out of bounds of the task's VMM /// /// Return: []u8 /// The slice of data. 
Will be stack-allocated if the current task is kernel-level, otherwise will be heap-allocated /// fn getData(ptr: usize, len: usize) (Error || std.mem.Allocator.Error || vmm.VmmError || bitmap.BitmapError)![]u8 { if (scheduler.current_task.kernel) { if (try vmm.kernel_vmm.isSet(ptr)) { return @intToPtr([*]u8, ptr)[0..len]; } else { return error.NotAllocated; } } else { if (len > USER_MAX_DATA_LEN) { return Error.TooBig; } var buff = try allocator.alloc(u8, len); errdefer allocator.free(buff); try vmm.kernel_vmm.copyData(scheduler.current_task.vmm, false, buff, ptr); return buff; } } /// Open a new vfs node /// /// Arguments: /// path_ptr: usize - The user/kernel pointer to the file path to open /// path_len: usize - The length of the file path /// flags: usize - The flag specifying what to do with the opened node. Use the integer value of vfs.OpenFlags /// args: usize - The user/kernel pointer to the structure holding the vfs.OpenArgs /// ignored: usize - Ignored /// /// Return: usize /// The handle for the opened vfs node /// /// Error: /// NoMoreFSHandles - The task has reached the maximum number of allowed vfs handles /// OutOfMemory - There wasn't enough kernel (heap or VMM) memory left to fulfill the request. /// TooBig - The path length is greater than allowed /// InvalidAddress - A pointer that the user task passed is invalid (not mapped, out of bounds etc.) 
/// InvalidFlags - The flags provided don't correspond to a vfs.OpenFlags value /// Refer to vfs.Error for details on what causes vfs errors /// fn handleOpen(ctx: *const arch.CpuState, path_ptr: usize, path_len: usize, flags: usize, args: usize, ignored: usize) anyerror!usize { _ = ctx; _ = ignored; const current_task = scheduler.current_task; if (!current_task.hasFreeVFSHandle()) { return Error.NoMoreFSHandles; } // Fetch the open arguments from user/kernel memory var open_args: vfs.OpenArgs = if (args == 0) .{} else blk: { const data = try getData(args, @sizeOf(vfs.OpenArgs)); defer if (!current_task.kernel) allocator.free(data); break :blk std.mem.bytesAsValue(vfs.OpenArgs, data[0..@sizeOf(vfs.OpenArgs)]).*; }; // The symlink target could refer to a location in user memory so convert that too if (open_args.symlink_target) |target| { open_args.symlink_target = try getData(@ptrToInt(target.ptr), target.len); } defer if (!current_task.kernel) if (open_args.symlink_target) |target| allocator.free(target); const open_flags = std.meta.intToEnum(vfs.OpenFlags, flags) catch return error.InvalidFlags; const path = try getData(path_ptr, path_len); defer if (!current_task.kernel) allocator.free(path); const node = try vfs.open(path, true, open_flags, open_args); errdefer vfs.close(node.*); return (try current_task.addVFSHandle(node)) orelse panic(null, "Failed to add a VFS handle to current_task\n", .{}); } /// Read data from an open vfs file /// /// Arguments: /// node_handle: usize - The file handle returned from the open syscall /// buff_ptr: usize ` - The user/kernel address of the buffer to put the read data in /// buff_len: usize - The size of the buffer /// ignored1: usize - Ignored /// ignored2: usize - Ignored /// /// Return: usize /// The number of bytes read and put into the buffer /// /// Error: /// OutOfBounds - The node handle is outside of the maximum per process /// TooBig - The buffer is bigger than what a user process is allowed to give the kernel /// 
NotAFile - The handle does not correspond to a file /// NotOpened - The handle doesn't correspond to an opened file /// Refer to vfs.FileNode.read and vmm.VirtualMemoryManager.copyData for details on what causes other errors /// fn handleRead(ctx: *const arch.CpuState, node_handle: usize, buff_ptr: usize, buff_len: usize, ignored1: usize, ignored2: usize) anyerror!usize { _ = ctx; _ = ignored1; _ = ignored2; if (node_handle >= task.VFS_HANDLES_PER_PROCESS) return error.OutOfBounds; const real_handle = @intCast(task.Handle, node_handle); if (buff_len > USER_MAX_DATA_LEN) { return Error.TooBig; } const current_task = scheduler.current_task; const node_opt = current_task.getVFSHandle(real_handle) catch panic(@errorReturnTrace(), "Failed to get VFS node for handle {}\n", .{real_handle}); if (node_opt) |node| { const file = switch (node.*) { .File => |*f| f, else => return error.NotAFile, }; var buff = if (current_task.kernel) @intToPtr([*]u8, buff_ptr)[0..buff_len] else try allocator.alloc(u8, buff_len); defer if (!current_task.kernel) allocator.free(buff); const bytes_read = try file.read(buff); // TODO: A more performant method would be mapping in the user memory and using that directly. 
Then we wouldn't need to allocate or copy the buffer if (!current_task.kernel) try vmm.kernel_vmm.copyData(current_task.vmm, true, buff, buff_ptr); return bytes_read; } return error.NotOpened; } /// Write data from to open vfs file /// /// Arguments: /// node_handle: usize - The file handle returned from the open syscall /// buff_ptr: usize ` - The user/kernel address of the buffer containing the data to write /// buff_len: usize - The size of the buffer /// ignored1: usize - Ignored /// ignored2: usize - Ignored /// /// Return: usize /// The number of bytes written /// /// Error: /// OutOfBounds - The node handle is outside of the maximum per process /// TooBig - The buffer is bigger than what a user process is allowed to give the kernel /// NotAFile - The handle does not correspond to a file /// NotOpened - The handle doesn't correspond to an opened file /// Refer to vfs.FileNode.read and vmm.VirtualMemoryManager.copyData for details on what causes other errors /// fn handleWrite(ctx: *const arch.CpuState, node_handle: usize, buff_ptr: usize, buff_len: usize, ignored1: usize, ignored2: usize) anyerror!usize { _ = ctx; _ = ignored1; _ = ignored2; if (node_handle >= task.VFS_HANDLES_PER_PROCESS) return error.OutOfBounds; const real_handle = @intCast(task.Handle, node_handle); const current_task = scheduler.current_task; const node_opt = current_task.getVFSHandle(real_handle) catch panic(@errorReturnTrace(), "Failed to get VFS node for handle {}\n", .{real_handle}); if (node_opt) |node| { const file = switch (node.*) { .File => |*f| f, else => return error.NotAFile, }; // TODO: A more performant method would be mapping in the user memory and using that directly. Then we wouldn't need to allocate or copy the buffer var buff = try getData(buff_ptr, buff_len); defer if (!current_task.kernel) allocator.free(buff); return try file.write(buff); } return error.NotOpened; } /// /// Close an open vfs node. 
What it means to "close" depends on the underlying file system, but often it will cause the file to be committed to disk or for a network socket to be closed /// /// Arguments: /// node_handle: usize - The handle to close /// ignored1..4: usize - Ignored /// /// Return: void /// /// Error: /// OutOfBounds - The node handle is outside of the maximum per process /// NotOpened - The node handle hasn't been opened fn handleClose(ctx: *const arch.CpuState, node_handle: usize, ignored1: usize, ignored2: usize, ignored3: usize, ignored4: usize) anyerror!usize { _ = ctx; _ = ignored1; _ = ignored2; _ = ignored3; _ = ignored4; if (node_handle >= task.VFS_HANDLES_PER_PROCESS) return error.OutOfBounds; const real_handle = @intCast(task.Handle, node_handle); const current_task = scheduler.current_task; const node_opt = current_task.getVFSHandle(real_handle) catch panic(@errorReturnTrace(), "Failed to get VFS node for handle {}\n", .{real_handle}); if (node_opt) |node| { current_task.clearVFSHandle(real_handle) catch |e| return switch (e) { error.VFSHandleNotSet, error.OutOfBounds => error.NotOpened, }; vfs.close(node.*); } return error.NotOpened; } pub fn handleTest1(ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) anyerror!usize { // Suppress unused variable warnings _ = ctx; _ = arg1; _ = arg2; _ = arg3; _ = arg4; _ = arg5; return 0; } pub fn handleTest2(ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) anyerror!usize { _ = ctx; return arg1 + arg2 + arg3 + arg4 + arg5; } pub fn handleTest3(ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) anyerror!usize { // Suppress unused variable warnings _ = ctx; _ = arg1; _ = arg2; _ = arg3; _ = arg4; _ = arg5; return error.OutOfMemory; } fn testInitMem(comptime num_vmm_entries: usize, alloc: std.mem.Allocator, map_all: bool) !std.heap.FixedBufferAllocator { // handleOpen requires that the name passed is mapped 
in the VMM // Allocate them within a buffer so we know the start and end address to give to the VMM var buffer = try alloc.alloc(u8, num_vmm_entries * vmm.BLOCK_SIZE); var fixed_buffer_allocator = std.heap.FixedBufferAllocator.init(buffer[0..]); vmm.kernel_vmm = try vmm.VirtualMemoryManager(arch.VmmPayload).init(@ptrToInt(fixed_buffer_allocator.buffer.ptr), @ptrToInt(fixed_buffer_allocator.buffer.ptr) + buffer.len, alloc, arch.VMM_MAPPER, arch.KERNEL_VMM_PAYLOAD); // The PMM is required as well const mem_profile = mem.MemProfile{ .vaddr_end = undefined, .vaddr_start = undefined, .physaddr_start = undefined, .physaddr_end = undefined, .mem_kb = num_vmm_entries * vmm.BLOCK_SIZE / 1024, .fixed_allocator = undefined, .virtual_reserved = &[_]mem.Map{}, .physical_reserved = &[_]mem.Range{}, .modules = &[_]mem.Module{}, }; pmm.init(&mem_profile, alloc); // Set the whole VMM space as mapped so all address within the buffer allocator will be considered valid if (map_all) _ = try vmm.kernel_vmm.alloc(num_vmm_entries, null, .{ .kernel = true, .writable = true, .cachable = true }); return fixed_buffer_allocator; } fn testDeinitMem(alloc: std.mem.Allocator, buffer_allocator: std.heap.FixedBufferAllocator) void { alloc.free(buffer_allocator.buffer); vmm.kernel_vmm.deinit(); pmm.deinit(); } test "getHandler" { try std.testing.expectEqual(Syscall.Test1.getHandler(), handleTest1); try std.testing.expectEqual(Syscall.Test2.getHandler(), handleTest2); try std.testing.expectEqual(Syscall.Test3.getHandler(), handleTest3); try std.testing.expectEqual(Syscall.Open.getHandler(), handleOpen); try std.testing.expectEqual(Syscall.Close.getHandler(), handleClose); try std.testing.expectEqual(Syscall.Read.getHandler(), handleRead); try std.testing.expectEqual(Syscall.Write.getHandler(), handleWrite); } test "handle" { const state = arch.CpuState.empty(); try std.testing.expectEqual(@as(usize, 0), try handle(.Test1, &state, 0, 0, 0, 0, 0)); try std.testing.expectEqual(@as(usize, 1 + 2 + 3 + 4 + 
// NOTE(review): this chunk starts mid-way through a test that is truncated in this view;
// the fragment below is kept verbatim and untouched.
5), try handle(.Test2, &state, 1, 2, 3, 4, 5)); try std.testing.expectError(error.OutOfMemory, handle(.Test3, &state, 0, 0, 0, 0, 0)); }

// Checks that handleOpen creates files, dirs and nested files in the test fs tree,
// and that an existing file can be re-opened with NO_CREATION.
test "handleOpen" {
    allocator = std.testing.allocator;
    var testfs = try vfs.testInitFs(allocator);
    defer allocator.destroy(testfs);
    defer testfs.deinit();
    testfs.instance = 1;
    try vfs.setRoot(testfs.tree.val);
    // Path strings are allocated from a fixed buffer so their addresses are known/mapped.
    // testInitMem(1, ...) — presumably reserves one region/page of test memory; confirm against the helper.
    var fixed_buffer_allocator = try testInitMem(1, allocator, true);
    var buffer_allocator = fixed_buffer_allocator.allocator();
    defer testDeinitMem(allocator, fixed_buffer_allocator);
    scheduler.current_task = try task.Task.create(0, true, undefined, allocator, true);
    defer scheduler.current_task.destroy(allocator);
    var current_task = scheduler.current_task;
    const empty = arch.CpuState.empty();
    // Creating a file
    var name1 = try buffer_allocator.dupe(u8, "/abc.txt");
    var test_handle = @intCast(task.Handle, try handleOpen(&empty, @ptrToInt(name1.ptr), name1.len, @enumToInt(vfs.OpenFlags.CREATE_FILE), 0, undefined));
    var test_node = (try current_task.getVFSHandle(test_handle)).?;
    try testing.expectEqual(testfs.tree.children.items.len, 1);
    var tree = testfs.tree.children.items[0];
    try testing.expect(tree.val.isFile() and test_node.isFile());
    try testing.expectEqual(&test_node.File, &tree.val.File);
    try testing.expect(std.mem.eql(u8, tree.name, "abc.txt"));
    try testing.expectEqual(tree.data, null);
    try testing.expectEqual(tree.children.items.len, 0);
    // Creating a dir
    var name2 = try buffer_allocator.dupe(u8, "/def");
    test_handle = @intCast(task.Handle, try handleOpen(&empty, @ptrToInt(name2.ptr), name2.len, @enumToInt(vfs.OpenFlags.CREATE_DIR), 0, undefined));
    test_node = (try current_task.getVFSHandle(test_handle)).?;
    try testing.expectEqual(testfs.tree.children.items.len, 2);
    tree = testfs.tree.children.items[1];
    try testing.expect(tree.val.isDir() and test_node.isDir());
    try testing.expectEqual(&test_node.Dir, &tree.val.Dir);
    try testing.expect(std.mem.eql(u8, tree.name, "def"));
    try testing.expectEqual(tree.data, null);
    try testing.expectEqual(tree.children.items.len, 0);
    // Creating a file under a new dir
    var name3 = try buffer_allocator.dupe(u8, "/def/ghi.zig");
    test_handle = @intCast(task.Handle, try handleOpen(&empty, @ptrToInt(name3.ptr), name3.len, @enumToInt(vfs.OpenFlags.CREATE_FILE), 0, undefined));
    test_node = (try current_task.getVFSHandle(test_handle)).?;
    try testing.expectEqual(testfs.tree.children.items[1].children.items.len, 1);
    tree = testfs.tree.children.items[1].children.items[0];
    try testing.expect(tree.val.isFile() and test_node.isFile());
    try testing.expectEqual(&test_node.File, &tree.val.File);
    try testing.expect(std.mem.eql(u8, tree.name, "ghi.zig"));
    try testing.expectEqual(tree.data, null);
    try testing.expectEqual(tree.children.items.len, 0);
    // Opening an existing file
    test_handle = @intCast(task.Handle, try handleOpen(&empty, @ptrToInt(name3.ptr), name3.len, @enumToInt(vfs.OpenFlags.NO_CREATION), 0, undefined));
    test_node = (try current_task.getVFSHandle(test_handle)).?;
    // No new node was created — still exactly one child under /def.
    try testing.expectEqual(testfs.tree.children.items[1].children.items.len, 1);
    try testing.expect(test_node.isFile());
    try testing.expectEqual(&test_node.File, &tree.val.File);
}

// Checks handleRead for exact-size, oversized, undersized and zero-length buffers,
// and that reading through a symlink reads the target file's contents.
test "handleRead" {
    allocator = std.testing.allocator;
    var testfs = try vfs.testInitFs(allocator);
    defer allocator.destroy(testfs);
    defer testfs.deinit();
    testfs.instance = 1;
    try vfs.setRoot(testfs.tree.val);
    var fixed_buffer_allocator = try testInitMem(1, allocator, true);
    var buffer_allocator = fixed_buffer_allocator.allocator();
    defer testDeinitMem(allocator, fixed_buffer_allocator);
    scheduler.current_task = try task.Task.create(0, true, &vmm.kernel_vmm, allocator, true);
    defer scheduler.current_task.destroy(allocator);
    _ = scheduler.current_task;
    const empty = arch.CpuState.empty();
    var test_file_path = try buffer_allocator.dupe(u8, "/foo.txt");
    var test_file = @intCast(task.Handle, try handleOpen(&empty, @ptrToInt(test_file_path.ptr), test_file_path.len, @enumToInt(vfs.OpenFlags.CREATE_FILE), 0, undefined));
    // Seed the file's backing data directly in the test fs tree.
    var f_data = &testfs.tree.children.items[0].data;
    var str = "test123";
    f_data.* = try testing.allocator.dupe(u8, str);
    var buffer: [str.len]u8 = undefined;
    {
        // Buffer exactly the size of the data.
        const length = try handleRead(&empty, test_file, @ptrToInt(&buffer[0]), buffer.len, 0, undefined);
        try testing.expect(std.mem.eql(u8, str, buffer[0..length]));
    }
    {
        // Requested length larger than the data; read is clamped to the data length.
        const length = try handleRead(&empty, test_file, @ptrToInt(&buffer[0]), buffer.len + 1, 0, undefined);
        try testing.expect(std.mem.eql(u8, str, buffer[0..length]));
    }
    {
        const length = try handleRead(&empty, test_file, @ptrToInt(&buffer[0]), buffer.len + 3, 0, undefined);
        try testing.expect(std.mem.eql(u8, str, buffer[0..length]));
    }
    {
        // Requested length smaller than the data; only that many bytes come back.
        const length = try handleRead(&empty, test_file, @ptrToInt(&buffer[0]), buffer.len - 1, 0, undefined);
        try testing.expect(std.mem.eql(u8, str[0 .. str.len - 1], buffer[0..length]));
    }
    {
        // Zero-length read returns zero bytes.
        const length = try handleRead(&empty, test_file, @ptrToInt(&buffer[0]), 0, 0, undefined);
        try testing.expect(std.mem.eql(u8, str[0..0], buffer[0..length]));
    }
    // Try reading from a symlink
    var args = try buffer_allocator.create(vfs.OpenArgs);
    args.* = vfs.OpenArgs{ .symlink_target = test_file_path };
    var link = try buffer_allocator.dupe(u8, "/link");
    var test_link = @intCast(task.Handle, try handleOpen(&empty, @ptrToInt(link.ptr), link.len, @enumToInt(vfs.OpenFlags.CREATE_SYMLINK), @ptrToInt(args), undefined));
    {
        const length = try handleRead(&empty, test_link, @ptrToInt(&buffer[0]), buffer.len, 0, undefined);
        try testing.expect(std.mem.eql(u8, str[0..str.len], buffer[0..length]));
    }
}

// Checks the error paths of handleRead: out-of-range handles, unopened handles,
// reading from a directory, and a user buffer exceeding USER_MAX_DATA_LEN.
test "handleRead errors" {
    allocator = std.testing.allocator;
    var testfs = try vfs.testInitFs(allocator);
    // Inner scope so all defers run before the final leak check below.
    {
        defer allocator.destroy(testfs);
        defer testfs.deinit();
        testfs.instance = 1;
        try vfs.setRoot(testfs.tree.val);
        const empty = arch.CpuState.empty();
        // The data we pass to handleRead needs to be mapped within the VMM, so we need to know their address
        // Allocating the data within a fixed buffer allocator is the best way to know the address of the data
        var fixed_buffer_allocator = try testInitMem(3, allocator, true);
        var buffer_allocator = fixed_buffer_allocator.allocator();
        defer testDeinitMem(allocator, fixed_buffer_allocator);
        scheduler.current_task = try task.Task.create(0, true, &vmm.kernel_vmm, allocator, true);
        defer scheduler.current_task.destroy(allocator);
        // Invalid file handle
        try testing.expectError(error.OutOfBounds, handleRead(&empty, task.VFS_HANDLES_PER_PROCESS, 0, 0, 0, 0));
        try testing.expectError(error.OutOfBounds, handleRead(&empty, task.VFS_HANDLES_PER_PROCESS + 1, 0, 0, 0, 0));
        // Unopened file
        try testing.expectError(error.NotOpened, handleRead(&empty, 0, 0, 0, 0, 0));
        try testing.expectError(error.NotOpened, handleRead(&empty, 1, 0, 0, 0, 0));
        try testing.expectError(error.NotOpened, handleRead(&empty, task.VFS_HANDLES_PER_PROCESS - 1, 0, 0, 0, 0));
        // Reading from a dir
        const name = try buffer_allocator.dupe(u8, "/dir");
        const node = try handleOpen(&empty, @ptrToInt(name.ptr), name.len, @enumToInt(vfs.OpenFlags.CREATE_DIR), 0, 0);
        try testing.expectError(error.NotAFile, handleRead(&empty, node, 0, 0, 0, 0));
        // User buffer is too big
        const name2 = try buffer_allocator.dupe(u8, "/file.txt");
        const node2 = try handleOpen(&empty, @ptrToInt(name2.ptr), name2.len, @enumToInt(vfs.OpenFlags.CREATE_FILE), 0, 0);
        // Switch the task to user mode so the user-size limit applies.
        scheduler.current_task.kernel = false;
        try testing.expectError(Error.TooBig, handleRead(&empty, node2, 0, USER_MAX_DATA_LEN + 1, 0, 0));
    }
    try testing.expect(!testing.allocator_instance.detectLeaks());
}

// Checks that handleWrite stores the written bytes in the backing fs tree,
// both at the root and inside a subdirectory, and reports the full length written.
test "handleWrite" {
    allocator = std.testing.allocator;
    var testfs = try vfs.testInitFs(allocator);
    defer allocator.destroy(testfs);
    defer testfs.deinit();
    testfs.instance = 1;
    try vfs.setRoot(testfs.tree.val);
    var fixed_buffer_allocator = try testInitMem(1, allocator, true);
    var buffer_allocator = fixed_buffer_allocator.allocator();
    defer testDeinitMem(allocator, fixed_buffer_allocator);
    scheduler.current_task = try task.Task.create(0, true, &vmm.kernel_vmm, allocator, true);
    defer scheduler.current_task.destroy(allocator);
    const empty = arch.CpuState.empty();
    // Open test file
    const name = try buffer_allocator.dupe(u8, "/abc.txt");
    const node = try handleOpen(&empty, @ptrToInt(name.ptr), name.len, @enumToInt(vfs.OpenFlags.CREATE_FILE), 0, undefined);
    // Write
    const data = try buffer_allocator.dupe(u8, "test_data 123");
    const res = try handleWrite(&empty, node, @ptrToInt(data.ptr), data.len, 0, 0);
    try testing.expectEqual(res, data.len);
    try testing.expectEqualSlices(u8, data, testfs.tree.children.items[0].data.?);
    // Write to a file in a folder
    const name2 = try buffer_allocator.dupe(u8, "/dir");
    _ = try handleOpen(&empty, @ptrToInt(name2.ptr), name2.len, @enumToInt(vfs.OpenFlags.CREATE_DIR), 0, undefined);
    const name3 = try buffer_allocator.dupe(u8, "/dir/def.txt");
    const node3 = try handleOpen(&empty, @ptrToInt(name3.ptr), name3.len, @enumToInt(vfs.OpenFlags.CREATE_FILE), 0, undefined);
    const data2 = try buffer_allocator.dupe(u8, "some more test data!");
    const res2 = try handleWrite(&empty, node3, @ptrToInt(data2.ptr), data2.len, 0, 0);
    try testing.expectEqual(res2, data2.len);
    try testing.expectEqualSlices(u8, data2, testfs.tree.children.items[1].children.items[0].data.?);
}

// Checks the error paths of handleWrite, mirroring "handleRead errors".
test "handleWrite errors" {
    allocator = std.testing.allocator;
    var testfs = try vfs.testInitFs(allocator);
    // Inner scope so all defers run before the final leak check below.
    {
        defer allocator.destroy(testfs);
        defer testfs.deinit();
        testfs.instance = 1;
        try vfs.setRoot(testfs.tree.val);
        const empty = arch.CpuState.empty();
        // The data we pass to handleWrite needs to be mapped within the VMM, so we need to know their address
        // Allocating the data within a fixed buffer allocator is the best way to know the address of the data
        var fixed_buffer_allocator = try testInitMem(3, allocator, true);
        var buffer_allocator = fixed_buffer_allocator.allocator();
        defer testDeinitMem(allocator, fixed_buffer_allocator);
        scheduler.current_task = try task.Task.create(0, true, &vmm.kernel_vmm, allocator, true);
        defer scheduler.current_task.destroy(allocator);
        // Invalid file handle
        try testing.expectError(error.OutOfBounds, handleWrite(&empty, task.VFS_HANDLES_PER_PROCESS, 0, 0, 0, 0));
        try testing.expectError(error.OutOfBounds, handleWrite(&empty, task.VFS_HANDLES_PER_PROCESS + 1, 0, 0, 0, 0));
        // Unopened file
        try testing.expectError(error.NotOpened, handleWrite(&empty, 0, 0, 0, 0, 0));
        try testing.expectError(error.NotOpened, handleWrite(&empty, 1, 0, 0, 0, 0));
        try testing.expectError(error.NotOpened, handleWrite(&empty, task.VFS_HANDLES_PER_PROCESS - 1, 0, 0, 0, 0));
        // Writing to a dir
        const name = try buffer_allocator.dupe(u8, "/dir");
        const node = try handleOpen(&empty, @ptrToInt(name.ptr), name.len, @enumToInt(vfs.OpenFlags.CREATE_DIR), 0, 0);
        try testing.expectError(error.NotAFile, handleWrite(&empty, node, 0, 0, 0, 0));
        // User buffer is too big
        const name2 = try buffer_allocator.dupe(u8, "/file.txt");
        const node2 = try handleOpen(&empty, @ptrToInt(name2.ptr), name2.len, @enumToInt(vfs.OpenFlags.CREATE_FILE), 0, 0);
        scheduler.current_task.kernel = false;
        try testing.expectError(Error.TooBig, handleWrite(&empty, node2, 0, USER_MAX_DATA_LEN + 1, 0, 0));
    }
    try testing.expect(!testing.allocator_instance.detectLeaks());
}

// Checks the error paths of handleOpen: handle exhaustion, oversized user path,
// unmapped user/kernel addresses, and an out-of-range flags value.
test "handleOpen errors" {
    allocator = std.testing.allocator;
    var testfs = try vfs.testInitFs(allocator);
    // Inner scope so all defers run before the final leak check below.
    {
        defer allocator.destroy(testfs);
        defer testfs.deinit();
        testfs.instance = 1;
        try vfs.setRoot(testfs.tree.val);
        const empty = arch.CpuState.empty();
        // The data we pass to handleOpen needs to be mapped within the VMM, so we need to know their address
        // Allocating the data within a fixed buffer allocator is the best way to know the address of the data
        var fixed_buffer_allocator = try testInitMem(3, allocator, false);
        var buffer_allocator = fixed_buffer_allocator.allocator();
        defer testDeinitMem(allocator, fixed_buffer_allocator);
        scheduler.current_task = try task.Task.create(0, true, &vmm.kernel_vmm, allocator, true);
        defer scheduler.current_task.destroy(allocator);
        // Check opening with no free file handles left
        const free_handles = scheduler.current_task.file_handles.num_free_entries;
        scheduler.current_task.file_handles.num_free_entries = 0;
        try testing.expectError(Error.NoMoreFSHandles, handleOpen(&empty, 0, 0, 0, 0, 0));
        scheduler.current_task.file_handles.num_free_entries = free_handles;
        // Using a path that is too long
        scheduler.current_task.kernel = false;
        try testing.expectError(Error.TooBig, handleOpen(&empty, 0, USER_MAX_DATA_LEN + 1, 0, 0, 0));
        // Unallocated user address
        const test_alloc = try buffer_allocator.alloc(u8, 1);
        // The kernel VMM and task VMM need to have their buffers mapped, so we'll temporarily use the buffer allocator since it operates within a known address space
        allocator = buffer_allocator;
        try testing.expectError(error.NotAllocated, handleOpen(&empty, @ptrToInt(test_alloc.ptr), 1, 0, 0, 0));
        allocator = std.testing.allocator;
        // Unallocated kernel address
        scheduler.current_task.kernel = true;
        try testing.expectError(error.NotAllocated, handleOpen(&empty, @ptrToInt(test_alloc.ptr), 1, 0, 0, 0));
        // Invalid flag enum value
        try testing.expectError(error.InvalidFlags, handleOpen(&empty, @ptrToInt(test_alloc.ptr), 1, 999, 0, 0));
    }
    try testing.expect(!testing.allocator_instance.detectLeaks());
}
0
repos/pluto/src
repos/pluto/src/kernel/kmain.zig
const std = @import("std");
const kmain_log = std.log.scoped(.kmain);
const builtin = @import("builtin");
const is_test = builtin.is_test;
const build_options = @import("build_options");
const arch = @import("arch.zig").internals;
const tty = @import("tty.zig");
const log_root = @import("log.zig");
const pmm = @import("pmm.zig");
const serial = @import("serial.zig");
const vmm = @import("vmm.zig");
const mem = @import("mem.zig");
const panic_root = @import("panic.zig");
const task = @import("task.zig");
const heap = @import("heap.zig");
const scheduler = @import("scheduler.zig");
const vfs = @import("filesystem/vfs.zig");
const initrd = @import("filesystem/initrd.zig");
const keyboard = @import("keyboard.zig");
const syscalls = @import("syscalls.zig");
const Allocator = std.mem.Allocator;

// Pull in the architecture boot code (which provides the real entry point) for
// non-test builds. Only x86 (i386) is supported.
comptime {
    if (!is_test) {
        switch (builtin.cpu.arch) {
            .i386 => _ = @import("arch/x86/boot.zig"),
            else => unreachable,
        }
    }
}

// This is for unit testing as we need to export KERNEL_ADDR_OFFSET as it is no longer available
// from the linker script
// These will need to be kept up to date with the debug logs in the mem init.
export var KERNEL_ADDR_OFFSET: u32 = if (builtin.is_test) 0xC0000000 else undefined;
export var KERNEL_STACK_START: u32 = if (builtin.is_test) 0xC014A000 else undefined;
export var KERNEL_STACK_END: u32 = if (builtin.is_test) 0xC014E000 else undefined;
export var KERNEL_VADDR_START: u32 = if (builtin.is_test) 0xC0100000 else undefined;
export var KERNEL_VADDR_END: u32 = if (builtin.is_test) 0xC014E000 else undefined;
export var KERNEL_PHYSADDR_START: u32 = if (builtin.is_test) 0x100000 else undefined;
export var KERNEL_PHYSADDR_END: u32 = if (builtin.is_test) 0x14E000 else undefined;

// Just call the panic function, as this need to be in the root source file.
// The std library looks up `root.panic`, so this forwarder must live here.
pub fn panic(msg: []const u8, error_return_trace: ?*std.builtin.StackTrace) noreturn {
    @setCold(true);
    panic_root.panic(error_return_trace, "{s}", .{msg});
}

// Log everything at debug level and above.
pub const log_level: std.log.Level = .debug;

// Define root.log to override the std implementation.
// Prefixes every message with its scope tag before handing off to log_root.
pub fn log(
    comptime level: std.log.Level,
    comptime scope: @TypeOf(.EnumLiteral),
    comptime format: []const u8,
    args: anytype,
) void {
    log_root.log(level, "(" ++ @tagName(scope) ++ "): " ++ format, args);
}

// The kernel heap; initialised part-way through kmain and also used by initStage2.
var kernel_heap: heap.FreeListAllocator = undefined;

// Stage 1 kernel entry point. Brings up, in order: serial logging, the memory
// profile, the PMM, the kernel VMM, the architecture, panic symbols, the kernel
// heap, syscalls, the TTY, the keyboard, the ramdisk/VFS and the scheduler, then
// schedules initStage2 and spins. The ordering is load-bearing: each step depends
// on the ones before it (e.g. the heap needs the VMM; the VFS root needs the ramdisk).
export fn kmain(boot_payload: arch.BootPayload) void {
    const serial_stream = serial.init(boot_payload);
    log_root.init(serial_stream);
    const mem_profile = arch.initMem(boot_payload) catch |e| {
        panic_root.panic(@errorReturnTrace(), "Failed to initialise memory profile: {}", .{e});
    };
    var fixed_allocator = mem_profile.fixed_allocator;
    pmm.init(&mem_profile, fixed_allocator.allocator());
    var kernel_vmm = vmm.init(&mem_profile, fixed_allocator.allocator()) catch |e| {
        panic_root.panic(@errorReturnTrace(), "Failed to initialise kernel VMM: {}", .{e});
    };
    kmain_log.info("Init arch " ++ @tagName(builtin.cpu.arch) ++ "\n", .{});
    arch.init(&mem_profile);
    kmain_log.info("Arch init done\n", .{});
    panic_root.initSymbols(&mem_profile, fixed_allocator.allocator()) catch |e| {
        panic_root.panic(@errorReturnTrace(), "Failed to initialise panic symbols: {}\n", .{e});
    };
    // The VMM and mem runtime tests can't happen until the architecture has initialised itself
    switch (build_options.test_mode) {
        .Initialisation => vmm.runtimeTests(arch.VmmPayload, kernel_vmm, &mem_profile),
        .Memory => arch.runtimeTestChecksMem(kernel_vmm),
        else => {},
    }
    // Give the kernel heap 10% of the available memory. This can be fine-tuned as time goes on.
    var heap_size = mem_profile.mem_kb / 10 * 1024;
    // The heap size must be a power of two so find the power of two smaller than or equal to the heap_size
    if (!std.math.isPowerOfTwo(heap_size)) {
        heap_size = std.math.floorPowerOfTwo(usize, heap_size);
    }
    kernel_heap = heap.init(arch.VmmPayload, kernel_vmm, vmm.Attributes{ .kernel = true, .writable = true, .cachable = true }, heap_size) catch |e| {
        panic_root.panic(@errorReturnTrace(), "Failed to initialise kernel heap: {}\n", .{e});
    };
    syscalls.init(kernel_heap.allocator());
    tty.init(kernel_heap.allocator(), boot_payload);
    // arch_kb is optional: not every architecture provides a keyboard.
    var arch_kb = keyboard.init(fixed_allocator.allocator()) catch |e| {
        panic_root.panic(@errorReturnTrace(), "Failed to inititalise keyboard: {}\n", .{e});
    };
    if (arch_kb) |kb| {
        keyboard.addKeyboard(kb) catch |e| panic_root.panic(@errorReturnTrace(), "Failed to add architecture keyboard: {}\n", .{e});
    }
    // Get the ramdisk module
    const rd_module = for (mem_profile.modules) |module| {
        if (std.mem.eql(u8, module.name, "initrd.ramdisk")) {
            break module;
        }
    } else null;
    if (rd_module) |module| {
        // Load the ram disk
        const rd_len: usize = module.region.end - module.region.start;
        const ramdisk_bytes = @intToPtr([*]u8, module.region.start)[0..rd_len];
        var initrd_stream = std.io.fixedBufferStream(ramdisk_bytes);
        var ramdisk_filesystem = initrd.InitrdFS.init(&initrd_stream, kernel_heap.allocator()) catch |e| {
            panic_root.panic(@errorReturnTrace(), "Failed to initialise ramdisk: {}\n", .{e});
        };
        // Can now free the module as new memory is allocated for the ramdisk filesystem
        kernel_vmm.free(module.region.start) catch |e| {
            panic_root.panic(@errorReturnTrace(), "Failed to free ramdisk: {}\n", .{e});
        };
        // Need to init the vfs after the ramdisk as we need the root node from the ramdisk filesystem
        vfs.setRoot(ramdisk_filesystem.root_node) catch |e| {
            panic_root.panic(@errorReturnTrace(), "Ramdisk root node isn't a directory node: {}\n", .{e});
        };
    }
    scheduler.init(kernel_heap.allocator(), &mem_profile) catch |e| {
        panic_root.panic(@errorReturnTrace(), "Failed to initialise scheduler: {}\n", .{e});
    };
    // Initialisation is finished, now does other stuff
    kmain_log.info("Init\n", .{});
    // Main initialisation finished so can enable interrupts
    arch.enableInterrupts();
    kmain_log.info("Creating init2\n", .{});
    // Create a init2 task
    var stage2_task = task.Task.create(@ptrToInt(initStage2), true, kernel_vmm, kernel_heap.allocator(), true) catch |e| {
        panic_root.panic(@errorReturnTrace(), "Failed to create init stage 2 task: {}\n", .{e});
    };
    scheduler.scheduleTask(stage2_task, kernel_heap.allocator()) catch |e| {
        panic_root.panic(@errorReturnTrace(), "Failed to schedule init stage 2 task: {}\n", .{e});
    };
    // Can't return for now, later this can return maybe
    // TODO: Maybe make this the idle task
    arch.spinWait();
}

///
/// Stage 2 initialisation. This will initialise main kernel features after the architecture
/// initialisation.
///
fn initStage2() noreturn {
    tty.clear();
    // NOTE(review): the banner's internal spacing was flattened in this dump; restore
    // the original ASCII art alignment from version control if it matters.
    const logo =
        \\ _____ _ _ _ _______ ____
        \\ | __ \ | | | | | | |__ __| / __ \
        \\ | |__) | | | | | | | | | | | | |
        \\ | ___/ | | | | | | | | | | | |
        \\ | | | |____ | |__| | | | | |__| |
        \\ |_| |______| \____/ |_| \____/
    ;
    tty.print("{s}\n\n", .{logo});
    tty.print("Hello Pluto from kernel :)\n", .{});
    const devices = arch.getDevices(kernel_heap.allocator()) catch |e| {
        panic_root.panic(@errorReturnTrace(), "Unable to get device list: {}\n", .{e});
    };
    for (devices) |device| {
        device.print();
    }
    // Signal the runtime test harness that initialisation completed.
    switch (build_options.test_mode) {
        .Initialisation => {
            kmain_log.info("SUCCESS\n", .{});
        },
        else => {},
    }
    // Can't return for now, later this can return maybe
    arch.spinWait();
}

test "" {
    _ = @import("filesystem/fat32.zig");
}
0
repos/pluto/src
repos/pluto/src/kernel/log.zig
const std = @import("std");
const fmt = std.fmt;
const build_options = @import("build_options");
const Serial = @import("serial.zig").Serial;
const scheduler = @import("scheduler.zig");

/// Errors that a log write can produce. Serial writes cannot fail, so this set is empty.
const LoggingError = error{};

/// Writer type that lets std.fmt stream formatted output straight into logCallback.
const Writer = std.io.Writer(void, LoggingError, logCallback);

/// The COM serial port every log message is written to. Assigned once by init().
var serial: Serial = undefined;

///
/// Sink for the std format machinery: forwards the formatted bytes to the serial port.
///
/// Arguments:
///     IN context: void    - Unused; this writer carries no state.
///     IN str: []const u8  - The bytes to emit.
///
/// Return: usize
///     Always str.len — the serial port consumes every byte it is given.
///
/// Error: LoggingError
///     None; the error set is empty.
///
fn logCallback(context: void, str: []const u8) LoggingError!usize {
    _ = context;
    serial.writeBytes(str);
    return str.len;
}

///
/// Write one message to the log stream, tagged with its logging level.
///
/// Arguments:
///     IN comptime level: std.log.Level - The level to tag the message with.
///     IN comptime format: []const u8   - Standard std.fmt format string.
///     IN args: anytype                 - The values referenced by the format string.
///
pub fn log(comptime level: std.log.Level, comptime format: []const u8, args: anytype) void {
    const writer = Writer{ .context = {} };
    // Keep the whole message contiguous on the wire: no task switch may interleave
    // another message while this one is being formatted and written.
    scheduler.taskSwitching(false);
    defer scheduler.taskSwitching(true);
    fmt.format(writer, "[" ++ @tagName(level) ++ "] " ++ format, args) catch unreachable;
}

///
/// Initialise the logging stream using the given Serial instance.
///
/// Arguments:
///     IN ser: Serial - The serial instance to log through from now on.
///
pub fn init(ser: Serial) void {
    serial = ser;
    // Exercise every log level when built in initialisation-test mode.
    if (build_options.test_mode == .Initialisation) runtimeTests();
}

///
/// Runtime self-test: emit a message (with and without args) at every log level.
///
fn runtimeTests() void {
    inline for (@typeInfo(std.log.Level).Enum.fields) |field| {
        const level = @field(std.log.Level, field.name);
        log(level, "Test " ++ field.name ++ " level\n", .{});
        log(level, "Test " ++ field.name ++ " level with args {s}, {}\n", .{ "a", @as(u32, 1) });
    }
}
0
repos/pluto/src/kernel
repos/pluto/src/kernel/filesystem/vfs.zig
const std = @import("std");
const testing = std.testing;
const TailQueue = std.TailQueue;
const ArrayList = std.ArrayList;
const Allocator = std.mem.Allocator;

/// Flags specifying what to do when opening a file or directory
pub const OpenFlags = enum {
    /// Create a directory if it doesn't exist
    CREATE_DIR,
    /// Create a file if it doesn't exist
    CREATE_FILE,
    /// Create a symlink if it doesn't exist
    CREATE_SYMLINK,
    /// Do not create a file or directory
    NO_CREATION,
};

/// The args used when opening new or existing fs nodes
pub const OpenArgs = struct {
    /// What to set the target to when creating a symlink
    symlink_target: ?[]const u8 = null,
};

/// A filesystem node that could either be a directory or a file
pub const Node = union(enum) {
    /// The file node if this represents a file
    File: FileNode,
    /// The dir node if this represents a directory
    Dir: DirNode,
    /// The absolute path that this symlink is linked to
    Symlink: SymlinkNode,

    const Self = @This();

    ///
    /// Check if this node is a directory
    ///
    /// Arguments:
    ///     IN self: Self - The node being checked
    ///
    /// Return: bool
    ///     True if this is a directory else false
    ///
    pub fn isDir(self: Self) bool {
        return switch (self) {
            .Dir => true,
            else => false,
        };
    }

    ///
    /// Check if this node is a file
    ///
    /// Arguments:
    ///     IN self: Self - The node being checked
    ///
    /// Return: bool
    ///     True if this is a file else false
    ///
    pub fn isFile(self: Self) bool {
        return switch (self) {
            .File => true,
            else => false,
        };
    }

    ///
    /// Check if this node is a symlink
    ///
    /// Arguments:
    ///     IN self: Self - The node being checked
    ///
    /// Return: bool
    ///     True if this is a symlink else false
    ///
    pub fn isSymlink(self: Self) bool {
        return switch (self) {
            .Symlink => true,
            else => false,
        };
    }
};

/// The functions of a filesystem (a vtable of function types plus the function fields below)
pub const FileSystem = struct {
    const Self = @This();

    ///
    /// Close an open node, performing any last operations required to save data etc.
    ///
    /// Arguments:
    ///     IN self: *const FileSystem - The filesystem in question being operated on.
    ///     IN node: *const Node - The node being closed.
    ///
    const Close = fn (self: *const Self, node: *const Node) void;

    ///
    /// Read from an open file
    ///
    /// Arguments:
    ///     IN self: *const FileSystem - The filesystem in question being operated on
    ///     IN node: *const FileNode - The file being read from
    ///     IN bytes: []u8 - The buffer to fill data from the file with
    ///
    /// Return: usize
    ///     The length of the actual data read. This being < bytes.len is not considered an error. It is never > bytes.len
    ///
    /// Error: Allocator.Error || Error
    ///     Allocator.Error.OutOfMemory - There wasn't enough memory to fulfill the request
    ///     Error.NotOpened - If the node provided is not one that the file system recognised as being opened.
    ///
    const Read = fn (self: *const Self, node: *const FileNode, buffer: []u8) (Allocator.Error || Error)!usize;

    ///
    /// Write to an open file
    ///
    /// Arguments:
    ///     IN self: *const FileSystem - The filesystem in question being operated on
    ///     IN node: *const FileNode - The file being written to
    ///     IN bytes: []const u8 - The bytes to write to the file
    ///
    /// Return: usize
    ///     The length of the actual data written to the file. This being < bytes.len is not considered an error. It is never > bytes.len
    ///
    /// Error: Allocator.Error
    ///     Allocator.Error.OutOfMemory - There wasn't enough memory to fulfill the request
    ///
    const Write = fn (self: *const Self, node: *const FileNode, bytes: []const u8) (Allocator.Error || Error)!usize;

    ///
    /// Open a file/dir within the filesystem. The result can then be used for write, read or close operations
    ///
    /// Arguments:
    ///     IN self: *const FileSystem - The filesystem in question being operated on
    ///     IN node: *const DirNode - The directory under which to open the file/dir from
    ///     IN name: []const u8 - The name of the file to open
    ///     IN flags: OpenFlags - The flags to consult when opening the file
    ///     IN args: OpenArgs - The arguments to use when creating a node
    ///
    /// Return: *const Node
    ///     The node representing the file/dir opened
    ///
    /// Error: Allocator.Error || Error
    ///     Allocator.Error.OutOfMemory - There wasn't enough memory to fulfill the request
    ///     Error.NoSuchFileOrDir - The file/dir by that name doesn't exist and the flags didn't specify to create it
    ///     Error.NoSymlinkTarget - A symlink was created but no symlink target was provided in the args
    ///
    const Open = fn (self: *const Self, node: *const DirNode, name: []const u8, flags: OpenFlags, args: OpenArgs) (Allocator.Error || Error)!*Node;

    ///
    /// Get the node representing the root of the filesystem. Used when mounting to bind the mount point to the root of the mounted fs
    ///
    /// Arguments:
    ///     IN self: *const Self - The filesystem to get the root node for
    ///
    /// Return: *const DirNode
    ///     The root directory node
    ///
    const GetRootNode = fn (self: *const Self) *const DirNode;

    /// The close function
    close: Close,
    /// The read function
    read: Read,
    /// The write function
    write: Write,
    /// The open function
    open: Open,
    /// The function for retrieving the root node
    getRootNode: GetRootNode,

    /// Points to a usize field within the underlying filesystem so that the close, read, write and open functions can access its low-level implementation using @fieldParentPtr.
    /// For example, this could point to a usize field within a FAT32 filesystem data structure, which stores all the data and state that is needed in order to interact with a physical disk
    /// The value of instance is reserved for future use and so should be left as 0
    instance: *usize,
};

/// A node representing a file within a filesystem
pub const FileNode = struct {
    /// The filesystem that handles operations on this file
    fs: *const FileSystem,

    /// See the documentation for FileSystem.Read
    pub fn read(self: *const FileNode, bytes: []u8) (Allocator.Error || Error)!usize {
        return self.fs.read(self.fs, self, bytes);
    }

    /// See the documentation for FileSystem.Close
    pub fn close(self: *const FileNode) void {
        // TODO: Use @fieldParentPtr() once implemented for unions
        return self.fs.close(self.fs, @ptrCast(*const Node, self));
    }

    /// See the documentation for FileSystem.Write
    pub fn write(self: *const FileNode, bytes: []const u8) (Allocator.Error || Error)!usize {
        return self.fs.write(self.fs, self, bytes);
    }
};

/// A node representing a directory within a filesystem
pub const DirNode = struct {
    /// The filesystem that handles operations on this directory
    fs: *const FileSystem,
    /// The directory that this directory is mounted to, else null
    mount: ?*const DirNode,

    /// See the documentation for FileSystem.Open
    pub fn open(self: *const DirNode, name: []const u8, flags: OpenFlags, args: OpenArgs) (Allocator.Error || Error)!*Node {
        // If another filesystem is mounted here, forward the open to its root.
        var node = self.mount orelse self;
        return node.fs.open(node.fs, node, name, flags, args);
    }

    /// See the documentation for FileSystem.Close
    pub fn close(self: *const DirNode) void {
        var fs = self.fs;
        var node = self;
        // TODO: Use @fieldParentPtr() once implemented for unions
        const cast_node = @ptrCast(*const Node, node);
        // Can't close the root node
        if (cast_node == root) {
            return;
        }
        return fs.close(fs, cast_node);
    }
};

/// A node representing a symbolic link within a filesystem
pub const SymlinkNode = struct {
    /// The filesystem that handles operations on this symlink
    fs: *const FileSystem,
    /// The absolute path that this symlink is linked to
    path: []const u8,

    /// See the documentation for FileSystem.Close
    pub fn close(self: *const SymlinkNode) void {
        // TODO: Use @fieldParentPtr() once implemented for unions
        return self.fs.close(self.fs, @ptrCast(*const Node, self));
    }
};

/// Errors that can be thrown by filesystem functions
pub const Error = error{
    /// The file or directory requested doesn't exist in the filesystem
    NoSuchFileOrDir,
    /// The parent of a requested file or directory isn't a directory itself
    NotADirectory,
    /// The requested file is actually a directory
    IsADirectory,
    /// The requested symlink is actually a file
    IsAFile,
    /// The path provided is not absolute
    NotAbsolutePath,
    /// The flags provided are invalid for the requested operation
    InvalidFlags,
    /// The node is not recognised as being opened by the filesystem
    NotOpened,
    /// No symlink target was provided when one was expected
    NoSymlinkTarget,
    /// An unexpected error ocurred when performing a VFS operation.
    Unexpected,
};

/// Errors that can be thrown when attempting to mount
pub const MountError = error{
    /// The directory being mounted to a filesystem is already mounted to something
    DirAlreadyMounted,
};

/// The separator used between segments of a file path
pub const SEPARATOR: u8 = '/';

/// The root of the system's top-level filesystem
var root: *Node = undefined;

///
/// Traverse the specified path from the root and open the file/dir corresponding to that path. If the file/dir doesn't exist it can be created by specifying the open flags
///
/// Arguments:
///     IN path: []const u8 - The path to traverse. Must be absolute (see isAbsolute)
///     IN follow_symlinks: bool - Whether to resolve symlinks encountered during traversal. When false, a symlink before the final segment yields NotADirectory.
///     IN flags: OpenFlags - The flags that specify if the file/dir should be created if it doesn't exist
///     IN args: OpenArgs - The extra args needed when creating new nodes.
///
/// Return: *const Node
///     The node that exists at the path starting at the system root
///
/// Error: Allocator.Error || Error
///     Allocator.Error.OutOfMemory - There wasn't enough memory to fulfill the request
///     Error.NotADirectory - A segment within the path which is not at the end does not correspond to a directory
///     Error.NoSuchFileOrDir - The file/dir at the end of the path doesn't exist and the flags didn't specify to create it
///     Error.NoSymlinkTarget - A non-null symlink target was not provided when creating a symlink
///
fn traversePath(path: []const u8, follow_symlinks: bool, flags: OpenFlags, args: OpenArgs) (Allocator.Error || Error)!*Node {
    if (!isAbsolute(path)) {
        return Error.NotAbsolutePath;
    }

    // Recursive walker: resolves every segment except the last, returning the
    // parent node plus the final segment name so the caller can apply `flags`/`args`
    // only to the terminal open.
    const TraversalParent = struct {
        parent: *Node,
        child: []const u8,

        const Self = @This();

        fn func(split: *std.mem.SplitIterator(u8), node: *Node, follow_links: bool, rec_flags: OpenFlags) (Allocator.Error || Error)!Self {
            // Get segment string. This will not be unreachable as we've made sure the spliterator has more segments left
            const seg = split.next() orelse unreachable;
            if (split.rest().len == 0) {
                return Self{
                    .parent = node,
                    .child = seg,
                };
            }
            return switch (node.*) {
                .File => Error.NotADirectory,
                .Dir => |*dir| blk: {
                    var child = try dir.open(seg, rec_flags, .{});
                    // Balance the open above; the final node's close is the caller's job.
                    defer dir.close();
                    // If the segment refers to a symlink, redirect to the node it represents instead
                    if (child.isSymlink() and follow_links) {
                        const new_child = try traversePath(child.Symlink.path, follow_links, rec_flags, .{});
                        child.Symlink.close();
                        child = new_child;
                    }
                    break :blk try func(split, child, follow_links, rec_flags);
                },
                .Symlink => |target| if (follow_links) try func(split, try traversePath(target.path, follow_links, .NO_CREATION, .{}), follow_links, rec_flags) else Error.NotADirectory,
            };
        }
    };

    // Split path but skip the first separator character
    var split = std.mem.split(u8, path[1..], &[_]u8{SEPARATOR});
    // Traverse directories while we're not at the last segment
    const result = try TraversalParent.func(&split, root, follow_symlinks, .NO_CREATION);
    // There won't always be a second segment in the path, e.g. in "/"
    if (std.mem.eql(u8, result.child, "")) {
        return result.parent;
    }
    // Open the final segment of the path from whatever the parent is
    return switch (result.parent.*) {
        .File => |*file| blk: {
            file.close();
            break :blk Error.NotADirectory;
        },
        .Symlink => |target| if (follow_symlinks) try traversePath(target.path, follow_symlinks, .NO_CREATION, .{}) else result.parent,
        .Dir => |*dir| blk: {
            var n = try dir.open(result.child, flags, args);
            defer dir.close();
            if (n.isSymlink() and follow_symlinks) {
                // If the child is a symlink and we're following them, find the node it refers to
                const new_n = try traversePath(n.Symlink.path, follow_symlinks, flags, args);
                n.Symlink.close();
                n = new_n;
            }
            break :blk n;
        },
    };
}

///
/// Mount the root of a filesystem to a directory. Opening files within that directory will then redirect to the target filesystem
///
/// Arguments:
///     IN dir: *DirNode - The directory to mount to. dir.mount is modified.
///     IN fs: *const FileSystem - The filesystem to mount
///
/// Error: MountError
///     MountError.DirAlreadyMounted - The directory is already mounted to a filesystem
///
pub fn mount(dir: *DirNode, fs: *const FileSystem) MountError!void {
    if (dir.mount) |_| {
        return MountError.DirAlreadyMounted;
    }
    dir.mount = fs.getRootNode(fs);
}

///
/// Unmount the filesystem attached to a directory.
///
/// Arguments:
///     IN dir: *DirNode - The directory to unmount from.
///
pub fn umount(dir: *DirNode) void {
    dir.mount = null;
}

///
/// Open a node at a path.
///
/// Arguments:
///     IN path: []const u8 - The path to open. Must be absolute (see isAbsolute)
///     IN follow_symlinks: bool - Whether symbolic links should be followed. When this is false and the path traversal encounters a symlink before the end segment of the path, NotADirectory is returned.
///     IN flags: OpenFlags - The flags specifying if this node should be created if it doesn't exist
///     IN args: OpenArgs - The extra args needed when creating new nodes.
///
/// Return: *const Node
///     The node that exists at the path starting at the system root
///
/// Error: Allocator.Error || Error
///     Allocator.Error.OutOfMemory - There wasn't enough memory to fulfill the request
///     Error.NotADirectory - A segment within the path which is not at the end does not correspond to a directory
///     Error.NoSuchFileOrDir - The file/dir at the end of the path doesn't exist and the flags didn't specify to create it
///     Error.NoSymlinkTarget - A non-null symlink target was not provided when creating a symlink
///
pub fn open(path: []const u8, follow_symlinks: bool, flags: OpenFlags, args: OpenArgs) (Allocator.Error || Error)!*Node {
    return try traversePath(path, follow_symlinks, flags, args);
}

///
/// Close a node.
///
/// Arguments:
///     IN node: Node - The node to close
///
pub fn close(node: Node) void {
    switch (node) {
        .Dir => |d| d.close(),
        .File => |f| f.close(),
        .Symlink => |s| s.close(),
    }
}

///
/// Open a file at a path.
///
/// Arguments:
///     IN path: []const u8 - The path to open. Must be absolute (see isAbsolute)
///     IN flags: OpenFlags - The flags specifying if this node should be created if it doesn't exist. Cannot be CREATE_DIR
///
/// Return: *const FileNode
///     The node that exists at the path starting at the system root
///
/// Error: Allocator.Error || Error
///     Allocator.Error.OutOfMemory - There wasn't enough memory to fulfill the request
///     Error.InvalidFlags - The flags were a value invalid when opening files
///     Error.NotADirectory - A segment within the path which is not at the end does not correspond to a directory
///     Error.NoSuchFileOrDir - The file/dir at the end of the path doesn't exist and the flags didn't specify to create it
///     Error.IsADirectory - The path corresponds to a directory rather than a file
///
pub fn openFile(path: []const u8, flags: OpenFlags) (Allocator.Error || Error)!*const FileNode {
    switch (flags) {
        .CREATE_DIR, .CREATE_SYMLINK => return Error.InvalidFlags,
        .NO_CREATION, .CREATE_FILE => {},
    }
    var node = try open(path, true, flags, .{});
    return switch (node.*) {
        .File => &node.File,
        .Dir => |*dir| blk: {
            dir.close();
            break :blk Error.IsADirectory;
        },
        // We instructed open to follow symlinks above, so this is impossible
        .Symlink => unreachable,
    };
}

///
/// Open a directory at a path.
///
/// Arguments:
///     IN path: []const u8 - The path to open. Must be absolute (see isAbsolute)
///     IN flags: OpenFlags - The flags specifying if this node should be created if it doesn't exist.
Cannot be CREATE_FILE /// /// Return: *const DirNode /// The node that exists at the path starting at the system root /// /// Error: Allocator.Error || Error /// Allocator.Error.OutOfMemory - There wasn't enough memory to fulfill the request /// Error.InvalidFlags - The flags were a value invalid when opening files /// Error.NotADirectory - A segment within the path which is not at the end does not correspond to a directory /// Error.NoSuchFileOrDir - The file/dir at the end of the path doesn't exist and the flags didn't specify to create it /// Error.IsAFile - The path corresponds to a file rather than a directory /// pub fn openDir(path: []const u8, flags: OpenFlags) (Allocator.Error || Error)!*DirNode { switch (flags) { .CREATE_FILE, .CREATE_SYMLINK => return Error.InvalidFlags, .NO_CREATION, .CREATE_DIR => {}, } var node = try open(path, true, flags, .{}); return switch (node.*) { .File => |*file| blk: { file.close(); break :blk Error.IsAFile; }, // We instructed open to follow symlinks above, so this is impossible .Symlink => unreachable, .Dir => &node.Dir, }; } /// /// Open a symlink at a path with a target. /// /// Arguments: /// IN path: []const u8 - The path to open. Must be absolute (see isAbsolute) /// IN target: ?[]const u8 - The target to use when creating the symlink. Can be null if .NO_CREATION is used as the open flag /// IN flags: OpenFlags - The flags specifying if this node should be created if it doesn't exist. 
Cannot be CREATE_FILE /// /// Return: []const u8 /// The opened symlink's target /// /// Error: Allocator.Error || Error /// Allocator.Error.OutOfMemory - There wasn't enough memory to fulfill the request /// Error.InvalidFlags - The flags were a value invalid when opening a symlink /// Error.NotADirectory - A segment within the path which is not at the end does not correspond to a directory /// Error.NoSuchFileOrDir - The symlink at the end of the path doesn't exist and the flags didn't specify to create it /// Error.IsAFile - The path corresponds to a file rather than a symlink /// Error.IsADirectory - The path corresponds to a directory rather than a symlink /// pub fn openSymlink(path: []const u8, target: ?[]const u8, flags: OpenFlags) (Allocator.Error || Error)![]const u8 { switch (flags) { .CREATE_DIR, .CREATE_FILE => return Error.InvalidFlags, .NO_CREATION, .CREATE_SYMLINK => {}, } var node = try open(path, false, flags, .{ .symlink_target = target }); return switch (node.*) { .Symlink => |t| t.path, .File => |*file| blk: { file.close(); break :blk Error.IsAFile; }, .Dir => |*dir| blk: { dir.close(); break :blk Error.IsADirectory; }, }; } // TODO: Replace this with the std lib implementation once the OS abstraction layer is up and running /// /// Check if a path is absolute, i.e. its length is greater than 0 and starts with the path separator character /// /// Arguments: /// IN path: []const u8 - The path to check /// /// Return: bool /// True if the path is absolute else false /// pub fn isAbsolute(path: []const u8) bool { return path.len > 0 and path[0] == SEPARATOR; } /// /// Initialise the virtual file system with a root Node. This will be a Directory node. /// /// Arguments: /// IN node: *Node - The node to initialise the root node. /// /// Error: Error /// Error.NotADirectory - The node isn't a directory node. 
/// pub fn setRoot(node: *Node) Error!void { if (!node.isDir()) { return Error.NotADirectory; } root = node; } const TestFS = struct { const TreeNode = struct { val: *Node, name: []u8, data: ?[]u8, children: *ArrayList(*@This()), fn deinit(self: *@This(), allocator: Allocator) void { allocator.destroy(self.val); allocator.free(self.name); if (self.data) |d| { allocator.free(d); } for (self.children.items) |child| { child.deinit(allocator); allocator.destroy(child); } self.children.deinit(); allocator.destroy(self.children); } }; tree: TreeNode, fs: *FileSystem, allocator: Allocator, open_count: usize, instance: usize, const Self = @This(); pub fn deinit(self: *@This()) void { self.tree.deinit(self.allocator); self.allocator.destroy(self.fs); } fn getTreeNode(test_fs: *Self, node: anytype) Allocator.Error!?*TreeNode { switch (@TypeOf(node)) { *const Node, *const FileNode, *const DirNode => {}, else => @compileError("Node is of type " ++ @typeName(@TypeOf(node)) ++ ". Only *const Node, *const FileNode and *const DirNode are supported"), } // Form a list containing all directory nodes to check via a breadth-first search // This is inefficient but good for testing as it's clear and easy to modify var to_check = TailQueue(*TreeNode){}; var root_node = try test_fs.allocator.create(TailQueue(*TreeNode).Node); root_node.* = .{ .data = &test_fs.tree }; to_check.append(root_node); while (to_check.popFirst()) |queue_node| { var tree_node = queue_node.data; test_fs.allocator.destroy(queue_node); if ((@TypeOf(node) == *const FileNode and tree_node.val.isFile() and &tree_node.val.File == node) or (@TypeOf(node) == *const DirNode and tree_node.val.isDir() and &tree_node.val.Dir == node) or (@TypeOf(node) == *const Node and &tree_node.val == node)) { // Clean up any unused queue nodes while (to_check.popFirst()) |t_node| { test_fs.allocator.destroy(t_node); } return tree_node; } for (tree_node.children.items) |child| { // It's not the parent so add its children to the list for 
checking var n = try test_fs.allocator.create(TailQueue(*TreeNode).Node); n.* = .{ .data = child }; to_check.append(n); } } return null; } fn getRootNode(fs: *const FileSystem) *const DirNode { var test_fs = @fieldParentPtr(TestFS, "instance", fs.instance); return &test_fs.tree.val.Dir; } fn close(fs: *const FileSystem, node: *const Node) void { // Suppress unused var warning _ = node; var test_fs = @fieldParentPtr(TestFS, "instance", fs.instance); test_fs.open_count -= 1; } fn read(fs: *const FileSystem, node: *const FileNode, bytes: []u8) (Allocator.Error || Error)!usize { var test_fs = @fieldParentPtr(TestFS, "instance", fs.instance); // Get the tree that corresponds to the node. Cannot error as the file is already open so it does exist var tree = (getTreeNode(test_fs, node) catch unreachable) orelse unreachable; const count = if (tree.data) |d| std.math.min(bytes.len, d.len) else 0; const data = if (tree.data) |d| d[0..count] else ""; std.mem.copy(u8, bytes, data); return count; } fn write(fs: *const FileSystem, node: *const FileNode, bytes: []const u8) (Allocator.Error || Error)!usize { var test_fs = @fieldParentPtr(TestFS, "instance", fs.instance); var tree = (try getTreeNode(test_fs, node)) orelse unreachable; if (tree.data) |_| { test_fs.allocator.free(tree.data.?); } tree.data = try test_fs.allocator.alloc(u8, bytes.len); std.mem.copy(u8, tree.data.?, bytes); return bytes.len; } fn open(fs: *const FileSystem, dir: *const DirNode, name: []const u8, flags: OpenFlags, args: OpenArgs) (Allocator.Error || Error)!*Node { var test_fs = @fieldParentPtr(TestFS, "instance", fs.instance); const parent = (try getTreeNode(test_fs, dir)) orelse unreachable; // Check if the children match the file wanted for (parent.children.items) |child| { if (std.mem.eql(u8, child.name, name)) { // Increment the open count test_fs.open_count += 1; return child.val; } } // The file/dir doesn't exist so create it if necessary if (flags != .NO_CREATION) { var child: *Node = undefined; 
switch (flags) { .CREATE_DIR => { // Create the fs node child = try test_fs.allocator.create(Node); child.* = .{ .Dir = .{ .fs = test_fs.fs, .mount = null } }; }, .CREATE_FILE => { // Create the fs node child = try test_fs.allocator.create(Node); child.* = .{ .File = .{ .fs = test_fs.fs } }; }, .CREATE_SYMLINK => { if (args.symlink_target) |target| { child = try test_fs.allocator.create(Node); child.* = .{ .Symlink = .{ .fs = test_fs.fs, .path = target } }; } else { return Error.NoSymlinkTarget; } }, .NO_CREATION => unreachable, } // Create the test fs tree node var child_tree = try test_fs.allocator.create(TreeNode); var child_name = try test_fs.allocator.alloc(u8, name.len); std.mem.copy(u8, child_name, name); child_tree.* = .{ .val = child, .name = child_name, .children = try test_fs.allocator.create(ArrayList(*TreeNode)), .data = null, }; child_tree.children.* = ArrayList(*TreeNode).init(test_fs.allocator); // Add it to the tree try parent.children.append(child_tree); // Increment the open count test_fs.open_count += 1; return child; } return Error.NoSuchFileOrDir; } }; pub fn testInitFs(allocator: Allocator) !*TestFS { const fs = try allocator.create(FileSystem); var testfs = try allocator.create(TestFS); var root_node = try allocator.create(Node); root_node.* = .{ .Dir = .{ .fs = fs, .mount = null } }; var name = try allocator.alloc(u8, 4); std.mem.copy(u8, name, "root"); testfs.* = TestFS{ .tree = .{ .val = root_node, .name = name, .children = try allocator.create(ArrayList(*TestFS.TreeNode)), .data = null, }, .fs = fs, .instance = 123, .open_count = 0, .allocator = allocator, }; testfs.tree.children.* = ArrayList(*TestFS.TreeNode).init(allocator); fs.* = .{ .open = TestFS.open, .close = TestFS.close, .read = TestFS.read, .write = TestFS.write, .instance = &testfs.instance, .getRootNode = TestFS.getRootNode, }; return testfs; } test "mount" { var allocator = testing.allocator; // The root fs var testfs = try testInitFs(allocator); defer 
allocator.destroy(testfs); defer testfs.deinit(); defer testing.expectEqual(testfs.open_count, 0) catch @panic("Test fs open count is not zero\n"); testfs.instance = 1; root = testfs.tree.val; // The fs that is to be mounted var testfs2 = try testInitFs(allocator); defer allocator.destroy(testfs2); defer testfs2.deinit(); defer testing.expectEqual(testfs2.open_count, 0) catch @panic("Second test fs open count is not zero\n"); testfs2.instance = 2; // Create the dir to mount to var dir = try openDir("/mnt", .CREATE_DIR); defer dir.close(); try mount(dir, testfs2.fs); defer umount(dir); try testing.expectError(MountError.DirAlreadyMounted, mount(dir, testfs2.fs)); // Ensure the mount worked try testing.expectEqual((dir.mount orelse unreachable), testfs2.fs.getRootNode(testfs2.fs)); try testing.expectEqual((dir.mount orelse unreachable).fs, testfs2.fs); // Create a file within the mounted directory var test_file = try openFile("/mnt/123.txt", .CREATE_FILE); defer test_file.close(); try testing.expectEqual(@ptrCast(*const FileSystem, testfs2.fs), test_file.fs); // This shouldn't be in the root fs try testing.expectEqual(@as(usize, 1), testfs.tree.children.items.len); try testing.expectEqual(@as(usize, 0), testfs.tree.children.items[0].children.items.len); // It should be in the mounted fs try testing.expectEqual(@as(usize, 1), testfs2.tree.children.items.len); try testing.expectEqual(test_file, &testfs2.tree.children.items[0].val.File); } test "traversePath" { var allocator = testing.allocator; var testfs = try testInitFs(allocator); defer allocator.destroy(testfs); defer testfs.deinit(); root = testfs.tree.val; // Get the root var test_root = try traversePath("/", false, .NO_CREATION, .{}); try testing.expectEqual(test_root, root); // Create a file in the root and try to traverse to it var child1 = try test_root.Dir.open("child1.txt", .CREATE_FILE, .{}); var child1_traversed = try traversePath("/child1.txt", false, .NO_CREATION, .{}); try testing.expectEqual(child1, 
child1_traversed); // Close the open files child1.File.close(); child1_traversed.File.close(); // Same but with a directory var child2 = try test_root.Dir.open("child2", .CREATE_DIR, .{}); const child2_traversed = try traversePath("/child2", false, .NO_CREATION, .{}); try testing.expectEqual(child2, child2_traversed); // Again but with a file within that directory var child3 = try child2.Dir.open("child3.txt", .CREATE_FILE, .{}); var child3_traversed = try traversePath("/child2/child3.txt", false, .NO_CREATION, .{}); try testing.expectEqual(child3, child3_traversed); // Close the open files child2.Dir.close(); child2_traversed.Dir.close(); child3_traversed.File.close(); // Create and open a symlink var child4 = try traversePath("/child2/link", false, .CREATE_SYMLINK, .{ .symlink_target = "/child2/child3.txt" }); var child4_linked = try traversePath("/child2/link", true, .NO_CREATION, .{}); try testing.expectEqual(child4_linked, child3); var child5 = try traversePath("/child4", false, .CREATE_SYMLINK, .{ .symlink_target = "/child2" }); var child5_linked = try traversePath("/child4/child3.txt", true, .NO_CREATION, .{}); try testing.expectEqual(child5_linked, child4_linked); child3.File.close(); child4.Symlink.close(); child5.Symlink.close(); child4_linked.File.close(); child5_linked.File.close(); try testing.expectError(Error.NotAbsolutePath, traversePath("abc", false, .NO_CREATION, .{})); try testing.expectError(Error.NotAbsolutePath, traversePath("", false, .NO_CREATION, .{})); try testing.expectError(Error.NotAbsolutePath, traversePath("a/", false, .NO_CREATION, .{})); try testing.expectError(Error.NoSuchFileOrDir, traversePath("/notadir/abc.txt", false, .NO_CREATION, .{})); try testing.expectError(Error.NoSuchFileOrDir, traversePath("/ ", false, .NO_CREATION, .{})); try testing.expectError(Error.NotADirectory, traversePath("/child1.txt/abc.txt", false, .NO_CREATION, .{})); try testing.expectError(Error.NoSymlinkTarget, traversePath("/childX.txt", false, 
.CREATE_SYMLINK, .{})); // Since we've closed all the files, the open files count should be zero try testing.expectEqual(testfs.open_count, 0); } test "isAbsolute" { try testing.expect(isAbsolute("/")); try testing.expect(isAbsolute("/abc")); try testing.expect(isAbsolute("/abc/def")); try testing.expect(isAbsolute("/ a bc/de f")); try testing.expect(isAbsolute("//")); try testing.expect(!isAbsolute(" /")); try testing.expect(!isAbsolute("")); try testing.expect(!isAbsolute("abc")); try testing.expect(!isAbsolute("abc/def")); } test "isDir" { const fs: FileSystem = undefined; const dir = Node{ .Dir = .{ .fs = &fs, .mount = null } }; const file = Node{ .File = .{ .fs = &fs } }; const symlink = Node{ .Symlink = .{ .fs = &fs, .path = "" } }; try testing.expect(!symlink.isDir()); try testing.expect(!file.isDir()); try testing.expect(dir.isDir()); } test "isFile" { const fs: FileSystem = undefined; const dir = Node{ .Dir = .{ .fs = &fs, .mount = null } }; const file = Node{ .File = .{ .fs = &fs } }; const symlink = Node{ .Symlink = .{ .fs = &fs, .path = "" } }; try testing.expect(!dir.isFile()); try testing.expect(!symlink.isFile()); try testing.expect(file.isFile()); } test "isSymlink" { const fs: FileSystem = undefined; const dir = Node{ .Dir = .{ .fs = &fs, .mount = null } }; const file = Node{ .File = .{ .fs = &fs } }; const symlink = Node{ .Symlink = .{ .fs = &fs, .path = "" } }; try testing.expect(!dir.isSymlink()); try testing.expect(!file.isSymlink()); try testing.expect(symlink.isSymlink()); } test "open" { var testfs = try testInitFs(testing.allocator); defer testing.allocator.destroy(testfs); defer testfs.deinit(); root = testfs.tree.val; // Creating a file var test_node = try openFile("/abc.txt", .CREATE_FILE); try testing.expectEqual(testfs.tree.children.items.len, 1); var tree = testfs.tree.children.items[0]; try testing.expect(tree.val.isFile()); try testing.expectEqual(test_node, &tree.val.File); try testing.expect(std.mem.eql(u8, tree.name, "abc.txt")); 
try testing.expectEqual(tree.data, null); try testing.expectEqual(tree.children.items.len, 0); // Creating a dir var test_dir = try openDir("/def", .CREATE_DIR); try testing.expectEqual(testfs.tree.children.items.len, 2); tree = testfs.tree.children.items[1]; try testing.expect(tree.val.isDir()); try testing.expectEqual(test_dir, &tree.val.Dir); try testing.expect(std.mem.eql(u8, tree.name, "def")); try testing.expectEqual(tree.data, null); try testing.expectEqual(tree.children.items.len, 0); // Creating a file under a new dir test_node = try openFile("/def/ghi.zig", .CREATE_FILE); try testing.expectEqual(testfs.tree.children.items[1].children.items.len, 1); tree = testfs.tree.children.items[1].children.items[0]; try testing.expect(tree.val.isFile()); try testing.expectEqual(test_node, &tree.val.File); try testing.expect(std.mem.eql(u8, tree.name, "ghi.zig")); try testing.expectEqual(tree.data, null); try testing.expectEqual(tree.children.items.len, 0); try testing.expectError(Error.NoSuchFileOrDir, openDir("/jkl", .NO_CREATION)); try testing.expectError(Error.NoSuchFileOrDir, openFile("/mno.txt", .NO_CREATION)); try testing.expectError(Error.NoSuchFileOrDir, openFile("/def/pqr.txt", .NO_CREATION)); try testing.expectError(Error.NoSuchFileOrDir, openDir("/mno/stu", .NO_CREATION)); try testing.expectError(Error.NoSuchFileOrDir, openFile("/mno/stu.txt", .NO_CREATION)); try testing.expectError(Error.NotADirectory, openFile("/abc.txt/vxy.md", .NO_CREATION)); try testing.expectError(Error.IsADirectory, openFile("/def", .NO_CREATION)); try testing.expectError(Error.InvalidFlags, openFile("/abc.txt", .CREATE_DIR)); try testing.expectError(Error.InvalidFlags, openDir("/abc.txt", .CREATE_FILE)); try testing.expectError(Error.NotAbsolutePath, open("", false, .NO_CREATION, .{})); try testing.expectError(Error.NotAbsolutePath, open("abc", false, .NO_CREATION, .{})); try testing.expectError(Error.NoSymlinkTarget, open("/abc", false, .CREATE_SYMLINK, .{})); } test "read" { var 
testfs = try testInitFs(testing.allocator); defer testing.allocator.destroy(testfs); defer testfs.deinit(); root = testfs.tree.val; var test_file = try openFile("/foo.txt", .CREATE_FILE); var f_data = &testfs.tree.children.items[0].data; var str = "test123"; f_data.* = try Allocator.dupe(testing.allocator, u8, str); var buffer: [64]u8 = undefined; { const length = try test_file.read(buffer[0..str.len]); try testing.expect(std.mem.eql(u8, str, buffer[0..length])); } { const length = try test_file.read(buffer[0 .. str.len + 1]); try testing.expect(std.mem.eql(u8, str, buffer[0..length])); } { const length = try test_file.read(buffer[0 .. str.len + 3]); try testing.expect(std.mem.eql(u8, str, buffer[0..length])); } { const length = try test_file.read(buffer[0 .. str.len - 1]); try testing.expect(std.mem.eql(u8, str[0 .. str.len - 1], buffer[0..length])); } { const length = try test_file.read(buffer[0..0]); try testing.expect(std.mem.eql(u8, str[0..0], buffer[0..length])); } // Try reading from a symlink var test_link = try openSymlink("/link", "/foo.txt", .CREATE_SYMLINK); try testing.expectEqual(test_link, "/foo.txt"); var link_file = try openFile("/link", .NO_CREATION); { const length = try link_file.read(buffer[0..]); try testing.expect(std.mem.eql(u8, str[0..], buffer[0..length])); } } test "write" { var testfs = try testInitFs(testing.allocator); defer testing.allocator.destroy(testfs); defer testfs.deinit(); root = testfs.tree.val; var test_file = try openFile("/foo.txt", .CREATE_FILE); var f_data = &testfs.tree.children.items[0].data; try testing.expectEqual(f_data.*, null); var str = "test123"; const length = try test_file.write(str); try testing.expect(std.mem.eql(u8, str, f_data.* orelse unreachable)); try testing.expect(length == str.len); // Try writing to a symlink var test_link = try openSymlink("/link", "/foo.txt", .CREATE_SYMLINK); try testing.expectEqual(test_link, "/foo.txt"); _ = try openFile("/link", .NO_CREATION); var str2 = "test456"; _ = try 
test_file.write(str2); try testing.expect(std.mem.eql(u8, str2, f_data.* orelse unreachable)); }
0
repos/pluto/src/kernel
repos/pluto/src/kernel/filesystem/fat32.zig
const std = @import("std"); const builtin = std.builtin; const expectEqualSlices = std.testing.expectEqualSlices; const expectEqual = std.testing.expectEqual; const expectError = std.testing.expectError; const expect = std.testing.expect; const log = std.log.scoped(.fat32); const AutoHashMap = std.AutoHashMap; const Allocator = std.mem.Allocator; const ArrayList = std.ArrayList; const arch = @import("../arch.zig").internals; const vfs = @import("vfs.zig"); const mem = @import("../mem.zig"); const CodePage = @import("../code_page/code_page.zig").CodePage; const mkfat32 = @import("../../../mkfat32.zig"); /// The boot record for FAT32. This is use for parsing the initial boot sector to extract the /// relevant information for todays FAT32. const BootRecord = struct { /// The jump bytes that begin the boot code. This will jump over the FAT32 header. jmp: [3]u8, /// The OEM name. This is a 8 character string padded by spaces. oem: [8]u8, /// The number of bytes per sector. bytes_per_sector: u16, /// The number of sectors per cluster. sectors_per_cluster: u8, /// The number of reserved sectors at the beginning of the image. This is where the fat /// header, FSInfo and FAT is stored. reserved_sectors: u16, /// The number of FAT's. fat_count: u8, /// The size in bytes of the root directory. This is only used by FAT12 and FAT16, so is /// always 0 for FAT32. root_directory_size: u16, /// The total number of sectors. This is only used for FAT12 and FAT16, so is always 0 for /// FAT32. total_sectors_12_16: u16, /// The media type. This is used to identify the type of media. media_descriptor_type: u8, /// The total number of sectors of the FAT. This is only used for FAT12 and FAT16, so /// always 0 for FAT32. sectors_per_fat_12_16: u16, /// The number of sectors per track. sectors_per_track: u16, /// The number of heads. head_count: u16, /// The number of hidden sectors. hidden_sectors: u32, /// The total number of sectors for FAT32. 
total_sectors: u32, /// The number of sectors the FAT takes up for FAT32. sectors_per_fat: u32, /// Mirror flags. mirror_flags: u16, /// The version. version_number: u16, /// The start cluster of the root directory. root_directory_cluster: u32, /// The sector where is the FS information sector is located. A value of 0x0000 or 0xFFFF /// indicates that there isn't a FSInfo structure. fsinfo_sector: u16, /// The sector for the backup boot record where the first 3 sectors are copied. A value of /// 0x000 or 0xFFFF indicate there is no backup. backup_boot_sector: u16, /// Reserved. All zero. reserved0: [12]u8, /// The physical drive number. drive_number: u8, /// Reserved. All zero. reserved1: u8, /// The extended boot signature. signature: u8, /// The serial number of the FAT image at format. This is a function of the current /// timestamp of creation. serial_number: u32, /// The partitioned volume label. This is a 11 character string padded by spaces. volume_label: [11]u8, /// The file system type. This is a 8 character string padded by spaces. For FAT32, this is /// 'FAT32 ' filesystem_type: [8]u8, // We are ignoring the boot code as we don't need to parse this. }; /// The FSInfo block. This is used for parsing the initial sector to extract the relevant /// information: number_free_clusters and next_free_cluster. const FSInfo = struct { /// The lead signature: 0x41615252 lead_signature: u32, /// Reserved bytes reserved0: [480]u8, /// The middle or struct signature: 0x61417272 struct_signature: u32, /// The number of free clusters in the image number_free_clusters: u32, /// The next available free cluster that can be allocated for writing to. next_free_cluster: u32, /// Reserved bytes reserved1: [12]u8, /// The tail signature: 0xAA550000 tail_signature: u32, }; /// A long name entry. This is part of a fill long name block, but each entry is 32 bytes long. /// Entries are ordered backwards. The file name is encoded as a wide UTF16 characters. 
const LongName = struct { /// The order in the long entry sequence. If OR'ed with 0x40, then this is the last entry. order: u8, /// The first 5 wide characters in the block. first: [5]u16, /// The attributes from the short name block so will always be 0x0F as this identifies as a /// long entry. attribute: u8 = 0x0F, /// Always 0x00, other values are reserved. 0x00 Means this is a sub-component of the long name /// entry, i.e. there are multiple blocks that make up the long entry. long_entry_type: u8 = 0x00, /// The check sum of the 11 character short file name that goes along with this long name. check_sum: u8, /// The next 6 wide characters in the block. second: [6]u16, /// Must be zero, as this is an artifact from the short name entry to be compatible with /// systems that don't support long name entries. zero: u16 = 0x0000, /// The last 2 wide characters in the block. third: [2]u16, /// This is the error set for std.unicode.Utf16LeIterator. const Error = error{ UnexpectedSecondSurrogateHalf, ExpectedSecondSurrogateHalf, DanglingSurrogateHalf, }; /// /// Given a long name entry part, get the name associated with this where the UFT16 encoded /// characters are converted to UTF8. This will exclude the NULL terminator. If an error /// occurs, then should fail the long name parsing, or be treated as an orphan file. /// /// Arguments: /// IN self: *const LongName - The long name entry to get the name from. /// IN buff: []u8 - A buffer to copy the name into. /// /// Return: u32 /// The index into the buffer where the last character was stored. /// /// Error: std.unicode.Utf16LeIterator /// An error when parsing the wide UFT16 characters in the long name. 
/// pub fn getName(self: *const LongName, buff: []u8) Error!u32 { // No error can happen when encoding to UFT8 as will be getting valid code points from the // UTF16 iterator var index: u8 = 0; var f_i = std.unicode.Utf16LeIterator.init(self.first[0..]); while (try f_i.nextCodepoint()) |code_point| { // long names are null terminated, so return if (code_point == 0) { return index; } index += std.unicode.utf8Encode(code_point, buff[index..]) catch unreachable; } var s_i = std.unicode.Utf16LeIterator.init(self.second[0..]); while (try s_i.nextCodepoint()) |code_point| { // long names are null terminated, so return if (code_point == 0) { return index; } index += std.unicode.utf8Encode(code_point, buff[index..]) catch unreachable; } var t_i = std.unicode.Utf16LeIterator.init(self.third[0..]); while (try t_i.nextCodepoint()) |code_point| { // long names are null terminated, so return if (code_point == 0) { return index; } index += std.unicode.utf8Encode(code_point, buff[index..]) catch unreachable; } return index; } }; /// A short name entry. This is the standard FAT32 file/directory entry. Each entry is 32 bytes /// long. The file name is encoded in code page 437. When creating a short name, this will be in /// ASCII as there isn't enough support in the std lib for unicode uppercase. But when parsing, /// will decode proper code page 437 characters. const ShortName = struct { /// The short name. All uppercase encoded in code page 437. name: [8]u8, /// The extension of the file. All uppercase encoded in code page 437. extension: [3]u8, /// The file attributes. See Attributes for the options. The upper 2 bits are reserved. attributes: u8, /// Reserved for Windows NT. reserved: u8 = 0x00, /// The 10th of a second part of the time created. The granularity of time created is 2 /// seconds, so this can range from 0-199. time_created_tenth: u8, /// The time of creation. time_created: u16, /// The date of creation. date_created: u16, /// The date of last read or write. 
date_last_access: u16, /// The higher word of the entries first cluster. cluster_high: u16, /// Time of last modification. time_last_modification: u16, /// Date of last modification. date_last_modification: u16, /// The lower word of the entries first cluster. cluster_low: u16, /// The real file size in bytes. size: u32, /// The file attributes const Attributes = enum(u8) { /// A normal read/write file, not hidden, not a system file, don't backup. None = 0x00, /// Read only, can't write to the file. ReadOnly = 0x01, /// A normal directory listing won't show this. Can still access this as a normal file. Hidden = 0x02, /// An operating system file. System = 0x04, /// The real volume ID. There should only be one of these and in the root directory. The /// cluster should be set to 0. VolumeID = 0x08, /// A directory, not a file. Directory = 0x10, /// Indicates that a file/directory is to be achieved/backed up but backup utilities. Archive = 0x20, }; /// /// Get the short name from the entry. This will be encoded in UFT8. This will also include a /// '.' between the name and extension and ignore trailing spaces. /// /// Arguments: /// IN self: *const ShortName - The short name entry to get the name from. /// IN buff: []u8 - A buffer to copy the UTF8 encoded name. /// /// Return: u32 /// The index into the buffer where the last character was stored. 
/// pub fn getName(self: *const ShortName, buff: []u8) u32 { var index: u8 = 0; for (self.name) |char| { if (char != ' ') { // 0x05 is actually 0xE5 if (char == 0x05) { const utf16 = CodePage.toWideChar(.CP437, 0xE5); // The code page will return a valid character so the encode won't error index += std.unicode.utf8Encode(utf16, buff[index..]) catch unreachable; } else { const utf16 = CodePage.toWideChar(.CP437, char); // The code page will return a valid character so the encode won't error index += std.unicode.utf8Encode(utf16, buff[index..]) catch unreachable; } } else { break; } } if (!self.isDir()) { buff[index] = '.'; index += 1; for (self.extension) |char| { if (char != ' ') { const utf16 = CodePage.toWideChar(.CP437, char); // The code page will return a valid character so the encode won't error index += std.unicode.utf8Encode(utf16, buff[index..]) catch unreachable; } else { break; } } } return index; } /// /// Get the original short file name without encoding into UTF8. /// /// Arguments: /// IN self: *const ShortName - The short name entry to get the SFN name from. /// /// Return: [11]u8 /// The original 11 characters of the short name entry. /// pub fn getSFNName(self: *const ShortName) [11]u8 { var name: [11]u8 = [_]u8{' '} ** 11; std.mem.copy(u8, name[0..], self.name[0..]); std.mem.copy(u8, name[8..], self.extension[0..]); return name; } /// /// Check the attributes and check if the entry is a directory. /// /// Arguments: /// IN self: *const ShortName - The short name entry to get if this is a directory. /// /// Return: bool /// Whether the file is a directory or not. /// pub fn isDir(self: *const ShortName) bool { return self.attributes & @enumToInt(ShortName.Attributes.Directory) == @enumToInt(ShortName.Attributes.Directory); } /// /// Get the full first cluster number by combining the upper and lower parts of the cluster /// number. /// /// Arguments: /// IN self: *const ShortName - The short name entry to get the cluster from. 
    ///
    /// Return: u32
    ///     The first cluster of the file.
    ///
    pub fn getCluster(self: *const ShortName) u32 {
        return @as(u32, self.cluster_high) << 16 | self.cluster_low;
    }

    ///
    /// Calculate the check sum for the short name entry.
    ///
    /// Arguments:
    ///     IN self: *const ShortName - The short entry to calculate the check sum for.
    ///
    /// Return: u8
    ///     The check sum.
    ///
    pub fn calcCheckSum(self: *const ShortName) u8 {
        var check_sum: u8 = 0;
        // This is dumb, the check sum relies on the wrap around >:(
        for (self.name) |char| {
            check_sum = (check_sum << 7) +% (check_sum >> 1) +% char;
        }
        for (self.extension) |char| {
            check_sum = (check_sum << 7) +% (check_sum >> 1) +% char;
        }
        return check_sum;
    }
};

/// A complete FAT entry. This includes a list of long entries and one short entry.
const FatDirEntry = struct {
    /// The list of long entries. This will be in reverse order.
    long_entry: []LongName,

    /// The short entry.
    short_entry: ShortName,
};

/// The FAT32 configuration for modern FAT32. This is to remove the redundant FAT12/16
/// aspect of the boot sector.
const FATConfig = struct {
    /// The number of bytes per sector. This will normally be 512, but will depend on the
    /// hardware.
    bytes_per_sector: u16,

    /// The number of sectors per cluster. This will depend on the size of the disk.
    sectors_per_cluster: u8,

    /// The number of reserved sectors at the beginning of the filesystem. This is normally
    /// 32 sectors.
    reserved_sectors: u16,

    /// The number of hidden sectors. This is relevant for partitioned disks.
    hidden_sectors: u32,

    /// The total number of sectors of the filesystem.
    total_sectors: u32,

    /// The number of sectors that are occupied by the FAT.
    sectors_per_fat: u32,

    /// The cluster number of the root directory.
    root_directory_cluster: u32,

    /// The sector of the FSInfo sector.
    fsinfo_sector: u16,

    /// The sector number of the backup boot sector.
    backup_boot_sector: u16,

    /// If the filesystem has a FSInfo block.
    has_fs_info: bool,

    /// The number of free clusters on the disk. Will be 0xFFFFFFFF if unknown.
    number_free_clusters: u32,

    /// The next free cluster to start looking for when allocating a new cluster for a
    /// file. Will be 0xFFFFFFFF if unknown.
    next_free_cluster: u32,

    /// The end marker when reading the cluster chain. This will be in the range
    /// 0x0FFFFFF8 - 0x0FFFFFFF.
    cluster_end_marker: u32,

    ///
    /// Convert a cluster to the corresponding sector from the filesystem's configuration.
    ///
    /// Arguments:
    ///     IN self: *const FATConfig - The FAT32 configuration.
    ///     IN cluster: u32 - The cluster number to convert.
    ///
    /// Return: u32
    ///     The sector number.
    ///
    pub fn clusterToSector(self: *const FATConfig, cluster: u32) u32 {
        // FAT count will be 2 as this is checked in the init function.
        // Data area starts after the reserved sectors and both FATs; cluster numbering starts at 2.
        return (self.sectors_per_fat * 2) + self.reserved_sectors + ((cluster - 2) * self.sectors_per_cluster);
    }
};

///
/// Initialise a struct from bytes.
/// TODO: Once packed structs are good to go, then use std.mem.bytesAsValue.
///
/// Arguments:
///     IN comptime Type: type - The struct type to initialise.
///     IN bytes: []const u8 - The bytes to initialise the struct with.
///
/// Return: Type
///     The struct initialised with the bytes.
///
fn initStruct(comptime Type: type, bytes: []const u8) Type {
    var ret: Type = undefined;
    comptime var index = 0;
    inline for (std.meta.fields(Type)) |item| {
        switch (item.field_type) {
            u8 => @field(ret, item.name) = bytes[index],
            u16 => @field(ret, item.name) = std.mem.bytesAsSlice(u16, bytes[index .. index + 2])[0],
            u32 => @field(ret, item.name) = std.mem.bytesAsSlice(u32, bytes[index .. index + 4])[0],
            else => {
                switch (@typeInfo(item.field_type)) {
                    .Array => |info| switch (info.child) {
                        u8 => {
                            comptime var i = 0;
                            inline while (i < info.len) : (i += 1) {
                                @field(ret, item.name)[i] = bytes[index + i];
                            }
                        },
                        u16 => {
                            comptime var i = 0;
                            inline while (i < info.len) : (i += 1) {
                                @field(ret, item.name)[i] = std.mem.bytesAsSlice(u16, bytes[index + (i * 2) .. index + 2 + (i * 2)])[0];
                            }
                        },
                        else => @compileError("Unexpected field type: " ++ @typeName(info.child)),
                    },
                    else => @compileError("Unexpected field type: " ++ @typeName(item.field_type)),
                }
            },
        }
        // Advance by the field's size; relies on the on-disk layout matching field order
        index += @sizeOf(item.field_type);
    }

    return ret;
}

///
/// Initialise a slice with the values from a struct.
/// TODO: Once packed structs are good to go, then use std.mem.bytesAsValue.
///
/// Arguments:
///     IN comptime Type: type - The type of the struct.
///     IN copy_struct: Type - The struct to copy from.
///     IN bytes: []u8 - The bytes to copy to.
///
fn initBytes(comptime Type: type, copy_struct: Type, bytes: []u8) void {
    comptime var index = 0;
    inline for (std.meta.fields(Type)) |item| {
        switch (item.field_type) {
            u8 => bytes[index] = @field(copy_struct, item.name),
            u16 => std.mem.bytesAsSlice(u16, bytes[index .. index + 2])[0] = @field(copy_struct, item.name),
            u32 => std.mem.bytesAsSlice(u32, bytes[index .. index + 4])[0] = @field(copy_struct, item.name),
            else => {
                switch (@typeInfo(item.field_type)) {
                    .Array => |info| switch (info.child) {
                        u8 => {
                            comptime var i = 0;
                            inline while (i < info.len) : (i += 1) {
                                bytes[index + i] = @field(copy_struct, item.name)[i];
                            }
                        },
                        u16 => {
                            comptime var i = 0;
                            inline while (i < info.len) : (i += 1) {
                                std.mem.bytesAsSlice(u16, bytes[index + (i * 2) .. index + 2 + (i * 2)])[0] = @field(copy_struct, item.name)[i];
                            }
                        },
                        else => @compileError("Unexpected field type: " ++ @typeName(info.child)),
                    },
                    else => @compileError("Unexpected field type: " ++ @typeName(item.field_type)),
                }
            },
        }
        index += @sizeOf(item.field_type);
    }
}

///
/// A convenient function for returning the error types for reading, writing and seeking a stream.
///
/// Arguments:
///     IN comptime StreamType: type - The stream to get the error set from.
///
/// Return: type
///     The error set for reading, writing and seeking the stream.
///
fn ErrorSet(comptime StreamType: type) type {
    // For pointer-to-stream types, the error sets live on the pointee
    const ReadError = switch (@typeInfo(StreamType)) {
        .Pointer => |p| p.child.ReadError,
        else => StreamType.ReadError,
    };

    const WriteError = switch (@typeInfo(StreamType)) {
        .Pointer => |p| p.child.WriteError,
        else => StreamType.WriteError,
    };

    const SeekError = switch (@typeInfo(StreamType)) {
        .Pointer => |p| p.child.SeekError,
        else => StreamType.SeekError,
    };

    return ReadError || WriteError || SeekError;
}

///
/// FAT32 filesystem.
///
/// Arguments:
///     IN comptime StreamType: type - The type of the stream to be used as the underlying device.
///
/// Return: type
///     The FAT32 filesystem that depends on the stream type.
///
pub fn Fat32FS(comptime StreamType: type) type {
    return struct {
        /// The underlying virtual filesystem.
        fs: *vfs.FileSystem,

        /// An allocator for allocating memory for FAT32 operations.
        allocator: Allocator,

        /// The root node of the FAT32 filesystem.
        root_node: RootNode,

        /// The configuration for the FAT32 filesystem.
        fat_config: FATConfig,

        /// A mapping of opened files so opened files can easily be retrieved for reading, writing and
        /// closing.
        opened_files: AutoHashMap(*const vfs.Node, *OpenedInfo),

        /// The underlying hardware device that the FAT32 filesystem will be operating on. This could
        /// be a ramdisk, hard drive, memory stick...
        stream: StreamType,

        /// See vfs.FileSystem.instance
        instance: usize,

        // TODO: Have a FAT cache to avoid touching the disk so much.
        //       If we then need to read a new part of the FAT, flush the old one.
        //       Have a pub fn so the user can flush everything.

        /// The root node struct for storing the root of the filesystem.
        const RootNode = struct {
            /// The VFS node of the root directory.
            node: *vfs.Node,

            /// The cluster number for the root directory.
            cluster: u32,
        };

        /// The struct for storing the data needed for an opened file or directory.
        const OpenedInfo = struct {
            /// The cluster number of the file or directory.
            cluster: u32,

            /// The real size of the file. This will be zero for directories.
            size: u32,

            /// The cluster at which the FAT dir short entry for this node is located.
            entry_cluster: u32,

            /// The offset within the entry_cluster at which the short entry is located.
            entry_offset: u32,
        };

        /// The error set for the FAT32 filesystem.
        const Error = error{
            /// If the boot sector doesn't have the 0xAA55 signature as the last word. This would
            /// indicate this is not a valid boot sector.
            BadMBRMagic,

            /// An unexpected filesystem type. The filesystem type must be FAT32.
            BadFSType,

            /// An unexpected media descriptor other than 0xF8.
            BadMedia,

            /// An unexpected extended BIOS block signature other than 0x29.
            BadSignature,

            /// An unexpected FAT32 configuration. This will be if there are values set in the boot
            /// sector that are unexpected for FAT32.
            BadFat32,

            /// An unexpected FAT count. There should only be 2 tables.
            BadFATCount,

            /// Unexpected flags. Should be set to mirror, which is zero.
            NotMirror,

            /// An unexpected root cluster number. Should be 2.
            BadRootCluster,

            /// When reading from the stream, if the read count is less than the expected read,
            /// then there is a bad read.
            BadRead,

            /// When creating a new FAT32 entry, if the name doesn't match the specification.
            InvalidName,

            /// When there is no more space on the stream for a new entry.
            DiskFull,

            /// When destroying the filesystem, this is returned if there are files left open.
            FilesStillOpen,
        };

        /// The internal self struct.
        const Fat32Self = @This();

        // Can't directly access the fields of a pointer type, idk if this is a bug?

        /// The errors that can occur when reading from the stream.
        const ReadError = switch (@typeInfo(StreamType)) {
            .Pointer => |p| p.child.ReadError,
            else => StreamType.ReadError,
        };

        /// The errors that can occur when writing to the stream.
        const WriteError = switch (@typeInfo(StreamType)) {
            .Pointer => |p| p.child.WriteError,
            else => StreamType.WriteError,
        };

        /// The errors that can occur when seeking the stream.
const SeekError = switch (@typeInfo(StreamType)) { .Pointer => |p| p.child.SeekError, else => StreamType.SeekError, }; /// The errors that can occur when getting the seek position of the stream. const GetPosError = switch (@typeInfo(StreamType)) { .Pointer => |p| p.child.GetPosError, else => StreamType.GetPosError, }; /// An iterator for looping over the cluster chain in the FAT and reading the cluster data. const ClusterChainIterator = struct { /// The allocator used for allocating the initial FAT array, then to free in deinit. allocator: Allocator, /// The current cluster value. cluster: u32, /// The configuration for this FAT instance. Used for converting clusters to sectors /// and seeking to the correct location to read the next cluster value. fat_config: FATConfig, /// The underlying stream to read the FAT. stream: StreamType, /// The cached FAT. fat: []u32, /// The offset into the FAT. If the next cluster is outside the cached FAT, then will /// need to read a new part of the FAT. table_offset: u32, /// The offset used when reading part of a cluster. This will range from 0 to /// bytes_per_sector * sectors_per_cluster. cluster_offset: u32, /// The iterators self. const ClusterChainIteratorSelf = @This(); /// /// Check if we need to advance the cluster value and in turn check if the new cluster /// is within the cached FAT. /// /// Arguments: /// IN self: *ClusterChainIteratorSelf - Iterator self. /// /// Error: Fat32Self.Error || ReadError || SeekError /// Fat32Self.Error - If reading the stream didn't fill the cache FAT array. /// ReadError - If there is an error reading from the stream. /// SeekError - If there is an error seeking the stream. 
/// fn checkRead(self: *ClusterChainIteratorSelf) (Fat32Self.Error || ReadError || SeekError)!void { if (self.cluster_offset >= self.fat_config.bytes_per_sector * self.fat_config.sectors_per_cluster) { self.cluster = self.fat[self.cluster - self.table_offset]; self.cluster_offset = 0; // If we are at the end, break if ((self.cluster & 0x0FFFFFFF) >= self.fat_config.cluster_end_marker) { return; } // This will also allow for forwards and backwards iteration of the FAT const table_offset = self.cluster / self.fat.len; if (table_offset != self.table_offset) { self.table_offset = table_offset; try self.stream.seekableStream().seekTo((self.fat_config.reserved_sectors + self.table_offset) * self.fat_config.bytes_per_sector); const read_count = try self.stream.reader().readAll(std.mem.sliceAsBytes(self.fat)); if (read_count != self.fat.len * @sizeOf(u32)) { return Fat32Self.Error.BadRead; } } } } /// /// Iterate the cluster chain of FAT32. This will follow the FAT chain reading the data /// into the buffer provided where the cluster is pointing to. This will read the /// entire clusters data into the buffer. If the buffer is full or the end of the /// cluster chain is reached, then will return null else will return the end index into /// the buffer to next read into. Currently, this will be the bytes per cluster and /// only buffers aligned to bytes per cluster will work. /// /// Arguments: /// IN self: *ClusterChainIteratorSelf - Self iterator. /// IN buff: []u8 - The buffer to read the data into. /// /// Return: ?usize /// The end index into the buffer where the next read should start. If returned /// null, then the buffer is full or the end of the cluster chain is reached. /// /// Error: Fat32Self.Error || ReadError || SeekError /// Fat32Self.Error - (BadRead) If the buffer isn't aligned to the bytes per cluster boundary. /// ReadError - If there is an error reading from the stream. /// SeekError - If there is an error seeking the stream. 
/// pub fn read(self: *ClusterChainIteratorSelf, buff: []u8) (Fat32Self.Error || ReadError || SeekError)!?usize { // FAT32 is really FAT28, so the top 4 bits are not used, so mask them out if (buff.len != 0 and self.cluster != 0 and (self.cluster & 0x0FFFFFFF) < self.fat_config.cluster_end_marker) { // Seek to the sector where the cluster is const sector = self.fat_config.clusterToSector(self.cluster); try self.stream.seekableStream().seekTo(sector * self.fat_config.bytes_per_sector + self.cluster_offset); const read_len = std.math.min(buff.len, self.fat_config.bytes_per_sector * self.fat_config.sectors_per_cluster); self.cluster_offset += read_len; // Read the cluster // TODO: Maybe cache bytes per cluster block, then can read into a buffer that isn't aligned to 512 bytes. // So would read from the cache rather than the stream itself. const read_count = try self.stream.reader().readAll(buff[0..read_len]); if (read_count != read_len) { return Fat32Self.Error.BadRead; } // Increment the cluster // Check if we need to read another part of the FAT try self.checkRead(); return read_len; } return null; } /// /// Deinitialise the cluster chain iterator. /// /// Arguments: /// IN self: *const ClusterChainIteratorSelf - Iterator self. /// pub fn deinit(self: *const ClusterChainIteratorSelf) void { self.allocator.free(self.fat); } /// /// Initialise a cluster chain iterator. /// /// Arguments: /// IN allocator: Allocator - The allocator for allocating a FAT cache. /// IN fat_config: FATConfig - The FAT configuration. /// IN cluster: u32 - The first cluster to start reading from. /// IN stream: StreamType - The underlying stream. /// /// Return: ClusterChainIteratorSelf /// A cluster chain iterator. /// /// Error: Allocator.Error || Fat32Self.Error || ReadError || SeekError /// Allocator.Error - If there is an error allocating the initial FAT cache. /// Fat32Self.Error - If reading the stream didn't fill the cache FAT array. 
/// ReadError - If there is an error reading from the stream. /// SeekError - If there is an error seeking the stream. /// pub fn init(allocator: Allocator, fat_config: FATConfig, cluster: u32, stream: StreamType) (Allocator.Error || Fat32Self.Error || ReadError || SeekError)!ClusterChainIteratorSelf { // Create a bytes per sector sized cache of the FAT. var fat = try allocator.alloc(u32, fat_config.bytes_per_sector / @sizeOf(u32)); errdefer allocator.free(fat); const table_offset = cluster / fat.len; // Seek to the FAT // The FAT is just after the reserved sectors + the index try stream.seekableStream().seekTo((fat_config.reserved_sectors + table_offset) * fat_config.bytes_per_sector); const read_count = try stream.reader().readAll(std.mem.sliceAsBytes(fat)); if (read_count != fat.len * @sizeOf(u32)) { return Fat32Self.Error.BadRead; } return ClusterChainIteratorSelf{ .allocator = allocator, .cluster = cluster, .fat_config = fat_config, .stream = stream, .fat = fat, .table_offset = table_offset, .cluster_offset = 0, }; } }; /// An iterator for looping over the directory structure of FAT32. const EntryIterator = struct { /// An allocator for memory stuff allocator: Allocator, /// A cache of the current cluster. This wil be read from the cluster chain iterator. cluster_block: []u8, /// The current index into the cluster block. index: u32, /// The cluster chain iterator to read the next custer when needed. cluster_chain: ClusterChainIterator, /// The entry iterator self. const EntryIteratorSelf = @This(); /// The entry returned from 'next()'. pub const Entry = struct { /// The allocator used to allocate fields of this entry. Also used to free these /// entries in the 'deinit()' function. allocator: Allocator, /// The long name for the entry. This maybe null as not all entry have a long name /// part, just a short name. long_name: ?[]const u8, /// The short name struct. short_name: ShortName, /// /// Free the entry. 
                ///
                /// Arguments:
                ///     IN self: *const Entry - The entry self to free.
                ///
                ///
                pub fn deinit(self: *const Entry) void {
                    if (self.long_name) |name| {
                        self.allocator.free(name);
                    }
                }
            };

            /// The error set for the entry iterator.
            const EntryItError = error{
                /// If the long entry is invalid, so ignore it and use the short name only.
                Orphan,

                /// If reading the cluster chain and reach the end unexpectedly.
                EndClusterChain,
            };

            ///
            /// Check if the next entry will be outside the cluster block cache. If so, read the
            /// next cluster and update the index to 0.
            ///
            /// Arguments:
            ///     IN self: *EntryIteratorSelf - Iterator self.
            ///
            /// Error: Fat32Self.Error || ReadError || SeekError || EntryItError
            ///     Fat32Self.Error       - Error reading the cluster chain if the buffer isn't aligned.
            ///     ReadError             - Error reading from the stream in the cluster iterator.
            ///     SeekError             - Error seeking the stream in the cluster iterator.
            ///     error.EndClusterChain - Reading the next cluster and reaching the end unexpectedly.
            ///
            fn checkRead(self: *EntryIteratorSelf) (Fat32Self.Error || ReadError || SeekError || EntryItError)!void {
                if (self.index >= self.cluster_block.len) {
                    // Read the next block
                    var index: u32 = 0;
                    while (try self.cluster_chain.read(self.cluster_block[index..])) |next_index| {
                        index += next_index;
                    }

                    // Didn't read so at end of cluster chain
                    // TODO: This relies on that the cluster chain iterator will return full cluster bytes.
                    //       Currently this is the case, but when changed will need to update this to
                    //       include a partially full cluster block cache, with an index.
                    if (index < self.cluster_block.len) {
                        return EntryItError.EndClusterChain;
                    }

                    // Reset the index
                    self.index = 0;
                }
            }

            ///
            /// A helper function for reading an entry. This is used for catching orphaned long
            /// entries and ignoring them so this shouldn't be called directly.
            ///
            /// Arguments:
            ///     IN self: *EntryIteratorSelf - Iterator self.
            ///
            /// Return: ?Entry
            ///     The FAT entry. Will return null if there are no more entries for the directory.
            ///
            /// Error: Allocator.Error || Fat32Self.Error || ReadError || SeekError || EntryItError
            ///     Allocator.Error       - Error allocating memory for fields in the return entry.
            ///     Fat32Self.Error       - Error reading the cluster chain if the buffer isn't aligned.
            ///     ReadError             - Error reading from the underlying stream.
            ///     SeekError             - Error seeking the underlying stream.
            ///     error.Orphan          - If there is a long entry without a short name entry or the
            ///                             check sum in the long entry is incorrect with the associated
            ///                             short entry or the long entry is missing entry parts.
            ///     error.EndClusterChain - Reading the next cluster and reaching the end unexpectedly.
            ///
            fn nextImp(self: *EntryIteratorSelf) (Allocator.Error || Fat32Self.Error || ReadError || SeekError || LongName.Error || EntryItError)!?Entry {
                // Do we need to read the next block
                try self.checkRead();

                // Ignore deleted files (0xE5 as the first byte marks a deleted entry)
                while (self.cluster_block[self.index] == 0xE5) : ({
                    self.index += 32;
                    try self.checkRead();
                }) {}

                // Are we at the end of all entries (first byte 0x00 marks the end)
                if (self.cluster_block[self.index] != 0x00) {
                    // The long name if there is one
                    var long_name: ?[]u8 = null;
                    errdefer if (long_name) |name| self.allocator.free(name);
                    var long_entries: ?[]LongName = null;
                    defer if (long_entries) |entries| self.allocator.free(entries);

                    // If attribute is 0x0F, then is a long file name
                    if (self.cluster_block[self.index + 11] == 0x0F and self.cluster_block[self.index] & 0x40 == 0x40) {
                        // How many entries do we have, the first byte of the order. This starts at 1 not 0
                        var long_entry_count = self.cluster_block[self.index] & ~@as(u32, 0x40);
                        // Allocate a buffer for the long name. 13 for the 13 characters in the long entry
                        // * 3 as long names are u16 chars so when converted to u8, there could be 3 u8 to encode a u16 (UTF16 -> UTF8)
                        long_name = try self.allocator.alloc(u8, 13 * 3 * long_entry_count);
                        // For convenience
                        var long_name_temp = long_name.?;
                        var long_name_index: u32 = 0;
                        long_entries = try self.allocator.alloc(LongName, long_entry_count);

                        // Iterate through the long name entries (they are stored in reverse order on disk)
                        while (long_entry_count > 0) : ({
                            self.index += 32;
                            long_entry_count -= 1;
                            try self.checkRead();
                        }) {
                            // Parse the long entry
                            long_entries.?[long_entry_count - 1] = initStruct(LongName, self.cluster_block[self.index..]);
                            // Check the order of the long entry as it could be broken
                            if ((long_entries.?[long_entry_count - 1].order & ~@as(u32, 0x40)) != long_entry_count) {
                                // A broken long entry
                                self.index += 32;
                                return EntryItError.Orphan;
                            }
                        }

                        // Parse the name
                        for (long_entries.?) |entry| {
                            long_name_index += try entry.getName(long_name_temp[long_name_index..]);
                        }

                        long_name = long_name_temp[0..long_name_index];
                    }

                    // Make sure we have a short name part, if not then it is an orphan
                    // Easy check for the attributes as 0x0F is invalid and a long name part
                    // So if we have one, then this is a long entry not short entry as expected
                    // Also make sure we are not at the end
                    if (self.cluster_block[self.index + 11] == 0x0F and self.cluster_block[self.index] != 0x00) {
                        // This will be an invalid short name
                        self.index += 32;
                        return EntryItError.Orphan;
                    }

                    // Parse the short entry
                    // We need all parts of the short name, not just the name
                    const short_entry = initStruct(ShortName, self.cluster_block[self.index..]);
                    // Check the check sum
                    if (long_entries) |entries| {
                        for (entries) |entry| {
                            if (entry.check_sum != short_entry.calcCheckSum()) {
                                return EntryItError.Orphan;
                            }
                        }
                    }

                    // Increment for the next entry
                    self.index += 32;
                    return Entry{
                        .allocator = self.allocator,
                        .long_name = long_name,
                        .short_name = short_entry,
                    };
                }

                return null;
            }

            ///
            /// Get the next entry in the iterator. Will return null when at the end. This will
            /// ignore orphaned long entries.
            ///
            /// Arguments:
            ///     IN self: *EntryIteratorSelf - Iterator self.
            ///
            /// Return: ?Entry
            ///     The FAT entry. Will return null if there are no more entries for the directory.
            ///     The entry must be freed using the deinit() function.
            ///
            /// Error: Allocator.Error || Fat32Self.Error || ReadError || SeekError
            ///     Allocator.Error - Error allocating memory for fields in the return entry.
            ///     Fat32Self.Error - Error reading the cluster chain if the buffer isn't aligned.
            ///     ReadError       - Error reading from the underlying stream.
            ///     SeekError       - Error seeking the underlying stream.
            ///
            pub fn next(self: *EntryIteratorSelf) (Allocator.Error || Fat32Self.Error || ReadError || SeekError || LongName.Error)!?Entry {
                // If there is an orphan file, then just get the next one
                // If we hit the end of the cluster chain, return null
                while (true) {
                    if (self.nextImp()) |n| {
                        return n;
                    } else |e| switch (e) {
                        error.Orphan => continue,
                        error.EndClusterChain => return null,
                        else => return @errSetCast(Allocator.Error || Fat32Self.Error || ReadError || SeekError || LongName.Error, e),
                    }
                }
            }

            ///
            /// Destroy the entry iterator freeing any memory.
            ///
            /// Arguments:
            ///     IN self: *const EntryIteratorSelf - Iterator self.
            ///
            pub fn deinit(self: *const EntryIteratorSelf) void {
                self.allocator.free(self.cluster_block);
                self.cluster_chain.deinit();
            }

            ///
            /// Initialise a directory entry iterator for looping over the FAT entries. This uses
            /// the cluster chain iterator.
            ///
            /// Arguments:
            ///     IN allocator: Allocator - The allocator for allocating a cluster block cache.
            ///     IN fat_config: FATConfig - The FAT configuration.
            ///     IN cluster: u32 - The first cluster to start reading from.
            ///     IN stream: StreamType - The underlying stream.
            ///
            /// Return: EntryIteratorSelf
            ///     The entry iterator.
/// /// Error: Allocator.Error || Fat32Self.Error || ReadError || SeekError /// Allocator.Error - Error allocating memory for fields in the return entry. /// Fat32Self.Error - Error reading the cluster chain if the buffer isn't aligned. /// ReadError - Error reading from the underlying stream. /// SeekError - Error seeking the underlying stream. /// pub fn init(allocator: Allocator, fat_config: FATConfig, cluster: u32, stream: StreamType) (Allocator.Error || Fat32Self.Error || ReadError || SeekError)!EntryIteratorSelf { var cluster_block = try allocator.alloc(u8, fat_config.bytes_per_sector * fat_config.sectors_per_cluster); errdefer allocator.free(cluster_block); var it = try ClusterChainIterator.init(allocator, fat_config, cluster, stream); errdefer it.deinit(); var index: u32 = 0; while (try it.read(cluster_block[index..])) |next_index| { index += next_index; } return EntryIteratorSelf{ .allocator = allocator, .cluster_block = cluster_block, .index = 0, .cluster_chain = it, }; } }; /// See vfs.FileSystem.getRootNode fn getRootNode(fs: *const vfs.FileSystem) *const vfs.DirNode { const self = @fieldParentPtr(Fat32Self, "instance", fs.instance); return &self.root_node.node.Dir; } /// See vfs.FileSystem.close fn close(fs: *const vfs.FileSystem, node: *const vfs.Node) void { const self = @fieldParentPtr(Fat32Self, "instance", fs.instance); // As close can't error, if provided with a invalid Node that isn't opened or try to close // the same file twice, will just do nothing. 
            if (self.opened_files.fetchRemove(node)) |entry_node| {
                self.allocator.destroy(entry_node.value);
                self.allocator.destroy(node);
            }
        }

        /// See vfs.FileSystem.read
        fn read(fs: *const vfs.FileSystem, node: *const vfs.FileNode, buffer: []u8) (Allocator.Error || vfs.Error)!usize {
            const self = @fieldParentPtr(Fat32Self, "instance", fs.instance);
            const cast_node = @ptrCast(*const vfs.Node, node);
            const opened_node = self.opened_files.get(cast_node) orelse return vfs.Error.NotOpened;
            // Only read up to the file's real size
            const size = std.math.min(buffer.len, opened_node.size);
            var it = ClusterChainIterator.init(self.allocator, self.fat_config, opened_node.cluster, self.stream) catch |e| switch (e) {
                error.OutOfMemory => return error.OutOfMemory,
                else => {
                    log.err("Error initialising the cluster chain iterator. Error: {}\n", .{e});
                    return vfs.Error.Unexpected;
                },
            };
            defer it.deinit();
            var index: usize = 0;
            while (it.read(buffer[index..size]) catch |e| {
                log.err("Error reading the cluster chain iterator. Error: {}\n", .{e});
                return vfs.Error.Unexpected;
            }) |next_index| {
                index += next_index;
            }

            return size;
        }

        /// See vfs.FileSystem.write
        fn write(fs: *const vfs.FileSystem, node: *const vfs.FileNode, bytes: []const u8) (Allocator.Error || vfs.Error)!usize {
            const self = @fieldParentPtr(Fat32Self, "instance", fs.instance);
            const cast_node = @ptrCast(*const vfs.Node, node);
            const opened_node = self.opened_files.get(cast_node) orelse return vfs.Error.NotOpened;

            // Short cut if length is less than cluster size, can just write the content directly without modifying the FAT
            if (bytes.len <= self.fat_config.sectors_per_cluster * self.fat_config.bytes_per_sector) {
                const sector = self.fat_config.clusterToSector(opened_node.cluster);
                self.stream.seekableStream().seekTo(sector * self.fat_config.bytes_per_sector) catch return vfs.Error.Unexpected;
                _ = self.stream.writer().writeAll(bytes) catch return vfs.Error.Unexpected;
            } else {
                var to_write: u32 = self.fat_config.sectors_per_cluster * self.fat_config.bytes_per_sector;
                var write_index: u32 = 0;
                var next_free_cluster: u32 = opened_node.cluster;
                if (self.fat_config.has_fs_info) {
                    if (self.fat_config.number_free_clusters < bytes.len / (self.fat_config.sectors_per_cluster * self.fat_config.bytes_per_sector)) {
                        // Not enough free clusters
                        return vfs.Error.Unexpected;
                    }
                }
                // Write one cluster at a time, allocating a new cluster before each
                // subsequent chunk.
                // NOTE(review): findNextFreeCluster is passed next_free_cluster for both the
                // hint and parent arguments — verify against its definition (not visible here)
                // that this correctly links the cluster chain.
                while (write_index < bytes.len) : ({
                    write_index = to_write;
                    to_write = std.math.min(bytes.len, write_index + self.fat_config.sectors_per_cluster * self.fat_config.bytes_per_sector);
                    if (write_index < bytes.len) {
                        next_free_cluster = self.findNextFreeCluster(next_free_cluster, next_free_cluster) catch return vfs.Error.Unexpected;
                    }
                }) {
                    const sector = self.fat_config.clusterToSector(next_free_cluster);
                    self.stream.seekableStream().seekTo(sector * self.fat_config.bytes_per_sector) catch return vfs.Error.Unexpected;
                    _ = self.stream.writer().writeAll(bytes[write_index..to_write]) catch return vfs.Error.Unexpected;
                }
            }

            // Update the size for the file in its directory entry. entry_offset points at the
            // entry's size field (see openImpl).
            // NOTE(review): bytes.len is usize and the size field is u32 — confirm the target's
            // usize is 32-bit or that an explicit cast is handled by the toolchain in use.
            const entry_sector = self.fat_config.clusterToSector(opened_node.entry_cluster);
            self.stream.seekableStream().seekTo(entry_sector * self.fat_config.bytes_per_sector + opened_node.entry_offset) catch return vfs.Error.Unexpected;
            self.stream.writer().writeIntLittle(u32, bytes.len) catch return vfs.Error.Unexpected;
            opened_node.size = bytes.len;

            return bytes.len;
        }

        /// See vfs.FileSystem.open
        fn open(fs: *const vfs.FileSystem, dir: *const vfs.DirNode, name: []const u8, flags: vfs.OpenFlags, open_args: vfs.OpenArgs) (Allocator.Error || vfs.Error)!*vfs.Node {
            // Suppress unused var warning
            _ = open_args;
            return switch (flags) {
                .NO_CREATION => openImpl(fs, dir, name),
                .CREATE_FILE => createFileOrDir(fs, dir, name, false),
                .CREATE_DIR => createFileOrDir(fs, dir, name, true),
                // FAT32 doesn't support symlinks
                .CREATE_SYMLINK => vfs.Error.InvalidFlags,
            };
        }

        ///
        /// Helper function for creating the correct *Node. This can only create a FileNode or
        /// DirNode; symlinks are not supported for FAT32. The arguments are assumed correct: the
        /// cluster points to a free block and the size is correct: zero for directories.
        ///
        /// Arguments:
        ///     IN self: *Fat32Self - Self, needed for the allocator and underlying filesystem.
        ///     IN cluster: u32 - The cluster where the file/directory will be.
        ///     IN size: u32 - The size of the file or 0 for a directory.
        ///     IN entry_cluster: u32 - The cluster where the FAT dir entry is located.
        ///     IN entry_offset: u32 - The offset in the entry_cluster where the entry is located.
        ///     IN flags: vfs.OpenFlags - The open flags for deciding on the Node type.
        ///
        /// Return: *vfs.Node
        ///     The VFS Node.
        ///
        /// Error: Allocator.Error || vfs.Error
        ///     Allocator.Error        - Not enough memory for allocating the Node.
        ///     vfs.Error.InvalidFlags - Symlinks are not supported in FAT32.
        ///
        fn createNode(self: *Fat32Self, cluster: u32, size: u32, entry_cluster: u32, entry_offset: u32, flags: vfs.OpenFlags) (Allocator.Error || vfs.Error)!*vfs.Node {
            var node = try self.allocator.create(vfs.Node);
            errdefer self.allocator.destroy(node);
            node.* = switch (flags) {
                .CREATE_DIR => .{ .Dir = .{ .fs = self.fs, .mount = null } },
                .CREATE_FILE => .{ .File = .{ .fs = self.fs } },
                .CREATE_SYMLINK, .NO_CREATION => return vfs.Error.InvalidFlags,
            };

            // Create the opened info struct
            var opened_info = try self.allocator.create(OpenedInfo);
            errdefer self.allocator.destroy(opened_info);
            opened_info.* = .{
                .cluster = cluster,
                .size = size,
                .entry_cluster = entry_cluster,
                .entry_offset = entry_offset,
            };

            try self.opened_files.put(node, opened_info);
            return node;
        }

        ///
        /// A helper function for getting the cluster from an opened directory node.
        ///
        /// Arguments:
        ///     IN self: *const Fat32Self - Self, used to get the opened nodes.
        ///     IN dir: *const vfs.DirNode - The directory node to get the cluster for.
        ///
        /// Return: u32
        ///     The cluster number for the directory node.
        ///
        /// Error: vfs.Error
        ///     error.NotOpened - If the directory node isn't opened.
        ///
        fn getDirCluster(self: *const Fat32Self, dir: *const vfs.DirNode) vfs.Error!u32 {
            return if (std.meta.eql(dir, self.fs.getRootNode(self.fs))) self.root_node.cluster else brk: {
                const parent = self.opened_files.get(@ptrCast(*const vfs.Node, dir)) orelse return vfs.Error.NotOpened;
                // A stored cluster of 0 denotes the root directory cluster.
                break :brk if (parent.cluster == 0) self.fat_config.root_directory_cluster else parent.cluster;
            };
        }

        ///
        /// The helper function for opening a file/folder, no creation.
        ///
        /// Arguments:
        ///     IN fs: *const vfs.FileSystem - The underlying filesystem.
        ///     IN dir: *const vfs.DirNode - The parent directory.
        ///     IN name: []const u8 - The name of the file/folder to open.
        ///
        /// Return: *vfs.Node
        ///     The VFS Node for the opened file/folder.
        ///
        /// Error: Allocator.Error || vfs.Error
        ///     Allocator.Error - Not enough memory for allocating memory
        ///     vfs.Error.NoSuchFileOrDir - Error if the file/folder doesn't exist.
        ///     vfs.Error.Unexpected - An error occurred whilst reading the file system, this
        ///                            can be caused by a parsing error or errors on reading
        ///                            or seeking the underlying stream. If this occurs, then
        ///                            the real error is printed using `log.err`.
        ///
        fn openImpl(fs: *const vfs.FileSystem, dir: *const vfs.DirNode, name: []const u8) (Allocator.Error || vfs.Error)!*vfs.Node {
            const self = @fieldParentPtr(Fat32Self, "instance", fs.instance);
            // TODO: Cache the files in this dir, so when opening, don't have to iterator the directory every time
            // Iterate over the directory and find the file/folder
            const cluster = try self.getDirCluster(dir);
            var previous_cluster = cluster;
            var it = EntryIterator.init(self.allocator, self.fat_config, cluster, self.stream) catch |e| switch (e) {
                error.OutOfMemory => return error.OutOfMemory,
                else => {
                    log.err("Error initialising the entry iterator. Error: {}\n", .{e});
                    return vfs.Error.Unexpected;
                },
            };
            defer it.deinit();
            while (it.next() catch |e| switch (e) {
                error.OutOfMemory => return error.OutOfMemory,
                else => {
                    log.err("Error in next() iterating the entry iterator. Error: {}\n", .{e});
                    return vfs.Error.Unexpected;
                },
            }) |entry| {
                defer entry.deinit();
                // Track the last cluster in the chain that isn't an end-of-chain marker,
                // so the opened node remembers which cluster holds its directory entry.
                if ((it.cluster_chain.cluster & 0x0FFFFFFF) < self.fat_config.cluster_end_marker) {
                    previous_cluster = it.cluster_chain.cluster;
                }
                // File name compare is case insensitive
                var match: bool = brk: {
                    if (entry.long_name) |long_name| {
                        if (std.ascii.eqlIgnoreCase(name, long_name)) {
                            break :brk true;
                        }
                    }
                    var short_buff: [33]u8 = undefined;
                    const s_end = entry.short_name.getName(short_buff[0..]);
                    if (std.ascii.eqlIgnoreCase(name, short_buff[0..s_end])) {
                        break :brk true;
                    }
                    break :brk false;
                };
                if (match) {
                    const open_type = if (entry.short_name.isDir()) vfs.OpenFlags.CREATE_DIR else vfs.OpenFlags.CREATE_FILE;
                    // The stored entry offset addresses the entry's size field: write() seeks to
                    // entry_offset to update the size, and createFileOrDir passes offset + 28
                    // (the size u32 is the last 4 bytes of the 32-byte short entry), so back up
                    // 4 from the iterator position which has consumed the whole entry.
                    return self.createNode(entry.short_name.getCluster(), entry.short_name.size, previous_cluster, it.index - 4, open_type);
                }
            }
            return vfs.Error.NoSuchFileOrDir;
        }

        ///
        /// Helper function for finding a free cluster from a given hint. The hint is where to
        /// start looking. This will update the FAT and FSInfo accordingly. If a parent cluster
        /// is provided, then the cluster chain will be updated so the parent cluster will point to
        /// the new cluster.
        ///
        /// Arguments:
        ///     IN self: *Fat32Self - Self for allocating a free cluster with the current configuration.
        ///     IN cluster_hint: u32 - The cluster hint to start looking.
        ///     IN parent_cluster: ?u32 - The parent cluster to update the cluster chain from. Can
        ///                               be null if creating a new cluster for a file/folder.
        ///
        /// Return: u32
        ///     The next free cluster to use.
        ///
        /// Error: Allocator.Error || ReadError || SeekError || Fat32Self.Error
        ///     Allocator.Error - Not enough memory for allocating memory.
        ///     WriteError - Error while updating the FAT with the new cluster.
        ///     ReadError - Error while reading the stream.
        ///     SeekError - Error while seeking the stream.
        ///     Fat32Self.Error.BadRead - Error reading the FAT, not aligned to the sector.
        ///     Fat32Self.Error.DiskFull - No free clusters.
        ///
        fn findNextFreeCluster(self: *Fat32Self, cluster_hint: u32, parent_cluster: ?u32) (Allocator.Error || WriteError || ReadError || SeekError || Fat32Self.Error)!u32 {
            // Work one FAT sector at a time; each sector holds bytes_per_sector / 4 entries.
            var fat_buff = try self.allocator.alloc(u32, self.fat_config.bytes_per_sector / @sizeOf(u32));
            defer self.allocator.free(fat_buff);
            var sector_offset = cluster_hint / fat_buff.len;
            const reader = self.stream.reader();
            const writer = self.stream.writer();
            const seeker = self.stream.seekableStream();
            try seeker.seekTo((self.fat_config.reserved_sectors + sector_offset) * self.fat_config.bytes_per_sector);
            var fat_read = try reader.readAll(std.mem.sliceAsBytes(fat_buff));
            if (fat_read != self.fat_config.bytes_per_sector) {
                return Fat32Self.Error.BadRead;
            }

            // Check for a free cluster by checking the FAT for a 0x00000000 entry (free)
            var cluster = cluster_hint;
            while (fat_buff[cluster - (sector_offset * fat_buff.len)] != 0x00000000) {
                cluster += 1;
                // Check we are still in the FAT buffer, if not, read the next FAT part
                const check_offset = cluster / fat_buff.len;
                if (check_offset > sector_offset) {
                    // Check if the cluster will go outside the FAT
                    if (check_offset >= self.fat_config.sectors_per_fat) {
                        // TODO: Will need to wrap as there maybe free clusters before the hint
                        if (self.fat_config.has_fs_info) {
                            std.debug.assert(self.fat_config.number_free_clusters == 0);
                        }
                        return Fat32Self.Error.DiskFull;
                    }
                    sector_offset = check_offset;
                    try seeker.seekTo((self.fat_config.reserved_sectors + sector_offset) * self.fat_config.bytes_per_sector);
                    fat_read = try reader.readAll(std.mem.sliceAsBytes(fat_buff));
                    if (fat_read != fat_buff.len * @sizeOf(u32)) {
                        return Fat32Self.Error.BadRead;
                    }
                }
            }

            // Update the FAT for the allocated cluster: link the parent (if any) to the new
            // cluster, then mark the new cluster as end-of-chain.
            if (parent_cluster) |p_cluster| {
                try seeker.seekTo((self.fat_config.reserved_sectors * self.fat_config.bytes_per_sector) + (p_cluster * @sizeOf(u32)));
                try writer.writeIntLittle(u32, cluster);
            }
            try seeker.seekTo((self.fat_config.reserved_sectors * self.fat_config.bytes_per_sector) + (cluster * @sizeOf(u32)));
            try writer.writeIntLittle(u32, self.fat_config.cluster_end_marker);

            // And the backup FAT (the second copy lives sectors_per_fat sectors after the first)
            if (parent_cluster) |p_cluster| {
                try seeker.seekTo(((self.fat_config.sectors_per_fat + self.fat_config.reserved_sectors) * self.fat_config.bytes_per_sector) + (p_cluster * @sizeOf(u32)));
                try writer.writeIntLittle(u32, cluster);
            }
            try seeker.seekTo(((self.fat_config.sectors_per_fat + self.fat_config.reserved_sectors) * self.fat_config.bytes_per_sector) + (cluster * @sizeOf(u32)));
            try writer.writeIntLittle(u32, self.fat_config.cluster_end_marker);

            // Update the FSInfo if we have one
            if (self.fat_config.has_fs_info) {
                self.fat_config.next_free_cluster = cluster + 1;
                self.fat_config.number_free_clusters -= 1;
                // write it to disk
                // TODO: Have this cached and flush later to save writes
                //       Have a flush function so the user can flush and flush on deinit
                // 488 is the byte offset of the free-cluster-count field in the FSInfo sector;
                // next_free_cluster immediately follows it.
                try seeker.seekTo(self.fat_config.fsinfo_sector * self.fat_config.bytes_per_sector + 488);
                try writer.writeIntLittle(u32, self.fat_config.number_free_clusters);
                try writer.writeIntLittle(u32, self.fat_config.next_free_cluster);
                // And the backup FSInfo
                try seeker.seekTo((self.fat_config.backup_boot_sector + self.fat_config.fsinfo_sector) * self.fat_config.bytes_per_sector + 488);
                try writer.writeIntLittle(u32, self.fat_config.number_free_clusters);
                try writer.writeIntLittle(u32, self.fat_config.next_free_cluster);
            }

            // Have found a free cluster
            return cluster;
        }

        ///
        /// Helper function for creating a file, folder or symlink.
        ///
        /// Arguments:
        ///     IN fs: *const vfs.FileSystem - The underlying filesystem.
        ///     IN dir: *const vfs.DirNode - The parent directory.
        ///     IN name: []const u8 - The name of the file/folder to open.
        ///     IN is_dir: bool - If creating a file/folder.
        ///
        /// Return: *vfs.Node
        ///     The VFS Node for the opened/created file/folder.
        ///
        /// Error: Allocator.Error || vfs.Error
        ///     Allocator.Error - Not enough memory for allocating memory.
        ///     vfs.Error.NoSuchFileOrDir - Error if creating a symlink and no target is provided.
        ///     vfs.Error.Unexpected - An error occurred whilst reading the file system, this
        ///                            can be caused by a parsing error or errors on reading
        ///                            or seeking the underlying stream. If this occurs, then
        ///                            the real error is printed using `log.err`.
        ///
        fn createFileOrDir(fs: *const vfs.FileSystem, dir: *const vfs.DirNode, name: []const u8, is_dir: bool) (Allocator.Error || vfs.Error)!*vfs.Node {
            const self = @fieldParentPtr(Fat32Self, "instance", fs.instance);

            // Collect the existing short names in this directory so a non-clashing
            // short name can be generated for the new entry.
            var files_in_dir = ArrayList([11]u8).init(self.allocator);
            defer files_in_dir.deinit();

            const dir_cluster = try self.getDirCluster(dir);
            var previous_cluster = dir_cluster;
            var previous_index: u32 = 0;
            var it = EntryIterator.init(self.allocator, self.fat_config, dir_cluster, self.stream) catch |e| switch (e) {
                error.OutOfMemory => return error.OutOfMemory,
                else => {
                    log.err("Error initialising the entry iterator. Error: {}\n", .{e});
                    return vfs.Error.Unexpected;
                },
            };
            defer it.deinit();
            while (it.next() catch |e| switch (e) {
                error.OutOfMemory => return error.OutOfMemory,
                else => {
                    log.err("Error in next() iterating the entry iterator. Error: {}\n", .{e});
                    return vfs.Error.Unexpected;
                },
            }) |entry| {
                defer entry.deinit();
                // Keep track of the last cluster before the end
                previous_index = it.index;
                if ((it.cluster_chain.cluster & 0x0FFFFFFF) < self.fat_config.cluster_end_marker) {
                    previous_cluster = it.cluster_chain.cluster;
                }
                try files_in_dir.append(entry.short_name.getSFNName());
            }
            const existing_files = files_in_dir.toOwnedSlice();
            defer self.allocator.free(existing_files);

            // Find a free cluster
            // The default cluster to start looking for a free cluster
            var cluster_hint: u32 = 2;
            if (self.fat_config.has_fs_info and self.fat_config.next_free_cluster != 0x0FFFFFFF) {
                // Have a next free cluster in the FSInfo, so can use this to start looking
                cluster_hint = self.fat_config.next_free_cluster;
            }
            const free_cluster = self.findNextFreeCluster(cluster_hint, null) catch |e| switch (e) {
                error.OutOfMemory => return error.OutOfMemory,
                else => {
                    log.err("Error finding next cluster. Error: {}\n", .{e});
                    return vfs.Error.Unexpected;
                },
            };

            const dir_attr: ShortName.Attributes = if (is_dir) .Directory else .None;
            const entries = createEntries(self.allocator, name, free_cluster, dir_attr, existing_files) catch |e| switch (e) {
                error.OutOfMemory => return error.OutOfMemory,
                else => {
                    log.err("Error creating short and long entries. Error: {}\n", .{e});
                    return vfs.Error.Unexpected;
                },
            };
            defer self.allocator.free(entries.long_entry);

            // Write the entries to the directory
            const short_offset = self.writeEntries(entries, previous_cluster, free_cluster, previous_index) catch |e| switch (e) {
                error.OutOfMemory => return error.OutOfMemory,
                else => {
                    log.err("Error writing entries to disk. Error: {}\n", .{e});
                    return vfs.Error.Unexpected;
                },
            };

            // +28 so the stored entry offset addresses the size field (the last u32) of the
            // 32-byte short entry; write() seeks straight to it to update the file size.
            return self.createNode(free_cluster, 0, short_offset.cluster, short_offset.offset + 28, if (is_dir) .CREATE_DIR else .CREATE_FILE);
        }

        ///
        /// Helper function for creating both long and short entries. This will convert the raw
        /// name to a valid long and short FAT32 name and create the corresponding FAT32 entries.
        /// error.InvalidName will be returned if the raw name cannot be converted to a valid FAT32
        /// name. The caller needs to free the long name entries.
        ///
        /// Arguments:
        ///     IN allocator: Allocator - The allocator for allocating the long entries array.
        ///     IN name: []const u8 - The raw name to be used for creating the file/directory.
        ///     IN cluster: u32 - The cluster where the entry will point to.
        ///     IN attributes: ShortName.Attributes - The attributes of the the entry.
        ///     IN existing_short_names: []const [11]u8 - Existing short names so to resolve short name clashes.
        ///
        /// Return: FatDirEntry
        ///     The full FAT entry ready to be copied to disk, byte by byte.
        ///
        /// Error: Allocator.Error || Fat32Self.Error
        ///     Allocator.Error - Error allocating memory.
        ///     Fat32Self.Error - The name provided cannot be converted to a valid FAT32 name.
        ///
        fn createEntries(allocator: Allocator, name: []const u8, cluster: u32, attributes: ShortName.Attributes, existing_short_names: []const [11]u8) (Allocator.Error || Fat32Self.Error)!FatDirEntry {
            const long_name = try nameToLongName(allocator, name);
            defer allocator.free(long_name);
            const short_name = try longNameToShortName(long_name, existing_short_names);
            const short_entry = createShortNameEntry(short_name, attributes, cluster);
            const long_entry = try createLongNameEntry(allocator, long_name, short_entry.calcCheckSum());
            return FatDirEntry{
                .long_entry = long_entry,
                .short_entry = short_entry,
            };
        }

        ///
        /// Helper for converting a raw file name to a valid FAT32 long file name. The returned
        /// name will need to be freed by the allocator.
        ///
        /// Arguments:
        ///     IN allocator: Allocator - The allocator for creating the FAT32 long file name.
        ///     IN name: []const u8 - The raw file name.
        ///
        /// Return: []const u16
        ///     A valid UTF-16 FAT32 long file name.
/// /// Error: Allocator.Error || Fat32Self.Error /// Allocator.Error - Error allocating memory. /// Fat32Self.Error.InvalidName - The file name cannot be converted to a valid long name. /// fn nameToLongName(allocator: Allocator, name: []const u8) (Allocator.Error || Fat32Self.Error)![]const u16 { // Allocate a buffer to translate to UFT16. Then length of the UFT8 will be more than enough // TODO: Calc the total length and use appendAssumeCapacity var utf16_buff = try ArrayList(u16).initCapacity(allocator, name.len); defer utf16_buff.deinit(); // The name is in UTF8, this needs to be conversed to UTF16 // This also checks for valid UTF8 characters const utf8_view = std.unicode.Utf8View.init(name) catch return Fat32Self.Error.InvalidName; var utf8_it = utf8_view.iterator(); // Make sure the code points as valid for the long name var ignored_leading = false; while (utf8_it.nextCodepoint()) |code_point| { // Ignore leading spaces if (!ignored_leading and code_point == ' ') { continue; } ignored_leading = true; // If it is larger than 0xFFFF, then it cannot fit in UTF16 so invalid. // Can't have control characters (including the DEL key) if (code_point > 0xFFFF or code_point < 0x20 or code_point == 0x7F) { return Fat32Self.Error.InvalidName; } // Check for invalid characters const invalid_chars = "\"*/:<>?\\|"; inline for (invalid_chars) |char| { if (char == code_point) { return Fat32Self.Error.InvalidName; } } // Valid character try utf16_buff.append(@intCast(u16, code_point)); } // Remove trailing spaces and dots // And return the name const long_name = std.mem.trimRight(u16, utf16_buff.toOwnedSlice(), &[_]u16{ ' ', '.' 
}); errdefer allocator.free(long_name); // Check the generated name is a valid length if (long_name.len > 255) { return Fat32Self.Error.InvalidName; } return long_name; } /// /// Helper function for checking if a u16 long name character can be converted to a valid /// OEM u8 character /// /// Arguments: /// IN char: u16 - The character to check /// /// Return: ?u8 /// The successful converted character or null if is an invalid OEM u8 char. /// fn isValidSFNChar(char: u16) ?u8 { // Ignore spaces if (char == 0x20) { return null; } // If not a u8 char or CP437 char, then replace with a _ if (char > 0x7F) { return CodePage.toCodePage(.CP437, char) catch { return null; }; } // Check for invalid characters, then replace with a _ const invalid_chars = "+,;=[]"; inline for (invalid_chars) |c| { if (c == char) { return null; } } return @intCast(u8, char); } /// /// Helper function for converting a valid long name to a short file name. This expects /// valid long name, else is undefined for invalid long names. This also checks against /// existing short names so there are no clashed within the same directory (appending /// ~n to the end of the short name if there is a clash). /// /// Arguments: /// IN long_name: []const u16 - The long name to convert. /// IN existing_names: []const [11]u8 - The list of existing short names. /// /// Return: [11]u8 /// The converted short name. /// /// Error: Fat32Self.Error /// Fat32Self.Error.InvalidName - If the directory is fill of the same short file name. 
        ///
        fn longNameToShortName(long_name: []const u16, existing_names: []const [11]u8) Fat32Self.Error![11]u8 {
            // Pad with spaces
            var sfn: [11]u8 = [_]u8{' '} ** 11;
            var sfn_i: u8 = 0;
            var is_lossy = false;

            // TODO: Need to convert to upper case first but don't have proper unicode support for this yet

            // Remove leading dots and spaces
            var long_name_start: u32 = 0;
            for (long_name) |char| {
                if (char != '.') {
                    break;
                }
                // If there is, then it is lossy
                long_name_start += 1;
                is_lossy = true;
            }

            // Get the last dot in the string (everything after it becomes the 3-char extension)
            const last_dot_index = std.mem.lastIndexOf(u16, long_name[long_name_start..], &[_]u16{'.'});

            // Fill the 8-character name part.
            for (long_name[long_name_start..]) |char, i| {
                // Break when we reach the max of the short name or the last dot
                if (char == '.') {
                    if (last_dot_index) |index| {
                        if (i == index) {
                            break;
                        }
                    }
                    // Else ignore it, and is lossy
                    is_lossy = true;
                    continue;
                }
                if (sfn_i == 8) {
                    is_lossy = true;
                    break;
                }
                if (isValidSFNChar(char)) |oem_char| {
                    // Valid SFN char, and convert to upper case
                    // TODO: Need proper unicode uppercase
                    sfn[sfn_i] = std.ascii.toUpper(oem_char);
                    sfn_i += 1;
                } else {
                    // Spaces don't need to be replaced, just ignored, but still set the lossy
                    if (char != 0x20) {
                        sfn[sfn_i] = '_';
                        sfn_i += 1;
                    }
                    is_lossy = true;
                }
            }

            // Save the name index (length of the name part, used later to place the ~n tail)
            const name_index = sfn_i;

            // Go to the last dot, if there isn't one, return what we have
            const index = (last_dot_index orelse return sfn) + long_name_start;
            // Extension starts at slot 8 of the 11-byte SFN.
            sfn_i = 8;
            // +1 as the index will be on the DOT and we don't need to include it
            for (long_name[index + 1 ..]) |char| {
                // Break when we reach the max of the short name
                if (sfn_i == 11) {
                    is_lossy = true;
                    break;
                }
                if (isValidSFNChar(char)) |oem_char| {
                    // Valid SFN char, and convert to upper case
                    // TODO: Need proper unicode uppercase
                    sfn[sfn_i] = std.ascii.toUpper(oem_char);
                    sfn_i += 1;
                } else {
                    // Spaces don't need to be replaced, just ignored, but still set the lossy
                    if (char != 0x20) {
                        sfn[sfn_i] = '_';
                        sfn_i += 1;
                    }
                    is_lossy = true;
                }
            }

            // 0xE5 is used for a deleted file, but is a valid UTF8 character, so use 0x05 instead
            if (sfn[0] == 0xE5) {
                sfn[0] = 0x05;
            }

            // Is there a collision of file names
            // Find n in ~n in the existing files
            var trail_number: u32 = 0;
            var full_name_match = false;
            for (existing_names) |existing_name| {
                // Only need to check the 8 char file name, not extension
                var i: u8 = 0;
                while (i < 8) : (i += 1) {
                    if (existing_name[i] != sfn[i] and existing_name[i] == '~') {
                        // Read the number and break
                        // -3 as we exclude the extension
                        // +1 as we are at the '~'
                        i += 1;
                        const end_num = std.mem.indexOf(u8, existing_name[0..], " ") orelse existing_name.len - 3;
                        const num = std.fmt.parseInt(u32, existing_name[i..end_num], 10) catch {
                            break;
                        };
                        if (num > trail_number) {
                            trail_number = num;
                        }
                        break;
                    }
                    // Not the same file name
                    if (existing_name[i] != sfn[i] and (!is_lossy or existing_name[i] != '~')) {
                        break;
                    }
                }
                // If match the full name, then need to add trail
                if (i == 8) {
                    full_name_match = true;
                }
            }

            // If there were some losses, then we need to add a number to the end
            if (is_lossy or full_name_match) {
                // Check if we have the max file names
                if (trail_number == 999999) {
                    return Error.InvalidName;
                }

                // Increase the trail number as this will be the number to append
                trail_number += 1;

                // Format this as a string, can't be more than 6 characters
                var trail_number_str: [6]u8 = undefined;
                const trail_number_str_end = std.fmt.formatIntBuf(trail_number_str[0..], trail_number, 10, .lower, .{});

                // Get the index to put the ~n
                var number_trail_index = if (name_index > 7 - trail_number_str_end) 7 - trail_number_str_end else name_index;
                sfn[number_trail_index] = '~';
                for (trail_number_str[0..trail_number_str_end]) |num_str| {
                    number_trail_index += 1;
                    sfn[number_trail_index] = num_str;
                }
            }

            return sfn;
        }

        ///
        /// Helper function for creating the long name dir entries from the long name. The return
        /// array will need to be freed by the caller. This expects a valid long name else undefined
        /// behavior.
        ///
        /// Arguments:
        ///     IN allocator: Allocator - The allocator for the long name array
        ///     IN long_name: []const u16 - The valid long name.
        ///     IN check_sum: u8 - The short name check sum for the long entry.
        ///
        /// Return: []LongName
        ///     The list of long name entries ready to be written to disk.
        ///
        /// Error: Allocator.Error
        ///     Allocator.Error - Error allocating memory for the long name entries.
        ///
        fn createLongNameEntry(allocator: Allocator, long_name: []const u16, check_sum: u8) Allocator.Error![]LongName {
            // Calculate the number of long entries (round up). LFN are each 13 characters long
            const num_lfn_entries = @intCast(u8, (long_name.len + 12) / 13);

            // Create the long entries
            var lfn_array = try allocator.alloc(LongName, num_lfn_entries);
            errdefer allocator.free(lfn_array);

            // Work backwards because it is easier: lfn_array[0] is written to disk first and must
            // carry the highest-order chunk with the 0x40 "last logical entry" flag set.
            var backwards_index = num_lfn_entries;
            while (backwards_index > 0) : (backwards_index -= 1) {
                // If this is the first entry, then the first byte starts with 0x40
                const entry_index = num_lfn_entries - backwards_index;
                const order = if (backwards_index == 1) 0x40 | num_lfn_entries else entry_index + 1;
                // Get the working slice of 13 characters
                // NULL terminate and pad with 0xFFFF if less than 13 characters
                const working_name: [13]u16 = blk: {
                    var temp: [13]u16 = [_]u16{0xFFFF} ** 13;
                    const long_name_slice = long_name[(entry_index * 13)..];
                    if (long_name_slice.len < 13) {
                        for (long_name_slice) |char, i| {
                            temp[i] = char;
                        }
                        // NULL terminated
                        temp[long_name_slice.len] = 0x0000;
                    } else {
                        for (temp) |*char, i| {
                            char.* = long_name[(entry_index * 13) + i];
                        }
                    }
                    break :blk temp;
                };

                // Create the entry
                lfn_array[backwards_index - 1] = .{
                    .order = order,
                    .first = working_name[0..5].*,
                    .check_sum = check_sum,
                    .second = working_name[5..11].*,
                    .third = working_name[11..13].*,
                };
            }

            return lfn_array;
        }

        ///
        /// Helper function for creating a short name entry. This calls the system time to get the
        /// current date and time for the new file/directory. This assumes a valid short name else
        /// undefined behavior.
        ///
        /// Arguments:
        ///     IN name: [11]u8 - The short name.
        ///     IN attributes: ShortName.Attributes - The attribute for the short name entry.
        ///     IN cluster: u32 - The cluster where this will point to.
        ///
        /// Return: ShortName
        ///     The short name entry with the current time used.
        ///
        fn createShortNameEntry(name: [11]u8, attributes: ShortName.Attributes, cluster: u32) ShortName {
            const date_time = arch.getDateTime();

            // FAT date: bits 0-4 day, 5-8 month, 9-15 years since 1980.
            const date = @intCast(u16, date_time.day | date_time.month << 5 | (date_time.year - 1980) << 9);
            // FAT time: bits 0-4 seconds/2, 5-10 minutes, 11-15 hours.
            const time = @intCast(u16, date_time.second / 2 | date_time.minute << 5 | date_time.hour << 11);

            return .{
                .name = name[0..8].*,
                .extension = name[8..11].*,
                .attributes = @enumToInt(attributes),
                // The odd second lost in the /2 above is stored here in hundredths.
                .time_created_tenth = @intCast(u8, (date_time.second % 2) * 100),
                .time_created = time,
                .date_created = date,
                .date_last_access = date,
                .cluster_high = @truncate(u16, cluster >> 16),
                .time_last_modification = time,
                .date_last_modification = date,
                .cluster_low = @truncate(u16, cluster),
                .size = 0x00000000,
            };
        }

        ///
        /// Helper function for writing a new file/folder entry. This will create the new entry
        /// under the provided cluster. If the cluster is full or not big enough the FAT will
        /// be extended and a new cluster will be allocated. This expects valid entries. Inputs
        /// are assumed to be correct.
        ///
        /// Arguments:
        ///     IN self: *Fat32Self - Self for the current instance of the FAT32 filesystem.
        ///     IN entries: FatDirEntry - The new entries to be written.
        ///     IN at_cluster: u32 - The cluster to write the entries to.
        ///     IN next_free_cluster_hint: u32 - The next free cluster to be used as a hint to find
        ///                                      new clusters for large entries.
        ///     IN initial_cluster_offset: u32 - The initial offset into the cluster to write to.
        ///
        /// Return: struct{cluster: u32, offset: u32}
        ///     cluster - The cluster at which the short entry is located
        ///     offset  - The offset at which the short entry is located with in the cluster.
        ///
        /// Error: Allocator.Error || WriteError || ReadError || SeekError
        ///     Allocator.Error - Error allocating memory.
        ///     WriteError - Error writing to the underlying stream.
        ///     ReadError - Error reading the underlying stream.
        ///     SeekError - Error seeking the underlying stream.
        ///     Fat32Self.Error - This will relate to allocating a new cluster.
        ///
        fn writeEntries(self: *Fat32Self, entries: FatDirEntry, at_cluster: u32, next_free_cluster_hint: u32, initial_cluster_offset: u32) (Allocator.Error || WriteError || ReadError || SeekError || Fat32Self.Error)!struct { cluster: u32, offset: u32 } {
            // Each entry is 32 bytes short + 32 * long len
            const entries_size_bytes = 32 + (32 * entries.long_entry.len);
            std.debug.assert(at_cluster >= 2);
            // Largest possible entry length
            std.debug.assert(entries_size_bytes <= 32 + (32 * 20));
            // Entries are 32 bytes long, so the offset will need to be aligned to 32 bytes
            std.debug.assert(initial_cluster_offset % 32 == 0);

            const cluster_size = self.fat_config.bytes_per_sector * self.fat_config.sectors_per_cluster;

            // Check free entry
            var index = initial_cluster_offset;
            // The cluster to write to, this can update as if the cluster provided is full, will need to write to the next free cluster
            var write_cluster = at_cluster;
            // At the end of the cluster chain, need to alloc a cluster
            // Overwrite the at_cluster to use the new one
            if (index == cluster_size) {
                write_cluster = try self.findNextFreeCluster(next_free_cluster_hint, write_cluster);
                index = 0;
            }

            // TODO: Once FatDirEntry can be a packed struct, then can write as bytes and not convert
            // Serialise all entries into one buffer: long entries first, short entry last
            // (matching the on-disk ordering FAT requires).
            var write_buff = try self.allocator.alloc(u8, entries_size_bytes);
            defer self.allocator.free(write_buff);
            for (entries.long_entry) |long_entry, i| {
                initBytes(LongName, long_entry, write_buff[(32 * i)..]);
            }
            initBytes(ShortName, entries.short_entry, write_buff[write_buff.len - 32 ..]);

            // Fill the cluster with the entry, allocating and chaining new clusters as the
            // buffer spills past each cluster boundary.
            var cluster_offset = index;
            var write_index: u32 = 0;
            var write_next_index = std.math.min(cluster_size - cluster_offset, write_buff.len);
            while (write_index < write_buff.len) : ({
                cluster_offset = 0;
                write_index = write_next_index;
                write_next_index = std.math.min(write_next_index + cluster_size, write_buff.len);
                if (write_index < write_buff.len) {
                    write_cluster = try self.findNextFreeCluster(write_cluster, write_cluster);
                }
            }) {
                const write_sector = self.fat_config.clusterToSector(write_cluster);
                try self.stream.seekableStream().seekTo(write_sector * self.fat_config.bytes_per_sector + cluster_offset);
                try self.stream.writer().writeAll(write_buff[write_index..write_next_index]);
            }

            // The short entry is the last 32 bytes written; report where it landed.
            const ret = .{ .cluster = write_cluster, .offset = (index + write_buff.len - 32) % cluster_size };
            return ret;
        }

        ///
        /// Deinitialise this file system. This frees the root node, virtual filesystem and self.
        /// This asserts that there are no open files left.
        ///
        /// Arguments:
        ///     IN self: *Fat32Self - Self to free.
        ///
        pub fn destroy(self: *Fat32Self) Fat32Self.Error!void {
            // Make sure we have closed all files
            if (self.opened_files.count() != 0) {
                return Fat32Self.Error.FilesStillOpen;
            }
            self.opened_files.deinit();
            self.allocator.destroy(self.root_node.node);
            self.allocator.destroy(self.fs);
            self.allocator.destroy(self);
        }

        ///
        /// Initialise a FAT32 filesystem.
        ///
        /// Arguments:
        ///     IN allocator: Allocator - Allocate memory.
        ///     IN stream: StreamType - The underlying stream that the filesystem will sit on.
        ///
        /// Return: *Fat32
        ///     The pointer to a FAT32 filesystem.
        ///
        /// Error: Allocator.Error || ReadError || SeekError || Fat32Self.Error
        ///     Allocator.Error - If there is no more memory. Any memory allocated will be freed.
        ///     ReadError - If there is an error reading from the stream.
        ///     SeekError - If there is an error seeking the stream.
        ///     Fat32Self.Error - If there is an error when parsing the stream to set up a FAT32
        ///                       filesystem. See Error for the list of possible errors.
        ///
        pub fn create(allocator: Allocator, stream: StreamType) (Allocator.Error || ReadError || SeekError || Fat32Self.Error)!*Fat32Self {
            log.debug("Init\n", .{});
            defer log.debug("Done\n", .{});
            // We need to get the root directory sector. For this we need to read the boot sector.
            var boot_sector_raw = try allocator.alloc(u8, 512);
            defer allocator.free(boot_sector_raw);
            const seek_stream = stream.seekableStream();
            const read_stream = stream.reader();

            // Ensure we are the beginning
            try seek_stream.seekTo(0);
            const read_count = try read_stream.readAll(boot_sector_raw[0..]);
            if (read_count != 512) {
                return Error.BadRead;
            }

            // Check the boot signature
            if (boot_sector_raw[510] != 0x55 or boot_sector_raw[511] != 0xAA) {
                return Error.BadMBRMagic;
            }

            // Parse the boot sector to extract the relevant information
            const boot_sector = initStruct(BootRecord, boot_sector_raw[0..90]);

            // Make sure the root cluster isn't 0 or 1
            if (boot_sector.root_directory_cluster < 2) {
                return Error.BadRootCluster;
            }

            // Make sure we have 2 FATs as this is common for FAT32, and we are going to not accept more or less FATs
            if (boot_sector.fat_count != 2) {
                return Error.BadFATCount;
            }

            // Make sure the FATs are mirrored (flags = 0x0)
            if (boot_sector.mirror_flags != 0x00) {
                return Error.NotMirror;
            }

            // Only accept fixed disks, no floppies
            if (boot_sector.media_descriptor_type != 0xF8) {
                return Error.BadMedia;
            }

            // Make sure the parts there were used for FAT12/16 are zero
            if (boot_sector.root_directory_size != 0 or boot_sector.sectors_per_fat_12_16 != 0 or boot_sector.total_sectors_12_16 != 0) {
                return Error.BadFat32;
            }

            // Check the signature
            if (boot_sector.signature != 0x29) {
                return Error.BadSignature;
            }

            // Check the filesystem type (the field is space padded to 8 characters)
            if (!std.mem.eql(u8, "FAT32   ", boot_sector.filesystem_type[0..])) {
                return Error.BadFSType;
            }

            // Read the FSInfo block
            // 0xFFFFFFFF is for unknown sizes
            var number_free_clusters: u32 = 0xFFFFFFFF;
            var next_free_cluster: u32 = 0xFFFFFFFF;
            var has_fs_info = false;
            if (boot_sector.fsinfo_sector != 0x0000 and boot_sector.fsinfo_sector != 0xFFFF) {
                var fs_info_raw = try allocator.alloc(u8, 512);
                defer allocator.free(fs_info_raw);
                try seek_stream.seekTo(boot_sector.fsinfo_sector * boot_sector.bytes_per_sector);
                const fs_read_count = try read_stream.readAll(fs_info_raw[0..]);
                if (fs_read_count != 512) {
                    return Error.BadRead;
                }

                const fs_info = initStruct(FSInfo, fs_info_raw[0..]);
                // Check the signatures
                if (fs_info.lead_signature == 0x41615252 and fs_info.struct_signature == 0x61417272 and fs_info.tail_signature == 0xAA550000) {
                    // It should be range checked at least to make sure it is <= volume cluster count.
                    const usable_sectors = boot_sector.total_sectors - boot_sector.reserved_sectors - (boot_sector.fat_count * boot_sector.sectors_per_fat);
                    const usable_clusters = @divFloor(usable_sectors, boot_sector.sectors_per_cluster) - 1;
                    if (usable_clusters >= fs_info.number_free_clusters) {
                        number_free_clusters = fs_info.number_free_clusters;
                    }
                    next_free_cluster = fs_info.next_free_cluster;
                    has_fs_info = true;
                }
            }

            // Figure out the end marker used in the cluster chain by reading the FAT
            // FAT is just after the reserved sectors, +4 as it is the second entry of u32 entries
            try seek_stream.seekTo(boot_sector.reserved_sectors * boot_sector.bytes_per_sector + 4);
            var end_marker_raw: [4]u8 = undefined;
            const fat_read_count = try read_stream.readAll(end_marker_raw[0..]);
            if (fat_read_count != 4) {
                return Error.BadRead;
            }
            const cluster_end_marker = std.mem.bytesAsSlice(u32, end_marker_raw[0..])[0] & 0x0FFFFFFF;

            // Have performed the checks, create the filesystem
            const fs = try allocator.create(vfs.FileSystem);
            errdefer allocator.destroy(fs);
            const root_node = try allocator.create(vfs.Node);
            errdefer allocator.destroy(root_node);
            const fat32_fs = try allocator.create(Fat32Self);
            errdefer allocator.destroy(fat32_fs);

            // Record the relevant information
            const fat_config = FATConfig{
                .bytes_per_sector = boot_sector.bytes_per_sector,
                .sectors_per_cluster = boot_sector.sectors_per_cluster,
                .reserved_sectors = boot_sector.reserved_sectors,
                .hidden_sectors = boot_sector.hidden_sectors,
                .total_sectors = boot_sector.total_sectors,
                .sectors_per_fat = boot_sector.sectors_per_fat,
                .root_directory_cluster = boot_sector.root_directory_cluster,
                .fsinfo_sector = boot_sector.fsinfo_sector,
                .backup_boot_sector = boot_sector.backup_boot_sector,
                .has_fs_info = has_fs_info,
                .number_free_clusters = number_free_clusters,
                .next_free_cluster = next_free_cluster,
                .cluster_end_marker = cluster_end_marker,
            };

            fat32_fs.* = .{
                .fs = fs,
                .allocator = allocator,
                .instance = 32,
                .root_node = .{
                    .node = root_node,
                    .cluster = fat_config.root_directory_cluster,
                },
                .fat_config = fat_config,
                .stream = stream,
                .opened_files = AutoHashMap(*const vfs.Node, *OpenedInfo).init(allocator),
            };

            root_node.* = .{
                .Dir = .{
                    .fs = fs,
                    .mount = null,
                },
            };

            fs.* = .{
                .open = open,
                .close = close,
                .read = read,
                .write = write,
                .instance = &fat32_fs.instance,
                .getRootNode = getRootNode,
            };

            return fat32_fs;
        }
    };
}

///
/// Initialise a FAT32 filesystem. This will take a stream that conforms to the reader(), writer(),
/// and seekableStream() interfaces. For example, FixedBufferStream or File. A pointer to this is
/// allowed. This will allow the use of different devices such as Hard drives or RAM disks to have
/// a FAT32 filesystem abstraction. We assume the FAT32 will be the only filesystem present and no
/// partition schemes are present. So will expect a valid boot sector.
///
/// Arguments:
///     IN allocator: Allocator - An allocator.
///     IN stream: anytype - A stream this is used to read and seek a raw disk or memory to
///                          be parsed as a FAT32 filesystem. E.g. a FixedBufferStream.
///
/// Return: *Fat32FS(@TypeOf(stream))
///     A pointer to a FAT32 filesystem.
///
/// Error: Allocator.Error || Fat32FS(@TypeOf(stream)).Error
///     Allocator.Error - If there isn't enough memory to create the filesystem.
///
pub fn initialiseFAT32(allocator: Allocator, stream: anytype) (Allocator.Error || ErrorSet(@TypeOf(stream)) || Fat32FS(@TypeOf(stream)).Error)!*Fat32FS(@TypeOf(stream)) {
    return Fat32FS(@TypeOf(stream)).create(allocator, stream);
}

///
/// Create a test FAT32 filesystem. This will use mkfat32 to create the temporary FAT32 then the
/// stream and fat_config will be replaced by the provided ones. Returned will need to be deinit().
/// This will also set the VFS root node so can use the VFS interfaces without manual setup.
///
/// Arguments:
///     IN allocator: Allocator - The allocator to create the FAT32FS
///     IN stream: anytype - The stream to replace the generated one. This will need to be a
///         fixed buffer stream.
///     IN fat_config: FATConfig - The config to replace the generated one.
///
/// Return: *Fat32FS(@TypeOf(stream))
///     The test FAT32 filesystem
///
/// Error: anyerror
///     Any errors. As this is a test function, it doesn't matter what error is returned, if one
///     does, it fails the test.
///
fn testFAT32FS(allocator: Allocator, stream: anytype, fat_config: FATConfig) anyerror!*Fat32FS(@TypeOf(stream)) {
    // Allocate the scratch image with the same allocator that frees it below.
    // (Previously this allocated from std.testing.allocator but freed through the
    // `allocator` parameter; alloc/free with mismatched allocators is undefined
    // behaviour whenever a caller passes anything other than std.testing.allocator.)
    var test_file_buf = try allocator.alloc(u8, 35 * 512);
    defer allocator.free(test_file_buf);

    var temp_stream = &std.io.fixedBufferStream(test_file_buf[0..]);

    // Build a fresh FAT32 image in the buffer, then parse it into a filesystem.
    try mkfat32.Fat32.make(.{ .image_size = test_file_buf.len }, temp_stream, true);

    var test_fs = try initialiseFAT32(allocator, temp_stream);

    // Swap in the caller-provided stream and config so the test controls the layout.
    test_fs.stream = stream;
    test_fs.fat_config = fat_config;

    try vfs.setRoot(test_fs.root_node.node);

    return test_fs;
}

test "LongName.getName" {
    {
        const lfn = LongName{
            .order = 0x00,
            .first = [5]u16{ '1', '2', '3', '4', '5' },
            .check_sum = 0x00,
            .second = [6]u16{ '1', '2', '3', 0x0000, 0xFFFF, 0xFFFF },
            .third = [2]u16{ 0xFFFF, 0xFFFF },
        };
        // 2 * 13 u16's
        var buff: [26]u8 = undefined;
        const end = try lfn.getName(buff[0..]);
        try expectEqualSlices(u8, "12345123", buff[0..end]);
    }
    {
        const lfn = LongName{
            .order = 0x00,
            .first = [5]u16{ 0x20AC, '1', 0x20AC, '2', 0x0000 },
            .check_sum = 0x00,
            .second = [6]u16{ 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF },
            .third = [2]u16{ 0xFFFF, 0xFFFF },
        };
        // 2 * 13 u16's
        var buff: [26]u8 = undefined;
        const end = try lfn.getName(buff[0..]);
        try expectEqualSlices(u8, "€1€2", buff[0..end]);
    }
    {
        const lfn = LongName{
            .order = 0x00,
            .first = [5]u16{ '1', '1', '1', '1', '1' },
            .check_sum = 0x00,
            .second = [6]u16{ '1', '1', '1', '1', '1', '1' },
            .third = [2]u16{ '1', 0xD801 },
        };
        // 2 * 13 u16's
        var buff: [26]u8 = undefined;
        try expectError(error.DanglingSurrogateHalf, lfn.getName(buff[0..]));
    }
    {
        const lfn = LongName{
            .order = 0x00,
            .first = [5]u16{ 0xD987, '1', 0xFFFF, 0xFFFF, 0xFFFF },
            .check_sum = 0x00,
            .second = [6]u16{ 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF },
            .third = [2]u16{ 0xFFFF, 0xFFFF },
        };
        // 2 * 13 u16's
        var buff: [26]u8 = undefined;
        try expectError(error.ExpectedSecondSurrogateHalf, lfn.getName(buff[0..]));
    }
    {
        const lfn = LongName{
            .order = 0x00,
            .first = [5]u16{ 0xDD87, '1', 0xFFFF, 0xFFFF,
0xFFFF }, .check_sum = 0x00, .second = [6]u16{ 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, .third = [2]u16{ 0xFFFF, 0xFFFF }, }; // 2 * 13 u16's var buff: [26]u8 = undefined; try expectError(error.UnexpectedSecondSurrogateHalf, lfn.getName(buff[0..])); } } test "ShortName.getName - File" { { const sfn = ShortName{ .name = "12345678".*, .extension = "123".*, .attributes = 0x00, .time_created_tenth = 0x00, .time_created = 0x0000, .date_created = 0x0000, .date_last_access = 0x0000, .cluster_high = 0x0000, .time_last_modification = 0x0000, .date_last_modification = 0x0000, .cluster_low = 0x0000, .size = 0x00000000, }; var name: [12]u8 = undefined; const name_end = sfn.getName(name[0..]); try expectEqualSlices(u8, "12345678.123", name[0..name_end]); } { const sfn = ShortName{ .name = "12345 ".*, .extension = "123".*, .attributes = 0x00, .time_created_tenth = 0x00, .time_created = 0x0000, .date_created = 0x0000, .date_last_access = 0x0000, .cluster_high = 0x0000, .time_last_modification = 0x0000, .date_last_modification = 0x0000, .cluster_low = 0x0000, .size = 0x00000000, }; var name: [12]u8 = undefined; const name_end = sfn.getName(name[0..]); try expectEqualSlices(u8, "12345.123", name[0..name_end]); } { const sfn = ShortName{ .name = "12345 ".*, .extension = "1 ".*, .attributes = 0x00, .time_created_tenth = 0x00, .time_created = 0x0000, .date_created = 0x0000, .date_last_access = 0x0000, .cluster_high = 0x0000, .time_last_modification = 0x0000, .date_last_modification = 0x0000, .cluster_low = 0x0000, .size = 0x00000000, }; var name: [12]u8 = undefined; const name_end = sfn.getName(name[0..]); try expectEqualSlices(u8, "12345.1", name[0..name_end]); } { const sfn = ShortName{ .name = "\u{05}2345 ".*, .extension = "1 ".*, .attributes = 0x00, .time_created_tenth = 0x00, .time_created = 0x0000, .date_created = 0x0000, .date_last_access = 0x0000, .cluster_high = 0x0000, .time_last_modification = 0x0000, .date_last_modification = 0x0000, .cluster_low = 0x0000, .size = 
0x00000000, }; var name: [12]u8 = undefined; const name_end = sfn.getName(name[0..]); try expectEqualSlices(u8, "σ2345.1", name[0..name_end]); } { const sfn = ShortName{ .name = [_]u8{ 0x90, 0xA0 } ++ "345 ".*, .extension = "1 ".*, .attributes = 0x00, .time_created_tenth = 0x00, .time_created = 0x0000, .date_created = 0x0000, .date_last_access = 0x0000, .cluster_high = 0x0000, .time_last_modification = 0x0000, .date_last_modification = 0x0000, .cluster_low = 0x0000, .size = 0x00000000, }; var name: [12]u8 = undefined; const name_end = sfn.getName(name[0..]); try expectEqualSlices(u8, "Éá345.1", name[0..name_end]); } { const sfn = ShortName{ .name = "12345 ".*, .extension = "1".* ++ [_]u8{0xB0} ++ " ".*, .attributes = 0x00, .time_created_tenth = 0x00, .time_created = 0x0000, .date_created = 0x0000, .date_last_access = 0x0000, .cluster_high = 0x0000, .time_last_modification = 0x0000, .date_last_modification = 0x0000, .cluster_low = 0x0000, .size = 0x00000000, }; var name: [12]u8 = undefined; const name_end = sfn.getName(name[0..]); try expectEqualSlices(u8, "12345.1░", name[0..name_end]); } } test "ShortName.getName - Dir" { { const sfn = ShortName{ .name = "12345678".*, .extension = " ".*, .attributes = @enumToInt(ShortName.Attributes.Directory), .time_created_tenth = 0x00, .time_created = 0x0000, .date_created = 0x0000, .date_last_access = 0x0000, .cluster_high = 0x0000, .time_last_modification = 0x0000, .date_last_modification = 0x0000, .cluster_low = 0x0000, .size = 0x00000000, }; var name: [12]u8 = undefined; const name_end = sfn.getName(name[0..]); try expectEqualSlices(u8, "12345678", name[0..name_end]); } { const sfn = ShortName{ .name = "12345 ".*, .extension = " ".*, .attributes = @enumToInt(ShortName.Attributes.Directory), .time_created_tenth = 0x00, .time_created = 0x0000, .date_created = 0x0000, .date_last_access = 0x0000, .cluster_high = 0x0000, .time_last_modification = 0x0000, .date_last_modification = 0x0000, .cluster_low = 0x0000, .size = 
0x00000000, }; var name: [12]u8 = undefined; const name_end = sfn.getName(name[0..]); try expectEqualSlices(u8, "12345", name[0..name_end]); } { const sfn = ShortName{ .name = "\u{05}2345 ".*, .extension = " ".*, .attributes = @enumToInt(ShortName.Attributes.Directory), .time_created_tenth = 0x00, .time_created = 0x0000, .date_created = 0x0000, .date_last_access = 0x0000, .cluster_high = 0x0000, .time_last_modification = 0x0000, .date_last_modification = 0x0000, .cluster_low = 0x0000, .size = 0x00000000, }; var name: [12]u8 = undefined; const name_end = sfn.getName(name[0..]); try expectEqualSlices(u8, "σ2345", name[0..name_end]); } { const sfn = ShortName{ .name = "12345 ".*, .extension = "123".*, .attributes = @enumToInt(ShortName.Attributes.Directory), .time_created_tenth = 0x00, .time_created = 0x0000, .date_created = 0x0000, .date_last_access = 0x0000, .cluster_high = 0x0000, .time_last_modification = 0x0000, .date_last_modification = 0x0000, .cluster_low = 0x0000, .size = 0x00000000, }; var name: [12]u8 = undefined; const name_end = sfn.getName(name[0..]); try expectEqualSlices(u8, "12345", name[0..name_end]); } } test "ShortName.getSFNName" { const sfn = ShortName{ .name = [_]u8{ 0x05, 0xAA } ++ "345 ".*, .extension = "1 ".*, .attributes = 0x00, .time_created_tenth = 0x00, .time_created = 0x0000, .date_created = 0x0000, .date_last_access = 0x0000, .cluster_high = 0x0000, .time_last_modification = 0x0000, .date_last_modification = 0x0000, .cluster_low = 0x0000, .size = 0x00000000, }; const name = sfn.getSFNName(); const expected = [_]u8{ 0x05, 0xAA } ++ "345 1 "; try expectEqualSlices(u8, expected, name[0..]); } test "ShortName.isDir" { { const sfn = ShortName{ .name = "12345678".*, .extension = "123".*, .attributes = @enumToInt(ShortName.Attributes.Directory), .time_created_tenth = 0x00, .time_created = 0x0000, .date_created = 0x0000, .date_last_access = 0x0000, .cluster_high = 0x0000, .time_last_modification = 0x0000, .date_last_modification = 0x0000, 
.cluster_low = 0x0000, .size = 0x00000000, }; try expect(sfn.isDir()); } { const sfn = ShortName{ .name = "12345678".*, .extension = "123".*, .attributes = @enumToInt(ShortName.Attributes.None), .time_created_tenth = 0x00, .time_created = 0x0000, .date_created = 0x0000, .date_last_access = 0x0000, .cluster_high = 0x0000, .time_last_modification = 0x0000, .date_last_modification = 0x0000, .cluster_low = 0x0000, .size = 0x00000000, }; try expect(!sfn.isDir()); } } test "ShortName.getCluster" { const sfn = ShortName{ .name = "12345678".*, .extension = "123".*, .attributes = @enumToInt(ShortName.Attributes.None), .time_created_tenth = 0x00, .time_created = 0x0000, .date_created = 0x0000, .date_last_access = 0x0000, .cluster_high = 0xABCD, .time_last_modification = 0x0000, .date_last_modification = 0x0000, .cluster_low = 0x1234, .size = 0x00000000, }; try expectEqual(sfn.getCluster(), 0xABCD1234); } test "ShortName.calcCheckSum" { const sfn = ShortName{ .name = "12345678".*, .extension = "123".*, .attributes = @enumToInt(ShortName.Attributes.None), .time_created_tenth = 0x00, .time_created = 0x0000, .date_created = 0x0000, .date_last_access = 0x0000, .cluster_high = 0xABCD, .time_last_modification = 0x0000, .date_last_modification = 0x0000, .cluster_low = 0x1234, .size = 0x00000000, }; try expectEqual(sfn.calcCheckSum(), 0x7A); } test "ClusterChainIterator.checkRead - Within cluster, within FAT" { // The undefined values are not used in checkRead const fat_config = FATConfig{ .bytes_per_sector = 16, .sectors_per_cluster = 1, .reserved_sectors = 0, .hidden_sectors = undefined, .total_sectors = undefined, .sectors_per_fat = undefined, .root_directory_cluster = undefined, .fsinfo_sector = undefined, .backup_boot_sector = undefined, .has_fs_info = undefined, .number_free_clusters = undefined, .next_free_cluster = undefined, .cluster_end_marker = undefined, }; var buff_stream = [_]u8{}; var stream = &std.io.fixedBufferStream(buff_stream[0..]); // First 2 are for other 
purposed and not needed, the third is the first real FAT entry var fat = [_]u32{ 0x0FFFFFFF, 0xFFFFFFF8, 0x0FFFFFFF, 0x00000000 }; var it = Fat32FS(@TypeOf(stream)).ClusterChainIterator{ .allocator = undefined, .cluster = 2, .fat_config = fat_config, .stream = stream, .fat = fat[0..], .table_offset = 0, .cluster_offset = 0, }; try it.checkRead(); // Nothing changed try expectEqual(it.cluster, 2); try expectEqual(it.cluster_offset, 0); try expectEqual(it.table_offset, 0); try expectEqualSlices(u32, it.fat, fat[0..]); } test "ClusterChainIterator.checkRead - Multiple clusters, within FAT" { // The undefined values are not used in checkRead // The value won't be valid FAT32 values, but this doesn't matter in this context so can make the stream buffer smaller const fat_config = FATConfig{ .bytes_per_sector = 16, .sectors_per_cluster = 1, .reserved_sectors = 0, .hidden_sectors = undefined, .total_sectors = undefined, .sectors_per_fat = undefined, .root_directory_cluster = undefined, .fsinfo_sector = undefined, .backup_boot_sector = undefined, .has_fs_info = undefined, .number_free_clusters = undefined, .next_free_cluster = undefined, .cluster_end_marker = undefined, }; var buff_stream = [_]u8{}; var stream = &std.io.fixedBufferStream(buff_stream[0..]); // First 2 are for other purposed and not needed, the third is the first real FAT entry var fat = [_]u32{ 0x0FFFFFFF, 0xFFFFFFF8, 0x00000003, 0x0FFFFFFF }; var it = Fat32FS(@TypeOf(stream)).ClusterChainIterator{ .allocator = undefined, .cluster = 2, .fat_config = fat_config, .stream = stream, .fat = fat[0..], .table_offset = 0, .cluster_offset = 16, }; // This will update the next cluster to read from try it.checkRead(); // Updated the cluster only try expectEqual(it.cluster, 3); try expectEqual(it.cluster_offset, 0); try expectEqual(it.table_offset, 0); try expectEqualSlices(u32, it.fat, fat[0..]); } test "ClusterChainIterator.checkRead - Multiple clusters, outside FAT" { // The undefined values are not used in checkRead 
// The value won't be valid FAT32 values, but this doesn't matter in this context so can make the stream buffer smaller const fat_config = FATConfig{ .bytes_per_sector = 16, .sectors_per_cluster = 1, .reserved_sectors = 0, .hidden_sectors = undefined, .total_sectors = undefined, .sectors_per_fat = 1, .root_directory_cluster = undefined, .fsinfo_sector = undefined, .backup_boot_sector = undefined, .has_fs_info = undefined, .number_free_clusters = undefined, .next_free_cluster = undefined, .cluster_end_marker = 0x0FFFFFFF, }; // Set the stream to all FF which represents the end of a FAT chain var buff_stream = [_]u8{ // First 4 FAT (little endian) 0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F, // Second 4 FAT. This is where it will seek to 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, }; var stream = &std.io.fixedBufferStream(buff_stream[0..]); // First 2 are for other purposed and not needed, the third is the first real FAT entry var fat = [_]u32{ 0x0FFFFFFF, 0xFFFFFFF8, 0x00000004, 0x0FFFFFFF }; var expected_fat = [_]u32{ 0x0FFFFFFF, 0x0FFFFFFF, 0x0FFFFFFF, 0x0FFFFFFF }; var it = Fat32FS(@TypeOf(stream)).ClusterChainIterator{ .allocator = undefined, .cluster = 2, .fat_config = fat_config, .stream = stream, .fat = fat[0..], .table_offset = 0, .cluster_offset = 16, }; // This will read the next cluster and read a new FAT cache try it.checkRead(); // Updated the cluster and table offset try expectEqual(it.cluster, 4); try expectEqual(it.cluster_offset, 0); try expectEqual(it.table_offset, 1); try expectEqualSlices(u32, it.fat, expected_fat[0..]); } test "ClusterChainIterator.read - end of buffer" { var stream = &std.io.fixedBufferStream(&[_]u8{}); var it = Fat32FS(@TypeOf(stream)).ClusterChainIterator{ .allocator = undefined, .cluster = undefined, .fat_config = undefined, .stream = undefined, .fat = undefined, .table_offset = undefined, .cluster_offset = undefined, }; 
const actual = try it.read(&[_]u8{}); try expectEqual(actual, null); } test "ClusterChainIterator.read - cluster 0" { var stream = &std.io.fixedBufferStream(&[_]u8{}); var it = Fat32FS(@TypeOf(stream)).ClusterChainIterator{ .allocator = undefined, .cluster = 0, .fat_config = undefined, .stream = undefined, .fat = undefined, .table_offset = undefined, .cluster_offset = undefined, }; var buff: [128]u8 = undefined; const actual = try it.read(buff[0..]); try expectEqual(actual, null); } test "ClusterChainIterator.read - end of cluster chain" { // The undefined values are not used in read const end_cluster: u32 = 0x0FFFFFFF; const fat_config = FATConfig{ .bytes_per_sector = undefined, .sectors_per_cluster = undefined, .reserved_sectors = undefined, .hidden_sectors = undefined, .total_sectors = undefined, .sectors_per_fat = undefined, .root_directory_cluster = undefined, .fsinfo_sector = undefined, .backup_boot_sector = undefined, .has_fs_info = undefined, .number_free_clusters = undefined, .next_free_cluster = undefined, .cluster_end_marker = end_cluster, }; var stream = &std.io.fixedBufferStream(&[_]u8{}); var it = Fat32FS(@TypeOf(stream)).ClusterChainIterator{ .allocator = undefined, .cluster = end_cluster, .fat_config = fat_config, .stream = undefined, .fat = undefined, .table_offset = undefined, .cluster_offset = undefined, }; var buff: [128]u8 = undefined; const actual = try it.read(buff[0..]); try expectEqual(actual, null); } test "ClusterChainIterator.read - BadRead" { // The undefined values are not used in read const fat_config = FATConfig{ .bytes_per_sector = 512, .sectors_per_cluster = 1, .reserved_sectors = 0, .hidden_sectors = undefined, .total_sectors = undefined, .sectors_per_fat = 1, .root_directory_cluster = undefined, .fsinfo_sector = undefined, .backup_boot_sector = undefined, .has_fs_info = undefined, .number_free_clusters = undefined, .next_free_cluster = undefined, .cluster_end_marker = 0x0FFFFFFF, }; var stream_buff: [1024]u8 = undefined; var 
stream = &std.io.fixedBufferStream(stream_buff[0..]); var it = Fat32FS(@TypeOf(stream)).ClusterChainIterator{ .allocator = undefined, .cluster = 2, .fat_config = fat_config, .stream = stream, .fat = undefined, .table_offset = 0, .cluster_offset = 0, }; // Buffer is too small var buff: [128]u8 = undefined; try expectError(error.BadRead, it.read(buff[0..])); } test "ClusterChainIterator.read - success" { // The undefined values are not used in checkRead // The value won't be valid FAT32 values, but this doesn't matter in this context so can make the stream buffer smaller const fat_config = FATConfig{ .bytes_per_sector = 16, .sectors_per_cluster = 1, .reserved_sectors = 0, .hidden_sectors = undefined, .total_sectors = undefined, .sectors_per_fat = 1, .root_directory_cluster = undefined, .fsinfo_sector = undefined, .backup_boot_sector = undefined, .has_fs_info = undefined, .number_free_clusters = undefined, .next_free_cluster = undefined, .cluster_end_marker = 0x0FFFFFFF, }; var buff_stream = [_]u8{ // FAT region 0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, // Backup FAT region 0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, // Data region 'a', 'b', 'c', 'd', '1', '2', '3', '4', 'A', 'B', 'C', 'D', '!', '"', '$', '%', }; var stream = &std.io.fixedBufferStream(buff_stream[0..]); // First 2 are for other purposed and not needed, the third is the first real FAT entry var fat = [_]u32{ 0x0FFFFFFF, 0xFFFFFFF8, 0x0FFFFFFF, 0x0FFFFFFF }; var it = Fat32FS(@TypeOf(stream)).ClusterChainIterator{ .allocator = undefined, .cluster = 2, .fat_config = fat_config, .stream = stream, .fat = fat[0..], .table_offset = 0, .cluster_offset = 0, }; var buff: [16]u8 = undefined; const read = try it.read(buff[0..]); try expectEqual(read, 16); try expectEqualSlices(u8, buff[0..], "abcd1234ABCD!\"$%"); try expectEqual(it.table_offset, 0); try expectEqual(it.cluster_offset, 0); try 
expectEqual(it.cluster, 0x0FFFFFFF); } test "ClusterChainIterator.init - free on BadRead" { const fat_config = FATConfig{ .bytes_per_sector = 16, .sectors_per_cluster = 1, .reserved_sectors = 0, .hidden_sectors = undefined, .total_sectors = undefined, .sectors_per_fat = undefined, .root_directory_cluster = undefined, .fsinfo_sector = undefined, .backup_boot_sector = undefined, .has_fs_info = undefined, .number_free_clusters = undefined, .next_free_cluster = undefined, .cluster_end_marker = 0x0FFFFFFF, }; var stream = &std.io.fixedBufferStream(&[_]u8{}); try expectError(error.BadRead, Fat32FS(@TypeOf(stream)).ClusterChainIterator.init(std.testing.allocator, fat_config, 2, stream)); } test "ClusterChainIterator.init - free on OutOfMemory" { const fat_config = FATConfig{ .bytes_per_sector = 16, .sectors_per_cluster = 1, .reserved_sectors = 0, .hidden_sectors = undefined, .total_sectors = undefined, .sectors_per_fat = 1, .root_directory_cluster = undefined, .fsinfo_sector = undefined, .backup_boot_sector = undefined, .has_fs_info = undefined, .number_free_clusters = undefined, .next_free_cluster = undefined, .cluster_end_marker = 0x0FFFFFFF, }; var buff_stream = [_]u8{ // FAT region 0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, // Backup FAT region 0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, // Data region 'a', 'b', 'c', 'd', '1', '2', '3', '4', 'A', 'B', 'C', 'D', '!', '"', '$', '%', }; var stream = &std.io.fixedBufferStream(buff_stream[0..]); const allocations: usize = 1; var i: usize = 0; while (i < allocations) : (i += 1) { var fa = std.testing.FailingAllocator.init(std.testing.allocator, i); try expectError(error.OutOfMemory, Fat32FS(@TypeOf(stream)).ClusterChainIterator.init(fa.allocator(), fat_config, 2, stream)); } } test "ClusterChainIterator.init - success and good read" { const fat_config = FATConfig{ .bytes_per_sector = 16, .sectors_per_cluster = 1, 
.reserved_sectors = 0, .hidden_sectors = undefined, .total_sectors = undefined, .sectors_per_fat = 1, .root_directory_cluster = undefined, .fsinfo_sector = undefined, .backup_boot_sector = undefined, .has_fs_info = undefined, .number_free_clusters = undefined, .next_free_cluster = undefined, .cluster_end_marker = 0x0FFFFFFF, }; var buff_stream = [_]u8{ // FAT region 0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, // Backup FAT region 0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, // Data region 'a', 'b', 'c', 'd', '1', '2', '3', '4', 'A', 'B', 'C', 'D', '!', '"', '$', '%', }; var stream = &std.io.fixedBufferStream(buff_stream[0..]); var it = try Fat32FS(@TypeOf(stream)).ClusterChainIterator.init(std.testing.allocator, fat_config, 2, stream); defer it.deinit(); var buff: [16]u8 = undefined; // If orelse, then 'try expectEqual(read, 16);' will fail const read = (try it.read(buff[0..])) orelse 0; try expectEqual(read, 16); try expectEqualSlices(u8, buff[0..], "abcd1234ABCD!\"$%"); try expectEqual(it.table_offset, 0); try expectEqual(it.cluster_offset, 0); try expectEqual(it.cluster, 0x0FFFFFFF); const expect_null = try it.read(buff[read..]); try expectEqual(expect_null, null); } test "EntryIterator.checkRead - inside cluster block" { const fat_config = FATConfig{ .bytes_per_sector = 16, .sectors_per_cluster = 1, .reserved_sectors = 0, .hidden_sectors = undefined, .total_sectors = undefined, .sectors_per_fat = 1, .root_directory_cluster = undefined, .fsinfo_sector = undefined, .backup_boot_sector = undefined, .has_fs_info = undefined, .number_free_clusters = undefined, .next_free_cluster = undefined, .cluster_end_marker = 0x0FFFFFFF, }; var buff_stream = [_]u8{ // FAT region 0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0x04, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F, // Backup FAT region 0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 
0xFF, 0x0F, // Data region cluster 1 'a', 'b', 'c', 'd', '1', '2', '3', '4', 'A', 'B', 'C', 'D', '!', '"', '$', '%', // Data region cluster 2 'e', 'f', 'g', 'h', '5', '6', '7', '8', 'E', 'F', 'G', 'H', '^', '&', '*', '(', }; var stream = &std.io.fixedBufferStream(buff_stream[0..]); var cluster_chain = try Fat32FS(@TypeOf(stream)).ClusterChainIterator.init(std.testing.allocator, fat_config, 2, stream); defer cluster_chain.deinit(); var buff: [16]u8 = undefined; std.mem.copy(u8, buff[0..], buff_stream[32..48]); var it = Fat32FS(@TypeOf(cluster_chain.stream)).EntryIterator{ .allocator = undefined, .cluster_block = buff[0..], .index = 0, .cluster_chain = cluster_chain, }; try expectEqualSlices(u8, it.cluster_block, "abcd1234ABCD!\"$%"); try it.checkRead(); // nothing changed try expectEqualSlices(u8, it.cluster_block, "abcd1234ABCD!\"$%"); } test "EntryIterator.checkRead - read new cluster" { const fat_config = FATConfig{ .bytes_per_sector = 16, .sectors_per_cluster = 1, .reserved_sectors = 0, .hidden_sectors = undefined, .total_sectors = undefined, .sectors_per_fat = 1, .root_directory_cluster = undefined, .fsinfo_sector = undefined, .backup_boot_sector = undefined, .has_fs_info = undefined, .number_free_clusters = undefined, .next_free_cluster = undefined, .cluster_end_marker = 0x0FFFFFFF, }; var buff_stream = [_]u8{ // FAT region 0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F, // Backup FAT region 0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F, // Data region cluster 1 'a', 'b', 'c', 'd', '1', '2', '3', '4', 'A', 'B', 'C', 'D', '!', '"', '$', '%', // Data region cluster 2 'e', 'f', 'g', 'h', '5', '6', '7', '8', 'E', 'F', 'G', 'H', '^', '&', '*', '(', }; var stream = &std.io.fixedBufferStream(buff_stream[0..]); var cluster_chain = try Fat32FS(@TypeOf(stream)).ClusterChainIterator.init(std.testing.allocator, fat_config, 2, stream); defer cluster_chain.deinit(); var buff: 
[16]u8 = undefined; _ = try cluster_chain.read(buff[0..]); var it = Fat32FS(@TypeOf(cluster_chain.stream)).EntryIterator{ .allocator = undefined, .cluster_block = buff[0..], .index = 16, .cluster_chain = cluster_chain, }; try expectEqualSlices(u8, it.cluster_block, "abcd1234ABCD!\"$%"); try it.checkRead(); try expectEqualSlices(u8, it.cluster_block, "efgh5678EFGH^&*("); try expectEqual(it.index, 0); } test "EntryIterator.checkRead - end of cluster chain" { const fat_config = FATConfig{ .bytes_per_sector = 16, .sectors_per_cluster = 1, .reserved_sectors = 0, .hidden_sectors = undefined, .total_sectors = undefined, .sectors_per_fat = 1, .root_directory_cluster = undefined, .fsinfo_sector = undefined, .backup_boot_sector = undefined, .has_fs_info = undefined, .number_free_clusters = undefined, .next_free_cluster = undefined, .cluster_end_marker = 0x0FFFFFFF, }; var buff_stream = [_]u8{ // FAT region 0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, // Backup FAT region 0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, // Data region cluster 1 'a', 'b', 'c', 'd', '1', '2', '3', '4', 'A', 'B', 'C', 'D', '!', '"', '$', '%', // Data region cluster 2 'e', 'f', 'g', 'h', '5', '6', '7', '8', 'E', 'F', 'G', 'H', '^', '&', '*', '(', }; var stream = &std.io.fixedBufferStream(buff_stream[0..]); var cluster_chain = try Fat32FS(@TypeOf(stream)).ClusterChainIterator.init(std.testing.allocator, fat_config, 2, stream); defer cluster_chain.deinit(); var buff: [16]u8 = undefined; _ = try cluster_chain.read(buff[0..]); var it = Fat32FS(@TypeOf(cluster_chain.stream)).EntryIterator{ .allocator = undefined, .cluster_block = buff[0..], .index = 16, .cluster_chain = cluster_chain, }; try expectEqualSlices(u8, it.cluster_block, "abcd1234ABCD!\"$%"); try expectError(error.EndClusterChain, it.checkRead()); } test "EntryIterator.nextImp - end of entries" { const fat_config = FATConfig{ .bytes_per_sector = 
32, .sectors_per_cluster = 1, .reserved_sectors = 0, .hidden_sectors = undefined, .total_sectors = undefined, .sectors_per_fat = 1, .root_directory_cluster = undefined, .fsinfo_sector = undefined, .backup_boot_sector = undefined, .has_fs_info = undefined, .number_free_clusters = undefined, .next_free_cluster = undefined, .cluster_end_marker = 0x0FFFFFFF, }; var buff_stream = [_]u8{ // FAT region 0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, // Backup FAT region 0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, // Data region cluster 1 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Data region cluster 2 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }; var stream = &std.io.fixedBufferStream(buff_stream[0..]); var cluster_chain = try Fat32FS(@TypeOf(stream)).ClusterChainIterator.init(std.testing.allocator, fat_config, 2, stream); defer cluster_chain.deinit(); var buff: [32]u8 = undefined; _ = try cluster_chain.read(buff[0..]); var it = Fat32FS(@TypeOf(cluster_chain.stream)).EntryIterator{ .allocator = undefined, .cluster_block = buff[0..], .index = 0, .cluster_chain = cluster_chain, }; const actual = try it.nextImp(); try expectEqual(actual, null); } test "EntryIterator.nextImp - just deleted files" { const fat_config = FATConfig{ .bytes_per_sector = 32, .sectors_per_cluster = 1, .reserved_sectors = 0, .hidden_sectors = undefined, .total_sectors = undefined, .sectors_per_fat = 1, 
.root_directory_cluster = undefined, .fsinfo_sector = undefined, .backup_boot_sector = undefined, .has_fs_info = undefined, .number_free_clusters = undefined, .next_free_cluster = undefined, .cluster_end_marker = 0x0FFFFFFF, }; // These deleted files are taken from a real FAT32 implementation. There is one deleted file for cluster 2 // This will also read the next cluster chain. var buff_stream = [_]u8{ // FAT region 0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, // Backup FAT region 0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, // Data region cluster 1 0xE5, 0x41, 0x4D, 0x44, 0x49, 0x53, 0x7E, 0x32, 0x54, 0x58, 0x54, 0x00, 0x18, 0x34, 0x47, 0x76, 0xF9, 0x50, 0x00, 0x00, 0x00, 0x00, 0x48, 0x76, 0xF9, 0x50, 0x04, 0x00, 0x24, 0x00, 0x00, 0x00, // Data region cluster 2 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }; var stream = &std.io.fixedBufferStream(buff_stream[0..]); var cluster_chain = try Fat32FS(@TypeOf(stream)).ClusterChainIterator.init(std.testing.allocator, fat_config, 2, stream); defer cluster_chain.deinit(); var buff: [32]u8 = undefined; _ = try cluster_chain.read(buff[0..]); var it = Fat32FS(@TypeOf(cluster_chain.stream)).EntryIterator{ .allocator = undefined, .cluster_block = buff[0..], .index = 0, .cluster_chain = cluster_chain, }; const actual = try it.nextImp(); try expectEqual(actual, null); } test "EntryIterator.nextImp - short name only" { const fat_config = FATConfig{ .bytes_per_sector = 32, .sectors_per_cluster = 1, .reserved_sectors = 0, .hidden_sectors = undefined, .total_sectors = undefined, .sectors_per_fat = 
1, .root_directory_cluster = undefined, .fsinfo_sector = undefined, .backup_boot_sector = undefined, .has_fs_info = undefined, .number_free_clusters = undefined, .next_free_cluster = undefined, .cluster_end_marker = 0x0FFFFFFF, }; // This short name files are taken from a real FAT32 implementation. // This will also read the next cluster chain. var buff_stream = [_]u8{ // FAT region 0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, // Backup FAT region 0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, // Data region cluster 1 0x42, 0x53, 0x48, 0x4F, 0x52, 0x54, 0x20, 0x20, 0x54, 0x58, 0x54, 0x00, 0x10, 0xA0, 0x68, 0xA9, 0xFE, 0x50, 0x00, 0x00, 0x00, 0x00, 0x6E, 0xA9, 0xFE, 0x50, 0x04, 0x00, 0x13, 0x00, 0x00, 0x00, // Data region cluster 2 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }; var stream = &std.io.fixedBufferStream(buff_stream[0..]); var cluster_chain = try Fat32FS(@TypeOf(stream)).ClusterChainIterator.init(std.testing.allocator, fat_config, 2, stream); defer cluster_chain.deinit(); var buff: [32]u8 = undefined; _ = try cluster_chain.read(buff[0..]); var it = Fat32FS(@TypeOf(cluster_chain.stream)).EntryIterator{ .allocator = undefined, .cluster_block = buff[0..], .index = 0, .cluster_chain = cluster_chain, }; const actual = (try it.nextImp()) orelse return error.TestFail; defer actual.deinit(); try expectEqualSlices(u8, actual.short_name.getSFNName()[0..], "BSHORT TXT"); try expectEqual(actual.long_name, null); } test "EntryIterator.nextImp - long name only" { const fat_config = FATConfig{ .bytes_per_sector = 32, .sectors_per_cluster 
= 1, .reserved_sectors = 0, .hidden_sectors = undefined, .total_sectors = undefined, .sectors_per_fat = 1, .root_directory_cluster = undefined, .fsinfo_sector = undefined, .backup_boot_sector = undefined, .has_fs_info = undefined, .number_free_clusters = undefined, .next_free_cluster = undefined, .cluster_end_marker = 0x0FFFFFFF, };
    // NOTE(review): reformatted from a single whitespace-collapsed line; code tokens unchanged.
    // This short name files are taken from a real FAT32 implementation.
    // This will also read the next cluster chain.
    // FAT 2 long then blank
    // FAT 4 2 long entries, no associated short
    var buff_stream = [_]u8{
        // FAT region
        0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F, 0x05, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F,
        // Backup FAT region
        0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F, 0x05, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F,
        // Data region cluster 1
        0x41, 0x42, 0x00, 0x73, 0x00, 0x68, 0x00, 0x6F, 0x00, 0x72, 0x00, 0x0F, 0x00, 0xA8, 0x74, 0x00, 0x2E, 0x00, 0x74, 0x00, 0x78, 0x00, 0x74, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F,
        // Data region cluster 2
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        // Data region cluster 3
        0x41, 0x42, 0x00, 0x73, 0x00, 0x68, 0x00, 0x6F, 0x00, 0x72, 0x00, 0x0F, 0x00, 0xA8, 0x74, 0x00, 0x2E, 0x00, 0x74, 0x00, 0x78, 0x00, 0x74, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F,
        // Data region cluster 4
        0x41, 0x42, 0x00, 0x73, 0x00, 0x68, 0x00, 0x6F, 0x00, 0x72, 0x00, 0x0F, 0x00, 0xA8, 0x74, 0x00, 0x2E, 0x00, 0x74, 0x00, 0x78, 0x00, 0x74, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F,
    };
    // FAT 2 test
    {
        var stream = &std.io.fixedBufferStream(buff_stream[0..]);
        var cluster_chain = try
Fat32FS(@TypeOf(stream)).ClusterChainIterator.init(std.testing.allocator, fat_config, 2, stream);
        // NOTE(review): reformatted from a single whitespace-collapsed line; code tokens unchanged.
        defer cluster_chain.deinit();
        var buff: [32]u8 = undefined;
        _ = try cluster_chain.read(buff[0..]);
        var it = Fat32FS(@TypeOf(cluster_chain.stream)).EntryIterator{ .allocator = std.testing.allocator, .cluster_block = buff[0..], .index = 0, .cluster_chain = cluster_chain, };
        // A long-name entry with no following short entry is an orphan.
        try expectError(error.Orphan, it.nextImp());
    }
    // FAT 4 test
    {
        var stream = &std.io.fixedBufferStream(buff_stream[0..]);
        var cluster_chain = try Fat32FS(@TypeOf(stream)).ClusterChainIterator.init(std.testing.allocator, fat_config, 4, stream);
        defer cluster_chain.deinit();
        var buff: [32]u8 = undefined;
        _ = try cluster_chain.read(buff[0..]);
        var it = Fat32FS(@TypeOf(cluster_chain.stream)).EntryIterator{ .allocator = std.testing.allocator, .cluster_block = buff[0..], .index = 0, .cluster_chain = cluster_chain, };
        try expectError(error.Orphan, it.nextImp());
    }
}

// A long-name chain whose checksum byte does not match the short entry must be rejected.
test "EntryIterator.nextImp - long name, incorrect check sum" {
    const fat_config = FATConfig{ .bytes_per_sector = 32, .sectors_per_cluster = 1, .reserved_sectors = 0, .hidden_sectors = undefined, .total_sectors = undefined, .sectors_per_fat = 1, .root_directory_cluster = undefined, .fsinfo_sector = undefined, .backup_boot_sector = undefined, .has_fs_info = undefined, .number_free_clusters = undefined, .next_free_cluster = undefined, .cluster_end_marker = 0x0FFFFFFF, };
    // Values taken from a real FAT32 implementation
    // In data region cluster 1, row 4 column 2,
    // this has changed from the valid 0xA8 to a invalid 0x55 check sum
    var buff_stream = [_]u8{
        // FAT region
        0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F,
        // Backup FAT region
        0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF,
0xFF, 0xFF, 0x0F,
        // NOTE(review): reformatted from a single whitespace-collapsed line; code tokens unchanged.
        // Data region cluster 1
        0x41, 0x42, 0x00, 0x73, 0x00, 0x68, 0x00, 0x6F, 0x00, 0x72, 0x00, 0x0F, 0x00, 0x55, 0x74, 0x00, 0x2E, 0x00, 0x74, 0x00, 0x78, 0x00, 0x74, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F,
        // Data region cluster 2
        0x42, 0x53, 0x48, 0x4F, 0x52, 0x54, 0x20, 0x20, 0x54, 0x58, 0x54, 0x00, 0x10, 0xA0, 0x68, 0xA9, 0xFE, 0x50, 0x00, 0x00, 0x00, 0x00, 0x6E, 0xA9, 0xFE, 0x50, 0x04, 0x00, 0x13, 0x00, 0x00, 0x00,
        // Data region cluster 3
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    };
    var stream = &std.io.fixedBufferStream(buff_stream[0..]);
    var cluster_chain = try Fat32FS(@TypeOf(stream)).ClusterChainIterator.init(std.testing.allocator, fat_config, 2, stream);
    defer cluster_chain.deinit();
    var buff: [32]u8 = undefined;
    _ = try cluster_chain.read(buff[0..]);
    var it = Fat32FS(@TypeOf(cluster_chain.stream)).EntryIterator{ .allocator = std.testing.allocator, .cluster_block = buff[0..], .index = 0, .cluster_chain = cluster_chain, };
    // Bad checksum means the long entries do not belong to the short entry -> orphan.
    try expectError(error.Orphan, it.nextImp());
}

// A long-name chain with a missing middle sequence entry must be rejected.
test "EntryIterator.nextImp - long name missing entry" {
    // 0x43
    // 0x01
    // missing 0x02
    const fat_config = FATConfig{ .bytes_per_sector = 32, .sectors_per_cluster = 1, .reserved_sectors = 0, .hidden_sectors = undefined, .total_sectors = undefined, .sectors_per_fat = 1, .root_directory_cluster = undefined, .fsinfo_sector = undefined, .backup_boot_sector = undefined, .has_fs_info = undefined, .number_free_clusters = undefined, .next_free_cluster = undefined, .cluster_end_marker = 0x0FFFFFFF, };
    // Values taken from a real FAT32 implementation
    // In data region cluster 1, row 4 column 2,
    // this has changed from the valid 0xA8 to a invalid 0x55 check sum
    var buff_stream = [_]u8{
        // FAT region
        0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF,
0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F,
        // NOTE(review): reformatted from a single whitespace-collapsed line; code tokens unchanged.
        // Backup FAT region
        0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F,
        // Data region cluster 1
        0x43, 0x6E, 0x00, 0x67, 0x00, 0x6E, 0x00, 0x61, 0x00, 0x6D, 0x00, 0x0F, 0x00, 0x6E, 0x65, 0x00, 0x2E, 0x00, 0x74, 0x00, 0x78, 0x00, 0x74, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F,
        // Data region cluster 2
        0x01, 0x6C, 0x00, 0x6F, 0x00, 0x6F, 0x00, 0x6F, 0x00, 0x6F, 0x00, 0x0F, 0x00, 0x6E, 0x6F, 0x00, 0x6E, 0x00, 0x67, 0x00, 0x6C, 0x00, 0x6F, 0x00, 0x6F, 0x00, 0x00, 0x00, 0x6F, 0x00, 0x6F, 0x00,
        // Data region cluster 3
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    };
    var stream = &std.io.fixedBufferStream(buff_stream[0..]);
    var cluster_chain = try Fat32FS(@TypeOf(stream)).ClusterChainIterator.init(std.testing.allocator, fat_config, 2, stream);
    defer cluster_chain.deinit();
    var buff: [32]u8 = undefined;
    _ = try cluster_chain.read(buff[0..]);
    var it = Fat32FS(@TypeOf(cluster_chain.stream)).EntryIterator{ .allocator = std.testing.allocator, .cluster_block = buff[0..], .index = 0, .cluster_chain = cluster_chain, };
    // Sequence 0x43, 0x01 with 0x02 missing -> incomplete chain -> orphan.
    try expectError(error.Orphan, it.nextImp());
}

// Happy path: a complete 3-part long-name chain followed by its short entry.
test "EntryIterator.nextImp - valid short and long entry" {
    const fat_config = FATConfig{ .bytes_per_sector = 32, .sectors_per_cluster = 1, .reserved_sectors = 0, .hidden_sectors = undefined, .total_sectors = undefined, .sectors_per_fat = 1, .root_directory_cluster = undefined, .fsinfo_sector = undefined, .backup_boot_sector = undefined, .has_fs_info = undefined, .number_free_clusters = undefined, .next_free_cluster = undefined, .cluster_end_marker = 0x0FFFFFFF, };
    // Values taken from a real FAT32 implementation
    var buff_stream = [_]u8{
        // FAT region
        0xFF,
0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F,
        // NOTE(review): reformatted from a single whitespace-collapsed line; code tokens unchanged.
        // Backup FAT region
        0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F,
        // Data region cluster 1
        0x43, 0x6E, 0x00, 0x67, 0x00, 0x6E, 0x00, 0x61, 0x00, 0x6D, 0x00, 0x0F, 0x00, 0x6E, 0x65, 0x00, 0x2E, 0x00, 0x74, 0x00, 0x78, 0x00, 0x74, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F,
        // Data region cluster 2
        0x02, 0x6E, 0x00, 0x67, 0x00, 0x76, 0x00, 0x65, 0x00, 0x72, 0x00, 0x0F, 0x00, 0x6E, 0x79, 0x00, 0x6C, 0x00, 0x6F, 0x00, 0x6F, 0x00, 0x6F, 0x00, 0x6F, 0x00, 0x00, 0x00, 0x6F, 0x00, 0x6F, 0x00,
        // Data region cluster 3
        0x01, 0x6C, 0x00, 0x6F, 0x00, 0x6F, 0x00, 0x6F, 0x00, 0x6F, 0x00, 0x0F, 0x00, 0x6E, 0x6F, 0x00, 0x6E, 0x00, 0x67, 0x00, 0x6C, 0x00, 0x6F, 0x00, 0x6F, 0x00, 0x00, 0x00, 0x6F, 0x00, 0x6F, 0x00,
        // Data region cluster 4
        0x4C, 0x4F, 0x4F, 0x4F, 0x4F, 0x4F, 0x7E, 0x31, 0x54, 0x58, 0x54, 0x00, 0x18, 0xA0, 0x68, 0xA9, 0xFE, 0x50, 0x00, 0x00, 0x00, 0x00, 0x6E, 0xA9, 0xFE, 0x50, 0x08, 0x00, 0x13, 0x00, 0x00, 0x00,
    };
    var stream = &std.io.fixedBufferStream(buff_stream[0..]);
    var cluster_chain = try Fat32FS(@TypeOf(stream)).ClusterChainIterator.init(std.testing.allocator, fat_config, 2, stream);
    defer cluster_chain.deinit();
    var buff: [32]u8 = undefined;
    _ = try cluster_chain.read(buff[0..]);
    var it = Fat32FS(@TypeOf(cluster_chain.stream)).EntryIterator{ .allocator = std.testing.allocator, .cluster_block = buff[0..], .index = 0, .cluster_chain = cluster_chain, };
    // Both the 8.3 alias and the reconstructed long name should come back.
    const actual = (try it.nextImp()) orelse return error.TestFail;
    defer actual.deinit();
    try expectEqualSlices(u8, actual.short_name.getSFNName()[0..], "LOOOOO~1TXT");
    try expectEqualSlices(u8, actual.long_name.?, "looooongloooongveryloooooongname.txt");
}

test
"EntryIterator.next - skips orphan long entry" {
    // NOTE(review): reformatted from a single whitespace-collapsed line; code tokens unchanged.
    const fat_config = FATConfig{ .bytes_per_sector = 32, .sectors_per_cluster = 1, .reserved_sectors = 0, .hidden_sectors = undefined, .total_sectors = undefined, .sectors_per_fat = 1, .root_directory_cluster = undefined, .fsinfo_sector = undefined, .backup_boot_sector = undefined, .has_fs_info = undefined, .number_free_clusters = undefined, .next_free_cluster = undefined, .cluster_end_marker = 0x0FFFFFFF, };
    // Values taken from a real FAT32 implementation
    var buff_stream = [_]u8{
        // FAT region
        0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F,
        // Backup FAT region
        0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F,
        // Data region cluster 1
        0x43, 0x6E, 0x00, 0x67, 0x00, 0x6E, 0x00, 0x61, 0x00, 0x6D, 0x00, 0x0F, 0x00, 0x6E, 0x65, 0x00, 0x2E, 0x00, 0x74, 0x00, 0x78, 0x00, 0x74, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F,
        // Missing 0x02
        // Data region cluster 2
        0x01, 0x6C, 0x00, 0x6F, 0x00, 0x6F, 0x00, 0x6F, 0x00, 0x6F, 0x00, 0x0F, 0x00, 0x6E, 0x6F, 0x00, 0x6E, 0x00, 0x67, 0x00, 0x6C, 0x00, 0x6F, 0x00, 0x6F, 0x00, 0x00, 0x00, 0x6F, 0x00, 0x6F, 0x00,
        // Data region cluster 3
        0x4C, 0x4F, 0x4F, 0x4F, 0x4F, 0x4F, 0x7E, 0x31, 0x54, 0x58, 0x54, 0x00, 0x18, 0xA0, 0x68, 0xA9, 0xFE, 0x50, 0x00, 0x00, 0x00, 0x00, 0x6E, 0xA9, 0xFE, 0x50, 0x08, 0x00, 0x13, 0x00, 0x00, 0x00,
        // Data region cluster 4
        0x42, 0x2E, 0x00, 0x74, 0x00, 0x78, 0x00, 0x74, 0x00, 0x00, 0x00, 0x0F, 0x00, 0xE9, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F,
        // Data region cluster 5
        0x01, 0x72, 0x00, 0x61, 0x00, 0x6D, 0x00, 0x64, 0x00, 0x69, 0x00, 0x0F, 0x00, 0xE9, 0x73, 0x00, 0x6B, 0x00, 0x5F, 0x00, 0x74, 0x00, 0x65,
0x00, 0x73, 0x00, 0x00, 0x00, 0x74, 0x00, 0x31, 0x00,
        // NOTE(review): reformatted from a single whitespace-collapsed line; code tokens unchanged.
        // Data region cluster 6
        0x52, 0x41, 0x4D, 0x44, 0x49, 0x53, 0x7E, 0x31, 0x54, 0x58, 0x54, 0x00, 0x18, 0x34, 0x47, 0x76, 0xF9, 0x50, 0x00, 0x00, 0x00, 0x00, 0x48, 0x76, 0xF9, 0x50, 0x03, 0x00, 0x10, 0x00, 0x00, 0x00,
    };
    var stream = &std.io.fixedBufferStream(buff_stream[0..]);
    var cluster_chain = try Fat32FS(@TypeOf(stream)).ClusterChainIterator.init(std.testing.allocator, fat_config, 2, stream);
    defer cluster_chain.deinit();
    var buff: [32]u8 = undefined;
    _ = try cluster_chain.read(buff[0..]);
    var it = Fat32FS(@TypeOf(cluster_chain.stream)).EntryIterator{ .allocator = std.testing.allocator, .cluster_block = buff[0..], .index = 0, .cluster_chain = cluster_chain, };
    // First entry: the orphan chain is skipped, leaving only the short name.
    const actual1 = (try it.next()) orelse return error.TestFail;
    defer actual1.deinit();
    try expectEqualSlices(u8, actual1.short_name.getSFNName()[0..], "LOOOOO~1TXT");
    try expectEqual(actual1.long_name, null);
    // Second entry: a valid long+short pair is returned intact.
    const actual2 = (try it.next()) orelse return error.TestFail;
    defer actual2.deinit();
    try expectEqualSlices(u8, actual2.short_name.getSFNName()[0..], "RAMDIS~1TXT");
    try expectEqualSlices(u8, actual2.long_name.?, "ramdisk_test1.txt");
    try expectEqual(try it.next(), null);
}

// init must free everything it allocated when the allocator fails part-way through.
test "EntryIterator.init - free on OutOfMemory" {
    const fat_config = FATConfig{ .bytes_per_sector = 16, .sectors_per_cluster = 1, .reserved_sectors = 0, .hidden_sectors = undefined, .total_sectors = undefined, .sectors_per_fat = 1, .root_directory_cluster = undefined, .fsinfo_sector = undefined, .backup_boot_sector = undefined, .has_fs_info = undefined, .number_free_clusters = undefined, .next_free_cluster = undefined, .cluster_end_marker = 0x0FFFFFFF, };
    var buff_stream = [_]u8{
        // FAT region
        0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F,
        // Backup FAT region
        0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F,
        // Data region
        'a', 'b', 'c', 'd', '1', '2', '3', '4', 'A', 'B', 'C', 'D',
'!', '"', '$', '%',
    };
    // NOTE(review): reformatted from a single whitespace-collapsed line; code tokens unchanged.
    var stream = &std.io.fixedBufferStream(buff_stream[0..]);
    // Fail each allocation in turn and check init surfaces OutOfMemory cleanly.
    const allocations: usize = 2;
    var i: usize = 0;
    while (i < allocations) : (i += 1) {
        var fa = std.testing.FailingAllocator.init(std.testing.allocator, i);
        try expectError(error.OutOfMemory, Fat32FS(@TypeOf(stream)).EntryIterator.init(fa.allocator(), fat_config, 2, stream));
    }
}

// init must free its buffers when the underlying stream is too short to read a cluster.
test "EntryIterator.init - free on BadRead" {
    const fat_config = FATConfig{ .bytes_per_sector = 16, .sectors_per_cluster = 1, .reserved_sectors = 0, .hidden_sectors = undefined, .total_sectors = undefined, .sectors_per_fat = 1, .root_directory_cluster = undefined, .fsinfo_sector = undefined, .backup_boot_sector = undefined, .has_fs_info = undefined, .number_free_clusters = undefined, .next_free_cluster = undefined, .cluster_end_marker = 0x0FFFFFFF, };
    var buff_stream = [_]u8{
        // FAT region
        0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F,
        // Backup FAT region
        0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F,
        // Data region (too short)
        'a', 'b', 'c', 'd', '1', '2', '3', '4',
    };
    var stream = &std.io.fixedBufferStream(buff_stream[0..]);
    try expectError(error.BadRead, Fat32FS(@TypeOf(stream)).EntryIterator.init(std.testing.allocator, fat_config, 2, stream));
}

test "Fat32FS.getRootNode" {
    const test_fat32_image = try std.fs.cwd().openFile("test/fat32/test_fat32.img", .{});
    defer test_fat32_image.close();
    var test_fs = try initialiseFAT32(std.testing.allocator, test_fat32_image);
    defer test_fs.destroy() catch unreachable;
    // Root of a FAT32 volume lives at cluster 2.
    try expectEqual(test_fs.fs.getRootNode(test_fs.fs), &test_fs.root_node.node.Dir);
    try expectEqual(test_fs.root_node.cluster, 2);
    try expectEqual(test_fs.fat_config.root_directory_cluster, 2);
}

test "Fat32FS.createNode - dir" {
    const test_fat32_image = try std.fs.cwd().openFile("test/fat32/test_fat32.img", .{});
    defer test_fat32_image.close();
    var test_fs = try initialiseFAT32(std.testing.allocator,
test_fat32_image);
    // NOTE(review): reformatted from a single whitespace-collapsed line; code tokens unchanged.
    defer test_fs.destroy() catch unreachable;
    const dir_node = try test_fs.createNode(3, 0, 0, 0, .CREATE_DIR);
    defer std.testing.allocator.destroy(dir_node);
    try expect(dir_node.isDir());
    try expect(test_fs.opened_files.contains(dir_node));
    const opened_info = test_fs.opened_files.fetchRemove(dir_node).?.value;
    defer std.testing.allocator.destroy(opened_info);
    try expectEqual(opened_info.cluster, 3);
    try expectEqual(opened_info.size, 0);
}

test "Fat32FS.createNode - file" {
    const test_fat32_image = try std.fs.cwd().openFile("test/fat32/test_fat32.img", .{});
    defer test_fat32_image.close();
    var test_fs = try initialiseFAT32(std.testing.allocator, test_fat32_image);
    defer test_fs.destroy() catch unreachable;
    const file_node = try test_fs.createNode(4, 16, 0, 0, .CREATE_FILE);
    defer std.testing.allocator.destroy(file_node);
    try expect(file_node.isFile());
    try expect(test_fs.opened_files.contains(file_node));
    const opened_info = test_fs.opened_files.fetchRemove(file_node).?.value;
    defer std.testing.allocator.destroy(opened_info);
    try expectEqual(opened_info.cluster, 4);
    try expectEqual(opened_info.size, 16);
}

// Symlinks are not supported by this FAT32 implementation.
test "Fat32FS.createNode - symlink" {
    const test_fat32_image = try std.fs.cwd().openFile("test/fat32/test_fat32.img", .{});
    defer test_fat32_image.close();
    var test_fs = try initialiseFAT32(std.testing.allocator, test_fat32_image);
    defer test_fs.destroy() catch unreachable;
    try expectError(error.InvalidFlags, test_fs.createNode(4, 16, 0, 0, .CREATE_SYMLINK));
}

test "Fat32FS.createNode - no create" {
    const test_fat32_image = try std.fs.cwd().openFile("test/fat32/test_fat32.img", .{});
    defer test_fat32_image.close();
    var test_fs = try initialiseFAT32(std.testing.allocator, test_fat32_image);
    defer test_fs.destroy() catch unreachable;
    try expectError(error.InvalidFlags, test_fs.createNode(4, 16, 0, 0, .NO_CREATION));
}

test "Fat32FS.createNode - free memory" {
    const test_fat32_image = try std.fs.cwd().openFile("test/fat32/test_fat32.img", .{});
defer test_fat32_image.close();
    // NOTE(review): reformatted from a single whitespace-collapsed line; code tokens unchanged.
    var test_fs = try initialiseFAT32(std.testing.allocator, test_fat32_image);
    defer test_fs.destroy() catch unreachable;
    // There 2 allocations
    var allocations: usize = 0;
    while (allocations < 2) : (allocations += 1) {
        var fa = std.testing.FailingAllocator.init(std.testing.allocator, allocations);
        const allocator = fa.allocator();
        test_fs.allocator = allocator;
        try expectError(error.OutOfMemory, test_fs.createNode(3, 16, 0, 0, .CREATE_FILE));
    }
}

test "Fat32FS.getDirCluster - root dir" {
    const test_fat32_image = try std.fs.cwd().openFile("test/fat32/test_fat32.img", .{});
    defer test_fat32_image.close();
    var test_fs = try initialiseFAT32(std.testing.allocator, test_fat32_image);
    defer test_fs.destroy() catch unreachable;
    var test_node_1 = try test_fs.createNode(3, 16, 0, 0, .CREATE_FILE);
    defer test_node_1.File.close();
    // The root dir always maps to cluster 2.
    const actual = try test_fs.getDirCluster(&test_fs.root_node.node.Dir);
    try expectEqual(actual, 2);
}

test "Fat32FS.getDirCluster - sub dir" {
    const test_fat32_image = try std.fs.cwd().openFile("test/fat32/test_fat32.img", .{});
    defer test_fat32_image.close();
    var test_fs = try initialiseFAT32(std.testing.allocator, test_fat32_image);
    defer test_fs.destroy() catch unreachable;
    var test_node_1 = try test_fs.createNode(5, 0, 0, 0, .CREATE_DIR);
    defer test_node_1.Dir.close();
    const actual = try test_fs.getDirCluster(&test_node_1.Dir);
    try expectEqual(actual, 5);
}

test "Fat32FS.getDirCluster - not opened dir" {
    const test_fat32_image = try std.fs.cwd().openFile("test/fat32/test_fat32.img", .{});
    defer test_fat32_image.close();
    var test_fs = try initialiseFAT32(std.testing.allocator, test_fat32_image);
    defer test_fs.destroy() catch unreachable;
    var test_node_1 = try test_fs.createNode(5, 0, 0, 0, .CREATE_DIR);
    // Remove the node from the opened-files map to simulate a stale handle.
    const elem = test_fs.opened_files.fetchRemove(test_node_1).?.value;
    std.testing.allocator.destroy(elem);
    try expectError(error.NotOpened, test_fs.getDirCluster(&test_node_1.Dir));
std.testing.allocator.destroy(test_node_1);
}

// NOTE(review): reformatted from a single whitespace-collapsed line; code tokens unchanged.
// openImpl must propagate OutOfMemory from its entry-iterator allocations.
test "Fat32FS.openImpl - entry iterator failed init" {
    const test_fat32_image = try std.fs.cwd().openFile("test/fat32/test_fat32.img", .{});
    defer test_fat32_image.close();
    var test_fs = try initialiseFAT32(std.testing.allocator, test_fat32_image);
    defer test_fs.destroy() catch unreachable;
    var test_node_1 = try test_fs.createNode(5, 0, 0, 0, .CREATE_DIR);
    defer test_node_1.Dir.close();
    var fa = std.testing.FailingAllocator.init(std.testing.allocator, 1);
    const allocator = fa.allocator();
    test_fs.allocator = allocator;
    try expectError(error.OutOfMemory, Fat32FS(@TypeOf(test_fat32_image)).openImpl(test_fs.fs, &test_node_1.Dir, "file.txt"));
}

test "Fat32FS.openImpl - entry iterator failed next" {
    const test_fat32_image = try std.fs.cwd().openFile("test/fat32/test_fat32.img", .{});
    defer test_fat32_image.close();
    var test_fs = try initialiseFAT32(std.testing.allocator, test_fat32_image);
    defer test_fs.destroy() catch unreachable;
    var fa = std.testing.FailingAllocator.init(std.testing.allocator, 2);
    const allocator = fa.allocator();
    test_fs.allocator = allocator;
    try expectError(error.OutOfMemory, Fat32FS(@TypeOf(test_fat32_image)).openImpl(test_fs.fs, &test_fs.root_node.node.Dir, "short.txt"));
}

test "Fat32FS.openImpl - entry iterator failed 2nd next" {
    const test_fat32_image = try std.fs.cwd().openFile("test/fat32/test_fat32.img", .{});
    defer test_fat32_image.close();
    var test_fs = try initialiseFAT32(std.testing.allocator, test_fat32_image);
    defer test_fs.destroy() catch unreachable;
    var fa = std.testing.FailingAllocator.init(std.testing.allocator, 3);
    const allocator = fa.allocator();
    test_fs.allocator = allocator;
    try expectError(error.OutOfMemory, Fat32FS(@TypeOf(test_fat32_image)).openImpl(test_fs.fs, &test_fs.root_node.node.Dir, "short.txt"));
}

test "Fat32FS.openImpl - match short name" {
    const test_fat32_image = try std.fs.cwd().openFile("test/fat32/test_fat32.img", .{});
    defer test_fat32_image.close();
    var
test_fs = try initialiseFAT32(std.testing.allocator, test_fat32_image);
    // NOTE(review): reformatted from a single whitespace-collapsed line; code tokens unchanged.
    defer test_fs.destroy() catch unreachable;
    const file_node = try Fat32FS(@TypeOf(test_fat32_image)).openImpl(test_fs.fs, &test_fs.root_node.node.Dir, "short.txt");
    defer file_node.File.close();
}

test "Fat32FS.openImpl - match long name" {
    // NOTE(review): skipped upstream; the intended body is kept below as comments.
    return error.SkipZigTest;
    //const test_fat32_image = try std.fs.cwd().openFile("test/fat32/test_fat32.img", .{});
    //defer test_fat32_image.close();
    //
    //var test_fs = try initialiseFAT32(std.testing.allocator, test_fat32_image);
    //defer test_fs.destroy() catch unreachable;
    //
    //const file_node = try Fat32FS(@TypeOf(test_fat32_image)).openImpl(test_fs.fs, &test_fs.root_node.node.Dir, "insanely_long_insanely_long_insanely_long_insanely_long_insanely_long_insanely_long_insanely_long_insanely_long_insanely_long_insanely_long_insanely_long_insanely_long_insanely_long_insanely_long_insanely_long_insanely_long.txt");
}

test "Fat32FS.openImpl - no match" {
    const test_fat32_image = try std.fs.cwd().openFile("test/fat32/test_fat32.img", .{});
    defer test_fat32_image.close();
    var test_fs = try initialiseFAT32(std.testing.allocator, test_fat32_image);
    defer test_fs.destroy() catch unreachable;
    var test_node_1 = try test_fs.createNode(5, 0, 0, 0, .CREATE_DIR);
    defer test_node_1.Dir.close();
    try expectError(vfs.Error.NoSuchFileOrDir, Fat32FS(@TypeOf(test_fat32_image)).openImpl(test_fs.fs, &test_node_1.Dir, "file.txt"));
}

test "Fat32FS.open - no create - hand crafted" {
    var test_file_buf = try std.testing.allocator.alloc(u8, 1024 * 1024);
    defer std.testing.allocator.free(test_file_buf);
    var stream = &std.io.fixedBufferStream(test_file_buf[0..]);
    try mkfat32.Fat32.make(.{ .image_size = test_file_buf.len }, stream, true);
    var test_fs = try initialiseFAT32(std.testing.allocator, stream);
    defer test_fs.destroy() catch unreachable;
    var entry_buff = [_]u8{
        // Long entry 3
        0x43, 0x6E, 0x00, 0x67, 0x00, 0x6E, 0x00, 0x61, 0x00, 0x6D, 0x00, 0x0F, 0x00, 0x6E, 0x65, 0x00, 0x2E, 0x00,
0x74, 0x00, 0x78, 0x00, 0x74, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F,
        // NOTE(review): reformatted from a single whitespace-collapsed line; code tokens unchanged.
        // Long entry 2
        0x02, 0x6E, 0x00, 0x67, 0x00, 0x76, 0x00, 0x65, 0x00, 0x72, 0x00, 0x0F, 0x00, 0x6E, 0x79, 0x00, 0x6C, 0x00, 0x6F, 0x00, 0x6F, 0x00, 0x6F, 0x00, 0x6F, 0x00, 0x00, 0x00, 0x6F, 0x00, 0x6F, 0x00,
        // Long entry 1
        0x01, 0x6C, 0x00, 0x6F, 0x00, 0x6F, 0x00, 0x6F, 0x00, 0x6F, 0x00, 0x0F, 0x00, 0x6E, 0x6F, 0x00, 0x6E, 0x00, 0x67, 0x00, 0x6C, 0x00, 0x6F, 0x00, 0x6F, 0x00, 0x00, 0x00, 0x6F, 0x00, 0x6F, 0x00,
        // Short entry
        0x4C, 0x4F, 0x4F, 0x4F, 0x4F, 0x4F, 0x7E, 0x31, 0x54, 0x58, 0x54, 0x00, 0x18, 0xA0, 0x68, 0xA9, 0xFE, 0x50, 0x00, 0x00, 0x00, 0x00, 0x6E, 0xA9, 0xFE, 0x50, 0x08, 0x00, 0x13, 0x00, 0x00, 0x00,
        // Long entry 2
        0x42, 0x2E, 0x00, 0x74, 0x00, 0x78, 0x00, 0x74, 0x00, 0x00, 0x00, 0x0F, 0x00, 0xE9, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F,
        // Long entry 1
        0x01, 0x72, 0x00, 0x61, 0x00, 0x6D, 0x00, 0x64, 0x00, 0x69, 0x00, 0x0F, 0x00, 0xE9, 0x73, 0x00, 0x6B, 0x00, 0x5F, 0x00, 0x74, 0x00, 0x65, 0x00, 0x73, 0x00, 0x00, 0x00, 0x74, 0x00, 0x31, 0x00,
        // Short entry
        0x52, 0x41, 0x4D, 0x44, 0x49, 0x53, 0x7E, 0x31, 0x54, 0x58, 0x54, 0x00, 0x18, 0x34, 0x47, 0x76, 0xF9, 0x50, 0x00, 0x00, 0x00, 0x00, 0x48, 0x76, 0xF9, 0x50, 0x03, 0x00, 0x10, 0x00, 0x00, 0x00,
    };
    // Goto root dir and write a long and short entry
    const sector = test_fs.fat_config.clusterToSector(test_fs.root_node.cluster);
    try test_fs.stream.seekableStream().seekTo(sector * test_fs.fat_config.bytes_per_sector);
    try test_fs.stream.writer().writeAll(entry_buff[0..]);
    try vfs.setRoot(test_fs.root_node.node);
    const file = try vfs.openFile("/ramdisk_test1.txt", .NO_CREATION);
    defer file.close();
    try expect(test_fs.opened_files.contains(@ptrCast(*const vfs.Node, file)));
    const opened_info = test_fs.opened_files.get(@ptrCast(*const vfs.Node, file)).?;
    try expectEqual(opened_info.cluster, 3);
    try expectEqual(opened_info.size, 16);
}

// Recursively opens every file/dir under `path` through the vfs, mirroring the on-disk tree.
fn testOpenRec(dir_node:
*const vfs.DirNode, path: []const u8) anyerror!void {
    // NOTE(review): reformatted from a single whitespace-collapsed line; code tokens unchanged.
    var test_files = try std.fs.cwd().openDir(path, .{ .iterate = true });
    defer test_files.close();
    var it = test_files.iterate();
    while (try it.next()) |file| {
        if (file.kind == .Directory) {
            // Build "<path>/<name>" and recurse into the sub-directory.
            var dir_path = try std.testing.allocator.alloc(u8, path.len + file.name.len + 1);
            defer std.testing.allocator.free(dir_path);
            std.mem.copy(u8, dir_path[0..], path);
            dir_path[path.len] = '/';
            std.mem.copy(u8, dir_path[path.len + 1 ..], file.name);
            const new_dir = &(try dir_node.open(file.name, .NO_CREATION, .{})).Dir;
            defer new_dir.close();
            try testOpenRec(new_dir, dir_path);
        } else {
            const open_file = &(try dir_node.open(file.name, .NO_CREATION, .{})).File;
            defer open_file.close();
        }
    }
}

test "Fat32FS.open - no create - all files" {
    const test_fat32_image = try std.fs.cwd().openFile("test/fat32/test_fat32.img", .{});
    defer test_fat32_image.close();
    var test_fs = try initialiseFAT32(std.testing.allocator, test_fat32_image);
    defer test_fs.destroy() catch unreachable;
    try vfs.setRoot(test_fs.root_node.node);
    try testOpenRec(&test_fs.root_node.node.Dir, "test/fat32/test_files");
}

test "Fat32FS.open - create file" {
    var test_file_buf = try std.testing.allocator.alloc(u8, 1024 * 1024);
    defer std.testing.allocator.free(test_file_buf);
    var stream = &std.io.fixedBufferStream(test_file_buf[0..]);
    try mkfat32.Fat32.make(.{ .image_size = test_file_buf.len }, stream, true);
    var test_fs = try initialiseFAT32(std.testing.allocator, stream);
    defer test_fs.destroy() catch unreachable;
    try vfs.setRoot(test_fs.root_node.node);
    // Open and close
    const open_file = try vfs.openFile("/fileαfile€file.txt", .CREATE_FILE);
    open_file.close();
    // Can't open it as a dir
    try expectError(error.IsAFile, vfs.openDir("/fileαfile€file.txt", .NO_CREATION));
    // Can we open the same file
    const read_file = try vfs.openFile("/fileαfile€file.txt", .NO_CREATION);
    defer read_file.close();
    // Reads nothing
    var buff = [_]u8{0xAA} ** 512;
    const read =
read_file.read(buff[0..]);
    // NOTE(review): reformatted from a single whitespace-collapsed line; code tokens unchanged.
    // NOTE(review): unlike the later read tests there is no `try` on this read() —
    // verify read() here cannot fail or that comparing the error union is intended.
    try expectEqual(read, 0);
    try expectEqualSlices(u8, buff[0..], &[_]u8{0xAA} ** 512);
}

test "Fat32FS.open - create directory" {
    var test_file_buf = try std.testing.allocator.alloc(u8, 1024 * 1024);
    defer std.testing.allocator.free(test_file_buf);
    var stream = &std.io.fixedBufferStream(test_file_buf[0..]);
    try mkfat32.Fat32.make(.{ .image_size = test_file_buf.len }, stream, true);
    var test_fs = try initialiseFAT32(std.testing.allocator, stream);
    defer test_fs.destroy() catch unreachable;
    try vfs.setRoot(test_fs.root_node.node);
    // Open and close
    const open_dir = try vfs.openDir("/fileαfile€file", .CREATE_DIR);
    open_dir.close();
    // Can't open it as a file
    try expectError(error.IsADirectory, vfs.openFile("/fileαfile€file", .NO_CREATION));
    const open = try vfs.openDir("/fileαfile€file", .NO_CREATION);
    defer open.close();
}

test "Fat32FS.open - create symlink" {
    var test_file_buf = try std.testing.allocator.alloc(u8, 1024 * 1024);
    defer std.testing.allocator.free(test_file_buf);
    var stream = &std.io.fixedBufferStream(test_file_buf[0..]);
    try mkfat32.Fat32.make(.{ .image_size = test_file_buf.len }, stream, true);
    var test_fs = try initialiseFAT32(std.testing.allocator, stream);
    defer test_fs.destroy() catch unreachable;
    try vfs.setRoot(test_fs.root_node.node);
    try expectError(error.InvalidFlags, vfs.openSymlink("/fileαfile€file.txt", "/file.txt", .CREATE_SYMLINK));
}

test "Fat32FS.open - create nested directories" {
    var test_file_buf = try std.testing.allocator.alloc(u8, 1024 * 1024);
    defer std.testing.allocator.free(test_file_buf);
    var stream = &std.io.fixedBufferStream(test_file_buf[0..]);
    try mkfat32.Fat32.make(.{ .image_size = test_file_buf.len }, stream, true);
    var test_fs = try initialiseFAT32(std.testing.allocator, stream);
    defer test_fs.destroy() catch unreachable;
    try vfs.setRoot(test_fs.root_node.node);
    const open1 = try vfs.openDir("/fileαfile€file", .CREATE_DIR);
    defer open1.close();
    const open2 = try
vfs.openDir("/fileαfile€file/folder", .CREATE_DIR);
    // NOTE(review): reformatted from a single whitespace-collapsed line; code tokens unchanged.
    defer open2.close();
    const open3 = try vfs.openDir("/fileαfile€file/folder/1", .CREATE_DIR);
    defer open3.close();
    const open4 = try vfs.openDir("/fileαfile€file/folder/1/2", .CREATE_DIR);
    defer open4.close();
    const open5 = try vfs.openDir("/fileαfile€file/folder/1/2/3", .CREATE_DIR);
    defer open5.close();
    const open6 = try vfs.openDir("/fileαfile€file/folder/1/2/3/end", .CREATE_DIR);
    defer open6.close();
    const open_dir = try vfs.openDir("/fileαfile€file/folder/1/2/3/end", .NO_CREATION);
    defer open_dir.close();
}

test "Fat32FS.read - not opened" {
    const test_fat32_image = try std.fs.cwd().openFile("test/fat32/test_fat32.img", .{});
    defer test_fat32_image.close();
    var test_fs = try initialiseFAT32(std.testing.allocator, test_fat32_image);
    defer test_fs.destroy() catch unreachable;
    // Craft a node
    var node = try std.testing.allocator.create(vfs.Node);
    defer std.testing.allocator.destroy(node);
    node.* = .{ .File = .{ .fs = test_fs.fs } };
    try expectError(error.NotOpened, node.File.read(&[_]u8{}));
}

test "Fat32FS.read - cluster iterator init fail" {
    const test_fat32_image = try std.fs.cwd().openFile("test/fat32/test_fat32.img", .{});
    defer test_fat32_image.close();
    var test_fs = try initialiseFAT32(std.testing.allocator, test_fat32_image);
    defer test_fs.destroy() catch unreachable;
    var test_node = try test_fs.createNode(5, 16, 0, 0, .CREATE_FILE);
    defer test_node.File.close();
    var fa = std.testing.FailingAllocator.init(std.testing.allocator, 0);
    const allocator = fa.allocator();
    test_fs.allocator = allocator;
    var buff = [_]u8{0xAA} ** 128;
    try expectError(error.OutOfMemory, test_node.File.read(buff[0..]));
}

test "Fat32FS.read - buffer smaller than file" {
    const test_fat32_image = try std.fs.cwd().openFile("test/fat32/test_fat32.img", .{});
    defer test_fat32_image.close();
    var test_fs = try initialiseFAT32(std.testing.allocator, test_fat32_image);
    defer test_fs.destroy() catch unreachable;
    try
vfs.setRoot(test_fs.root_node.node);
    // NOTE(review): reformatted from a single whitespace-collapsed line; code tokens unchanged.
    const test_node = try vfs.openFile("/short.txt", .NO_CREATION);
    defer test_node.close();
    // Only the first 8 bytes fit.
    var buff = [_]u8{0xAA} ** 8;
    const read = try test_node.read(buff[0..]);
    try expectEqualSlices(u8, buff[0..read], "short.tx");
}

test "Fat32FS.read - buffer bigger than file" {
    const test_fat32_image = try std.fs.cwd().openFile("test/fat32/test_fat32.img", .{});
    defer test_fat32_image.close();
    var test_fs = try initialiseFAT32(std.testing.allocator, test_fat32_image);
    defer test_fs.destroy() catch unreachable;
    try vfs.setRoot(test_fs.root_node.node);
    const test_node = try vfs.openFile("/short.txt", .NO_CREATION);
    defer test_node.close();
    var buff = [_]u8{0xAA} ** 16;
    const read = try test_node.read(buff[0..]);
    try expectEqualSlices(u8, buff[0..read], "short.txt");
    // The rest should be unchanged
    try expectEqualSlices(u8, buff[read..], &[_]u8{0xAA} ** 7);
}

test "Fat32FS.read - large" {
    const test_fat32_image = try std.fs.cwd().openFile("test/fat32/test_fat32.img", .{});
    defer test_fat32_image.close();
    var test_fs = try initialiseFAT32(std.testing.allocator, test_fat32_image);
    defer test_fs.destroy() catch unreachable;
    try vfs.setRoot(test_fs.root_node.node);
    const test_node = try vfs.openFile("/large_file.txt", .NO_CREATION);
    defer test_node.close();
    // Spans multiple clusters; compare against the embedded fixture byte-for-byte.
    var buff = [_]u8{0xAA} ** 8450;
    const read = try test_node.read(buff[0..]);
    try expectEqual(read, 8450);
    const large_file_content = @embedFile("../../../test/fat32/test_files/large_file.txt");
    try expectEqualSlices(u8, buff[0..], large_file_content[0..]);
}

// Recursively reads every file under `path` and checks its content; the large
// fixture file is only compared when `read_big` is set.
fn testReadRec(dir_node: *const vfs.DirNode, path: []const u8, read_big: bool) anyerror!void {
    var test_files = try std.fs.cwd().openDir(path, .{ .iterate = true });
    defer test_files.close();
    var it = test_files.iterate();
    while (try it.next()) |file| {
        if (file.kind == .Directory) {
            var dir_path = try std.testing.allocator.alloc(u8, path.len + file.name.len + 1);
            defer std.testing.allocator.free(dir_path);
            std.mem.copy(u8, dir_path[0..],
path); dir_path[path.len] = '/'; std.mem.copy(u8, dir_path[path.len + 1 ..], file.name); const new_dir = &(try dir_node.open(file.name, .NO_CREATION, .{})).Dir; defer new_dir.close(); try testReadRec(new_dir, dir_path, read_big); } else { const open_file = &(try dir_node.open(file.name, .NO_CREATION, .{})).File; defer open_file.close(); // Have tested the large file if (!read_big and std.mem.eql(u8, file.name, "large_file.txt")) { continue; } else if (read_big and std.mem.eql(u8, file.name, "large_file.txt")) { var buff = [_]u8{0xAA} ** 8450; const large_file_content = @embedFile("../../../test/fat32/test_files/large_file.txt"); const read = try open_file.read(buff[0..]); try expectEqualSlices(u8, buff[0..], large_file_content[0..]); try expectEqual(read, 8450); continue; } // Big enough var buff = [_]u8{0xAA} ** 256; const read = try open_file.read(buff[0..]); // The file content is the same as the file name try expectEqual(file.name.len, read); try expectEqualSlices(u8, buff[0..read], file.name[0..]); } } } test "Fat32FS.read - all test files" { const test_fat32_image = try std.fs.cwd().openFile("test/fat32/test_fat32.img", .{}); defer test_fat32_image.close(); var test_fs = try initialiseFAT32(std.testing.allocator, test_fat32_image); defer test_fs.destroy() catch unreachable; try vfs.setRoot(test_fs.root_node.node); // Check we can open all the expected files correctly var test_files = try std.fs.cwd().openDir("test/fat32/test_files", .{ .iterate = true }); defer test_files.close(); try testReadRec(&test_fs.root_node.node.Dir, "test/fat32/test_files", false); } test "Fat32FS.findNextFreeCluster - free on error" { const fat_config = FATConfig{ .bytes_per_sector = 32, .sectors_per_cluster = 1, .reserved_sectors = 0, .hidden_sectors = undefined, .total_sectors = undefined, .sectors_per_fat = 1, .root_directory_cluster = undefined, .fsinfo_sector = undefined, .backup_boot_sector = undefined, .has_fs_info = false, .number_free_clusters = undefined, 
.next_free_cluster = undefined,
        .cluster_end_marker = 0x0FFFFFFF,
    };

    // Too small
    var fat_buff_stream = [_]u8{
        // FAT region 1
        0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF,
        0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
        0x05, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F,
    };

    var stream = &std.io.fixedBufferStream(fat_buff_stream[0..]);
    var test_fs = try testFAT32FS(std.testing.allocator, stream, fat_config);
    defer test_fs.destroy() catch unreachable;

    try expectError(error.BadRead, test_fs.findNextFreeCluster(2, null));
}

test "Fat32FS.findNextFreeCluster - alloc cluster in first sector" {
    const fat_config = FATConfig{
        .bytes_per_sector = 32,
        .sectors_per_cluster = 1,
        .reserved_sectors = 0,
        .hidden_sectors = undefined,
        .total_sectors = undefined,
        .sectors_per_fat = 2,
        .root_directory_cluster = undefined,
        .fsinfo_sector = undefined,
        .backup_boot_sector = undefined,
        .has_fs_info = false,
        .number_free_clusters = undefined,
        .next_free_cluster = undefined,
        .cluster_end_marker = 0x0FFFFFFF,
    };

    // 6th entry is free
    var fat_buff_stream = [_]u8{
        // FAT region 1
        0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF,
        0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
        0x05, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        // FAT region 2
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        // Backup FAT region 1
        0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF,
        0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
        0x05, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        // Backup FAT region 2
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    };

    var stream = &std.io.fixedBufferStream(fat_buff_stream[0..]);
    var test_fs = try testFAT32FS(std.testing.allocator, stream, fat_config);
    defer test_fs.destroy() catch unreachable;

    const cluster = try test_fs.findNextFreeCluster(2, null);
    try expectEqual(cluster, 6);
    // check the FAT where the update would happen + backup FAT
    try expectEqualSlices(u8, fat_buff_stream[24..28], &[_]u8{ 0xFF, 0xFF, 0xFF, 0x0F });
    try expectEqualSlices(u8, fat_buff_stream[88..92], &[_]u8{ 0xFF, 0xFF, 0xFF, 0x0F });
}

test "Fat32FS.findNextFreeCluster - alloc cluster in second sector" {
    const fat_config = FATConfig{
        .bytes_per_sector = 32,
        .sectors_per_cluster = 1,
        .reserved_sectors = 0,
        .hidden_sectors = undefined,
        .total_sectors = undefined,
        .sectors_per_fat = 2,
        .root_directory_cluster = undefined,
        .fsinfo_sector = undefined,
        .backup_boot_sector = undefined,
        .has_fs_info = false,
        .number_free_clusters = undefined,
        .next_free_cluster = undefined,
        .cluster_end_marker = 0x0FFFFFFF,
    };

    // 6th entry is free
    var fat_buff_stream = [_]u8{
        // FAT region 1
        0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF,
        0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
        0x05, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F,
        0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F,
        // FAT region 2
        0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        // Backup FAT region 1
        0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF,
        0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
        0x05, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F,
        0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F,
        // Backup FAT region 2
        0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    };

    var stream = &std.io.fixedBufferStream(fat_buff_stream[0..]);
    var test_fs = try testFAT32FS(std.testing.allocator, stream, fat_config);
    defer test_fs.destroy() catch unreachable;

    const cluster = try test_fs.findNextFreeCluster(10, null);
    try expectEqual(cluster, 10);
    // check the FAT where the update would happen + backup FAT
    try expectEqualSlices(u8, fat_buff_stream[40..44], &[_]u8{ 0xFF, 0xFF, 0xFF, 0x0F });
    try expectEqualSlices(u8, fat_buff_stream[104..108], &[_]u8{ 0xFF, 0xFF, 0xFF, 0x0F });
}

test "Fat32FS.findNextFreeCluster - alloc cluster over sector boundary" {
    const fat_config = FATConfig{
        .bytes_per_sector = 32,
        .sectors_per_cluster = 1,
        .reserved_sectors = 0,
        .hidden_sectors = undefined,
        .total_sectors = undefined,
        .sectors_per_fat = 2,
        .root_directory_cluster = undefined,
        .fsinfo_sector = undefined,
        .backup_boot_sector = undefined,
        .has_fs_info = false,
        .number_free_clusters = undefined,
        .next_free_cluster = undefined,
        .cluster_end_marker = 0x0FFFFFFF,
    };

    // 6th entry is free
    var fat_buff_stream = [_]u8{
        // FAT region 1
        0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF,
        0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
        0x05, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F,
        0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F,
        // FAT region 2
        0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        // Backup FAT region 1
        0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF,
        0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
        0x05, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F,
        0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F,
        // Backup FAT region 2
        0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    };

    var stream = &std.io.fixedBufferStream(fat_buff_stream[0..]);
    var test_fs = try testFAT32FS(std.testing.allocator, stream, fat_config);
    defer test_fs.destroy() catch unreachable;

    const cluster = try test_fs.findNextFreeCluster(2, null);
    try expectEqual(cluster, 10);
    // check the FAT where the update would happen + backup FAT
    try expectEqualSlices(u8, fat_buff_stream[24..28], &[_]u8{ 0xFF, 0xFF, 0xFF, 0x0F });
    try expectEqualSlices(u8, fat_buff_stream[88..92], &[_]u8{ 0xFF, 0xFF, 0xFF, 0x0F });
}

test "Fat32FS.findNextFreeCluster - no free cluster" {
    const fat_config = FATConfig{
        .bytes_per_sector = 32,
        .sectors_per_cluster = 1,
        .reserved_sectors = 0,
        .hidden_sectors = undefined,
        .total_sectors = undefined,
        .sectors_per_fat = 1,
        .root_directory_cluster = undefined,
        .fsinfo_sector = undefined,
        .backup_boot_sector = undefined,
        .has_fs_info = false,
        .number_free_clusters = undefined,
        .next_free_cluster = undefined,
        .cluster_end_marker = 0x0FFFFFFF,
    };

    // 6th entry is free
    var fat_buff_stream = [_]u8{
        // FAT region 1
        0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF,
        0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
        0x05, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F,
        0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F,
        // FAT region 2
        0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F,
        0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F,
        0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F,
        0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x0F,
    };

    var stream = &std.io.fixedBufferStream(fat_buff_stream[0..]);
    var test_fs = try testFAT32FS(std.testing.allocator, stream, fat_config);
    defer test_fs.destroy() catch unreachable;

    try expectError(error.DiskFull, test_fs.findNextFreeCluster(2, null));
}

test "Fat32FS.findNextFreeCluster - updates FSInfo" {
    const fat_config = FATConfig{
        .bytes_per_sector = 512,
        .sectors_per_cluster = 1,
        .reserved_sectors = 2,
        .hidden_sectors = undefined,
        .total_sectors = undefined,
        .sectors_per_fat = 2,
        .root_directory_cluster = undefined,
        .fsinfo_sector = 0,
        .backup_boot_sector = 1,
        .has_fs_info = true,
        .number_free_clusters = 10,
        .next_free_cluster = 6,
        .cluster_end_marker = 0x0FFFFFFF,
    };

    // 6th entry is free
    var buff_stream = [_]u8{0x00} ** 488 ++ [_]u8{
        // FSInfo
        0x0A, 0x00, 0x00, 0x00,
        0x06, 0x00, 0x00, 0x00,
    } ++ [_]u8{0x00} ** 504 ++ [_]u8{
        // Backup FSInfo
        0x0A, 0x00, 0x00, 0x00,
0x06, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
    } ++ [_]u8{
        // FAT region 1
        0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF,
        0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
        0x05, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    } ++ [_]u8{0x00} ** 480 ++ [_]u8{
        // FAT region 2
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    } ++ [_]u8{0x00} ** 480 ++ [_]u8{
        // Backup FAT region 1
        0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF,
        0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
        0x05, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    } ++ [_]u8{0x00} ** 480 ++ [_]u8{
        // Backup FAT region 2
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    } ++ [_]u8{0x00} ** 480;

    var stream = &std.io.fixedBufferStream(buff_stream[0..]);
    var test_fs = try testFAT32FS(std.testing.allocator, stream, fat_config);
    defer test_fs.destroy() catch unreachable;

    const cluster = try test_fs.findNextFreeCluster(2, null);
    try expectEqual(cluster, 6);
    try expectEqual(test_fs.fat_config.number_free_clusters, 9);
    try expectEqual(test_fs.fat_config.next_free_cluster, 7);
    try expectEqual(buff_stream[488], 9);
    try expectEqual(buff_stream[492], 7);
    try expectEqual(buff_stream[1000], 9);
    try expectEqual(buff_stream[1004], 7);
}

test "Fat32FS.findNextFreeCluster - updates cluster chain with parent" {
    const fat_config = FATConfig{
        .bytes_per_sector = 32,
        .sectors_per_cluster = 1,
        .reserved_sectors = 0,
        .hidden_sectors = undefined,
        .total_sectors = undefined,
        .sectors_per_fat = 2,
        .root_directory_cluster = undefined,
        .fsinfo_sector = undefined,
        .backup_boot_sector = undefined,
        .has_fs_info = false,
        .number_free_clusters = undefined,
        .next_free_cluster = undefined,
        .cluster_end_marker = 0x0FFFFFFF,
    };

    // 6th entry is free
    var fat_buff_stream = [_]u8{
        // FAT region 1
        0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF,
        0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
        0x05, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        // FAT region 2
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        // Backup FAT region 1
        0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF,
        0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
        0x05, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        // Backup FAT region 2
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    };

    var stream = &std.io.fixedBufferStream(fat_buff_stream[0..]);
    var test_fs = try testFAT32FS(std.testing.allocator, stream, fat_config);
    defer test_fs.destroy() catch unreachable;

    const cluster = try test_fs.findNextFreeCluster(2, 5);
    try expectEqual(cluster, 6);
    // check the FAT where the update would happen + backup FAT
    try expectEqualSlices(u8, fat_buff_stream[20..28], &[_]u8{ 0x06, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F });
    try expectEqualSlices(u8, fat_buff_stream[84..92], &[_]u8{ 0x06, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F });
}

test "Fat32FS.nameToLongName - name too long" {
    const long_name = [_]u8{'A'} ** 256;
    var stream = &std.io.fixedBufferStream(&[_]u8{});
    try expectError(error.InvalidName, Fat32FS(@TypeOf(stream)).nameToLongName(std.testing.allocator, long_name[0..]));
}

test "Fat32FS.nameToLongName - leading spaces" {
    const name_cases = [_][]const u8{
        " file.txt",
        "  file.txt",
        [_]u8{' '} ** 256 ++ "file.txt",
    };
    var stream =
&std.io.fixedBufferStream(&[_]u8{});
    const expected = [_]u16{ 'f', 'i', 'l', 'e', '.', 't', 'x', 't' };

    for (name_cases) |case| {
        const actual = try Fat32FS(@TypeOf(stream)).nameToLongName(std.testing.allocator, case[0..]);
        defer std.testing.allocator.free(actual);
        try expectEqualSlices(u16, expected[0..], actual);
    }
}

test "Fat32FS.nameToLongName - invalid name" {
    const name_cases = [_][]const u8{
        "\"file.txt",
        "*file.txt",
        "/file.txt",
        ":file.txt",
        "<file.txt",
        ">file.txt",
        "?file.txt",
        "\\file.txt",
        "|file.txt",
        [_]u8{0x10} ++ "file.txt",
        [_]u8{0x7F} ++ "file.txt",
        "\u{12345}file.txt",
    };
    var stream = &std.io.fixedBufferStream(&[_]u8{});

    for (name_cases) |case| {
        try expectError(error.InvalidName, Fat32FS(@TypeOf(stream)).nameToLongName(std.testing.allocator, case[0..]));
    }
}

test "Fat32FS.nameToLongName - trailing spaces or dots" {
    const name_cases = [_][]const u8{
        "file.txt ",
        "file.txt....",
        "file.txt . .",
        "file.txt" ++ [_]u8{' '} ** 256,
    };
    var stream = &std.io.fixedBufferStream(&[_]u8{});
    const expected = [_]u16{ 'f', 'i', 'l', 'e', '.', 't', 'x', 't' };

    for (name_cases) |case| {
        const actual = try Fat32FS(@TypeOf(stream)).nameToLongName(std.testing.allocator, case[0..]);
        defer std.testing.allocator.free(actual);
        try expectEqualSlices(u16, expected[0..], actual);
    }
}

test "Fat32FS.nameToLongName - valid name" {
    const name_cases = [_][]const u8{
        "....leading_dots.txt",
        "[nope].txt",
        "A_verY_Long_File_namE_With_normal_Extension.tXt",
        "dot.in.file.txt",
        "file.long_ext",
        "file.t x t",
        "insanely_long_insanely_long_insanely_long_insanely_long_insanely_long_insanely_long_insanely_long_insanely_long_insanely_long_insanely_long_insanely_long_insanely_long_insanely_long_insanely_long_insanely_long_insanely_long.txt",
        "nope.[x]",
        "s p a c e s.txt",
        "UTF16.€xt",
        "UTF16€.txt",
        "αlpha.txt",
        "file.txt",
    };
    var stream = &std.io.fixedBufferStream(&[_]u8{});

    for (name_cases) |case| {
        // Can just test no error
        const actual = try Fat32FS(@TypeOf(stream)).nameToLongName(std.testing.allocator, case[0..]);
        defer std.testing.allocator.free(actual);
    }
}

test "Fat32FS.isValidSFNChar - invalid" {
    var stream = &std.io.fixedBufferStream(&[_]u8{});
    try expectEqual(Fat32FS(@TypeOf(stream)).isValidSFNChar(' '), null);
    try expectEqual(Fat32FS(@TypeOf(stream)).isValidSFNChar('€'), null);
    try expectEqual(Fat32FS(@TypeOf(stream)).isValidSFNChar('+'), null);
    try expectEqual(Fat32FS(@TypeOf(stream)).isValidSFNChar(','), null);
    try expectEqual(Fat32FS(@TypeOf(stream)).isValidSFNChar(';'), null);
    try expectEqual(Fat32FS(@TypeOf(stream)).isValidSFNChar('='), null);
    try expectEqual(Fat32FS(@TypeOf(stream)).isValidSFNChar('['), null);
    try expectEqual(Fat32FS(@TypeOf(stream)).isValidSFNChar(']'), null);
    try expectEqual(Fat32FS(@TypeOf(stream)).isValidSFNChar('α'), 0xE0);
    try expectEqual(Fat32FS(@TypeOf(stream)).isValidSFNChar('a'), 'a');
}

test "Fat32FS.longNameToShortName - leading dots and spaces" {
    // Using valid long names
    const name_cases = [_][]const u8{
        "....file.txt",
        ". . file.txt",
    };
    var stream = &std.io.fixedBufferStream(&[_]u8{});
    // NOTE(review): short names are fixed 11-byte 8.3 fields; the expected
    // value is space-padded to exactly 11 bytes.
    const expected = "FILE~1  TXT";

    for (name_cases) |case| {
        const long_name = try Fat32FS(@TypeOf(stream)).nameToLongName(std.testing.allocator, case[0..]);
        defer std.testing.allocator.free(long_name);
        const actual = try Fat32FS(@TypeOf(stream)).longNameToShortName(long_name, &[_][11]u8{});
        try expectEqualSlices(u8, actual[0..], expected[0..]);
    }
}

test "Fat32FS.longNameToShortName - embedded spaces" {
    // Using valid long names
    const name_cases = [_][]const u8{
        "f i l e.txt",
        "fi le.txt",
        "file.t x t",
        "file.tx t",
    };
    var stream = &std.io.fixedBufferStream(&[_]u8{});
    const expected = "FILE~1  TXT";

    for (name_cases) |case| {
        const long_name = try Fat32FS(@TypeOf(stream)).nameToLongName(std.testing.allocator, case[0..]);
        defer std.testing.allocator.free(long_name);
        const actual = try Fat32FS(@TypeOf(stream)).longNameToShortName(long_name, &[_][11]u8{});
        try expectEqualSlices(u8, actual[0..], expected[0..]);
    }
}

test "Fat32FS.longNameToShortName - dot before end" {
    // Using valid long names
    const name_cases = [_][]const u8{
        "fi.le.txt",
        "f.i.l.e.txt",
        "fi.....le.txt",
        "fi. le.txt",
    };
    var stream = &std.io.fixedBufferStream(&[_]u8{});
    const expected = "FILE~1  TXT";

    for (name_cases) |case| {
        const long_name = try Fat32FS(@TypeOf(stream)).nameToLongName(std.testing.allocator, case[0..]);
        defer std.testing.allocator.free(long_name);
        const actual = try Fat32FS(@TypeOf(stream)).longNameToShortName(long_name, &[_][11]u8{});
        try expectEqualSlices(u8, actual[0..], expected[0..]);
    }
}

test "Fat32FS.longNameToShortName - long name" {
    // Using valid long names
    const name_cases = [_][]const u8{
        "loooooong.txt",
        "loooooo.ng.txt",
        "loooooo.ng€.txt",
        "looooo€.ng.txt",
        "loooooong.txttttt",
        "looooo.txttttt",
    };
    var stream = &std.io.fixedBufferStream(&[_]u8{});
    const expected = "LOOOOO~1TXT";

    for (name_cases) |case| {
        const long_name = try Fat32FS(@TypeOf(stream)).nameToLongName(std.testing.allocator, case[0..]);
        defer std.testing.allocator.free(long_name);
        const actual = try Fat32FS(@TypeOf(stream)).longNameToShortName(long_name, &[_][11]u8{});
        try expectEqualSlices(u8, actual[0..], expected[0..]);
    }
}

test "Fat32FS.longNameToShortName - short name" {
    // Using valid long names
    const name_cases = [_][]const u8{
        "file1234.txt",
        "FiLe1234.txt",
    };
    var stream = &std.io.fixedBufferStream(&[_]u8{});
    const expected = "FILE1234TXT";

    for (name_cases) |case| {
        const long_name = try Fat32FS(@TypeOf(stream)).nameToLongName(std.testing.allocator, case[0..]);
        defer std.testing.allocator.free(long_name);
        const actual = try Fat32FS(@TypeOf(stream)).longNameToShortName(long_name, &[_][11]u8{});
        try expectEqualSlices(u8, actual[0..], expected[0..]);
    }
}

test "Fat32FS.longNameToShortName - invalid short name characters" {
    // Using valid long names
    const name_cases = [_][]const u8{
        "+file.txt",
        ",file.txt",
        ";file.txt",
        "=file.txt",
        "[file.txt",
        "]file.txt",
        "€file.txt",
    };
    var stream = &std.io.fixedBufferStream(&[_]u8{});
    const expected = "_FILE~1 TXT";

    for (name_cases) |case| {
        const long_name = try Fat32FS(@TypeOf(stream)).nameToLongName(std.testing.allocator,
case[0..]);
        defer std.testing.allocator.free(long_name);
        const actual = try Fat32FS(@TypeOf(stream)).longNameToShortName(long_name, &[_][11]u8{});
        try expectEqualSlices(u8, actual[0..], expected[0..]);
    }
}

test "Fat32FS.longNameToShortName - existing name short" {
    const existing_names = &[_][11]u8{
        "FILE    TXT".*,
        "FILE~1  TXT".*,
        "FILE~A  TXT".*,
        "FILE~2  TXT".*,
    };
    var stream = &std.io.fixedBufferStream(&[_]u8{});
    const expected = "FILE~3  TXT";

    const long_name = try Fat32FS(@TypeOf(stream)).nameToLongName(std.testing.allocator, "file.txt");
    defer std.testing.allocator.free(long_name);
    const actual = try Fat32FS(@TypeOf(stream)).longNameToShortName(long_name, existing_names);
    try expectEqualSlices(u8, actual[0..], expected[0..]);
}

test "Fat32FS.longNameToShortName - existing name short rev" {
    // Same as above but with the existing names in reverse order.
    const existing_names = &[_][11]u8{
        "FILE~2  TXT".*,
        "FILE~A  TXT".*,
        "FILE~1  TXT".*,
        "FILE    TXT".*,
    };
    var stream = &std.io.fixedBufferStream(&[_]u8{});
    const expected = "FILE~3  TXT";

    const long_name = try Fat32FS(@TypeOf(stream)).nameToLongName(std.testing.allocator, "file.txt");
    defer std.testing.allocator.free(long_name);
    const actual = try Fat32FS(@TypeOf(stream)).longNameToShortName(long_name, existing_names);
    try expectEqualSlices(u8, actual[0..], expected[0..]);
}

test "Fat32FS.longNameToShortName - existing name long" {
    const existing_names = &[_][11]u8{
        "FILEFILETXT".*,
        "FILEFI~1TXT".*,
        "FILEFI~ATXT".*,
        "FILEFI~2TXT".*,
    };
    var stream = &std.io.fixedBufferStream(&[_]u8{});
    const expected = "FILEFI~3TXT";

    const long_name = try Fat32FS(@TypeOf(stream)).nameToLongName(std.testing.allocator, "filefilefile.txt");
    defer std.testing.allocator.free(long_name);
    const actual = try Fat32FS(@TypeOf(stream)).longNameToShortName(long_name, existing_names);
    try expectEqualSlices(u8, actual[0..], expected[0..]);
}

test "Fat32FS.longNameToShortName - existing name long no match" {
    const existing_names = &[_][11]u8{
        "FILEFI~1TXT".*,
        "FILEFI~ATXT".*,
        "FILEFI~2TXT".*,
    };
    var stream = &std.io.fixedBufferStream(&[_]u8{});
    const expected = "FILEFILETXT";

    const long_name = try Fat32FS(@TypeOf(stream)).nameToLongName(std.testing.allocator, "filefile.txt");
    defer std.testing.allocator.free(long_name);
    const actual = try Fat32FS(@TypeOf(stream)).longNameToShortName(long_name, existing_names);
    try expectEqualSlices(u8, actual[0..], expected[0..]);
}

test "Fat32FS.longNameToShortName - trail number to large" {
    const existing_names = &[_][11]u8{
        "F~999999TXT".*,
    };
    var stream = &std.io.fixedBufferStream(&[_]u8{});

    const long_name = try Fat32FS(@TypeOf(stream)).nameToLongName(std.testing.allocator, "filefilefile.txt");
    defer std.testing.allocator.free(long_name);
    try expectError(error.InvalidName, Fat32FS(@TypeOf(stream)).longNameToShortName(long_name, existing_names));
}

test "Fat32FS.longNameToShortName - large trail number" {
    const existing_names = &[_][11]u8{
        "FILE    TXT".*,
        "FILE~2  TXT".*,
        "FILE~3  TXT".*,
        "FILE~4  TXT".*,
        "FILE~5  TXT".*,
        "FILE~6  TXT".*,
        "FILE~7  TXT".*,
        "FILE~8  TXT".*,
        "FILE~9  TXT".*,
    };
    var stream = &std.io.fixedBufferStream(&[_]u8{});
    const expected = "FILE~10 TXT";

    const long_name = try Fat32FS(@TypeOf(stream)).nameToLongName(std.testing.allocator, "file.txt");
    defer std.testing.allocator.free(long_name);
    const actual = try Fat32FS(@TypeOf(stream)).longNameToShortName(long_name, existing_names);
    try expectEqualSlices(u8, actual[0..], expected[0..]);
}

test "Fat32FS.longNameToShortName - CP437" {
    var stream = &std.io.fixedBufferStream(&[_]u8{});
    // 'α' maps to CP437 code point 0xE0.
    const expected = [_]u8{0xE0} ++ "LPHA   TXT";

    const long_name = try Fat32FS(@TypeOf(stream)).nameToLongName(std.testing.allocator, "αlpha.txt");
    defer std.testing.allocator.free(long_name);
    const actual = try Fat32FS(@TypeOf(stream)).longNameToShortName(long_name, &[_][11]u8{});
    try expectEqualSlices(u8, actual[0..], expected[0..]);
}

test "Fat32FS.createLongNameEntry - less than 13 characters" {
    var stream = &std.io.fixedBufferStream(&[_]u8{});

    // Pre-calculated checksum for file.txt => FILE    TXT
    const check_sum: u8 = 25;

    // Using valid long name
    const long_name = try Fat32FS(@TypeOf(stream)).nameToLongName(std.testing.allocator, "file.txt");
    defer std.testing.allocator.free(long_name);
    const entries = try Fat32FS(@TypeOf(stream)).createLongNameEntry(std.testing.allocator, long_name, check_sum);
    defer std.testing.allocator.free(entries);
    try expectEqual(entries.len, 1);

    const expected = LongName{
        .order = 0x41,
        .first = [_]u16{'f'} ++ [_]u16{'i'} ++ [_]u16{'l'} ++ [_]u16{'e'} ++ [_]u16{'.'},
        .check_sum = check_sum,
        .second = [_]u16{'t'} ++ [_]u16{'x'} ++ [_]u16{'t'} ++ [_]u16{ 0x0000, 0xFFFF, 0xFFFF },
        .third = [_]u16{ 0xFFFF, 0xFFFF },
    };
    try expectEqual(entries[0], expected);
}

test "Fat32FS.createLongNameEntry - greater than 13 characters" {
    var stream = &std.io.fixedBufferStream(&[_]u8{});

    // Pre-calculated checksum for filefilefilefile.txt => FILEFI~1TXT
    const check_sum: u8 = 123;

    // Using valid long name
    const long_name = try Fat32FS(@TypeOf(stream)).nameToLongName(std.testing.allocator, "filefilefilefile.txt");
    defer std.testing.allocator.free(long_name);
    const entries = try Fat32FS(@TypeOf(stream)).createLongNameEntry(std.testing.allocator, long_name, check_sum);
    defer std.testing.allocator.free(entries);
    try expectEqual(entries.len, 2);

    var expected = [_]LongName{
        LongName{
            .order = 0x42,
            .first = [_]u16{'i'} ++ [_]u16{'l'} ++ [_]u16{'e'} ++ [_]u16{'.'} ++ [_]u16{'t'},
            .check_sum = check_sum,
            .second = [_]u16{'x'} ++ [_]u16{'t'} ++ [_]u16{ 0x0000, 0xFFFF, 0xFFFF, 0xFFFF },
            .third = [_]u16{ 0xFFFF, 0xFFFF },
        },
        LongName{
            .order = 0x01,
            .first = [_]u16{'f'} ++ [_]u16{'i'} ++ [_]u16{'l'} ++ [_]u16{'e'} ++ [_]u16{'f'},
            .check_sum = check_sum,
            .second = [_]u16{'i'} ++ [_]u16{'l'} ++ [_]u16{'e'} ++ [_]u16{'f'} ++ [_]u16{'i'} ++ [_]u16{'l'},
            .third = [_]u16{'e'} ++ [_]u16{'f'},
        },
    };

    try expectEqual(entries[0], expected[0]);
    try expectEqual(entries[1], expected[1]);
}

test "Fat32FS.createLongNameEntry - max 255 characters" {
    var stream = &std.io.fixedBufferStream(&[_]u8{});

    // Pre-calculated checksum for A**255 => AAAAAA~1TXT
    const check_sum: u8 = 17;

    // Using valid long name
    const name = [_]u8{'A'} ** 255;
    const long_name = try Fat32FS(@TypeOf(stream)).nameToLongName(std.testing.allocator, name[0..]);
    defer std.testing.allocator.free(long_name);
    const entries = try Fat32FS(@TypeOf(stream)).createLongNameEntry(std.testing.allocator, long_name, check_sum);
    defer std.testing.allocator.free(entries);
    try expectEqual(entries.len, 20);

    const UA = [_]u16{'A'};
    var expected = [_]LongName{LongName{
        .order = 0x00,
        .first = UA ** 5,
        .check_sum = check_sum,
        .second = UA ** 6,
        .third = UA ** 2,
    }} ** 20;

    // Entries are stored last-first, so the orders count down.
    for (expected) |*e, i| {
        e.order = 20 - @intCast(u8, i);
    }
    expected[0] = LongName{
        .order = 0x54, // 0x40 | 0x14
        .first = UA ** 5,
        .check_sum = check_sum,
        .second = UA ** 3 ++ [_]u16{ 0x0000, 0xFFFF, 0xFFFF },
        .third = [_]u16{ 0xFFFF, 0xFFFF },
    };

    for (expected) |ex, i| {
        try expectEqual(entries[i], ex);
    }
}

test "Fat32FS.createShortNameEntry" {
    var stream = &std.io.fixedBufferStream(&[_]u8{});
    const actual = Fat32FS(@TypeOf(stream)).createShortNameEntry("FILE    TXT".*, .None, 0x10);
    // Expects 12:12:13 12/12/2012 from mock arch
    const expected = ShortName{
        .name = "FILE    ".*,
        .extension = "TXT".*,
        .attributes = 0x00,
        .time_created_tenth = 0x64, // 100 (1 sec)
        .time_created = 0x6186,
        .date_created = 0x418C,
        .date_last_access = 0x418C,
        .cluster_high = 0x00,
        .time_last_modification = 0x6186,
        .date_last_modification = 0x418C,
        .cluster_low = 0x10,
        .size = 0x00000000,
    };
    try expectEqual(actual, expected);
}

test "Fat32FS.writeEntries - all free cluster" {
    const fat_config = FATConfig{
        .bytes_per_sector = 32,
        .sectors_per_cluster = 1,
        .reserved_sectors = 0,
        .hidden_sectors = undefined,
        .total_sectors = undefined,
        .sectors_per_fat = 1,
        .root_directory_cluster = undefined,
        .fsinfo_sector = undefined,
        .backup_boot_sector = undefined,
        .has_fs_info = undefined,
        .number_free_clusters = undefined,
        .next_free_cluster =
undefined,
        .cluster_end_marker = 0x0FFFFFFF,
    };

    var buff_stream = [_]u8{
        // FAT region 1
        0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF,
        0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        // Backup FAT region 1
        0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF,
        0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        // Data region
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    };

    var stream = &std.io.fixedBufferStream(buff_stream[0..]);
    var test_fs = try testFAT32FS(std.testing.allocator, stream, fat_config);
    defer test_fs.destroy() catch unreachable;

    const entries = FatDirEntry{
        .short_entry = Fat32FS(@TypeOf(stream)).createShortNameEntry("FILE    TXT".*, .None, 3),
        .long_entry = &[_]LongName{},
    };

    // Convert to bytes
    var expected_bytes: [32]u8 = undefined;
    initBytes(ShortName, entries.short_entry, expected_bytes[0..]);

    _ = try test_fs.writeEntries(entries, 2, 3, 0);

    try expectEqualSlices(u8, expected_bytes[0..], buff_stream[64..]);
}

test "Fat32FS.writeEntries - half free cluster" {
    const fat_config = FATConfig{
        .bytes_per_sector = 64,
        .sectors_per_cluster = 1,
        .reserved_sectors = 0,
        .hidden_sectors = undefined,
        .total_sectors = undefined,
        .sectors_per_fat = 1,
        .root_directory_cluster = undefined,
        .fsinfo_sector = undefined,
        .backup_boot_sector = undefined,
        .has_fs_info = undefined,
        .number_free_clusters = undefined,
        .next_free_cluster = undefined,
        .cluster_end_marker = 0x0FFFFFFF,
    };

    var buff_stream = [_]u8{
        // FAT region 1
        0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF,
        0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        // Backup FAT region 1
        0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF,
        0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        // Data region
        0x49, 0x4E, 0x53, 0x41, 0x4E, 0x45, 0x7E, 0x31,
        0x54, 0x58, 0x54, 0x20, 0x00, 0x00, 0x9B, 0xB9,
        0x88, 0x51, 0x88, 0x51, 0x00, 0x00, 0x9B, 0xB9,
        0x88, 0x51, 0x0D, 0x00, 0xE3, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    };

    var stream = &std.io.fixedBufferStream(buff_stream[0..]);
    var test_fs = try testFAT32FS(std.testing.allocator, stream, fat_config);
    defer test_fs.destroy() catch unreachable;

    const entries = FatDirEntry{
        .short_entry = Fat32FS(@TypeOf(stream)).createShortNameEntry("FILE    TXT".*, .None, 3),
        .long_entry = &[_]LongName{},
    };

    // Convert to bytes
    var expected_bytes: [32]u8 = undefined;
    initBytes(ShortName, entries.short_entry, expected_bytes[0..]);

    _ = try test_fs.writeEntries(entries, 2, 3, 32);

    try expectEqualSlices(u8, expected_bytes[0..], buff_stream[160..]);
}

test "Fat32FS.writeEntries - full cluster" {
    const fat_config = FATConfig{
        .bytes_per_sector = 32,
        .sectors_per_cluster = 1,
        .reserved_sectors = 0,
        .hidden_sectors = undefined,
        .total_sectors = undefined,
        .sectors_per_fat = 1,
        .root_directory_cluster = undefined,
        .fsinfo_sector = undefined,
        .backup_boot_sector = undefined,
        .has_fs_info = undefined,
        .number_free_clusters = undefined,
        .next_free_cluster = undefined,
        .cluster_end_marker = 0x0FFFFFFF,
    };

    var
buff_stream = [_]u8{ // FAT region 1 0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Backup FAT region 1 0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Data region 1 0x49, 0x4E, 0x53, 0x41, 0x4E, 0x45, 0x7E, 0x31, 0x54, 0x58, 0x54, 0x20, 0x00, 0x00, 0x9B, 0xB9, 0x88, 0x51, 0x88, 0x51, 0x00, 0x00, 0x9B, 0xB9, 0x88, 0x51, 0x0D, 0x00, 0xE3, 0x00, 0x00, 0x00, // Data region 2 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }; var stream = &std.io.fixedBufferStream(buff_stream[0..]); var test_fs = try testFAT32FS(std.testing.allocator, stream, fat_config); defer test_fs.destroy() catch unreachable; const entries = FatDirEntry{ .short_entry = Fat32FS(@TypeOf(stream)).createShortNameEntry("FILE TXT".*, .None, 3), .long_entry = &[_]LongName{}, }; // Convert to bytes var expected_bytes: [32]u8 = undefined; initBytes(ShortName, entries.short_entry, expected_bytes[0..]); _ = try test_fs.writeEntries(entries, 2, 3, 32); try expectEqualSlices(u8, expected_bytes[0..], buff_stream[96..]); try expectEqualSlices(u8, buff_stream[8..16], &[_]u8{ 0x03, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F }); try expectEqualSlices(u8, buff_stream[40..48], &[_]u8{ 0x03, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F }); } test "Fat32FS.writeEntries - large entry over 3 clusters" { const fat_config = FATConfig{ .bytes_per_sector = 32, .sectors_per_cluster = 1, .reserved_sectors = 0, .hidden_sectors = undefined, .total_sectors = undefined, .sectors_per_fat = 1, .root_directory_cluster = undefined, .fsinfo_sector = undefined, .backup_boot_sector = undefined, .has_fs_info = 
undefined, .number_free_clusters = undefined, .next_free_cluster = undefined, .cluster_end_marker = 0x0FFFFFFF, }; var buff_stream = [_]u8{ // FAT region 1 0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Backup FAT region 1 0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Data region 1 0x49, 0x4E, 0x53, 0x41, 0x4E, 0x45, 0x7E, 0x31, 0x54, 0x58, 0x54, 0x20, 0x00, 0x00, 0x9B, 0xB9, 0x88, 0x51, 0x88, 0x51, 0x00, 0x00, 0x9B, 0xB9, 0x88, 0x51, 0x0D, 0x00, 0xE3, 0x00, 0x00, 0x00, // Data region 2 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Data region 3 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Data region 4 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }; var stream = &std.io.fixedBufferStream(buff_stream[0..]); var test_fs = try testFAT32FS(std.testing.allocator, stream, fat_config); defer test_fs.destroy() catch unreachable; const short_entry = Fat32FS(@TypeOf(stream)).createShortNameEntry("FILE TXT".*, .None, 3); const long_name = try Fat32FS(@TypeOf(stream)).nameToLongName(std.testing.allocator, "filefilefile.txt"); defer std.testing.allocator.free(long_name); const long_entry = try Fat32FS(@TypeOf(stream)).createLongNameEntry(std.testing.allocator, long_name, short_entry.calcCheckSum()); defer std.testing.allocator.free(long_entry); try 
expectEqual(long_entry.len, 2); const entries = FatDirEntry{ .short_entry = short_entry, .long_entry = long_entry, }; // Convert to bytes var expected_bytes: [96]u8 = undefined; initBytes(LongName, entries.long_entry[0], expected_bytes[0..32]); initBytes(LongName, entries.long_entry[1], expected_bytes[32..64]); initBytes(ShortName, entries.short_entry, expected_bytes[64..]); _ = try test_fs.writeEntries(entries, 2, 3, 32); try expectEqualSlices(u8, expected_bytes[0..], buff_stream[96..]); } test "Fat32FS.createFileOrDir - create file" { const fat_config = FATConfig{ .bytes_per_sector = 32, .sectors_per_cluster = 1, .reserved_sectors = 0, .hidden_sectors = undefined, .total_sectors = undefined, .sectors_per_fat = 1, .root_directory_cluster = 2, .fsinfo_sector = undefined, .backup_boot_sector = undefined, .has_fs_info = false, .number_free_clusters = undefined, .next_free_cluster = undefined, .cluster_end_marker = 0x0FFFFFFF, }; var buff_stream = [_]u8{ // FAT region 1 0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Backup FAT region 1 0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Data region 1 (Root dir long name) 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Data region 2 (File) 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Data region 3 (Root dir short name) 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }; var stream = &std.io.fixedBufferStream(buff_stream[0..]); var test_fs = try testFAT32FS(std.testing.allocator, stream, fat_config); defer test_fs.destroy() catch unreachable; const file = try Fat32FS(@TypeOf(stream)).createFileOrDir(test_fs.fs, &test_fs.root_node.node.Dir, "file.txt", false); defer file.File.close(); const expected_short_entry = Fat32FS(@TypeOf(stream)).createShortNameEntry("FILE TXT".*, .None, 3); const expected_long_name = try Fat32FS(@TypeOf(stream)).nameToLongName(std.testing.allocator, "file.txt"); defer std.testing.allocator.free(expected_long_name); const expected_long_entry = try Fat32FS(@TypeOf(stream)).createLongNameEntry(std.testing.allocator, expected_long_name, expected_short_entry.calcCheckSum()); defer std.testing.allocator.free(expected_long_entry); var temp_buf: [32]u8 = undefined; initBytes(LongName, expected_long_entry[0], temp_buf[0..]); try expectEqualSlices(u8, buff_stream[64..96], temp_buf[0..]); initBytes(ShortName, expected_short_entry, temp_buf[0..]); try expectEqualSlices(u8, buff_stream[128..], temp_buf[0..]); // FAT try expectEqualSlices(u8, buff_stream[8..12], &[_]u8{ 0x04, 0x00, 0x00, 0x00 }); try expectEqualSlices(u8, buff_stream[12..16], &[_]u8{ 0xFF, 0xFF, 0xFF, 0x0F }); try expectEqualSlices(u8, buff_stream[16..20], &[_]u8{ 0xFF, 0xFF, 0xFF, 0x0F }); } test "Fat32FS.createFileOrDir - create directory" { const fat_config = FATConfig{ .bytes_per_sector = 32, .sectors_per_cluster = 1, .reserved_sectors = 0, .hidden_sectors = undefined, .total_sectors = undefined, .sectors_per_fat = 1, .root_directory_cluster = 2, .fsinfo_sector = undefined, .backup_boot_sector = undefined, .has_fs_info = false, .number_free_clusters = undefined, .next_free_cluster = undefined, .cluster_end_marker = 0x0FFFFFFF, }; var buff_stream = [_]u8{ // FAT region 1 0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Backup FAT region 1 0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Data region 1 (Root dir long name) 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Data region 2 (Directory) 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Data region 3 (Root dir short name) 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }; var stream = &std.io.fixedBufferStream(buff_stream[0..]); var test_fs = try testFAT32FS(std.testing.allocator, stream, fat_config); defer test_fs.destroy() catch unreachable; const file = try Fat32FS(@TypeOf(stream)).createFileOrDir(test_fs.fs, &test_fs.root_node.node.Dir, "folder", true); defer file.Dir.close(); const expected_short_entry = Fat32FS(@TypeOf(stream)).createShortNameEntry("FOLDER ".*, .Directory, 3); const expected_long_name = try Fat32FS(@TypeOf(stream)).nameToLongName(std.testing.allocator, "folder"); defer std.testing.allocator.free(expected_long_name); const expected_long_entry = try Fat32FS(@TypeOf(stream)).createLongNameEntry(std.testing.allocator, expected_long_name, expected_short_entry.calcCheckSum()); defer std.testing.allocator.free(expected_long_entry); var temp_buf: [32]u8 = undefined; initBytes(LongName, expected_long_entry[0], temp_buf[0..]); try expectEqualSlices(u8, buff_stream[64..96], temp_buf[0..]); initBytes(ShortName, expected_short_entry, temp_buf[0..]); try 
expectEqualSlices(u8, buff_stream[128..], temp_buf[0..]); // FAT try expectEqualSlices(u8, buff_stream[8..12], &[_]u8{ 0x04, 0x00, 0x00, 0x00 }); try expectEqualSlices(u8, buff_stream[12..16], &[_]u8{ 0xFF, 0xFF, 0xFF, 0x0F }); try expectEqualSlices(u8, buff_stream[16..20], &[_]u8{ 0xFF, 0xFF, 0xFF, 0x0F }); } test "Fat32FS.createFileOrDir - create file parent cluster full" { const fat_config = FATConfig{ .bytes_per_sector = 32, .sectors_per_cluster = 1, .reserved_sectors = 0, .hidden_sectors = undefined, .total_sectors = undefined, .sectors_per_fat = 1, .root_directory_cluster = 2, .fsinfo_sector = undefined, .backup_boot_sector = undefined, .has_fs_info = false, .number_free_clusters = undefined, .next_free_cluster = undefined, .cluster_end_marker = 0x0FFFFFFF, }; var buff_stream = [_]u8{ // FAT region 1 0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Backup FAT region 1 0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Data region 1 (Root dir full) 0x49, 0x4E, 0x53, 0x41, 0x4E, 0x45, 0x7E, 0x31, 0x54, 0x58, 0x54, 0x20, 0x00, 0x00, 0x9B, 0xB9, 0x88, 0x51, 0x88, 0x51, 0x00, 0x00, 0x9B, 0xB9, 0x88, 0x51, 0x0D, 0x00, 0xE3, 0x00, 0x00, 0x00, // Data region 2 (File) 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Data region 3 (Root dir long name) 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Data region 4 (Root dir short name) 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }; var stream = &std.io.fixedBufferStream(buff_stream[0..]); var test_fs = try testFAT32FS(std.testing.allocator, stream, fat_config); defer test_fs.destroy() catch unreachable; const file = try Fat32FS(@TypeOf(stream)).createFileOrDir(test_fs.fs, &test_fs.root_node.node.Dir, "file.txt", false); defer file.File.close(); const expected_short_entry = Fat32FS(@TypeOf(stream)).createShortNameEntry("FILE TXT".*, .None, 3); const expected_long_name = try Fat32FS(@TypeOf(stream)).nameToLongName(std.testing.allocator, "file.txt"); defer std.testing.allocator.free(expected_long_name); const expected_long_entry = try Fat32FS(@TypeOf(stream)).createLongNameEntry(std.testing.allocator, expected_long_name, expected_short_entry.calcCheckSum()); defer std.testing.allocator.free(expected_long_entry); var temp_buf: [32]u8 = undefined; initBytes(LongName, expected_long_entry[0], temp_buf[0..]); try expectEqualSlices(u8, buff_stream[128..160], temp_buf[0..]); initBytes(ShortName, expected_short_entry, temp_buf[0..]); try expectEqualSlices(u8, buff_stream[160..], temp_buf[0..]); // FAT try expectEqualSlices(u8, buff_stream[8..12], &[_]u8{ 0x04, 0x00, 0x00, 0x00 }); try expectEqualSlices(u8, buff_stream[12..16], &[_]u8{ 0xFF, 0xFF, 0xFF, 0x0F }); try expectEqualSlices(u8, buff_stream[16..20], &[_]u8{ 0x05, 0x00, 0x00, 0x00 }); try expectEqualSlices(u8, buff_stream[20..24], &[_]u8{ 0xFF, 0xFF, 0xFF, 0x0F }); } test "Fat32FS.createFileOrDir - half root" { const fat_config = FATConfig{ .bytes_per_sector = 64, .sectors_per_cluster = 1, .reserved_sectors = 0, .hidden_sectors = undefined, .total_sectors = undefined, .sectors_per_fat = 1, .root_directory_cluster = 2, .fsinfo_sector = undefined, .backup_boot_sector = undefined, .has_fs_info = false, .number_free_clusters = undefined, .next_free_cluster = undefined, .cluster_end_marker = 0x0FFFFFFF, }; var 
buff_stream = [_]u8{ // FAT region 1 0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Backup FAT region 1 0xFF, 0xFF, 0xFF, 0x0F, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Data region 1 (Root long) 0x49, 0x4E, 0x53, 0x41, 0x4E, 0x45, 0x7E, 0x31, 0x54, 0x58, 0x54, 0x20, 0x00, 0x00, 0x9B, 0xB9, 0x88, 0x51, 0x88, 0x51, 0x00, 0x00, 0x9B, 0xB9, 0x88, 0x51, 0x0D, 0x00, 0xE3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Data region 2 (File) 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Data region 2 (Root short half) 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }; var stream = &std.io.fixedBufferStream(buff_stream[0..]); var test_fs = try testFAT32FS(std.testing.allocator, stream, fat_config); defer test_fs.destroy() catch unreachable; const file = try Fat32FS(@TypeOf(stream)).createFileOrDir(test_fs.fs, &test_fs.root_node.node.Dir, "file.txt", false); defer file.File.close(); const expected_short_entry = Fat32FS(@TypeOf(stream)).createShortNameEntry("FILE TXT".*, .None, 3); const expected_long_name = try Fat32FS(@TypeOf(stream)).nameToLongName(std.testing.allocator, "file.txt"); defer std.testing.allocator.free(expected_long_name); const expected_long_entry = try Fat32FS(@TypeOf(stream)).createLongNameEntry(std.testing.allocator, expected_long_name, expected_short_entry.calcCheckSum()); defer std.testing.allocator.free(expected_long_entry); var temp_buf: [32]u8 = undefined; initBytes(LongName, expected_long_entry[0], temp_buf[0..]); try expectEqualSlices(u8, buff_stream[160..192], temp_buf[0..]); initBytes(ShortName, expected_short_entry, temp_buf[0..]); try expectEqualSlices(u8, buff_stream[256..288], temp_buf[0..]); // FAT try expectEqualSlices(u8, buff_stream[8..12], &[_]u8{ 0x04, 0x00, 0x00, 0x00 }); try expectEqualSlices(u8, buff_stream[12..16], &[_]u8{ 0xFF, 0xFF, 0xFF, 0x0F }); try expectEqualSlices(u8, buff_stream[16..20], &[_]u8{ 0xFF, 0xFF, 0xFF, 0x0F }); } test "Fat32FS.write - small file" { var test_file_buf = try std.testing.allocator.alloc(u8, 1024 * 1024); defer std.testing.allocator.free(test_file_buf); var stream = &std.io.fixedBufferStream(test_file_buf[0..]); try mkfat32.Fat32.make(.{ .image_size = test_file_buf.len }, stream, false); var test_fs = try initialiseFAT32(std.testing.allocator, stream); defer test_fs.destroy() catch unreachable; try vfs.setRoot(test_fs.root_node.node); const file = try vfs.openFile("/file.txt", .CREATE_FILE); const text = "Hello, world!\n"; const written = try file.write(text[0..]); try 
expectEqual(written, text.len); var read_buf1: [text.len * 2]u8 = undefined; const read1 = try file.read(read_buf1[0..]); try expectEqual(read1, text.len); try expectEqualSlices(u8, text[0..], read_buf1[0..read1]); file.close(); const read_file = try vfs.openFile("/file.txt", .NO_CREATION); defer read_file.close(); var read_buf2: [text.len * 2]u8 = undefined; const read2 = try read_file.read(read_buf2[0..]); try expectEqual(read2, text.len); try expectEqualSlices(u8, text[0..], read_buf2[0..read2]); } test "Fat32FS.write - large file" { var test_file_buf = try std.testing.allocator.alloc(u8, 1024 * 1024); defer std.testing.allocator.free(test_file_buf); var stream = &std.io.fixedBufferStream(test_file_buf[0..]); try mkfat32.Fat32.make(.{ .image_size = test_file_buf.len }, stream, false); var test_fs = try initialiseFAT32(std.testing.allocator, stream); defer test_fs.destroy() catch unreachable; try vfs.setRoot(test_fs.root_node.node); const file = try vfs.openFile("/file.txt", .CREATE_FILE); // Check the opened file const open_info1 = test_fs.opened_files.get(@ptrCast(*const vfs.Node, file)).?; try expectEqual(open_info1.cluster, 3); try expectEqual(open_info1.size, 0); try expectEqual(open_info1.entry_cluster, 2); try expectEqual(open_info1.entry_offset, 60); const fat_offset = test_fs.fat_config.reserved_sectors * test_fs.fat_config.bytes_per_sector + 12; try expectEqualSlices(u8, test_file_buf[fat_offset .. fat_offset + 4], &[_]u8{ 0xFF, 0xFF, 0xFF, 0x0F }); const text = [_]u8{'A'} ** (8 * 1024); const written = try file.write(text[0..]); try expectEqual(written, text.len); // Check the FAT const expected_fat = [_]u32{ 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x0FFFFFFF }; try expectEqualSlices(u8, test_file_buf[fat_offset .. 
fat_offset + (16 * 4)], std.mem.sliceAsBytes(expected_fat[0..])); var read_buf1: [text.len * 2]u8 = undefined; const read1 = try file.read(read_buf1[0..]); try expectEqual(read1, text.len); try expectEqualSlices(u8, text[0..], read_buf1[0..read1]); file.close(); const read_file = try vfs.openFile("/file.txt", .NO_CREATION); defer read_file.close(); const open_info2 = test_fs.opened_files.get(@ptrCast(*const vfs.Node, read_file)).?; try expectEqual(open_info2.cluster, 3); try expectEqual(open_info2.size, text.len); try expectEqual(open_info2.entry_cluster, 2); try expectEqual(open_info2.entry_offset, 60); var read_buf2: [text.len * 2]u8 = undefined; const read2 = try read_file.read(read_buf2[0..]); try expectEqual(read2, text.len); try expectEqualSlices(u8, text[0..], read_buf2[0..read2]); } fn testWriteRec(dir_node: *const vfs.DirNode, path: []const u8) anyerror!void { var test_files = try std.fs.cwd().openDir(path, .{ .iterate = true }); defer test_files.close(); var it = test_files.iterate(); while (try it.next()) |file| { if (file.kind == .Directory) { var dir_path = try std.testing.allocator.alloc(u8, path.len + file.name.len + 1); defer std.testing.allocator.free(dir_path); std.mem.copy(u8, dir_path[0..], path); dir_path[path.len] = '/'; std.mem.copy(u8, dir_path[path.len + 1 ..], file.name); const new_dir = &(try dir_node.open(file.name, .CREATE_DIR, .{})).Dir; defer new_dir.close(); try testWriteRec(new_dir, dir_path); } else { // Open the test file const test_file = try test_files.openFile(file.name, .{}); defer test_file.close(); // Read the content const test_file_content = try test_file.readToEndAlloc(std.testing.allocator, 0xFFFF); defer std.testing.allocator.free(test_file_content); const open_file = &(try dir_node.open(file.name, .CREATE_FILE, .{})).File; defer open_file.close(); // Write the content const written = try open_file.write(test_file_content); if (written != test_file_content.len) { return error.BadWrite; } } } } test "Fat32FS.write - test 
files" {
    var test_file_buf = try std.testing.allocator.alloc(u8, 1024 * 1024);
    defer std.testing.allocator.free(test_file_buf);
    var stream = &std.io.fixedBufferStream(test_file_buf[0..]);
    try mkfat32.Fat32.make(.{ .image_size = test_file_buf.len }, stream, false);

    var test_fs = try initialiseFAT32(std.testing.allocator, stream);
    defer test_fs.destroy() catch unreachable;

    try vfs.setRoot(test_fs.root_node.node);

    // Mirror the on-disk test tree into the FAT32 image, then verify it by
    // reading everything back.
    // NOTE: removed leftover debug code that dumped the raw 1 MiB image to
    // "ig.img" in the current directory on every test run.
    try testWriteRec(&test_fs.root_node.node.Dir, "test/fat32/test_files");

    try testReadRec(&test_fs.root_node.node.Dir, "test/fat32/test_files", true);
}

test "Fat32FS.write - not enough space" {
    // Deliberately tiny image so the write below runs out of free clusters.
    var test_file_buf = try std.testing.allocator.alloc(u8, 37 * 512);
    defer std.testing.allocator.free(test_file_buf);
    var stream = &std.io.fixedBufferStream(test_file_buf[0..]);
    try mkfat32.Fat32.make(.{ .image_size = test_file_buf.len }, stream, false);

    var test_fs = try initialiseFAT32(std.testing.allocator, stream);
    defer test_fs.destroy() catch unreachable;

    try vfs.setRoot(test_fs.root_node.node);

    // A 1025-byte write needs more free clusters than this image provides, so
    // the write must fail and must leave the data region untouched (zeroed).
    const text = [_]u8{'A'} ** 1025;
    const file = try vfs.openFile("/file.txt", .CREATE_FILE);
    defer file.close();
    try expectError(error.Unexpected, file.write(text[0..]));
    const offset = test_fs.fat_config.clusterToSector(3) * test_fs.fat_config.bytes_per_sector;
    try expectEqualSlices(u8, test_file_buf[offset ..
offset + 1024], &[_]u8{0x00} ** 1024);
}

// A known-good FAT32 image must initialise without error.
test "Fat32FS.init no error" {
    const test_fat32_image = try std.fs.cwd().openFile("test/fat32/test_fat32.img", .{});
    defer test_fat32_image.close();

    var test_fs = try initialiseFAT32(std.testing.allocator, test_fat32_image);
    defer test_fs.destroy() catch unreachable;
}

// Corrupts individual header fields of a valid image in place and checks that
// initialisation reports the matching error; each field is restored to its
// valid value before the next case so the corruptions don't compound.
test "Fat32FS.init errors" {
    const test_fat32_image = try std.fs.cwd().openFile("test/fat32/test_fat32.img", .{});
    defer test_fat32_image.close();

    // Just enough bytes for the boot sector, FSInfo and the start of the FAT.
    var test_file_buf = try std.testing.allocator.alloc(u8, (32 * 512 + 4) + 1);
    defer std.testing.allocator.free(test_file_buf);

    _ = try test_fat32_image.reader().readAll(test_file_buf[0..]);

    const stream = &std.io.fixedBufferStream(test_file_buf[0..]);

    // BadMBRMagic
    test_file_buf[510] = 0x00;
    try expectError(error.BadMBRMagic, initialiseFAT32(std.testing.allocator, stream));
    test_file_buf[510] = 0x55;

    test_file_buf[511] = 0x00;
    try expectError(error.BadMBRMagic, initialiseFAT32(std.testing.allocator, stream));
    test_file_buf[511] = 0xAA;

    // BadRootCluster
    // Little endian, so just need to set the upper bytes
    test_file_buf[44] = 0;
    try expectError(error.BadRootCluster, initialiseFAT32(std.testing.allocator, stream));

    test_file_buf[44] = 1;
    try expectError(error.BadRootCluster, initialiseFAT32(std.testing.allocator, stream));

    test_file_buf[44] = 2;

    // BadFATCount
    test_file_buf[16] = 0;
    try expectError(error.BadFATCount, initialiseFAT32(std.testing.allocator, stream));

    test_file_buf[16] = 1;
    try expectError(error.BadFATCount, initialiseFAT32(std.testing.allocator, stream));

    test_file_buf[16] = 10;
    try expectError(error.BadFATCount, initialiseFAT32(std.testing.allocator, stream));

    test_file_buf[16] = 2;

    // NotMirror
    test_file_buf[40] = 1;
    try expectError(error.NotMirror, initialiseFAT32(std.testing.allocator, stream));

    test_file_buf[40] = 10;
    try expectError(error.NotMirror, initialiseFAT32(std.testing.allocator, stream));

    test_file_buf[40] = 0;

    // BadMedia
    test_file_buf[21] = 0xF0;
    try expectError(error.BadMedia,
initialiseFAT32(std.testing.allocator, stream)); test_file_buf[21] = 0xF8; // BadFat32 test_file_buf[17] = 10; try expectError(error.BadFat32, initialiseFAT32(std.testing.allocator, stream)); test_file_buf[17] = 0; test_file_buf[19] = 10; try expectError(error.BadFat32, initialiseFAT32(std.testing.allocator, stream)); test_file_buf[19] = 0; test_file_buf[22] = 10; try expectError(error.BadFat32, initialiseFAT32(std.testing.allocator, stream)); test_file_buf[22] = 0; // BadSignature test_file_buf[66] = 0x28; try expectError(error.BadSignature, initialiseFAT32(std.testing.allocator, stream)); test_file_buf[66] = 0x29; // BadFSType // Change from FAT32 to FAT16 test_file_buf[85] = '1'; test_file_buf[86] = '6'; try expectError(error.BadFSType, initialiseFAT32(std.testing.allocator, stream)); test_file_buf[85] = '3'; test_file_buf[86] = '2'; // Test the bad reads // Boot sector try expectError(error.BadRead, initialiseFAT32(std.testing.allocator, &std.io.fixedBufferStream(test_file_buf[0..510]))); // FSInfo (we have one) try expectError(error.BadRead, initialiseFAT32(std.testing.allocator, &std.io.fixedBufferStream(test_file_buf[0 .. 512 + 100]))); // FAT try expectError(error.BadRead, initialiseFAT32(std.testing.allocator, &std.io.fixedBufferStream(test_file_buf[0 .. 
(32 * 512 + 4) + 1]))); } test "Fat32FS.init free memory" { const test_fat32_image = try std.fs.cwd().openFile("test/fat32/test_fat32.img", .{}); defer test_fat32_image.close(); const allocations: usize = 5; var i: usize = 0; while (i < allocations) : (i += 1) { var fa = std.testing.FailingAllocator.init(std.testing.allocator, i); try expectError(error.OutOfMemory, initialiseFAT32(fa.allocator(), test_fat32_image)); } } test "Fat32FS.init FATConfig expected" { const test_fat32_image = try std.fs.cwd().openFile("test/fat32/test_fat32.img", .{}); defer test_fat32_image.close(); var test_fs = try initialiseFAT32(std.testing.allocator, test_fat32_image); defer test_fs.destroy() catch unreachable; // This is the expected FAT config from the initialised FAT device const expected = FATConfig{ .bytes_per_sector = 512, .sectors_per_cluster = 1, .reserved_sectors = 32, .hidden_sectors = 0, .total_sectors = 66583, .sectors_per_fat = 513, .root_directory_cluster = 2, .fsinfo_sector = 1, .backup_boot_sector = 6, .has_fs_info = true, .number_free_clusters = 65473, .next_free_cluster = 53, .cluster_end_marker = 0x0FFFFFFF, }; try expectEqual(test_fs.fat_config, expected); } test "Fat32FS.init FATConfig mix FSInfo" { const test_fat32_image = try std.fs.cwd().openFile("test/fat32/test_fat32.img", .{}); defer test_fat32_image.close(); var test_file_buf = try std.testing.allocator.alloc(u8, 1024 * 1024); defer std.testing.allocator.free(test_file_buf); _ = try test_fat32_image.reader().readAll(test_file_buf[0..]); const stream = &std.io.fixedBufferStream(test_file_buf[0..]); // No FSInfo { // Force no FSInfo test_file_buf[48] = 0x00; var test_fs = try initialiseFAT32(std.testing.allocator, stream); defer test_fs.destroy() catch unreachable; // This is the default config that should be produced from mkfat32.Fat32 const expected = FATConfig{ .bytes_per_sector = 512, .sectors_per_cluster = 1, .reserved_sectors = 32, .hidden_sectors = 0, .total_sectors = 66583, .sectors_per_fat = 513, 
.root_directory_cluster = 2, .fsinfo_sector = 0, .backup_boot_sector = 6, .has_fs_info = false, .number_free_clusters = 0xFFFFFFFF, .next_free_cluster = 0xFFFFFFFF, .cluster_end_marker = 0x0FFFFFFF, }; try expectEqual(test_fs.fat_config, expected); test_file_buf[48] = 0x01; } // Bad Signatures { // Corrupt a signature test_file_buf[512] = 0xAA; var test_fs = try initialiseFAT32(std.testing.allocator, stream); defer test_fs.destroy() catch unreachable; // This is the default config that should be produced from mkfat32.Fat32 const expected = FATConfig{ .bytes_per_sector = 512, .sectors_per_cluster = 1, .reserved_sectors = 32, .hidden_sectors = 0, .total_sectors = 66583, .sectors_per_fat = 513, .root_directory_cluster = 2, .fsinfo_sector = 1, .backup_boot_sector = 6, .has_fs_info = false, .number_free_clusters = 0xFFFFFFFF, .next_free_cluster = 0xFFFFFFFF, .cluster_end_marker = 0x0FFFFFFF, }; try expectEqual(test_fs.fat_config, expected); test_file_buf[512] = 0x52; } // Bad number_free_clusters { // Make is massive test_file_buf[512 + 4 + 480 + 4] = 0xAA; test_file_buf[512 + 4 + 480 + 5] = 0xBB; test_file_buf[512 + 4 + 480 + 6] = 0xCC; test_file_buf[512 + 4 + 480 + 7] = 0xDD; var test_fs = try initialiseFAT32(std.testing.allocator, stream); defer test_fs.destroy() catch unreachable; // This is the default config that should be produced from mkfat32.Fat32 const expected = FATConfig{ .bytes_per_sector = 512, .sectors_per_cluster = 1, .reserved_sectors = 32, .hidden_sectors = 0, .total_sectors = 66583, .sectors_per_fat = 513, .root_directory_cluster = 2, .fsinfo_sector = 1, .backup_boot_sector = 6, .has_fs_info = true, .number_free_clusters = 0xFFFFFFFF, .next_free_cluster = 53, .cluster_end_marker = 0x0FFFFFFF, }; try expectEqual(test_fs.fat_config, expected); } }
0
repos/pluto/src/kernel
repos/pluto/src/kernel/filesystem/initrd.zig
const std = @import("std"); const builtin = @import("builtin"); const is_test = std.builtin.is_test; const expect = std.testing.expect; const expectEqual = std.testing.expectEqual; const expectError = std.testing.expectError; const expectEqualSlices = std.testing.expectEqualSlices; const log = std.log.scoped(.initrd); const build_options = @import("build_options"); const Allocator = std.mem.Allocator; const AutoHashMap = std.AutoHashMap; const vfs = @import("vfs.zig"); const mem = @import("../mem.zig"); const panic = @import("../panic.zig").panic; /// The Initrd file system struct. /// Format of raw ramdisk: /// (NumOfFiles:usize)[(name_length:usize)(name:u8[name_length])(content_length:usize)(content:u8[content_length])]* pub const InitrdFS = struct { /// The ramdisk header that stores pointers for the name and file content. const InitrdHeader = struct { /// The name of the file name: []u8, /// The content of the file content: []u8, }; /// The error set for the ramdisk file system. const Error = error{ /// The error for an invalid raw ramdisk when /// parsing. InvalidRamDisk, }; const Self = @This(); /// A mapping of opened files so can easily retrieved opened files for reading. opened_files: AutoHashMap(*const vfs.Node, *InitrdHeader), /// The underlying file system fs: *vfs.FileSystem, /// The allocator used for allocating memory for opening files. allocator: Allocator, /// The list of files in the ram disk. These will be pointers into the raw ramdisk to save on /// allocations. files: []InitrdHeader, /// The root node for the ramdisk file system. This is just a root directory as there is not /// subdirectories. 
root_node: *vfs.Node,

    /// See vfs.FileSystem.instance
    instance: usize,

    /// See vfs.FileSystem.getRootNode
    fn getRootNode(fs: *const vfs.FileSystem) *const vfs.DirNode {
        var self = @fieldParentPtr(InitrdFS, "instance", fs.instance);
        return &self.root_node.Dir;
    }

    /// See vfs.FileSystem.close
    fn close(fs: *const vfs.FileSystem, node: *const vfs.Node) void {
        var self = @fieldParentPtr(InitrdFS, "instance", fs.instance);
        // As close can't error, if provided with an invalid Node that isn't opened, or when
        // trying to close the same file twice, this will just do nothing.
        if (self.opened_files.remove(node)) {
            self.allocator.destroy(node);
        }
    }

    /// See vfs.FileSystem.read
    fn read(fs: *const vfs.FileSystem, file_node: *const vfs.FileNode, bytes: []u8) (Allocator.Error || vfs.Error)!usize {
        var self = @fieldParentPtr(InitrdFS, "instance", fs.instance);
        const node = @ptrCast(*const vfs.Node, file_node);
        const file_header = self.opened_files.get(node) orelse return vfs.Error.NotOpened;
        // Copy at most bytes.len; a destination buffer shorter than the file truncates the read.
        const length = std.math.min(bytes.len, file_header.content.len);
        std.mem.copy(u8, bytes, file_header.content[0..length]);
        return length;
    }

    /// See vfs.FileSystem.write
    /// Writing is a no-op for the ramdisk; always reports 0 bytes written.
    fn write(fs: *const vfs.FileSystem, node: *const vfs.FileNode, bytes: []const u8) (Allocator.Error || vfs.Error)!usize {
        // Suppress unused var warning
        _ = fs;
        _ = node;
        _ = bytes;
        return 0;
    }

    /// See vfs.FileSystem.open
    /// Only .NO_CREATION is supported: any CREATE_* flag returns InvalidFlags since the
    /// ramdisk's file list is fixed at init time.
    fn open(fs: *const vfs.FileSystem, dir: *const vfs.DirNode, name: []const u8, flags: vfs.OpenFlags, args: vfs.OpenArgs) (Allocator.Error || vfs.Error)!*vfs.Node {
        var self = @fieldParentPtr(InitrdFS, "instance", fs.instance);
        // Suppress unused var warning
        _ = args;
        _ = dir;
        switch (flags) {
            .CREATE_DIR, .CREATE_FILE, .CREATE_SYMLINK => return vfs.Error.InvalidFlags,
            .NO_CREATION => {
                for (self.files) |*file| {
                    if (std.mem.eql(u8, file.name, name)) {
                        // Opening 2 files of the same name, will create 2 different Nodes
                        // Create a node
                        var node = try self.allocator.create(vfs.Node);
                        errdefer self.allocator.destroy(node);
                        node.* = .{ .File = .{ .fs = self.fs } };
                        try self.opened_files.put(node, file);
                        return node;
                    }
                }
                return vfs.Error.NoSuchFileOrDir;
            },
        }
    }

    ///
    /// Free all memory allocated.
    ///
    /// Arguments:
    ///     IN self: *Self - Self
    ///
    pub fn deinit(self: *Self) void {
        // If there are any files still open, then we have an error: callers must close
        // every node before tearing the file system down.
        std.debug.assert(self.opened_files.count() == 0);
        self.allocator.destroy(self.root_node);
        self.allocator.destroy(self.fs);
        self.opened_files.deinit();
        for (self.files) |entry| {
            self.allocator.free(entry.name);
            self.allocator.free(entry.content);
        }
        self.allocator.free(self.files);
        self.allocator.destroy(self);
    }

    ///
    /// Initialise a ramdisk file system from a raw ramdisk in memory provided by the bootloader in a stream.
    /// Any memory allocated will be freed.
    ///
    /// Arguments:
    ///     IN stream: *std.io.FixedBufferStream([]u8) - The stream that contains the raw ramdisk data.
    ///     IN allocator: Allocator - The allocator used for initialising any memory needed.
    ///
    /// Return: *InitrdFS
    ///     A pointer to the ram disk file system.
    ///
    /// Error: Error || error{EndOfStream} || Allocator.Error || std.io.FixedBufferStream([]u8).ReadError
    ///     error.InvalidRamDisk - If the provided raw ramdisk is invalid. This can be due to a
    ///                            mis-match of the number of files to the length of the raw
    ///                            ramdisk or the wrong length provided to cause undefined parsed
    ///                            lengths for other parts of the ramdisk.
    ///     error.EndOfStream - When reading from the stream, we reach the end of the stream
    ///                         before completing the read.
    ///     error.OutOfMemory - If there isn't enough memory for initialisation. Any memory
    ///                         allocated will be freed.
/// pub fn init(stream: *std.io.FixedBufferStream([]u8), allocator: Allocator) (Error || error{EndOfStream} || Allocator.Error)!*InitrdFS { log.info("Init\n", .{}); defer log.info("Done\n", .{}); // First @sizeOf(usize) bytes is the number of files const num_of_files = try stream.reader().readIntNative(usize); var headers = try allocator.alloc(InitrdHeader, num_of_files); errdefer allocator.free(headers); // Populate the headers var i: usize = 0; // If we error, then free any headers that we allocated. errdefer { var j: usize = 0; while (j < i) : (j += 1) { allocator.free(headers[j].name); allocator.free(headers[j].content); } } while (i < num_of_files) : (i += 1) { // We don't need to store the lengths any more as we have the slice.len const name_len = try stream.reader().readIntNative(usize); if (name_len == 0) { return Error.InvalidRamDisk; } headers[i].name = try allocator.alloc(u8, name_len); errdefer allocator.free(headers[i].name); if ((try stream.reader().readAll(headers[i].name)) != name_len) { return Error.InvalidRamDisk; } const content_len = try stream.reader().readIntNative(usize); if (content_len == 0) { return Error.InvalidRamDisk; } headers[i].content = try allocator.alloc(u8, content_len); errdefer allocator.free(headers[i].content); if ((try stream.reader().readAll(headers[i].content)) != content_len) { return Error.InvalidRamDisk; } } // If we aren't at the end, error. 
if ((try stream.getPos()) != (try stream.getEndPos())) { return Error.InvalidRamDisk; } var rd_fs = try allocator.create(InitrdFS); errdefer allocator.destroy(rd_fs); var fs = try allocator.create(vfs.FileSystem); errdefer allocator.destroy(fs); var root_node = try allocator.create(vfs.Node); root_node.* = .{ .Dir = .{ .fs = fs, .mount = null } }; fs.* = .{ .open = open, .close = close, .read = read, .write = write, .instance = &rd_fs.instance, .getRootNode = getRootNode, }; rd_fs.* = .{ .opened_files = AutoHashMap(*const vfs.Node, *InitrdHeader).init(allocator), .fs = fs, .allocator = allocator, .files = headers, .root_node = root_node, .instance = 1, }; switch (build_options.test_mode) { .Initialisation => runtimeTests(rd_fs), else => {}, } return rd_fs; } }; /// /// Crate a raw ramdisk in memory to be used to initialise the ramdisk file system. This create /// three files: test1.txt, test2.txt and test3.txt. /// /// Arguments: /// IN allocator: Allocator - The allocator to alloc the raw ramdisk. /// /// Return: []u8 /// The bytes of the raw ramdisk in memory. /// /// Error: Allocator.Error /// error.OutOfMemory - If there isn't enough memory for the in memory ramdisk. 
/// FixedBufferStream.WriterError - Writing to the fixed buffer stream failed /// error.TestExpectedEqual - An equality test failed /// fn createInitrd(allocator: Allocator) ![]u8 { // Create 3 valid ramdisk files in memory const file_names = [_][]const u8{ "test1.txt", "test2.txt", "test3.txt" }; const file_contents = [_][]const u8{ "This is a test", "This is a test: part 2", "This is a test: the prequel" }; // Ensure these two arrays are the same length std.debug.assert(file_names.len == file_contents.len); var sum: usize = 0; const files_length = for ([_]usize{ 0, 1, 2 }) |i| { sum += @sizeOf(usize) + file_names[i].len + @sizeOf(usize) + file_contents[i].len; } else sum; const total_ramdisk_len = @sizeOf(usize) + files_length; var ramdisk_bytes = try allocator.alloc(u8, total_ramdisk_len); var ramdisk_stream = std.io.fixedBufferStream(ramdisk_bytes); // Copy the data into the allocated memory try ramdisk_stream.writer().writeIntNative(usize, file_names.len); inline for ([_]usize{ 0, 1, 2 }) |i| { // Name len try ramdisk_stream.writer().writeIntNative(usize, file_names[i].len); // Name try ramdisk_stream.writer().writeAll(file_names[i]); // File len try ramdisk_stream.writer().writeIntNative(usize, file_contents[i].len); // File content try ramdisk_stream.writer().writeAll(file_contents[i]); } // Make sure we are full try expectEqual(try ramdisk_stream.getPos(), total_ramdisk_len); try expectEqual(try ramdisk_stream.getPos(), try ramdisk_stream.getEndPos()); return ramdisk_bytes; } test "init with files valid" { var ramdisk_bytes = try createInitrd(std.testing.allocator); defer std.testing.allocator.free(ramdisk_bytes); var initrd_stream = std.io.fixedBufferStream(ramdisk_bytes); var fs = try InitrdFS.init(&initrd_stream, std.testing.allocator); defer fs.deinit(); try expectEqual(fs.files.len, 3); try expectEqualSlices(u8, fs.files[0].name, "test1.txt"); try expectEqualSlices(u8, fs.files[1].content, "This is a test: part 2"); try 
expectEqual(fs.opened_files.count(), 0); } test "init with files invalid - invalid number of files" { var ramdisk_bytes = try createInitrd(std.testing.allocator); // Override the number of files std.mem.writeIntSlice(usize, ramdisk_bytes[0..], 10, builtin.cpu.arch.endian()); defer std.testing.allocator.free(ramdisk_bytes); var initrd_stream = std.io.fixedBufferStream(ramdisk_bytes); try expectError(error.InvalidRamDisk, InitrdFS.init(&initrd_stream, std.testing.allocator)); // Override the number of files std.mem.writeIntSlice(usize, ramdisk_bytes[0..], 0, builtin.cpu.arch.endian()); try expectError(error.InvalidRamDisk, InitrdFS.init(&initrd_stream, std.testing.allocator)); } test "init with files invalid - mix - bad" { // TODO: Craft a ramdisk that would parse but is invalid // This is possible, but will think about this another time // Challenge, make this a effective security vulnerability // P.S. I don't know if adding magics will stop this { var ramdisk_bytes = try createInitrd(std.testing.allocator); // Override the first file name length, make is shorter std.mem.writeIntSlice(usize, ramdisk_bytes[4..], 2, builtin.cpu.arch.endian()); defer std.testing.allocator.free(ramdisk_bytes); var initrd_stream = std.io.fixedBufferStream(ramdisk_bytes); try expectError(error.InvalidRamDisk, InitrdFS.init(&initrd_stream, std.testing.allocator)); } { var ramdisk_bytes = try createInitrd(std.testing.allocator); // Override the first file name length, make is 4 shorter std.mem.writeIntSlice(usize, ramdisk_bytes[4..], 5, builtin.cpu.arch.endian()); // Override the second file name length, make is 4 longer std.mem.writeIntSlice(usize, ramdisk_bytes[35..], 13, builtin.cpu.arch.endian()); defer std.testing.allocator.free(ramdisk_bytes); var initrd_stream = std.io.fixedBufferStream(ramdisk_bytes); try expectError(error.InvalidRamDisk, InitrdFS.init(&initrd_stream, std.testing.allocator)); } } /// The number of allocations that the init function make. 
const init_allocations: usize = 10; test "init with files cleans memory if OutOfMemory" { var i: usize = 0; while (i < init_allocations) : (i += 1) { var fa = std.testing.FailingAllocator.init(std.testing.allocator, i); var ramdisk_bytes = try createInitrd(std.testing.allocator); defer std.testing.allocator.free(ramdisk_bytes); var initrd_stream = std.io.fixedBufferStream(ramdisk_bytes); try expectError(error.OutOfMemory, InitrdFS.init(&initrd_stream, fa.allocator())); } } test "getRootNode" { var ramdisk_bytes = try createInitrd(std.testing.allocator); defer std.testing.allocator.free(ramdisk_bytes); var initrd_stream = std.io.fixedBufferStream(ramdisk_bytes); var fs = try InitrdFS.init(&initrd_stream, std.testing.allocator); defer fs.deinit(); try expectEqual(fs.fs.getRootNode(fs.fs), &fs.root_node.Dir); } test "open valid file" { var ramdisk_bytes = try createInitrd(std.testing.allocator); defer std.testing.allocator.free(ramdisk_bytes); var initrd_stream = std.io.fixedBufferStream(ramdisk_bytes); var fs = try InitrdFS.init(&initrd_stream, std.testing.allocator); defer fs.deinit(); try vfs.setRoot(fs.root_node); var file1 = try vfs.openFile("/test1.txt", .NO_CREATION); defer file1.close(); try expectEqual(fs.opened_files.count(), 1); try expectEqualSlices(u8, fs.opened_files.get(@ptrCast(*const vfs.Node, file1)).?.name, "test1.txt"); var file3_node = try vfs.open("/test3.txt", true, .NO_CREATION, .{}); defer file3_node.File.close(); try expectEqual(fs.opened_files.count(), 2); try expectEqualSlices(u8, fs.opened_files.get(file3_node).?.content, "This is a test: the prequel"); var dir1 = try vfs.openDir("/", .NO_CREATION); try expectEqual(&fs.root_node.Dir, dir1); var file2 = &(try dir1.open("test2.txt", .NO_CREATION, .{})).File; defer file2.close(); try expectEqual(fs.opened_files.count(), 3); } test "open fail with invalid flags" { var ramdisk_bytes = try createInitrd(std.testing.allocator); defer std.testing.allocator.free(ramdisk_bytes); var initrd_stream = 
std.io.fixedBufferStream(ramdisk_bytes); var fs = try InitrdFS.init(&initrd_stream, std.testing.allocator); defer fs.deinit(); try vfs.setRoot(fs.root_node); try expectError(error.InvalidFlags, vfs.openFile("/text10.txt", .CREATE_DIR)); try expectError(error.InvalidFlags, vfs.openFile("/text10.txt", .CREATE_FILE)); try expectError(error.InvalidFlags, vfs.openFile("/text10.txt", .CREATE_SYMLINK)); try expectError(error.InvalidFlags, vfs.openDir("/text10.txt", .CREATE_DIR)); try expectError(error.InvalidFlags, vfs.openDir("/text10.txt", .CREATE_FILE)); try expectError(error.InvalidFlags, vfs.openDir("/text10.txt", .CREATE_SYMLINK)); try expectError(error.InvalidFlags, vfs.openFile("/test/", .CREATE_DIR)); try expectError(error.InvalidFlags, vfs.openFile("/test/", .CREATE_FILE)); try expectError(error.InvalidFlags, vfs.openFile("/test/", .CREATE_SYMLINK)); try expectError(error.InvalidFlags, vfs.openDir("/test/", .CREATE_DIR)); try expectError(error.InvalidFlags, vfs.openDir("/test/", .CREATE_FILE)); try expectError(error.InvalidFlags, vfs.openDir("/test/", .CREATE_SYMLINK)); try expectError(error.InvalidFlags, vfs.openSymlink("/test/", "", .CREATE_FILE)); try expectError(error.InvalidFlags, vfs.openSymlink("/test/", "", .CREATE_DIR)); try expectError(error.InvalidFlags, vfs.openSymlink("/test/", "", .CREATE_SYMLINK)); } test "open fail with NoSuchFileOrDir" { var ramdisk_bytes = try createInitrd(std.testing.allocator); defer std.testing.allocator.free(ramdisk_bytes); var initrd_stream = std.io.fixedBufferStream(ramdisk_bytes); var fs = try InitrdFS.init(&initrd_stream, std.testing.allocator); defer fs.deinit(); try vfs.setRoot(fs.root_node); try expectError(error.NoSuchFileOrDir, vfs.openFile("/text10.txt", .NO_CREATION)); try expectError(error.NoSuchFileOrDir, vfs.openDir("/temp/", .NO_CREATION)); } test "open a file, out of memory" { var fa = std.testing.FailingAllocator.init(std.testing.allocator, init_allocations); var ramdisk_bytes = try 
createInitrd(std.testing.allocator); defer std.testing.allocator.free(ramdisk_bytes); var initrd_stream = std.io.fixedBufferStream(ramdisk_bytes); var fs = try InitrdFS.init(&initrd_stream, fa.allocator()); defer fs.deinit(); try vfs.setRoot(fs.root_node); try expectError(error.OutOfMemory, vfs.openFile("/test1.txt", .NO_CREATION)); } test "open two of the same file" { var ramdisk_bytes = try createInitrd(std.testing.allocator); defer std.testing.allocator.free(ramdisk_bytes); var initrd_stream = std.io.fixedBufferStream(ramdisk_bytes); var fs = try InitrdFS.init(&initrd_stream, std.testing.allocator); defer fs.deinit(); try vfs.setRoot(fs.root_node); const file1 = try vfs.openFile("/test1.txt", .NO_CREATION); defer file1.close(); const file2 = try vfs.openFile("/test1.txt", .NO_CREATION); defer file2.close(); try expectEqual(fs.opened_files.count(), 2); try expect(file1 != file2); var b1: [128]u8 = undefined; const length1 = try file1.read(b1[0..b1.len]); var b2: [128]u8 = undefined; const length2 = try file2.read(b2[0..b2.len]); try expectEqualSlices(u8, b1[0..length1], b2[0..length2]); } test "close a file" { var ramdisk_bytes = try createInitrd(std.testing.allocator); defer std.testing.allocator.free(ramdisk_bytes); var initrd_stream = std.io.fixedBufferStream(ramdisk_bytes); var fs = try InitrdFS.init(&initrd_stream, std.testing.allocator); defer fs.deinit(); try vfs.setRoot(fs.root_node); var file1 = try vfs.openFile("/test1.txt", .NO_CREATION); try expectEqual(fs.opened_files.count(), 1); var file3_node = try vfs.open("/test3.txt", true, .NO_CREATION, .{}); try expectEqual(fs.opened_files.count(), 2); file1.close(); try expectEqual(fs.opened_files.count(), 1); var dir1 = try vfs.openDir("/", .NO_CREATION); try expectEqual(&fs.root_node.Dir, dir1); var file2 = &(try dir1.open("test2.txt", .NO_CREATION, .{})).File; defer file2.close(); try expectEqual(fs.opened_files.count(), 2); file3_node.File.close(); try expectEqual(fs.opened_files.count(), 1); } test 
"close a non-opened file" { var ramdisk_bytes = try createInitrd(std.testing.allocator); defer std.testing.allocator.free(ramdisk_bytes); var initrd_stream = std.io.fixedBufferStream(ramdisk_bytes); var fs = try InitrdFS.init(&initrd_stream, std.testing.allocator); defer fs.deinit(); try vfs.setRoot(fs.root_node); // Open a valid file var file1 = try vfs.openFile("/test1.txt", .NO_CREATION); defer file1.close(); // Only one file open try expectEqual(fs.opened_files.count(), 1); // Craft a Node var fake_node = try std.testing.allocator.create(vfs.Node); defer std.testing.allocator.destroy(fake_node); fake_node.* = .{ .File = .{ .fs = fs.fs } }; fake_node.File.close(); // Still only one file open try expectEqual(fs.opened_files.count(), 1); } test "read a file" { var ramdisk_bytes = try createInitrd(std.testing.allocator); defer std.testing.allocator.free(ramdisk_bytes); var initrd_stream = std.io.fixedBufferStream(ramdisk_bytes); var fs = try InitrdFS.init(&initrd_stream, std.testing.allocator); defer fs.deinit(); try vfs.setRoot(fs.root_node); var file1 = try vfs.openFile("/test1.txt", .NO_CREATION); defer file1.close(); var bytes1: [128]u8 = undefined; const length1 = try file1.read(bytes1[0..bytes1.len]); try expectEqualSlices(u8, bytes1[0..length1], "This is a test"); var bytes2: [5]u8 = undefined; const length2 = try file1.read(bytes2[0..bytes2.len]); try expectEqualSlices(u8, bytes2[0..length2], "This "); } test "read a file, invalid/not opened/crafted *const Node" { var ramdisk_bytes = try createInitrd(std.testing.allocator); defer std.testing.allocator.free(ramdisk_bytes); var initrd_stream = std.io.fixedBufferStream(ramdisk_bytes); var fs = try InitrdFS.init(&initrd_stream, std.testing.allocator); defer fs.deinit(); try vfs.setRoot(fs.root_node); // Open a valid file var file1 = try vfs.openFile("/test1.txt", .NO_CREATION); defer file1.close(); // Only one file open try expectEqual(fs.opened_files.count(), 1); // Craft a Node var fake_node = try 
std.testing.allocator.create(vfs.Node); defer std.testing.allocator.destroy(fake_node); fake_node.* = .{ .File = .{ .fs = fs.fs } }; var unused: [1]u8 = undefined; try expectError(error.NotOpened, fake_node.File.read(unused[0..unused.len])); // Still only one file open try expectEqual(fs.opened_files.count(), 1); } test "write does nothing" { var ramdisk_bytes = try createInitrd(std.testing.allocator); defer std.testing.allocator.free(ramdisk_bytes); var initrd_stream = std.io.fixedBufferStream(ramdisk_bytes); var fs = try InitrdFS.init(&initrd_stream, std.testing.allocator); defer fs.deinit(); try vfs.setRoot(fs.root_node); // Open a valid file var file1 = try vfs.openFile("/test1.txt", .NO_CREATION); defer file1.close(); try expectEqual(@as(usize, 0), try file1.write("Blah")); // Unchanged file content try expectEqualSlices(u8, fs.opened_files.get(@ptrCast(*const vfs.Node, file1)).?.content, "This is a test"); } /// See std.testing.expectEqualSlices. As need our panic. fn expectEqualSlicesClone(comptime T: type, expected: []const T, actual: []const T) void { if (expected.len != actual.len) { panic(@errorReturnTrace(), "slice lengths differ. expected {}, found {}", .{ expected.len, actual.len }); } var i: usize = 0; while (i < expected.len) : (i += 1) { if (!std.meta.eql(expected[i], actual[i])) { panic(@errorReturnTrace(), "index {} incorrect. 
expected {}, found {}", .{ i, expected[i], actual[i] }); } } } /// /// Test that we can open, read and close a file /// fn rt_openReadClose() void { const f1 = vfs.openFile("/ramdisk_test1.txt", .NO_CREATION) catch |e| { panic(@errorReturnTrace(), "FAILURE: Failed to open file: {}\n", .{e}); }; var bytes1: [128]u8 = undefined; const length1 = f1.read(bytes1[0..bytes1.len]) catch |e| { panic(@errorReturnTrace(), "FAILURE: Failed to read file: {}\n", .{e}); }; defer f1.close(); expectEqualSlicesClone(u8, bytes1[0..length1], "Testing ram disk"); const f2 = vfs.openFile("/ramdisk_test2.txt", .NO_CREATION) catch |e| { panic(@errorReturnTrace(), "Failed to open file: {}\n", .{e}); }; var bytes2: [128]u8 = undefined; const length2 = f2.read(bytes2[0..bytes2.len]) catch |e| { panic(@errorReturnTrace(), "FAILURE: Failed to read file: {}\n", .{e}); }; defer f2.close(); expectEqualSlicesClone(u8, bytes2[0..length2], "Testing ram disk for the second time"); // Try open a non-existent file _ = vfs.openFile("/nope.txt", .NO_CREATION) catch |e| switch (e) { error.NoSuchFileOrDir => {}, else => panic(@errorReturnTrace(), "FAILURE: Expected error\n", .{}), }; log.info("Opened, read and closed\n", .{}); } /// /// The ramdisk runtime tests that will test the ramdisks functionality. /// /// Arguments: /// IN rd_fs: *InitrdFS - The initialised ramdisk to play with. /// fn runtimeTests(rd_fs: *InitrdFS) void { // There will be test files provided for the runtime tests // Need to init the VFS. This will be overridden after the tests. vfs.setRoot(rd_fs.root_node) catch |e| { panic(@errorReturnTrace(), "Ramdisk root node isn't a directory node: {}\n", .{e}); }; rt_openReadClose(); if (rd_fs.opened_files.count() != 0) { panic(@errorReturnTrace(), "FAILURE: Didn't close all files\n", .{}); } }
0
repos/pluto/src/kernel/arch
repos/pluto/src/kernel/arch/x86/boot.zig
const arch = @import("arch.zig"); /// The multiboot header const MultiBoot = packed struct { magic: i32, flags: i32, checksum: i32, }; const ALIGN = 1 << 0; const MEMINFO = 1 << 1; const MAGIC = 0x1BADB002; const FLAGS = ALIGN | MEMINFO; const KERNEL_PAGE_NUMBER = 0xC0000000 >> 22; // The number of pages occupied by the kernel, will need to be increased as we add a heap etc. const KERNEL_NUM_PAGES = 1; export var multiboot align(4) linksection(".rodata.boot") = MultiBoot{ .magic = MAGIC, .flags = FLAGS, .checksum = -(MAGIC + FLAGS), }; // The initial page directory used for booting into the higher half. Should be overwritten later export var boot_page_directory: [1024]u32 align(4096) linksection(".rodata.boot") = init: { // Increase max number of branches done by comptime evaluator @setEvalBranchQuota(1024); // Temp value var dir: [1024]u32 = undefined; // Page for 0 -> 4 MiB. Gets unmapped later dir[0] = 0x00000083; var i = 0; var idx = 1; // Fill preceding pages with zeroes. May be unnecessary but incurs no runtime cost while (i < KERNEL_PAGE_NUMBER - 1) : ({ i += 1; idx += 1; }) { dir[idx] = 0; } // Map the kernel's higher half pages increasing by 4 MiB every time i = 0; while (i < KERNEL_NUM_PAGES) : ({ i += 1; idx += 1; }) { dir[idx] = 0x00000083 | (i << 22); } // Fill succeeding pages with zeroes. 
May be unnecessary but incurs no runtime cost i = 0; while (i < 1024 - KERNEL_PAGE_NUMBER - KERNEL_NUM_PAGES) : ({ i += 1; idx += 1; }) { dir[idx] = 0; } break :init dir; }; export var kernel_stack: [16 * 1024]u8 align(16) linksection(".bss.stack") = undefined; extern var KERNEL_ADDR_OFFSET: *u32; extern fn kmain(mb_info: arch.BootPayload) void; export fn _start() align(16) linksection(".text.boot") callconv(.Naked) noreturn { // Set the page directory to the boot directory asm volatile ( \\.extern boot_page_directory \\mov $boot_page_directory, %%ecx \\mov %%ecx, %%cr3 ); // Enable 4 MiB pages asm volatile ( \\mov %%cr4, %%ecx \\or $0x00000010, %%ecx \\mov %%ecx, %%cr4 ); // Enable paging asm volatile ( \\mov %%cr0, %%ecx \\or $0x80000000, %%ecx \\mov %%ecx, %%cr0 ); asm volatile ("jmp start_higher_half"); while (true) {} } export fn start_higher_half() callconv(.Naked) noreturn { // Invalidate the page for the first 4MiB as it's no longer needed asm volatile ("invlpg (0)"); // Setup the stack asm volatile ( \\.extern KERNEL_STACK_END \\mov $KERNEL_STACK_END, %%esp \\sub $32, %%esp \\mov %%esp, %%ebp ); // Get the multiboot header address and add the virtual offset const mb_info_addr = asm ( \\mov %%ebx, %[res] : [res] "=r" (-> usize), ) + @ptrToInt(&KERNEL_ADDR_OFFSET); kmain(@intToPtr(arch.BootPayload, mb_info_addr)); while (true) {} }
0
repos/pluto/src/kernel/arch
repos/pluto/src/kernel/arch/x86/interrupts.zig
const arch = @import("arch.zig"); const syscalls = @import("syscalls.zig"); const irq = @import("irq.zig"); const idt = @import("idt.zig"); extern fn irqHandler(ctx: *arch.CpuState) usize; extern fn isrHandler(ctx: *arch.CpuState) usize; /// /// The main handler for all exceptions and interrupts. This will then go and call the correct /// handler for an ISR or IRQ. /// /// Arguments: /// IN ctx: *arch.CpuState - Pointer to the exception context containing the contents /// of the registers at the time of a exception. /// export fn handler(ctx: *arch.CpuState) usize { if (ctx.int_num < irq.IRQ_OFFSET or ctx.int_num == syscalls.INTERRUPT) { return isrHandler(ctx); } else { return irqHandler(ctx); } } /// /// The common assembly that all exceptions and interrupts will call. /// export fn commonStub() callconv(.Naked) void { asm volatile ( \\pusha \\push %%ds \\push %%es \\push %%fs \\push %%gs \\mov %%cr3, %%eax \\push %%eax \\mov $0x10, %%ax \\mov %%ax, %%ds \\mov %%ax, %%es \\mov %%ax, %%fs \\mov %%ax, %%gs \\mov %%esp, %%eax \\push %%eax \\call handler \\mov %%eax, %%esp ); // Pop off the new cr3 then check if it's the same as the previous cr3 // If so don't change cr3 to avoid a TLB flush asm volatile ( \\pop %%eax \\mov %%cr3, %%ebx \\cmp %%eax, %%ebx \\je same_cr3 \\mov %%eax, %%cr3 \\same_cr3: \\pop %%gs \\pop %%fs \\pop %%es \\pop %%ds \\popa ); // The Tss.esp0 value is the stack pointer used when an interrupt occurs. This should be the current process' stack pointer // So skip the rest of the CpuState, set Tss.esp0 then un-skip the last few fields of the CpuState asm volatile ( \\add $0x1C, %%esp \\.extern main_tss_entry \\mov %%esp, (main_tss_entry + 4) \\sub $0x14, %%esp \\iret ); } /// /// Generate the function that is the entry point for each exception/interrupt. This will then be /// used as the handler for the corresponding IDT entry. /// /// Arguments: /// IN interrupt_num: u32 - The interrupt number to generate the function for. 
/// /// Return: idt.InterruptHandler /// The stub function that is called for each interrupt/exception. /// pub fn getInterruptStub(comptime interrupt_num: u32) idt.InterruptHandler { return struct { fn func() callconv(.Naked) void { asm volatile ( \\ cli ); // These interrupts don't push an error code onto the stack, so will push a zero. if (interrupt_num != 8 and !(interrupt_num >= 10 and interrupt_num <= 14) and interrupt_num != 17) { asm volatile ( \\ pushl $0 ); } asm volatile ( \\ pushl %[nr] \\ jmp commonStub : : [nr] "n" (interrupt_num), ); } }.func; }
0
repos/pluto/src/kernel/arch
repos/pluto/src/kernel/arch/x86/tty.zig
const std = @import("std"); const fmt = std.fmt; const builtin = @import("builtin"); const is_test = builtin.is_test; const expect = std.testing.expect; const expectEqual = std.testing.expectEqual; const expectError = std.testing.expectError; const log = std.log.scoped(.x86_tty); const build_options = @import("build_options"); const vga = if (is_test) @import("../../../../test/mock/kernel/vga_mock.zig") else @import("vga.zig"); const panic = @import("../../panic.zig").panic; /// The error set for if there is an error whiles printing. const TtyError = error{ /// If the printing tries to print outside the video buffer. OutOfBounds, }; /// The number of rows down from the top (row 0) where the displayable region starts. Above is /// where the logo and time is printed const ROW_MIN: u16 = 7; /// The total number of rows in the displayable region const ROW_TOTAL: u16 = vga.HEIGHT - ROW_MIN; /// The total number of pages (static) that the terminal will remember. In the future, this can /// move to a more dynamic allocation when a kheap is implemented. const TOTAL_NUM_PAGES: u16 = 5; /// The total number of VGA (or characters) elements are on a page const TOTAL_CHAR_ON_PAGE: u16 = vga.WIDTH * ROW_TOTAL; /// The start of the displayable region in the video buffer memory const START_OF_DISPLAYABLE_REGION: u16 = vga.WIDTH * ROW_MIN; /// The total number of VGA elements (or characters) the video buffer can display const VIDEO_BUFFER_SIZE: u16 = vga.WIDTH * vga.HEIGHT; /// The location of the kernel in virtual memory so can calculate the address of the VGA buffer extern var KERNEL_ADDR_OFFSET: *u32; /// The current x position of the cursor. var column: u8 = 0; /// The current y position of the cursor. var row: u8 = 0; /// The current colour of the display with foreground and background colour. var colour: u8 = undefined; /// The buffer starting from the beginning of the video memory location that contains all data /// written to the display. 
var video_buffer: []volatile u16 = undefined; /// The blank VGA entry to be used to clear the screen. var blank: u16 = undefined; /// A total of TOTAL_NUM_PAGES pages that can be saved and restored to from and to the video buffer var pages: [TOTAL_NUM_PAGES][TOTAL_CHAR_ON_PAGE]u16 = init: { var p: [TOTAL_NUM_PAGES][TOTAL_CHAR_ON_PAGE]u16 = undefined; for (p) |*page| { page.* = [_]u16{0} ** TOTAL_CHAR_ON_PAGE; } break :init p; }; /// The current page index. var page_index: u8 = 0; /// /// Copies data into the video buffer. This is used for copying a page into the video buffer. /// /// Arguments: /// IN video_buf_offset: u16 - The offset into the video buffer to start copying to. /// IN data: []const u16 - The data to copy into the video buffer. /// IN size: u16 - The amount to copy. /// /// Errors: TtyError /// TtyError.OutOfBounds - If offset or the size to copy is greater than the size of the /// video buffer or data to copy. /// fn videoCopy(video_buf_offset: u16, data: []const u16, size: u16) TtyError!void { // Secure programming ;) if (video_buf_offset >= video_buffer.len and size > video_buffer.len - video_buf_offset and size > data.len) { return TtyError.OutOfBounds; } var i: u32 = 0; while (i < size) : (i += 1) { video_buffer[video_buf_offset + i] = data[i]; } } /// /// Moves data with a page without overriding itself. /// /// Arguments: /// IN dest: []u16 - The destination position to copy into. /// IN src: []u16 - The source position to copy from. /// IN size: u16 - The amount to copy. /// /// Errors: /// TtyError.OutOfBounds - If the size to copy is greater than the size of the pages. 
/// fn pageMove(dest: []u16, src: []u16, size: u16) TtyError!void { if (dest.len < size or src.len < size) { return TtyError.OutOfBounds; } // Not an error if size is zero, nothing will be copied if (size == 0) return; // Make sure we don't override the values we want to copy if (@ptrToInt(&dest[0]) < @ptrToInt(&src[0])) { var i: u16 = 0; while (i != size) : (i += 1) { dest[i] = src[i]; } } else { var i = size; while (i != 0) { i -= 1; dest[i] = src[i]; } } } /// /// Clears a region of the video buffer to a VGA entry from the beginning. /// /// Arguments: /// IN c: u16 - VGA entry to set the video buffer to. /// IN size: u16 - The number to VGA entries to set from the beginning of the video buffer. /// /// Errors: /// TtyError.OutOfBounds - If the size to copy is greater than the size of the video buffer. /// fn setVideoBuffer(c: u16, size: u16) TtyError!void { if (size > VIDEO_BUFFER_SIZE) { return TtyError.OutOfBounds; } for (video_buffer[0..size]) |*b| { b.* = c; } } /// /// Updated the hardware cursor to the current column and row (x, y). /// fn updateCursor() void { vga.updateCursor(column, row); } /// /// Get the hardware cursor and set the current column and row (x, y). /// fn getCursor() void { const cursor = vga.getCursor(); row = @truncate(u8, cursor / vga.WIDTH); column = @truncate(u8, cursor % vga.WIDTH); } /// /// Put a character at a specific column and row position on the screen. This will use the current /// colour. /// /// Arguments: /// IN char: u8 - The character to print. This will be combined with the current colour. /// IN x: u8 - The x position (column) to put the character at. /// IN y: u8 - The y position (row) to put the character at. /// /// Errors: /// TtyError.OutOfBounds - If trying to print outside the video buffer. 
/// fn putEntryAt(char: u8, x: u8, y: u8) TtyError!void { const index = y * vga.WIDTH + x; // Bounds check if (index >= VIDEO_BUFFER_SIZE) { return TtyError.OutOfBounds; } const char_entry = vga.entry(char, colour); if (index >= START_OF_DISPLAYABLE_REGION) { // If not at page zero, (bottom of page), then display that page // The user has move up a number of pages and then typed a letter, so need to move to the // 0'th page if (page_index != 0) { // This isn't out of bounds page_index = 0; try videoCopy(START_OF_DISPLAYABLE_REGION, pages[page_index][0..TOTAL_CHAR_ON_PAGE], TOTAL_CHAR_ON_PAGE); // If not on page 0, then the cursor would have been disabled vga.enableCursor(); updateCursor(); } pages[page_index][index - START_OF_DISPLAYABLE_REGION] = char_entry; } video_buffer[index] = char_entry; } /// /// Move rows up pages across multiple pages leaving the last rows blank. /// /// Arguments: /// IN rows: u16 - The number of rows to move up. /// /// Errors: /// TtyError.OutOfBounds - If trying to move up more rows on a page. /// fn pagesMoveRowsUp(rows: u16) TtyError!void { // Out of bounds check if (rows > ROW_TOTAL) { return TtyError.OutOfBounds; } // Not an error to move 0 rows, but is pointless if (rows == 0) return; // Move up rows in last page up by "rows" const row_length = rows * vga.WIDTH; const chars_to_move = (ROW_TOTAL - rows) * vga.WIDTH; try pageMove(pages[TOTAL_NUM_PAGES - 1][0..chars_to_move], pages[TOTAL_NUM_PAGES - 1][row_length..], chars_to_move); // Loop for the other pages var i = TOTAL_NUM_PAGES - 1; while (i > 0) : (i -= 1) { try pageMove(pages[i][chars_to_move..], pages[i - 1][0..row_length], row_length); try pageMove(pages[i - 1][0..chars_to_move], pages[i - 1][row_length..], chars_to_move); } // Clear the last lines for (pages[0][chars_to_move..]) |*p| { p.* = blank; } } /// /// When the text/terminal gets to the bottom of the screen, then move all line up by the amount /// that are below the bottom of the screen. 
Usually moves up by one line. /// /// Errors: /// TtyError.OutOfBounds - If trying to move up more rows on a page. This shouldn't happen /// as bounds checks have been done. /// fn scroll() void { // Added the condition in the if from pagesMoveRowsUp as don't need to move all rows if (row >= vga.HEIGHT and (row - vga.HEIGHT + 1) <= ROW_TOTAL) { const rows_to_move = row - vga.HEIGHT + 1; // Move rows up pages by temp, will usually be one. // TODO: Maybe panic here as we have the check above, so if this fails, then is a big problem pagesMoveRowsUp(rows_to_move) catch { panic(@errorReturnTrace(), "Can't move {} rows up. Must be less than {}\n", .{ rows_to_move, ROW_TOTAL }); }; // Move all rows up by rows_to_move var i: u32 = 0; while (i < (ROW_TOTAL - rows_to_move) * vga.WIDTH) : (i += 1) { video_buffer[START_OF_DISPLAYABLE_REGION + i] = video_buffer[(rows_to_move * vga.WIDTH) + START_OF_DISPLAYABLE_REGION + i]; } // Set the last rows to blanks i = 0; while (i < vga.WIDTH * rows_to_move) : (i += 1) { video_buffer[(vga.HEIGHT - rows_to_move) * vga.WIDTH + i] = blank; } row = vga.HEIGHT - 1; } } /// /// Print a character without updating the cursor. For speed when printing a string as only need to /// update the cursor once. This will also print the special characters: \n, \r, \t and \b. (\b is /// not a valid character so use \x08 which is the hex value). /// /// Arguments: /// IN char: u8 - The character to print. /// /// Errors: /// TtyError.OutOfBounds - If trying to scroll more rows on a page/displayable region or /// print beyond the video buffer. 
/// fn putChar(char: u8) TtyError!void { const column_temp = column; const row_temp = row; // If there was an error, then set the row and column back to where is was // Like nothing happened errdefer column = column_temp; errdefer row = row_temp; switch (char) { '\n' => { column = 0; row += 1; scroll(); }, '\t' => { column += 4; if (column >= vga.WIDTH) { column -= @truncate(u8, vga.WIDTH); row += 1; scroll(); } }, '\r' => { column = 0; }, // \b '\x08' => { if (column == 0) { if (row != 0) { column = vga.WIDTH - 1; row -= 1; } } else { column -= 1; } }, else => { try putEntryAt(char, column, row); column += 1; if (column == vga.WIDTH) { column = 0; row += 1; scroll(); } }, } } /// /// Set the TTY cursor position to a row and column /// /// Arguments: /// IN r: u8 - The row to set it to /// IN col: u8 - The column to set it to /// pub fn setCursor(r: u8, col: u8) void { column = col; row = r; updateCursor(); } /// /// Print a string to the TTY. This also updates to hardware cursor. /// /// Arguments: /// IN str: []const u8 - The string to print. /// /// Errors: /// TtyError.OutOfBounds - If trying to print beyond the video buffer. /// pub fn writeString(str: []const u8) TtyError!void { // Make sure we update the cursor to the last character defer updateCursor(); for (str) |char| { try putChar(char); } } /// /// Move up a page. This will copy the page above to the video buffer. Will keep trace of which /// page is being displayed. /// pub fn pageUp() void { if (page_index < TOTAL_NUM_PAGES - 1) { // Copy page to display page_index += 1; // Bounds have been checked, so shouldn't error videoCopy(START_OF_DISPLAYABLE_REGION, pages[page_index][0..TOTAL_CHAR_ON_PAGE], TOTAL_CHAR_ON_PAGE) catch |e| { log.err("Error moving page up. Error: {}\n", .{e}); }; vga.disableCursor(); } } /// /// Move down a page. This will copy the page bellow to the video buffer. Will keep trace of which /// page is being displayed. 
/// pub fn pageDown() void { if (page_index > 0) { // Copy page to display page_index -= 1; // Bounds have been checked, so shouldn't error videoCopy(START_OF_DISPLAYABLE_REGION, pages[page_index][0..TOTAL_CHAR_ON_PAGE], TOTAL_CHAR_ON_PAGE) catch |e| { log.err("Error moving page down. Error: {}\n", .{e}); }; if (page_index == 0) { vga.enableCursor(); updateCursor(); } else { vga.disableCursor(); } } } /// /// This clears the entire screen with blanks using the current colour. This will also save the /// screen to the pages so can scroll back down. /// pub fn clearScreen() void { // Move all the rows up // This is within bounds, so shouldn't error pagesMoveRowsUp(ROW_TOTAL) catch |e| { log.err("Error moving all pages up. Error: {}\n", .{e}); }; // Clear the screen var i: u16 = START_OF_DISPLAYABLE_REGION; while (i < VIDEO_BUFFER_SIZE) : (i += 1) { video_buffer[i] = blank; } // Set the cursor to below the logo column = 0; row = ROW_MIN; updateCursor(); } /// /// This moves the software and hardware cursor to the left by one. /// pub fn moveCursorLeft() void { if (column == 0) { if (row != 0) { column = vga.WIDTH - 1; row -= 1; } } else { column -= 1; } updateCursor(); } /// /// This moves the software and hardware cursor to the right by one. /// pub fn moveCursorRight() void { if (column == (vga.WIDTH - 1)) { if (row != (vga.HEIGHT - 1)) { column = 0; row += 1; } } else { column += 1; } updateCursor(); } /// /// This will set a new colour for the screen. It will only become effective when printing new /// characters. Use vga.colourEntry and the colour enums to set the colour. /// /// Arguments: /// IN new_colour: u8 - The new foreground and background colour of the screen. /// pub fn setColour(new_colour: u8) void { colour = new_colour; blank = vga.entry(0, colour); } /// /// Gets the video buffer's virtual address. 
/// /// Return: usize /// The virtual address of the video buffer /// pub fn getVideoBufferAddress() usize { return @ptrToInt(&KERNEL_ADDR_OFFSET) + 0xB8000; } /// /// Initialise the tty. This will keep the bootloaders output and set the software cursor to where /// the bootloader left it. Will copy the current screen to the pages, set the colour and blank /// entry, print the logo and display the 0'th page. /// pub fn init() void { // Video buffer in higher half if (is_test) { video_buffer = @intToPtr([*]volatile u16, mock_getVideoBufferAddress())[0..VIDEO_BUFFER_SIZE]; } else { video_buffer = @intToPtr([*]volatile u16, getVideoBufferAddress())[0..VIDEO_BUFFER_SIZE]; } setColour(vga.entryColour(vga.COLOUR_LIGHT_GREY, vga.COLOUR_BLACK)); // Enable and get the hardware cursor to set the software cursor vga.enableCursor(); getCursor(); if (row != 0 or column != 0) { // Copy rows 7 down to make room for logo // If there isn't enough room, only take the bottom rows var row_offset: u16 = 0; if (vga.HEIGHT - 1 - row < ROW_MIN) { row_offset = ROW_MIN - (vga.HEIGHT - 1 - row); } // Make a copy into the pages // Assuming that there is only one page var i: u16 = 0; while (i < row * vga.WIDTH) : (i += 1) { pages[0][i] = video_buffer[i]; } // Move 7 rows down i = 0; if (@ptrToInt(&video_buffer[ROW_MIN * vga.WIDTH]) < @ptrToInt(&video_buffer[row_offset * vga.WIDTH])) { while (i != row * vga.WIDTH) : (i += 1) { video_buffer[i + (ROW_MIN * vga.WIDTH)] = video_buffer[i + (row_offset * vga.WIDTH)]; } } else { i = row * vga.WIDTH; while (i != 0) { i -= 1; video_buffer[i + (ROW_MIN * vga.WIDTH)] = video_buffer[i + (row_offset * vga.WIDTH)]; } } // Set the top 7 rows blank setVideoBuffer(blank, START_OF_DISPLAYABLE_REGION) catch |e| { log.err("Error clearing the top 7 rows. Error: {}\n", .{e}); }; row += @truncate(u8, row_offset + ROW_MIN); } else { // Clear the screen setVideoBuffer(blank, VIDEO_BUFFER_SIZE) catch |e| { log.err("Error clearing the screen. 
Error: {}\n", .{e}); }; // Set the row to below the logo row = ROW_MIN; } switch (build_options.test_mode) { .Initialisation => runtimeTests(), else => {}, } } const test_colour: u8 = vga.orig_entryColour(vga.COLOUR_LIGHT_GREY, vga.COLOUR_BLACK); var test_video_buffer: [VIDEO_BUFFER_SIZE]u16 = [_]u16{0} ** VIDEO_BUFFER_SIZE; fn mock_getVideoBufferAddress() usize { return @ptrToInt(&test_video_buffer); } fn resetGlobals() void { column = 0; row = 0; page_index = 0; colour = undefined; video_buffer = undefined; blank = undefined; pages = init: { var p: [TOTAL_NUM_PAGES][TOTAL_CHAR_ON_PAGE]u16 = undefined; for (p) |*page| { page.* = [_]u16{0} ** TOTAL_CHAR_ON_PAGE; } break :init p; }; } fn setUpVideoBuffer() !void { // Change to a stack location video_buffer = test_video_buffer[0..VIDEO_BUFFER_SIZE]; try expectEqual(@ptrToInt(video_buffer.ptr), @ptrToInt(&test_video_buffer[0])); colour = test_colour; blank = vga.orig_entry(0, test_colour); } fn setVideoBufferBlankPages() !void { try setUpVideoBuffer(); for (video_buffer) |*b| { b.* = blank; } setPagesBlank(); } fn setVideoBufferIncrementingBlankPages() !void { try setUpVideoBuffer(); for (video_buffer) |*b, i| { b.* = @intCast(u16, i); } setPagesBlank(); } fn setPagesBlank() void { for (pages) |*p_i| { for (p_i) |*p_j| { p_j.* = blank; } } } fn setPagesIncrementing() void { for (pages) |*p_i, i| { for (p_i) |*p_j, j| { p_j.* = @intCast(u16, i) * TOTAL_CHAR_ON_PAGE + @intCast(u16, j); } } } fn defaultVariablesTesting(p_i: u8, r: u8, c: u8) !void { try expectEqual(test_colour, colour); try expectEqual(@as(u16, test_colour) << 8, blank); try expectEqual(p_i, page_index); try expectEqual(r, row); try expectEqual(c, column); } fn incrementingPagesTesting() !void { for (pages) |p_i, i| { for (p_i) |p_j, j| { try expectEqual(i * TOTAL_CHAR_ON_PAGE + j, p_j); } } } fn blankPagesTesting() !void { for (pages) |p_i| { for (p_i) |p_j| { try expectEqual(blank, p_j); } } } fn incrementingVideoBufferTesting() !void { var i: u32 = 0; 
while (i < VIDEO_BUFFER_SIZE) : (i += 1) { const b = video_buffer[i]; try expectEqual(i, b); } } fn defaultVideoBufferTesting() !void { var i: u32 = 0; while (i < VIDEO_BUFFER_SIZE) : (i += 1) { const b = video_buffer[i]; try expectEqual(vga.orig_entry(0, test_colour), b); } } fn defaultAllTesting(p_i: u8, r: u8, c: u8) !void { try defaultVariablesTesting(p_i, r, c); try blankPagesTesting(); try defaultVideoBufferTesting(); } test "updateCursor" { // Set up try setVideoBufferBlankPages(); // Mocking out the vga.updateCursor call for updating the hardware cursor vga.initTest(); defer vga.freeTest(); vga.addTestParams("updateCursor", .{ @as(u16, 0), @as(u16, 0) }); // Pre testing try defaultAllTesting(0, 0, 0); // Call function updateCursor(); // Post test try defaultAllTesting(0, 0, 0); // Tear down resetGlobals(); } test "getCursor zero" { // Set up try setVideoBufferBlankPages(); // Mocking out the vga.getCursor call for getting the hardware cursor vga.initTest(); defer vga.freeTest(); vga.addTestParams("getCursor", .{@as(u16, 0)}); // Pre testing try defaultAllTesting(0, 0, 0); // Call function getCursor(); // Post test try defaultAllTesting(0, 0, 0); // Tear down resetGlobals(); } test "getCursor EEF" { // Set up try setVideoBufferBlankPages(); // Mocking out the vga.getCursor call for getting the hardware cursor vga.initTest(); defer vga.freeTest(); vga.addTestParams("getCursor", .{@as(u16, 0x0EEF)}); // Pre testing try defaultAllTesting(0, 0, 0); // Call function getCursor(); // Post test try defaultAllTesting(0, 47, 63); // Tear down resetGlobals(); } test "putEntryAt out of bounds" { // Set up try setVideoBufferBlankPages(); // Pre testing try defaultAllTesting(0, 0, 0); // Call function try expectError(TtyError.OutOfBounds, putEntryAt('A', 100, 100)); // Post test try defaultAllTesting(0, 0, 0); // Tear down resetGlobals(); } test "putEntryAt not in displayable region" { // Set up try setVideoBufferBlankPages(); // Mocking out the vga calls vga.initTest(); 
defer vga.freeTest(); vga.addRepeatFunction("entry", vga.orig_entry); vga.addRepeatFunction("updateCursor", vga.mock_updateCursor); // Enable and update cursor is only called once, can can use the consume function call //vga.addConsumeFunction("enableCursor", vga.mock_enableCursor); // Pre testing try defaultAllTesting(0, 0, 0); // Call function const x = 0; const y = 0; const char = 'A'; try putEntryAt(char, x, y); // Post test try defaultVariablesTesting(0, 0, 0); try blankPagesTesting(); var i: u32 = 0; while (i < VIDEO_BUFFER_SIZE) : (i += 1) { const b = video_buffer[i]; if (i == y * vga.WIDTH + x) { try expectEqual(vga.orig_entry(char, test_colour), b); } else { try expectEqual(vga.orig_entry(0, test_colour), b); } } // Tear down resetGlobals(); } test "putEntryAt in displayable region page_index is 0" { // Set up try setVideoBufferBlankPages(); // Mocking out the vga calls vga.initTest(); defer vga.freeTest(); vga.addRepeatFunction("entry", vga.orig_entry); vga.addRepeatFunction("updateCursor", vga.mock_updateCursor); // Pre testing try defaultAllTesting(0, 0, 0); // Call function const x = 0; const y = ROW_MIN; const char = 'A'; try putEntryAt(char, x, y); // Post test try defaultVariablesTesting(0, 0, 0); for (pages) |page, i| { for (page) |c, j| { if (i == page_index and (j == (y * vga.WIDTH + x) - START_OF_DISPLAYABLE_REGION)) { try expectEqual(vga.orig_entry(char, test_colour), c); } else { try expectEqual(blank, c); } } } var i: u32 = 0; while (i < VIDEO_BUFFER_SIZE) : (i += 1) { const b = video_buffer[i]; if (i == y * vga.WIDTH + x) { try expectEqual(vga.orig_entry(char, test_colour), b); } else { try expectEqual(vga.orig_entry(0, test_colour), b); } } // Tear down resetGlobals(); } test "putEntryAt in displayable region page_index is not 0" { // Set up // Mocking out the vga calls vga.initTest(); defer vga.freeTest(); vga.addRepeatFunction("entry", vga.orig_entry); vga.addRepeatFunction("updateCursor", vga.mock_updateCursor); // Enable and update 
cursor is only called once, can can use the consume function call vga.addConsumeFunction("enableCursor", vga.mock_enableCursor); try setVideoBufferBlankPages(); // Fill the 1'nd page (index 1) will all 1's const ones = vga.orig_entry('1', test_colour); for (pages) |*page, i| { for (page) |*char| { if (i == 0) { char.* = ones; } else { char.* = 0; } } } page_index = 1; // Pre testing try defaultVariablesTesting(1, 0, 0); try defaultVideoBufferTesting(); for (pages) |page, i| { for (page) |char| { if (i == 0) { try expectEqual(ones, char); } else { try expectEqual(@as(u16, 0), char); } } } // Call function const x = 0; const y = ROW_MIN; const char = 'A'; try putEntryAt(char, x, y); // Post test try defaultVariablesTesting(0, 0, 0); // Print page number const text = "Page 0 of 4"; const column_temp = column; const row_temp = row; column = @truncate(u8, vga.WIDTH) - @truncate(u8, text.len); row = ROW_MIN - 1; writeString(text) catch |e| { log.err("Unable to print page number, printing out of bounds. 
Error: {}\n", .{e}); }; column = column_temp; row = row_temp; for (pages) |page, i| { for (page) |c, j| { if (i == 0 and j == 0) { try expectEqual(vga.orig_entry(char, test_colour), c); } else if (i == 0) { try expectEqual(ones, c); } else { try expectEqual(@as(u16, 0), c); } } } // The top 7 rows won't be copied var i: u32 = 0; while (i < VIDEO_BUFFER_SIZE) : (i += 1) { const b = video_buffer[i]; if (i < START_OF_DISPLAYABLE_REGION - 11) { try expectEqual(blank, b); } else if (i < START_OF_DISPLAYABLE_REGION) { try expectEqual(vga.orig_entry(text[i + 11 - START_OF_DISPLAYABLE_REGION], colour), b); } else if (i == y * vga.WIDTH + x) { try expectEqual(vga.orig_entry(char, test_colour), b); } else { try expectEqual(ones, b); } } // Tear down resetGlobals(); } test "pagesMoveRowsUp out of bounds" { // Set up try setVideoBufferBlankPages(); setPagesIncrementing(); // Pre testing try defaultVariablesTesting(0, 0, 0); try defaultVideoBufferTesting(); try incrementingPagesTesting(); // Call function const rows_to_move = ROW_TOTAL + 1; try expectError(TtyError.OutOfBounds, pagesMoveRowsUp(rows_to_move)); // Post test try defaultVariablesTesting(0, 0, 0); try defaultVideoBufferTesting(); try incrementingPagesTesting(); // Tear down resetGlobals(); } test "pagesMoveRowsUp 0 rows" { // Set up try setVideoBufferBlankPages(); setPagesIncrementing(); // Pre testing try defaultVariablesTesting(0, 0, 0); try defaultVideoBufferTesting(); try incrementingPagesTesting(); // Call function const rows_to_move = 0; try pagesMoveRowsUp(rows_to_move); // Post test try defaultVariablesTesting(0, 0, 0); try defaultVideoBufferTesting(); try incrementingPagesTesting(); // Tear down resetGlobals(); } test "pagesMoveRowsUp 1 rows" { // Set up try setVideoBufferBlankPages(); setPagesIncrementing(); // Pre testing try defaultVariablesTesting(0, 0, 0); try defaultVideoBufferTesting(); try incrementingPagesTesting(); // Call function const rows_to_move = 1; try pagesMoveRowsUp(rows_to_move); // Post 
test try defaultVariablesTesting(0, 0, 0); try defaultVideoBufferTesting(); const to_add = rows_to_move * vga.WIDTH; for (pages) |page, i| { for (page) |c, j| { if (j >= TOTAL_CHAR_ON_PAGE - to_add) { if (i == 0) { // The last rows will be blanks try expectEqual(blank, c); } else { try expectEqual((i - 1) * TOTAL_CHAR_ON_PAGE + (j + to_add - TOTAL_CHAR_ON_PAGE), c); } } else { // All rows moved up one, so add vga.WIDTH try expectEqual(i * TOTAL_CHAR_ON_PAGE + j + to_add, c); } } } // Tear down resetGlobals(); } test "pagesMoveRowsUp ROW_TOTAL - 1 rows" { // Set up try setVideoBufferBlankPages(); setPagesIncrementing(); // Pre testing try defaultVariablesTesting(0, 0, 0); try defaultVideoBufferTesting(); try incrementingPagesTesting(); // Call function const rows_to_move = ROW_TOTAL - 1; try pagesMoveRowsUp(rows_to_move); // Post test try defaultVariablesTesting(0, 0, 0); try defaultVideoBufferTesting(); const to_add = rows_to_move * vga.WIDTH; for (pages) |page, i| { for (page) |c, j| { if (j >= TOTAL_CHAR_ON_PAGE - to_add) { if (i == 0) { // The last rows will be blanks try expectEqual(blank, c); } else { try expectEqual((i - 1) * TOTAL_CHAR_ON_PAGE + (j + to_add - TOTAL_CHAR_ON_PAGE), c); } } else { // All rows moved up one, so add vga.WIDTH try expectEqual(i * TOTAL_CHAR_ON_PAGE + j + to_add, c); } } } // Tear down resetGlobals(); } test "pagesMoveRowsUp ROW_TOTAL rows" { // Set up try setVideoBufferBlankPages(); setPagesIncrementing(); // Pre testing try defaultVariablesTesting(0, 0, 0); try defaultVideoBufferTesting(); try incrementingPagesTesting(); // Call function const rows_to_move = ROW_TOTAL; try pagesMoveRowsUp(rows_to_move); // Post test try defaultVariablesTesting(0, 0, 0); try defaultVideoBufferTesting(); for (pages) |page, i| { for (page) |c, j| { if (i == 0) { // The last rows will be blanks try expectEqual(blank, c); } else { try expectEqual((i - 1) * TOTAL_CHAR_ON_PAGE + j, c); } } } // Tear down resetGlobals(); } test "scroll row is less then 
max height" { // Set up try setVideoBufferBlankPages(); setPagesIncrementing(); // Pre testing try defaultVariablesTesting(0, 0, 0); try defaultVideoBufferTesting(); try incrementingPagesTesting(); // Call function scroll(); // Post test try defaultVariablesTesting(0, 0, 0); try defaultVideoBufferTesting(); try incrementingPagesTesting(); // Tear down resetGlobals(); } test "scroll row is equal to height" { // Set up try setVideoBufferIncrementingBlankPages(); setPagesIncrementing(); const row_test = vga.HEIGHT; row = row_test; // Pre testing try defaultVariablesTesting(0, row_test, 0); try incrementingPagesTesting(); try incrementingVideoBufferTesting(); // Call function // Rows move up one scroll(); // Post test try defaultVariablesTesting(0, vga.HEIGHT - 1, 0); const to_add = (row_test - vga.HEIGHT + 1) * vga.WIDTH; for (pages) |page, i| { for (page) |c, j| { if (j >= TOTAL_CHAR_ON_PAGE - to_add) { if (i == 0) { // The last rows will be blanks try expectEqual(blank, c); } else { try expectEqual((i - 1) * TOTAL_CHAR_ON_PAGE + (j + to_add - TOTAL_CHAR_ON_PAGE), c); } } else { // All rows moved up one, so add vga.WIDTH try expectEqual(i * TOTAL_CHAR_ON_PAGE + j + to_add, c); } } } var i: u32 = 0; while (i < VIDEO_BUFFER_SIZE) : (i += 1) { const buf = video_buffer[i]; if (i < START_OF_DISPLAYABLE_REGION) { try expectEqual(i, buf); } else if (i >= VIDEO_BUFFER_SIZE - to_add) { try expectEqual(blank, buf); } else { try expectEqual(i + to_add, buf); } } // Tear down resetGlobals(); } test "scroll row is more than height" { // Set up try setVideoBufferIncrementingBlankPages(); setPagesIncrementing(); const row_test = vga.HEIGHT + 5; row = row_test; // Pre testing try defaultVariablesTesting(0, row_test, 0); try incrementingPagesTesting(); try incrementingVideoBufferTesting(); // Call function // Rows move up 5 scroll(); // Post test try defaultVariablesTesting(0, vga.HEIGHT - 1, 0); const to_add = (row_test - vga.HEIGHT + 1) * vga.WIDTH; for (pages) |page, i| { for 
(page) |c, j| { if (j >= TOTAL_CHAR_ON_PAGE - to_add) { if (i == 0) { // The last rows will be blanks try expectEqual(blank, c); } else { try expectEqual((i - 1) * TOTAL_CHAR_ON_PAGE + (j + to_add - TOTAL_CHAR_ON_PAGE), c); } } else { // All rows moved up one, so add vga.WIDTH try expectEqual(i * TOTAL_CHAR_ON_PAGE + j + to_add, c); } } } var i: u32 = 0; while (i < VIDEO_BUFFER_SIZE) : (i += 1) { const buf = video_buffer[i]; if (i < START_OF_DISPLAYABLE_REGION) { try expectEqual(i, buf); } else if (i >= VIDEO_BUFFER_SIZE - to_add) { try expectEqual(blank, buf); } else { try expectEqual(i + to_add, buf); } } // Tear down resetGlobals(); } test "putChar new line within screen" { // Set up try setVideoBufferBlankPages(); // Pre testing column = 5; row = 5; try defaultAllTesting(0, 5, 5); // Call function try putChar('\n'); // Post test try defaultAllTesting(0, 6, 0); // Tear down resetGlobals(); } test "putChar new line outside screen" { // Set up try setVideoBufferBlankPages(); // Pre testing column = 5; row = vga.HEIGHT - 1; try defaultAllTesting(0, vga.HEIGHT - 1, 5); // Call function try putChar('\n'); // Post test try defaultAllTesting(0, vga.HEIGHT - 1, 0); // Tear down resetGlobals(); } test "putChar tab within line" { // Set up try setVideoBufferBlankPages(); // Pre testing column = 5; row = 6; try defaultAllTesting(0, 6, 5); // Call function try putChar('\t'); // Post test try defaultAllTesting(0, 6, 9); // Tear down resetGlobals(); } test "putChar tab end of line" { // Set up try setVideoBufferBlankPages(); // Pre testing column = vga.WIDTH - 1; row = 6; try defaultAllTesting(0, 6, vga.WIDTH - 1); // Call function try putChar('\t'); // Post test try defaultAllTesting(0, 7, 3); // Tear down resetGlobals(); } test "putChar tab end of screen" { // Set up try setVideoBufferBlankPages(); // Pre testing column = vga.WIDTH - 1; row = vga.HEIGHT - 1; try defaultAllTesting(0, vga.HEIGHT - 1, vga.WIDTH - 1); // Call function try putChar('\t'); // Post test try 
defaultAllTesting(0, vga.HEIGHT - 1, 3); // Tear down resetGlobals(); } test "putChar line feed" { // Set up try setVideoBufferBlankPages(); // Pre testing column = vga.WIDTH - 1; row = vga.HEIGHT - 1; try defaultAllTesting(0, vga.HEIGHT - 1, vga.WIDTH - 1); // Call function try putChar('\r'); // Post test try defaultAllTesting(0, vga.HEIGHT - 1, 0); // Tear down resetGlobals(); } test "putChar back char top left of screen" { // Set up try setVideoBufferBlankPages(); // Pre testing try defaultAllTesting(0, 0, 0); // Call function try putChar('\x08'); // Post test try defaultAllTesting(0, 0, 0); // Tear down resetGlobals(); } test "putChar back char top row" { // Set up try setVideoBufferBlankPages(); // Pre testing column = 8; try defaultAllTesting(0, 0, 8); // Call function try putChar('\x08'); // Post test try defaultAllTesting(0, 0, 7); // Tear down resetGlobals(); } test "putChar back char beginning of row" { // Set up try setVideoBufferBlankPages(); // Pre testing row = 1; try defaultAllTesting(0, 1, 0); // Call function try putChar('\x08'); // Post test try defaultAllTesting(0, 0, vga.WIDTH - 1); // Tear down resetGlobals(); } test "putChar any char in row" { // Set up try setVideoBufferBlankPages(); // Mocking out the vga calls vga.initTest(); defer vga.freeTest(); vga.addRepeatFunction("entry", vga.orig_entry); // Pre testing try defaultAllTesting(0, 0, 0); // Call function try putChar('A'); // Post test try defaultVariablesTesting(0, 0, 1); try blankPagesTesting(); var i: u32 = 0; while (i < VIDEO_BUFFER_SIZE) : (i += 1) { const buf = video_buffer[i]; if (i == 0) { try expectEqual(vga.orig_entry('A', colour), buf); } else { try expectEqual(blank, buf); } } // Tear down resetGlobals(); } test "putChar any char end of row" { // Set up try setVideoBufferBlankPages(); // Mocking out the vga calls vga.initTest(); defer vga.freeTest(); vga.addRepeatFunction("entry", vga.orig_entry); // Pre testing column = vga.WIDTH - 1; try defaultAllTesting(0, 0, vga.WIDTH - 
1); // Call function try putChar('A'); // Post test try defaultVariablesTesting(0, 1, 0); try blankPagesTesting(); var i: u32 = 0; while (i < VIDEO_BUFFER_SIZE) : (i += 1) { const buf = video_buffer[i]; if (i == vga.WIDTH - 1) { try expectEqual(vga.orig_entry('A', colour), buf); } else { try expectEqual(blank, buf); } } // Tear down resetGlobals(); } test "putChar any char end of screen" { // Set up try setVideoBufferBlankPages(); // Mocking out the vga calls vga.initTest(); defer vga.freeTest(); vga.addRepeatFunction("entry", vga.orig_entry); // Pre testing row = vga.HEIGHT - 1; column = vga.WIDTH - 1; try defaultAllTesting(0, vga.HEIGHT - 1, vga.WIDTH - 1); // Call function try putChar('A'); // Post test try defaultVariablesTesting(0, vga.HEIGHT - 1, 0); for (pages) |page, i| { for (page) |c, j| { if ((i == 0) and (j == TOTAL_CHAR_ON_PAGE - vga.WIDTH - 1)) { try expectEqual(vga.orig_entry('A', colour), c); } else { try expectEqual(blank, c); } } } var i: u32 = 0; while (i < VIDEO_BUFFER_SIZE) : (i += 1) { const buf = video_buffer[i]; if (i == VIDEO_BUFFER_SIZE - vga.WIDTH - 1) { try expectEqual(vga.orig_entry('A', colour), buf); } else { try expectEqual(blank, buf); } } // Tear down resetGlobals(); } test "pageUp top page" { // Set up try setVideoBufferBlankPages(); setPagesIncrementing(); // Pre testing page_index = TOTAL_NUM_PAGES - 1; try defaultVariablesTesting(TOTAL_NUM_PAGES - 1, 0, 0); try incrementingPagesTesting(); try defaultVideoBufferTesting(); // Call function pageUp(); // Post test try defaultVariablesTesting(TOTAL_NUM_PAGES - 1, 0, 0); try incrementingPagesTesting(); try defaultVideoBufferTesting(); // Tear down resetGlobals(); } test "pageUp bottom page" { // Set up try setVideoBufferBlankPages(); setPagesIncrementing(); // Mocking out the vga calls vga.initTest(); defer vga.freeTest(); vga.addRepeatFunction("entry", vga.orig_entry); vga.addRepeatFunction("updateCursor", vga.mock_updateCursor); vga.addConsumeFunction("disableCursor", 
vga.mock_disableCursor); // Pre testing try defaultVariablesTesting(0, 0, 0); try incrementingPagesTesting(); try defaultVideoBufferTesting(); // Call function pageUp(); // Post test try defaultVariablesTesting(1, 0, 0); try incrementingPagesTesting(); // Print page number const text = "Page 1 of 4"; const column_temp = column; const row_temp = row; column = @truncate(u8, vga.WIDTH) - @truncate(u8, text.len); row = ROW_MIN - 1; writeString(text) catch |e| { log.err("Unable to print page number, printing out of bounds. Error: {}\n", .{e}); }; column = column_temp; row = row_temp; var i: u32 = 0; while (i < VIDEO_BUFFER_SIZE) : (i += 1) { const b = video_buffer[i]; // Ignore the ROW_MIN row as this is where the page number is printed and is already // tested, page number is printed 11 from the end if (i < START_OF_DISPLAYABLE_REGION - 11) { try expectEqual(blank, b); } else if (i < START_OF_DISPLAYABLE_REGION) { try expectEqual(vga.orig_entry(text[i + 11 - START_OF_DISPLAYABLE_REGION], colour), b); } else { try expectEqual(i - START_OF_DISPLAYABLE_REGION + TOTAL_CHAR_ON_PAGE, b); } } // Tear down resetGlobals(); } test "pageDown bottom page" { // Set up try setVideoBufferBlankPages(); setPagesIncrementing(); // Pre testing try defaultVariablesTesting(0, 0, 0); try incrementingPagesTesting(); try defaultVideoBufferTesting(); // Call function pageDown(); // Post test try defaultVariablesTesting(0, 0, 0); try incrementingPagesTesting(); try defaultVideoBufferTesting(); // Tear down resetGlobals(); } test "pageDown top page" { // Set up try setVideoBufferBlankPages(); setPagesIncrementing(); // Mocking out the vga calls vga.initTest(); defer vga.freeTest(); vga.addRepeatFunction("entry", vga.orig_entry); vga.addRepeatFunction("updateCursor", vga.mock_updateCursor); vga.addConsumeFunction("disableCursor", vga.mock_disableCursor); // Pre testing page_index = TOTAL_NUM_PAGES - 1; try defaultVariablesTesting(TOTAL_NUM_PAGES - 1, 0, 0); try incrementingPagesTesting(); try 
defaultVideoBufferTesting(); // Call function pageDown(); // Post test try defaultVariablesTesting(TOTAL_NUM_PAGES - 2, 0, 0); try incrementingPagesTesting(); // Print page number const text = "Page 3 of 4"; const column_temp = column; const row_temp = row; column = @truncate(u8, vga.WIDTH) - @truncate(u8, text.len); row = ROW_MIN - 1; writeString(text) catch |e| { log.err("Unable to print page number, printing out of bounds. Error: {}\n", .{e}); }; column = column_temp; row = row_temp; var i: u32 = 0; while (i < VIDEO_BUFFER_SIZE) : (i += 1) { const b = video_buffer[i]; // Ignore the ROW_MIN row as this is where the page number is printed and is already // tested, page number is printed 11 from the end if (i < START_OF_DISPLAYABLE_REGION - 11) { try expectEqual(blank, b); } else if (i < START_OF_DISPLAYABLE_REGION) { try expectEqual(vga.orig_entry(text[i + 11 - START_OF_DISPLAYABLE_REGION], colour), b); } else { try expectEqual((i - START_OF_DISPLAYABLE_REGION) + (TOTAL_CHAR_ON_PAGE * page_index), b); } } // Tear down resetGlobals(); } test "clearScreen" { // Set up try setVideoBufferIncrementingBlankPages(); setPagesIncrementing(); // Mocking out the vga calls vga.initTest(); defer vga.freeTest(); vga.addRepeatFunction("updateCursor", vga.mock_updateCursor); // Pre testing try defaultVariablesTesting(0, 0, 0); try incrementingVideoBufferTesting(); try incrementingPagesTesting(); // Call function clearScreen(); // Post test try defaultVariablesTesting(0, ROW_MIN, 0); var i: u32 = 0; while (i < VIDEO_BUFFER_SIZE) : (i += 1) { const buf = video_buffer[i]; if (i < START_OF_DISPLAYABLE_REGION) { try expectEqual(i, buf); } else { try expectEqual(blank, buf); } } for (pages) |page, j| { for (page) |c, k| { if (j == 0) { // The last rows will be blanks try expectEqual(blank, c); } else { try expectEqual((j - 1) * TOTAL_CHAR_ON_PAGE + k, c); } } } // Tear down resetGlobals(); } test "moveCursorLeft top left of screen" { // Set up try setVideoBufferBlankPages(); // Mocking 
out the vga calls vga.initTest(); defer vga.freeTest(); vga.addRepeatFunction("updateCursor", vga.mock_updateCursor); // Pre testing try defaultAllTesting(0, 0, 0); // Call function moveCursorLeft(); // Post test try defaultAllTesting(0, 0, 0); // Tear down resetGlobals(); } test "moveCursorLeft top screen" { // Set up try setVideoBufferBlankPages(); // Mocking out the vga calls vga.initTest(); defer vga.freeTest(); vga.addRepeatFunction("updateCursor", vga.mock_updateCursor); // Pre testing column = 5; try defaultAllTesting(0, 0, 5); // Call function moveCursorLeft(); // Post test try defaultAllTesting(0, 0, 4); // Tear down resetGlobals(); } test "moveCursorLeft start of row" { // Set up try setVideoBufferBlankPages(); // Mocking out the vga calls vga.initTest(); defer vga.freeTest(); vga.addRepeatFunction("updateCursor", vga.mock_updateCursor); // Pre testing row = 5; try defaultAllTesting(0, 5, 0); // Call function moveCursorLeft(); // Post test try defaultAllTesting(0, 4, vga.WIDTH - 1); // Tear down resetGlobals(); } test "moveCursorRight bottom right of screen" { // Set up try setVideoBufferBlankPages(); // Mocking out the vga calls vga.initTest(); defer vga.freeTest(); vga.addRepeatFunction("updateCursor", vga.mock_updateCursor); // Pre testing row = vga.HEIGHT - 1; column = vga.WIDTH - 1; try defaultAllTesting(0, vga.HEIGHT - 1, vga.WIDTH - 1); // Call function moveCursorRight(); // Post test try defaultAllTesting(0, vga.HEIGHT - 1, vga.WIDTH - 1); // Tear down resetGlobals(); } test "moveCursorRight top screen" { // Set up try setVideoBufferBlankPages(); // Mocking out the vga calls vga.initTest(); defer vga.freeTest(); vga.addRepeatFunction("updateCursor", vga.mock_updateCursor); // Pre testing column = 5; try defaultAllTesting(0, 0, 5); // Call function moveCursorRight(); // Post test try defaultAllTesting(0, 0, 6); // Tear down resetGlobals(); } test "moveCursorRight end of row" { // Set up try setVideoBufferBlankPages(); // Mocking out the vga calls 
vga.initTest(); defer vga.freeTest(); vga.addRepeatFunction("updateCursor", vga.mock_updateCursor); // Pre testing row = 5; column = vga.WIDTH - 1; try defaultAllTesting(0, 5, vga.WIDTH - 1); // Call function moveCursorRight(); // Post test try defaultAllTesting(0, 6, 0); // Tear down resetGlobals(); } test "setColour" { // Set up // Mocking out the vga calls vga.initTest(); defer vga.freeTest(); vga.addConsumeFunction("entry", vga.orig_entry); // Pre testing // Call function const new_colour = vga.orig_entryColour(vga.COLOUR_WHITE, vga.COLOUR_WHITE); setColour(new_colour); // Post test try expectEqual(new_colour, colour); try expectEqual(vga.orig_entry(0, new_colour), blank); // Tear down resetGlobals(); } test "writeString" { // Set up try setVideoBufferBlankPages(); // Mocking out the vga calls vga.initTest(); defer vga.freeTest(); vga.addRepeatFunction("entry", vga.orig_entry); vga.addConsumeFunction("updateCursor", vga.mock_updateCursor); // Pre testing row = ROW_MIN; try defaultAllTesting(0, ROW_MIN, 0); // Call function try writeString("ABC"); // Post test try defaultVariablesTesting(0, ROW_MIN, 3); for (pages) |page, i| { for (page) |c, j| { if ((i == 0) and (j == 0)) { try expectEqual(vga.orig_entry('A', colour), c); } else if ((i == 0) and (j == 1)) { try expectEqual(vga.orig_entry('B', colour), c); } else if ((i == 0) and (j == 2)) { try expectEqual(vga.orig_entry('C', colour), c); } else { try expectEqual(blank, c); } } } var i: u32 = 0; while (i < VIDEO_BUFFER_SIZE) : (i += 1) { const buf = video_buffer[i]; if (i == START_OF_DISPLAYABLE_REGION) { try expectEqual(vga.orig_entry('A', colour), buf); } else if (i == START_OF_DISPLAYABLE_REGION + 1) { try expectEqual(vga.orig_entry('B', colour), buf); } else if (i == START_OF_DISPLAYABLE_REGION + 2) { try expectEqual(vga.orig_entry('C', colour), buf); } else { try expectEqual(blank, buf); } } // Tear down resetGlobals(); } test "init 0,0" { // Set up try setVideoBufferBlankPages(); // Mocking out the 
vga.updateCursor call for updating the hardware cursor vga.initTest(); defer vga.freeTest(); vga.addTestParams("getCursor", .{@as(u16, 0)}); vga.addRepeatFunction("entryColour", vga.orig_entryColour); vga.addRepeatFunction("entry", vga.orig_entry); vga.addRepeatFunction("updateCursor", vga.mock_updateCursor); vga.addConsumeFunction("enableCursor", vga.mock_enableCursor); // Pre testing try defaultAllTesting(0, 0, 0); // Call function init(); // Post test try defaultVariablesTesting(0, ROW_MIN, 0); try blankPagesTesting(); var i: u32 = 0; while (i < VIDEO_BUFFER_SIZE) : (i += 1) { const buf = video_buffer[i]; if (i < START_OF_DISPLAYABLE_REGION) { // This is where the logo will be, but is a complex string so no testing // Just take my word it works :P } else { try expectEqual(blank, buf); } } // Tear down resetGlobals(); } test "init not 0,0" { // Set up try setVideoBufferBlankPages(); // Mocking out the vga.updateCursor call for updating the hardware cursor vga.initTest(); defer vga.freeTest(); vga.addTestParams("getCursor", .{vga.WIDTH}); vga.addRepeatFunction("entryColour", vga.orig_entryColour); vga.addRepeatFunction("entry", vga.orig_entry); vga.addRepeatFunction("updateCursor", vga.mock_updateCursor); vga.addConsumeFunction("enableCursor", vga.mock_enableCursor); // Pre testing try defaultAllTesting(0, 0, 0); // Call function init(); // Post test try defaultVariablesTesting(0, ROW_MIN + 1, 0); try blankPagesTesting(); var i: u32 = 0; while (i < VIDEO_BUFFER_SIZE) : (i += 1) { const buf = video_buffer[i]; if (i < START_OF_DISPLAYABLE_REGION) { // This is where the logo will be, but is a complex string so no testing // Just take my word it works :P } else { try expectEqual(blank, buf); } } // Tear down resetGlobals(); } /// /// Test the init function set up everything properly. 
/// fn rt_initialisedGlobals() void { if (@ptrToInt(video_buffer.ptr) != @ptrToInt(&KERNEL_ADDR_OFFSET) + 0xB8000) { panic(@errorReturnTrace(), "Video buffer not at correct virtual address, found: {}\n", .{@ptrToInt(video_buffer.ptr)}); } if (page_index != 0) { panic(@errorReturnTrace(), "Page index not at zero, found: {}\n", .{page_index}); } if (colour != vga.entryColour(vga.COLOUR_LIGHT_GREY, vga.COLOUR_BLACK)) { panic(@errorReturnTrace(), "Colour not set up properly, found: {}\n", .{colour}); } if (blank != vga.entry(0, colour)) { panic(@errorReturnTrace(), "Blank not set up properly, found: {}\n", .{blank}); } // Make sure the screen isn't all blank var all_blank = true; var i: u32 = 0; while (i < VIDEO_BUFFER_SIZE) : (i += 1) { const buf = video_buffer[i]; if (buf != blank and buf != 0) { all_blank = false; break; } } if (all_blank) { panic(@errorReturnTrace(), "Screen all blank, should have logo and page number\n", .{}); } log.info("Tested globals\n", .{}); } /// /// Test printing a string will output to the screen. This will check both the video memory and /// the pages. 
/// fn rt_printString() void { const text = "abcdefg"; const clear_text = "\x08" ** text.len; writeString(text) catch |e| panic(@errorReturnTrace(), "Failed to print string to tty: {}\n", .{e}); // Check the video memory var counter: u32 = 0; var i: u32 = 0; while (i < VIDEO_BUFFER_SIZE) : (i += 1) { const buf = video_buffer[i]; if (counter < text.len and buf == vga.entry(text[counter], colour)) { counter += 1; } else if (counter == text.len) { // Found all the text break; } else { counter = 0; } } if (counter != text.len) { panic(@errorReturnTrace(), "Didn't find the printed text in video memory\n", .{}); } // Check the pages counter = 0; for (pages[0]) |c| { if (counter < text.len and c == vga.entry(text[counter], colour)) { counter += 1; } else if (counter == text.len) { // Found all the text break; } else { counter = 0; } } if (counter != text.len) { panic(@errorReturnTrace(), "Didn't find the printed text in pages\n", .{}); } // Clear the text writeString(clear_text) catch |e| panic(@errorReturnTrace(), "Failed to print string to tty: {}\n", .{e}); log.info("Tested printing\n", .{}); } /// /// Run all the runtime tests. /// fn runtimeTests() void { rt_initialisedGlobals(); rt_printString(); }
0
repos/pluto/src/kernel/arch
repos/pluto/src/kernel/arch/x86/idt.zig
const std = @import("std"); const expect = std.testing.expect; const expectEqual = std.testing.expectEqual; const expectError = std.testing.expectError; const log = std.log.scoped(.x86_idt); const builtin = @import("builtin"); const is_test = builtin.is_test; const panic = @import("../../panic.zig").panic; const build_options = @import("build_options"); const gdt = if (is_test) @import("../../../../test/mock/kernel/gdt_mock.zig") else @import("gdt.zig"); const arch = if (builtin.is_test) @import("../../../../test/mock/kernel/arch_mock.zig") else @import("arch.zig"); /// The structure that contains all the information that each IDT entry needs. pub const IdtEntry = packed struct { /// The lower 16 bits of the base address of the interrupt handler offset. base_low: u16, /// The code segment in the GDT which the handlers will be held. selector: u16, /// Must be zero, unused. zero: u8, /// The IDT gate type. gate_type: u4, /// Must be 0 for interrupt and trap gates. storage_segment: u1, /// The minimum ring level that the calling code must have to run the handler. So user code may not be able to run some interrupts. privilege: u2, /// Whether the IDT entry is present. present: u1, /// The upper 16 bits of the base address of the interrupt handler offset. base_high: u16, }; /// The IDT pointer structure that contains the pointer to the beginning of the IDT and the number /// of the table (minus 1). Used to load the IST with LIDT instruction. pub const IdtPtr = packed struct { /// The total size of the IDT (minus 1) in bytes. limit: u16, /// The base address where the IDT is located. base: u32, }; pub const InterruptHandler = fn () callconv(.Naked) void; /// The error set for the IDT pub const IdtError = error{ /// A IDT entry already exists for the provided index. IdtEntryExists, }; // ---------- // Task gates // ---------- /// The base addresses aren't used, so set these to 0. When a interrupt happens, interrupts are not /// automatically disabled. 
This is used for referencing the TSS descriptor in the GDT. const TASK_GATE: u4 = 0x5; /// Used to specify a interrupt service routine (ISR). When a interrupt happens, interrupts are /// automatically disabled then enabled upon the IRET instruction which restores the saved EFLAGS. const INTERRUPT_GATE: u4 = 0xE; /// Used to specify a interrupt service routine (ISR). When a interrupt happens, interrupts are not /// automatically disabled and doesn't restores the saved EFLAGS upon the IRET instruction. const TRAP_GATE: u4 = 0xF; // ---------- // Privilege levels // ---------- /// Privilege level 0. Kernel land. The privilege level the calling descriptor minimum will have. const PRIVILEGE_RING_0: u2 = 0x0; /// Privilege level 1. The privilege level the calling descriptor minimum will have. const PRIVILEGE_RING_1: u2 = 0x1; /// Privilege level 2. The privilege level the calling descriptor minimum will have. const PRIVILEGE_RING_2: u2 = 0x2; /// Privilege level 3. User land. The privilege level the calling descriptor minimum will have. const PRIVILEGE_RING_3: u2 = 0x3; /// The total size of all the IDT entries (minus 1). const TABLE_SIZE: u16 = @sizeOf(IdtEntry) * NUMBER_OF_ENTRIES - 1; /// The total number of entries the IDT can have (2^8). pub const NUMBER_OF_ENTRIES: u16 = 256; /// The IDT pointer that the CPU is loaded with that contains the base address of the IDT and the /// size. var idt_ptr: IdtPtr = IdtPtr{ .limit = TABLE_SIZE, .base = 0, }; /// The IDT entry table of NUMBER_OF_ENTRIES entries. Initially all zeroed. var idt_entries: [NUMBER_OF_ENTRIES]IdtEntry = [_]IdtEntry{IdtEntry{ .base_low = 0, .selector = 0, .zero = 0, .gate_type = 0, .storage_segment = 0, .privilege = 0, .present = 0, .base_high = 0, }} ** NUMBER_OF_ENTRIES; /// /// Make a IDT entry. /// /// Arguments: /// IN base: u32 - The pointer to the interrupt handler. /// IN selector: u16 - The descriptor segment the interrupt is in. This will usually be the /// kernels code segment. 
/// IN gate_type: u4 - The type of interrupt. This will usually be the INTERRUPT_GATE. /// IN privilege: u2 - What privilege to call the interrupt in. This will usually be /// the kernel ring level 0. /// /// Return: IdtEntry /// A new IDT entry. /// fn makeEntry(base: u32, selector: u16, gate_type: u4, privilege: u2) IdtEntry { return IdtEntry{ .base_low = @truncate(u16, base), .selector = selector, .zero = 0, .gate_type = gate_type, .storage_segment = 0, .privilege = privilege, // Creating a new entry, so is now present. .present = 1, .base_high = @truncate(u16, base >> 16), }; } /// /// Check whether a IDT gate is open. /// /// Arguments: /// IN entry: IdtEntry - The IDT entry to check. /// /// Return: bool /// Whether the provided IDT entry is open or not. /// pub fn isIdtOpen(entry: IdtEntry) bool { return entry.present == 1; } /// /// Open a interrupt gate with a given index and a handler to call. /// /// Arguments: /// IN index: u8 - The interrupt number to open. /// IN handler: InterruptHandler - The interrupt handler for the interrupt. /// /// Errors: /// IdtError.InvalidIdtEntry - If the interrupt number is invalid, see isValidInterruptNumber. /// IdtError.IdtEntryExists - If the interrupt has already been registered. /// pub fn openInterruptGate(index: u8, handler: InterruptHandler) IdtError!void { // As the IDT is a u8, that maximum can only be 255 which is the maximum IDT entries. // So there can't be a out of bounds. 
if (isIdtOpen(idt_entries[index])) { return IdtError.IdtEntryExists; } idt_entries[index] = makeEntry(@ptrToInt(handler), gdt.KERNEL_CODE_OFFSET, INTERRUPT_GATE, PRIVILEGE_RING_0); } /// /// Initialise the Interrupt descriptor table /// pub fn init() void { log.info("Init\n", .{}); defer log.info("Done\n", .{}); idt_ptr.base = @ptrToInt(&idt_entries); arch.lidt(&idt_ptr); switch (build_options.test_mode) { .Initialisation => runtimeTests(), else => {}, } } fn testHandler0() callconv(.Naked) void {} fn testHandler1() callconv(.Naked) void {} fn mock_lidt(ptr: *const IdtPtr) void { expectEqual(TABLE_SIZE, ptr.limit) catch panic(null, "IDT pointer limit was not correct", .{}); expectEqual(@ptrToInt(&idt_entries[0]), ptr.base) catch panic(null, "IDT pointer base was not correct", .{}); } test "IDT entries" { try expectEqual(@as(u32, 8), @sizeOf(IdtEntry)); try expectEqual(@as(u32, 6), @sizeOf(IdtPtr)); try expectEqual(TABLE_SIZE, idt_ptr.limit); try expectEqual(@as(u32, 0), idt_ptr.base); } test "makeEntry alternating bit pattern" { const actual = makeEntry(0b01010101010101010101010101010101, 0b0101010101010101, 0b0101, 0b01); const expected: u64 = 0b0101010101010101101001010000000001010101010101010101010101010101; try expectEqual(expected, @bitCast(u64, actual)); } test "isIdtOpen" { const not_open = IdtEntry{ .base_low = 0, .selector = 0, .zero = 0, .gate_type = 0, .storage_segment = 0, .privilege = 0, .present = 0, .base_high = 0, }; const open = IdtEntry{ .base_low = 0, .selector = 0, .zero = 0, .gate_type = 0, .storage_segment = 0, .privilege = 0, .present = 1, .base_high = 0, }; try expectEqual(false, isIdtOpen(not_open)); try expectEqual(true, isIdtOpen(open)); } test "openInterruptGate" { const index: u8 = 100; openInterruptGate(index, testHandler0) catch unreachable; try expectError(IdtError.IdtEntryExists, openInterruptGate(index, testHandler0)); const test_fn_0_addr = @ptrToInt(testHandler0); const expected_entry0 = IdtEntry{ .base_low = @truncate(u16, 
test_fn_0_addr), .selector = gdt.KERNEL_CODE_OFFSET, .zero = 0, .gate_type = INTERRUPT_GATE, .storage_segment = 0, .privilege = PRIVILEGE_RING_0, .present = 1, .base_high = @truncate(u16, test_fn_0_addr >> 16), }; try expectEqual(expected_entry0, idt_entries[index]); // Reset idt_entries[index] = IdtEntry{ .base_low = 0, .selector = 0, .zero = 0, .gate_type = 0, .storage_segment = 0, .privilege = 0, .present = 0, .base_high = 0, }; openInterruptGate(index, testHandler0) catch unreachable; // With different handler try expectError(IdtError.IdtEntryExists, openInterruptGate(index, testHandler1)); const expected_entry1 = IdtEntry{ .base_low = @truncate(u16, test_fn_0_addr), .selector = gdt.KERNEL_CODE_OFFSET, .zero = 0, .gate_type = INTERRUPT_GATE, .storage_segment = 0, .privilege = PRIVILEGE_RING_0, .present = 1, .base_high = @truncate(u16, test_fn_0_addr >> 16), }; try expectEqual(expected_entry1, idt_entries[index]); // Reset idt_entries[index] = IdtEntry{ .base_low = 0, .selector = 0, .zero = 0, .gate_type = 0, .storage_segment = 0, .privilege = 0, .present = 0, .base_high = 0, }; } test "init" { // Set up arch.initTest(); defer arch.freeTest(); arch.addConsumeFunction("lidt", mock_lidt); // Call function init(); // Post testing try expectEqual(@ptrToInt(&idt_entries), idt_ptr.base); // Reset idt_ptr.base = 0; } /// /// Check that the IDT table was loaded properly by getting the previously loaded table and /// compare the limit and base address. /// fn rt_loadedIDTSuccess() void { const loaded_idt = arch.sidt(); if (idt_ptr.limit != loaded_idt.limit) { panic(@errorReturnTrace(), "FAILURE: IDT not loaded properly: 0x{X} != 0x{X}\n", .{ idt_ptr.limit, loaded_idt.limit }); } if (idt_ptr.base != loaded_idt.base) { panic(@errorReturnTrace(), "FAILURE: IDT not loaded properly: 0x{X} != {X}\n", .{ idt_ptr.base, loaded_idt.base }); } log.info("Tested loading IDT\n", .{}); } /// /// Run all the runtime tests. /// pub fn runtimeTests() void { rt_loadedIDTSuccess(); }
0
repos/pluto/src/kernel/arch
repos/pluto/src/kernel/arch/x86/keyboard.zig
const builtin = @import("builtin"); const build_options = @import("build_options"); const std = @import("std"); const Allocator = std.mem.Allocator; const testing = std.testing; const log = std.log.scoped(.x86_keyboard); const irq = @import("irq.zig"); const pic = @import("pic.zig"); const arch = if (builtin.is_test) @import("../../../../test/mock/kernel/arch_mock.zig") else @import("arch.zig"); const panic = @import("../../panic.zig").panic; const kb = @import("../../keyboard.zig"); const Keyboard = kb.Keyboard; const KeyPosition = kb.KeyPosition; const KeyAction = kb.KeyAction; /// The initialised keyboard var keyboard: *Keyboard = undefined; /// The number of keys pressed without a corresponding release var pressed_keys: usize = 0; /// If we're in the middle of a special key sequence (e.g. print_screen and arrow keys) var special_sequence = false; /// The number of release scan codes expected before registering a release event /// This is used in special sequences since they end in a certain number of release scan codes var expected_releases: usize = 0; /// If a print_screen press is being processed var on_print_screen = false; /// /// Read a byte from the keyboard buffer /// /// Return: u8 /// The byte waiting in the keyboard buffer /// fn readKeyboardBuffer() u8 { return arch.in(u8, 0x60); } /// /// Parse a keyboard scan code and return the associated keyboard action. 
/// Some keys require a specific sequence of scan codes so this function acts as a state machine /// /// Arguments: /// IN scan_code: u8 - The scan code from the keyboard /// /// Return: ?KeyAction /// The keyboard action resulting from processing the scan code, or null if the scan code doesn't result in a finished keyboard action /// fn parseScanCode(scan_code: u8) ?KeyAction { var released = false; // The print screen key requires special processing since it uses a unique byte sequence if (on_print_screen or scan_code >= 128) { released = true; if (special_sequence or on_print_screen) { // Special sequences are followed by a certain number of release scan codes that should be ignored. Update the expected number if (expected_releases >= 1) { expected_releases -= 1; return null; } } else { if (pressed_keys == 0) { // A special sequence is started by a lone key release scan code special_sequence = true; return null; } } } // Cut off the top bit, which denotes that the key was released const key_code = @truncate(u7, scan_code); var key_pos: ?KeyPosition = null; if (special_sequence or on_print_screen) { if (!released) { // Most special sequences are followed by an extra key release byte expected_releases = 1; } switch (key_code) { 72 => key_pos = KeyPosition.UP_ARROW, 75 => key_pos = KeyPosition.LEFT_ARROW, 77 => key_pos = KeyPosition.RIGHT_ARROW, 80 => key_pos = KeyPosition.DOWN_ARROW, // First byte sent for the pause key 29 => return null, 42 => { // The print screen key is followed by five extra key release bytes key_pos = KeyPosition.PRINT_SCREEN; if (!released) { on_print_screen = true; expected_releases = 5; } }, // Second and final byte sent for the pause key 69 => { // The pause key is followed by two extra key release bytes key_pos = KeyPosition.PAUSE; if (!released) { expected_releases = 2; } }, 82 => key_pos = KeyPosition.INSERT, 71 => key_pos = KeyPosition.HOME, 73 => key_pos = KeyPosition.PAGE_UP, 83 => key_pos = KeyPosition.DELETE, 79 => key_pos = 
KeyPosition.END, 81 => key_pos = KeyPosition.PAGE_DOWN, 53 => key_pos = KeyPosition.KEYPAD_SLASH, 28 => key_pos = KeyPosition.KEYPAD_ENTER, 56 => key_pos = KeyPosition.RIGHT_ALT, 91 => key_pos = KeyPosition.SPECIAL, else => return null, } } key_pos = key_pos orelse switch (key_code) { 1 => KeyPosition.ESC, // Number keys and second row 2...28 => @intToEnum(KeyPosition, @enumToInt(KeyPosition.ONE) + (key_code - 2)), 29 => KeyPosition.LEFT_CTRL, 30...40 => @intToEnum(KeyPosition, @enumToInt(KeyPosition.A) + (key_code - 30)), 41 => KeyPosition.BACKTICK, 42 => KeyPosition.LEFT_SHIFT, 43 => KeyPosition.HASH, 44...54 => @intToEnum(KeyPosition, @enumToInt(KeyPosition.Z) + (key_code - 44)), 55 => KeyPosition.KEYPAD_ASTERISK, 56 => KeyPosition.LEFT_ALT, 57 => KeyPosition.SPACE, 58 => KeyPosition.CAPS_LOCK, 59...68 => @intToEnum(KeyPosition, @enumToInt(KeyPosition.F1) + (key_code - 59)), 69 => KeyPosition.NUM_LOCK, 70 => KeyPosition.SCROLL_LOCK, 71...73 => @intToEnum(KeyPosition, @enumToInt(KeyPosition.KEYPAD_7) + (key_code - 71)), 74 => KeyPosition.KEYPAD_MINUS, 75...77 => @intToEnum(KeyPosition, @enumToInt(KeyPosition.KEYPAD_4) + (key_code - 75)), 78 => KeyPosition.KEYPAD_PLUS, 79...81 => @intToEnum(KeyPosition, @enumToInt(KeyPosition.KEYPAD_1) + (key_code - 79)), 82 => KeyPosition.KEYPAD_0, 83 => KeyPosition.KEYPAD_DOT, 86 => KeyPosition.BACKSLASH, 87 => KeyPosition.F11, 88 => KeyPosition.F12, else => null, }; if (key_pos) |k| { // If we're releasing a key decrement the number of keys pressed, else increment it if (!released) { pressed_keys += 1; } else { pressed_keys -= 1; // Releasing a special key means we are no longer on that special key special_sequence = false; } return KeyAction{ .position = k, .released = released }; } return null; } /// /// Register a keyboard action. 
Should only be called in response to a keyboard IRQ /// /// Arguments: /// IN ctx: *arch.CpuState - The state of the CPU when the keyboard action occurred /// /// Return: usize /// The stack pointer value to use when returning from the interrupt /// fn onKeyEvent(ctx: *arch.CpuState) usize { const scan_code = readKeyboardBuffer(); if (parseScanCode(scan_code)) |action| { if (!keyboard.writeKey(action)) { log.warn("No room for keyboard action {}\n", .{action}); } } return @ptrToInt(ctx); } /// /// Initialise the PS/2 keyboard /// /// Arguments: /// IN allocator: Allocator - The allocator to use to create the keyboard instance /// /// Return: *Keyboard /// The keyboard created /// /// Error: std.mem.Allocator.Error /// OutOfMemory - There isn't enough memory to allocate the keyboard instance /// pub fn init(allocator: Allocator) Allocator.Error!*Keyboard { irq.registerIrq(pic.IRQ_KEYBOARD, onKeyEvent) catch |e| { panic(@errorReturnTrace(), "Failed to register keyboard IRQ: {}\n", .{e}); }; keyboard = try allocator.create(Keyboard); keyboard.* = Keyboard.init(); return keyboard; } fn testResetGlobals() void { pressed_keys = 0; special_sequence = false; expected_releases = 0; on_print_screen = false; } test "parseScanCode" { testResetGlobals(); // Test basic keys const basic_keys = &[_]?KeyPosition{ KeyPosition.ESC, KeyPosition.ONE, KeyPosition.TWO, KeyPosition.THREE, KeyPosition.FOUR, KeyPosition.FIVE, KeyPosition.SIX, KeyPosition.SEVEN, KeyPosition.EIGHT, KeyPosition.NINE, KeyPosition.ZERO, KeyPosition.HYPHEN, KeyPosition.EQUALS, KeyPosition.BACKSPACE, KeyPosition.TAB, KeyPosition.Q, KeyPosition.W, KeyPosition.E, KeyPosition.R, KeyPosition.T, KeyPosition.Y, KeyPosition.U, KeyPosition.I, KeyPosition.O, KeyPosition.P, KeyPosition.LEFT_BRACKET, KeyPosition.RIGHT_BRACKET, KeyPosition.ENTER, KeyPosition.LEFT_CTRL, KeyPosition.A, KeyPosition.S, KeyPosition.D, KeyPosition.F, KeyPosition.G, KeyPosition.H, KeyPosition.J, KeyPosition.K, KeyPosition.L, KeyPosition.SEMICOLON, 
KeyPosition.APOSTROPHE, KeyPosition.BACKTICK, KeyPosition.LEFT_SHIFT, KeyPosition.HASH, KeyPosition.Z, KeyPosition.X, KeyPosition.C, KeyPosition.V, KeyPosition.B, KeyPosition.N, KeyPosition.M, KeyPosition.COMMA, KeyPosition.DOT, KeyPosition.FORWARD_SLASH, KeyPosition.RIGHT_SHIFT, KeyPosition.KEYPAD_ASTERISK, KeyPosition.LEFT_ALT, KeyPosition.SPACE, KeyPosition.CAPS_LOCK, KeyPosition.F1, KeyPosition.F2, KeyPosition.F3, KeyPosition.F4, KeyPosition.F5, KeyPosition.F6, KeyPosition.F7, KeyPosition.F8, KeyPosition.F9, KeyPosition.F10, KeyPosition.NUM_LOCK, KeyPosition.SCROLL_LOCK, KeyPosition.KEYPAD_7, KeyPosition.KEYPAD_8, KeyPosition.KEYPAD_9, KeyPosition.KEYPAD_MINUS, KeyPosition.KEYPAD_4, KeyPosition.KEYPAD_5, KeyPosition.KEYPAD_6, KeyPosition.KEYPAD_PLUS, KeyPosition.KEYPAD_1, KeyPosition.KEYPAD_2, KeyPosition.KEYPAD_3, KeyPosition.KEYPAD_0, KeyPosition.KEYPAD_DOT, null, null, KeyPosition.BACKSLASH, KeyPosition.F11, KeyPosition.F12, }; comptime var scan_code = 1; inline for (basic_keys) |key| { var res = parseScanCode(scan_code); if (key) |k| { const r = res orelse unreachable; try testing.expectEqual(k, r.position); try testing.expectEqual(false, r.released); try testing.expectEqual(pressed_keys, 1); } try testing.expectEqual(on_print_screen, false); try testing.expectEqual(special_sequence, false); try testing.expectEqual(expected_releases, 0); // Test release scan code for key if (key) |k| { res = parseScanCode(scan_code | 128); const r = res orelse unreachable; try testing.expectEqual(k, r.position); try testing.expectEqual(true, r.released); try testing.expectEqual(pressed_keys, 0); try testing.expectEqual(on_print_screen, false); try testing.expectEqual(special_sequence, false); try testing.expectEqual(expected_releases, 0); } scan_code += 1; } // Test the special keys // 'simple' sepcial keys consist of one release byte, a key then an extra release byte const simple_special_keys = &[_]?KeyPosition{ KeyPosition.UP_ARROW, KeyPosition.LEFT_ARROW, 
KeyPosition.RIGHT_ARROW, KeyPosition.DOWN_ARROW, KeyPosition.INSERT, KeyPosition.HOME, KeyPosition.PAGE_UP, KeyPosition.DELETE, KeyPosition.END, KeyPosition.PAGE_DOWN, KeyPosition.KEYPAD_SLASH, KeyPosition.KEYPAD_ENTER, KeyPosition.RIGHT_ALT, KeyPosition.SPECIAL, }; const simple_special_codes = &[_]u8{ 72, 75, 77, 80, 82, 71, 73, 83, 79, 81, 53, 28, 56, 91 }; for (simple_special_keys) |key, i| { try testing.expectEqual(parseScanCode(128), null); try testing.expectEqual(pressed_keys, 0); try testing.expectEqual(on_print_screen, false); try testing.expectEqual(special_sequence, true); try testing.expectEqual(expected_releases, 0); var res = parseScanCode(simple_special_codes[i]) orelse unreachable; try testing.expectEqual(false, res.released); try testing.expectEqual(key, res.position); try testing.expectEqual(pressed_keys, 1); try testing.expectEqual(on_print_screen, false); try testing.expectEqual(special_sequence, true); try testing.expectEqual(expected_releases, 1); try testing.expectEqual(parseScanCode(128), null); try testing.expectEqual(pressed_keys, 1); try testing.expectEqual(on_print_screen, false); try testing.expectEqual(special_sequence, true); try testing.expectEqual(expected_releases, 0); res = parseScanCode(simple_special_codes[i] | 128) orelse unreachable; try testing.expectEqual(true, res.released); try testing.expectEqual(key, res.position); try testing.expectEqual(pressed_keys, 0); try testing.expectEqual(on_print_screen, false); try testing.expectEqual(special_sequence, false); try testing.expectEqual(expected_releases, 0); } }
0
repos/pluto/src/kernel/arch
repos/pluto/src/kernel/arch/x86/pci.zig
const std = @import("std"); const builtin = @import("builtin"); const is_test = builtin.is_test; const expectEqual = std.testing.expectEqual; const build_options = @import("build_options"); const Allocator = std.mem.Allocator; const ArrayList = std.ArrayList; const log = std.log.scoped(.pci); const arch = if (is_test) @import("../../../../test/mock/kernel/arch_mock.zig") else @import("arch.zig"); /// The port address for selecting a 32bit register in the PCI configuration space. const CONFIG_ADDRESS: u16 = 0x0CF8; /// The port address for read/writing to the selected address. const CONFIG_DATA: u16 = 0x0CFC; /// The register offsets for PCI. Currently there is no check for valid register offsets for the /// header type. The names are self explanatory. Further information can be found here: /// https://wiki.osdev.org/PCI. const PciRegisters = enum(u8) { VenderId = 0x00, DeviceId = 0x02, Command = 0x04, Status = 0x06, RevisionId = 0x08, ProgrammingInterface = 0x09, Subclass = 0x0A, ClassCode = 0x0B, CacheLineSize = 0x0C, LatencyTimer = 0x0D, HeaderType = 0x0E, BIST = 0x0F, // The next set of registers are for the 0x00 (standard) header. // This currently uses only the common registers above that are available to all header types. BaseAddr0 = 0x10, BaseAddr1 = 0x14, BaseAddr2 = 0x18, BaseAddr3 = 0x1C, BaseAddr4 = 0x20, BaseAddr5 = 0x24, CardbusCISPtr = 0x28, SubsystemVenderId = 0x2C, SubsystemId = 0x2E, ExpansionROMBaseAddr = 0x30, CapabilitiesPtr = 0x34, InterruptLine = 0x3C, InterruptPin = 0x3D, MinGrant = 0x3E, MaxLatency = 0x3F, /// /// Get the type the represents the width of the register. This can be either u8, u16 or u32. /// /// Argument: /// IN comptime pci_reg: PciRegisters - The register to get the width for. /// /// Return: type /// The width type. 
/// pub fn getWidth(comptime pci_reg: PciRegisters) type { return switch (pci_reg) { .RevisionId, .ProgrammingInterface, .Subclass, .ClassCode, .CacheLineSize, .LatencyTimer, .HeaderType, .BIST, .InterruptLine, .InterruptPin, .MinGrant, .MaxLatency, .CapabilitiesPtr => u8, .VenderId, .DeviceId, .Command, .Status, .SubsystemVenderId, .SubsystemId => u16, .BaseAddr0, .BaseAddr1, .BaseAddr2, .BaseAddr3, .BaseAddr4, .BaseAddr5, .CardbusCISPtr, .ExpansionROMBaseAddr => u32, }; } }; /// The PCI address used for sending to the address port. const PciAddress = packed struct { register_offset: u8, function: u3, device: u5, bus: u8, reserved: u7 = 0, enable: u1 = 1, }; /// A PCI device. This will be unique to a bus and device number. const PciDevice = struct { /// The bus on which the device is on bus: u8, /// The device number. device: u5, const Self = @This(); /// /// Get the PCI address for this device and for a function and register. /// /// Argument: /// IN self: Self - This device. /// IN function: u3 - The function. /// IN comptime pci_reg: PciRegisters - The register. /// /// Return: PciAddress /// The PCI address that can be used to read the register offset for this device and function. /// pub fn getAddress(self: Self, function: u3, comptime pci_reg: PciRegisters) PciAddress { return PciAddress{ .bus = self.bus, .device = self.device, .function = function, .register_offset = @enumToInt(pci_reg), }; } /// /// Read the configuration register data from this device, function and register. PCI configure /// reads will return a u32 value, but the register may not be u32 is size so this will return /// the correctly typed value depending on the size of the register. /// /// Argument: /// IN self: Self - This device. /// IN function: u3 - The function. /// IN comptime pci_reg: PciRegisters - The register. /// /// Return: PciRegisters.getWidth() /// Depending on the register, the type of the return value maybe u8, u16 or u32. See /// PciRegisters.getWidth(). 
/// pub fn configReadData(self: Self, function: u3, comptime pci_reg: PciRegisters) pci_reg.getWidth() { const address = self.getAddress(function, pci_reg); // Last 2 bits of offset must be zero // This is because we are requesting a integer (4 bytes) and cannot request a // single byte that isn't 4 bytes aligned // Write the address arch.out(CONFIG_ADDRESS, @bitCast(u32, address) & 0xFFFFFFFC); // Read the data const result = arch.in(u32, CONFIG_DATA); // Return the size the user wants const shift = switch (pci_reg.getWidth()) { u8 => (@intCast(u5, address.register_offset & 0x3)) * 8, u16 => (@intCast(u5, address.register_offset & 0x2)) * 8, u32 => 0, else => @compileError("Invalid read size. Only u8, u16 and u32 allowed."), }; return @truncate(pci_reg.getWidth(), (result >> shift)); } test "configReadData u8" { arch.initTest(); defer arch.freeTest(); // The bus, device and function values can be any value as we are testing the shifting and masking // Have chosen bus = 0, device = 1 and function = 2. // We only change the register as they will have different but widths. 
{ const device = PciDevice{ .bus = 0, .device = 1, }; arch.addTestParams("out", .{ CONFIG_ADDRESS, @bitCast(u32, device.getAddress(2, .RevisionId)) & 0xFFFFFFFC }); arch.addTestParams("in", .{ CONFIG_DATA, @as(u32, 0xABCDEF12) }); // RevisionId is a u8 width, offset 0 const res = device.configReadData(2, .RevisionId); try expectEqual(res, 0x12); } { const device = PciDevice{ .bus = 0, .device = 1, }; arch.addTestParams("out", .{ CONFIG_ADDRESS, @bitCast(u32, device.getAddress(2, .ProgrammingInterface)) & 0xFFFFFFFC }); arch.addTestParams("in", .{ CONFIG_DATA, @as(u32, 0xABCDEF12) }); // ProgrammingInterface is a u8 width, offset 8 const res = device.configReadData(2, .ProgrammingInterface); try expectEqual(res, 0xEF); } { const device = PciDevice{ .bus = 0, .device = 1, }; arch.addTestParams("out", .{ CONFIG_ADDRESS, @bitCast(u32, device.getAddress(2, .Subclass)) & 0xFFFFFFFC }); arch.addTestParams("in", .{ CONFIG_DATA, @as(u32, 0xABCDEF12) }); // Subclass is a u8 width, offset 16 const res = device.configReadData(2, .Subclass); try expectEqual(res, 0xCD); } { const device = PciDevice{ .bus = 0, .device = 1, }; arch.addTestParams("out", .{ CONFIG_ADDRESS, @bitCast(u32, device.getAddress(2, .ClassCode)) & 0xFFFFFFFC }); arch.addTestParams("in", .{ CONFIG_DATA, @as(u32, 0xABCDEF12) }); // ClassCode is a u8 width, offset 24 const res = device.configReadData(2, .ClassCode); try expectEqual(res, 0xAB); } } test "configReadData u16" { arch.initTest(); defer arch.freeTest(); // The bus, device and function values can be any value as we are testing the shifting and masking // Have chosen bus = 0, device = 1 and function = 2. // We only change the register as they will have different but widths. 
{ const device = PciDevice{ .bus = 0, .device = 1, }; arch.addTestParams("out", .{ CONFIG_ADDRESS, @bitCast(u32, device.getAddress(2, .VenderId)) & 0xFFFFFFFC }); arch.addTestParams("in", .{ CONFIG_DATA, @as(u32, 0xABCDEF12) }); // VenderId is a u16 width, offset 0 const res = device.configReadData(2, .VenderId); try expectEqual(res, 0xEF12); } { const device = PciDevice{ .bus = 0, .device = 1, }; arch.addTestParams("out", .{ CONFIG_ADDRESS, @bitCast(u32, device.getAddress(2, .DeviceId)) & 0xFFFFFFFC }); arch.addTestParams("in", .{ CONFIG_DATA, @as(u32, 0xABCDEF12) }); // DeviceId is a u16 width, offset 16 const res = device.configReadData(2, .DeviceId); try expectEqual(res, 0xABCD); } } test "configReadData u32" { arch.initTest(); defer arch.freeTest(); // The bus, device and function values can be any value as we are testing the shifting and masking // Have chosen bus = 0, device = 1 and function = 2. // We only change the register as they will have different but widths. { const device = PciDevice{ .bus = 0, .device = 1, }; arch.addTestParams("out", .{ CONFIG_ADDRESS, @bitCast(u32, device.getAddress(2, .BaseAddr0)) & 0xFFFFFFFC }); arch.addTestParams("in", .{ CONFIG_DATA, @as(u32, 0xABCDEF12) }); // BaseAddr0 is a u32 width, offset 0 const res = device.configReadData(2, .BaseAddr0); try expectEqual(res, 0xABCDEF12); } } }; pub const PciDeviceInfo = struct { pci_device: PciDevice, function: u3, vender_id: u16, device_id: u16, subclass: u8, class_code: u8, /// The error set. pub const Error = error{ /// There is no functions available for the given function number for a given PCI device. 
NoFunction, }; pub fn create(pci_device: PciDevice, function: u3) Error!PciDeviceInfo { const vender_id = pci_device.configReadData(function, .VenderId); // No function available, try the next if (vender_id == 0xFFFF) { return Error.NoFunction; } return PciDeviceInfo{ .pci_device = pci_device, .function = function, .vender_id = vender_id, .device_id = pci_device.configReadData(function, .DeviceId), .subclass = pci_device.configReadData(function, .Subclass), .class_code = pci_device.configReadData(function, .ClassCode), }; } pub fn print(device: arch.Device) void { log.info("BUS: 0x{X}, DEV: 0x{X}, FUN: 0x{X}, VID: 0x{X}, DID: 0x{X}, SC: 0x{X}, CC: 0x{X}\n", .{ device.pci_device.bus, device.pci_device.device, device.function, device.vender_id, device.device_id, device.subclass, device.class_code, }); } }; /// /// Get a list of all the PCI device. The returned list will needed to be freed by the caller. /// /// Arguments: /// IN allocator: Allocator - An allocator used for creating the list. /// /// Return: []PciDeviceInfo /// The list of PCI devices information. /// /// Error: Allocator.Error /// error.OutOfMemory - If there isn't enough memory to create the info list. /// pub fn getDevices(allocator: Allocator) Allocator.Error![]PciDeviceInfo { // Create an array list for the devices. 
var pci_device_infos = ArrayList(PciDeviceInfo).init(allocator); defer pci_device_infos.deinit(); // Iterate through all the possible devices var _bus: u32 = 0; while (_bus < 8) : (_bus += 1) { const bus = @intCast(u8, _bus); var _device: u32 = 0; while (_device < 32) : (_device += 1) { const device = @intCast(u5, _device); // Devices have at least 1 function const pci_device = PciDevice{ .bus = bus, .device = device, }; var num_functions: u32 = if (pci_device.configReadData(0, .HeaderType) & 0x80 != 0) 8 else 1; var _function: u32 = 0; while (_function < num_functions) : (_function += 1) { const function = @intCast(u3, _function); const device_info = PciDeviceInfo.create(pci_device, function) catch |e| switch (e) { error.NoFunction => continue, }; try pci_device_infos.append(device_info); } } } return pci_device_infos.toOwnedSlice(); }
0
repos/pluto/src/kernel/arch
repos/pluto/src/kernel/arch/x86/arch.zig
const std = @import("std"); const Allocator = std.mem.Allocator; const log = std.log.scoped(.x86_arch); const builtin = @import("builtin"); const cmos = @import("cmos.zig"); const gdt = @import("gdt.zig"); const idt = @import("idt.zig"); const irq = @import("irq.zig"); const isr = @import("isr.zig"); const paging = @import("paging.zig"); const pic = @import("pic.zig"); const pci = @import("pci.zig"); const pit = @import("pit.zig"); const rtc = @import("rtc.zig"); const serial = @import("serial.zig"); const syscalls = @import("syscalls.zig"); const tty = @import("tty.zig"); const vga = @import("vga.zig"); const mem = @import("../../mem.zig"); const multiboot = @import("multiboot.zig"); const vmm = @import("../../vmm.zig"); const keyboard = @import("keyboard.zig"); const Serial = @import("../../serial.zig").Serial; const panic = @import("../../panic.zig").panic; const TTY = @import("../../tty.zig").TTY; const Keyboard = @import("../../keyboard.zig").Keyboard; const Task = @import("../../task.zig").Task; const MemProfile = mem.MemProfile; /// The type of a device. pub const Device = pci.PciDeviceInfo; /// The type of the date and time structure. pub const DateTime = rtc.DateTime; /// The virtual end of the kernel code. extern var KERNEL_VADDR_END: *u32; /// The virtual start of the kernel code. extern var KERNEL_VADDR_START: *u32; /// The physical end of the kernel code. extern var KERNEL_PHYSADDR_END: *u32; /// The physical start of the kernel code. extern var KERNEL_PHYSADDR_START: *u32; /// The boot-time offset that the virtual addresses are from the physical addresses. extern var KERNEL_ADDR_OFFSET: *u32; /// The virtual address of the top limit of the stack. extern var KERNEL_STACK_START: *u32; /// The virtual address of the base of the stack. extern var KERNEL_STACK_END: *u32; /// The interrupt context that is given to a interrupt handler. It contains most of the registers /// and the interrupt number and error code (if there is one). 
pub const CpuState = packed struct {
    // Page directory
    cr3: usize,
    // Extra segments
    gs: u32,
    fs: u32,
    es: u32,
    ds: u32,
    // Destination, source, base pointer
    edi: u32,
    esi: u32,
    ebp: u32,
    esp: u32,
    // General registers
    ebx: u32,
    edx: u32,
    ecx: u32,
    eax: u32,
    // Interrupt number and error code
    int_num: u32,
    error_code: u32,
    // Instruction pointer, code segment and flags
    eip: u32,
    cs: u32,
    eflags: u32,
    // Only pushed/popped by the CPU on a ring change; garbage for kernel-mode interrupts.
    user_esp: u32,
    user_ss: u32,

    // NOTE(review): the field order is assumed to mirror the push order of the
    // interrupt stubs/task-switch code — confirm against interrupts.zig before
    // reordering anything here.

    ///
    /// Create a CpuState with every field left undefined. Useful as a
    /// placeholder before the real register values are filled in.
    ///
    /// Return: CpuState
    ///     A CpuState whose fields are all undefined.
    ///
    pub fn empty() CpuState {
        return .{
            .cr3 = undefined,
            .gs = undefined,
            .fs = undefined,
            .es = undefined,
            .ds = undefined,
            .edi = undefined,
            .esi = undefined,
            .ebp = undefined,
            .esp = undefined,
            .ebx = undefined,
            .edx = undefined,
            .ecx = undefined,
            .eax = undefined,
            .int_num = undefined,
            .error_code = undefined,
            .eip = undefined,
            .cs = undefined,
            .eflags = undefined,
            .user_esp = undefined,
            .user_ss = undefined,
        };
    }
};

/// x86's boot payload is the multiboot info passed by grub
pub const BootPayload = *multiboot.multiboot_info_t;

/// The type of the payload passed to a virtual memory mapper.
/// For x86 it's the page directory that should be mapped.
pub const VmmPayload = *paging.Directory;

/// The payload used in the kernel virtual memory manager.
/// For x86 it's the kernel's page directory.
pub const KERNEL_VMM_PAYLOAD = &paging.kernel_directory;

/// The architecture's virtual memory mapper.
/// For x86, it simply forwards the calls to the paging subsystem.
pub const VMM_MAPPER: vmm.Mapper(VmmPayload) = vmm.Mapper(VmmPayload){ .mapFn = paging.map, .unmapFn = paging.unmap };

/// The size of each allocatable block of memory, normally set to the page size.
pub const MEMORY_BLOCK_SIZE: usize = paging.PAGE_SIZE_4KB;

///
/// Assembly that reads data from a given port and returns its value.
///
/// Arguments:
///     IN comptime Type: type - The type of the data. This can only be u8, u16 or u32.
///     IN port: u16 - The port to read data from.
///
/// Return: Type
///     The data that the port returns.
///
pub fn in(comptime Type: type, port: u16) Type {
    // One instruction variant per operand width; the register constraint
    // (al/ax/eax) must match the width of the `in` instruction used.
    return switch (Type) {
        u8 => asm volatile ("inb %[port], %[result]"
            : [result] "={al}" (-> Type),
            : [port] "N{dx}" (port),
        ),
        u16 => asm volatile ("inw %[port], %[result]"
            : [result] "={ax}" (-> Type),
            : [port] "N{dx}" (port),
        ),
        u32 => asm volatile ("inl %[port], %[result]"
            : [result] "={eax}" (-> Type),
            : [port] "N{dx}" (port),
        ),
        else => @compileError("Invalid data type. Only u8, u16 or u32, found: " ++ @typeName(Type)),
    };
}

///
/// Assembly to write to a given port with a given type of data.
///
/// Arguments:
///     IN port: u16 - The port to write to.
///     IN data: anytype - The data that will be sent. This must be a u8, u16 or u32 type.
///
pub fn out(port: u16, data: anytype) void {
    // Pick the instruction and data register (al/ax/eax) by operand width.
    switch (@TypeOf(data)) {
        u8 => asm volatile ("outb %[data], %[port]"
            :
            : [port] "{dx}" (port),
              [data] "{al}" (data),
        ),
        u16 => asm volatile ("outw %[data], %[port]"
            :
            : [port] "{dx}" (port),
              [data] "{ax}" (data),
        ),
        u32 => asm volatile ("outl %[data], %[port]"
            :
            : [port] "{dx}" (port),
              [data] "{eax}" (data),
        ),
        else => @compileError("Invalid data type. Only u8, u16 or u32, found: " ++ @typeName(@TypeOf(data))),
    }
}

///
/// Force the CPU to wait for an I/O operation to complete. Use port 0x80 as this is unused.
///
pub fn ioWait() void {
    // A dummy write to an unused port gives the bus time to settle.
    out(0x80, @as(u8, 0));
}

///
/// Load the GDT, refreshing the code segment with the code segment offset of the kernel as we
/// are still in kernel land. Also loads the kernel data segment into all the other segment
/// registers.
///
/// Arguments:
///     IN gdt_ptr: *const gdt.GdtPtr - The address of the GDT.
///
pub fn lgdt(gdt_ptr: *const gdt.GdtPtr) void {
    // Load the GDT into the CPU
    asm volatile ("lgdt (%%eax)"
        :
        : [gdt_ptr] "{eax}" (gdt_ptr),
    );

    // Load the kernel data segment, index into the GDT
    asm volatile ("mov %%bx, %%ds"
        :
        : [KERNEL_DATA_OFFSET] "{bx}" (gdt.KERNEL_DATA_OFFSET),
    );

    // bx still holds KERNEL_DATA_OFFSET from the move above.
    asm volatile ("mov %%bx, %%es");
    asm volatile ("mov %%bx, %%fs");
    asm volatile ("mov %%bx, %%gs");
    asm volatile ("mov %%bx, %%ss");

    // Load the kernel code segment into the CS register via a far jump to the
    // next instruction.
    // NOTE(review): $0x08 is assumed to equal gdt.KERNEL_CODE_OFFSET — confirm.
    asm volatile (
        \\ljmp $0x08, $1f
        \\1:
    );
}

///
/// Get the previously loaded GDT from the CPU.
///
/// Return: gdt.GdtPtr
///     The previously loaded GDT from the CPU.
///
pub fn sgdt() gdt.GdtPtr {
    var gdt_ptr = gdt.GdtPtr{ .limit = 0, .base = 0 };
    asm volatile ("sgdt %[tab]"
        : [tab] "=m" (gdt_ptr),
    );
    return gdt_ptr;
}

///
/// Tell the CPU where the TSS is located in the GDT.
///
/// Arguments:
///     IN offset: u16 - The offset in the GDT where the TSS segment is located.
///
pub fn ltr(offset: u16) void {
    asm volatile ("ltr %%ax"
        :
        : [offset] "{ax}" (offset),
    );
}

///
/// Load the IDT into the CPU.
///
/// Arguments:
///     IN idt_ptr: *const idt.IdtPtr - The address of the IDT.
///
pub fn lidt(idt_ptr: *const idt.IdtPtr) void {
    asm volatile ("lidt (%%eax)"
        :
        : [idt_ptr] "{eax}" (idt_ptr),
    );
}

///
/// Get the previously loaded IDT from the CPU.
///
/// Return: idt.IdtPtr
///     The previously loaded IDT from the CPU.
///
pub fn sidt() idt.IdtPtr {
    var idt_ptr = idt.IdtPtr{ .limit = 0, .base = 0 };
    asm volatile ("sidt %[tab]"
        : [tab] "=m" (idt_ptr),
    );
    return idt_ptr;
}

///
/// Enable interrupts.
///
pub fn enableInterrupts() void {
    asm volatile ("sti");
}

///
/// Disable interrupts.
///
pub fn disableInterrupts() void {
    asm volatile ("cli");
}

///
/// Halt the CPU until the next interrupt; interrupts will still be called.
///
pub fn halt() void {
    asm volatile ("hlt");
}

///
/// Idle the kernel while still handling interrupts.
///
pub fn spinWait() noreturn {
    enableInterrupts();
    // hlt sleeps the CPU until the next interrupt, so this loop is cheap.
    while (true) {
        halt();
    }
}

///
/// Halt the kernel. No interrupts will be handled.
///
pub fn haltNoInterrupts() noreturn {
    // Disable inside the loop so the state holds even if an NMI or debugger
    // resumes execution past the hlt.
    while (true) {
        disableInterrupts();
        halt();
    }
}

///
/// Write a byte to serial port com1. Used by the serial initialiser
///
/// Arguments:
///     IN byte: u8 - The byte to write
///
fn writeSerialCom1(byte: u8) void {
    serial.write(byte, serial.Port.COM1);
}

///
/// Initialise serial communication using port COM1 and construct a Serial instance
///
/// Arguments:
///     IN boot_payload: arch.BootPayload - The payload passed at boot. Not currently used by x86
///
/// Return: serial.Serial
///     The Serial instance constructed with the function used to write bytes
///
pub fn initSerial(boot_payload: BootPayload) Serial {
    // Suppress unused var warning
    _ = boot_payload;
    serial.init(serial.DEFAULT_BAUDRATE, serial.Port.COM1) catch |e| {
        panic(@errorReturnTrace(), "Failed to initialise serial: {}", .{e});
    };
    return Serial{
        .write = writeSerialCom1,
    };
}

///
/// Initialise the TTY and construct a TTY instance
///
/// Arguments:
///     IN boot_payload: BootPayload - The payload passed to the kernel on boot. Not used here
///
/// Return: tty.TTY
///     The TTY instance constructed with the information required by the rest of the kernel
///
pub fn initTTY(boot_payload: BootPayload) TTY {
    // Suppress unused var warning
    _ = boot_payload;
    return .{
        .print = tty.writeString,
        .setCursor = tty.setCursor,
        .cols = vga.WIDTH,
        .rows = vga.HEIGHT,
        .clear = tty.clearScreen,
    };
}

///
/// Initialise the system's memory.
Populates a memory profile with boot modules from grub, the amount of available memory, the reserved regions of virtual and physical memory as well as the start and end of the kernel code /// /// Arguments: /// IN mb_info: *multiboot.multiboot_info_t - The multiboot info passed by grub /// /// Return: mem.MemProfile /// The constructed memory profile /// /// Error: Allocator.Error /// Allocator.Error.OutOfMemory - There wasn't enough memory in the allocated created to populate the memory profile, consider increasing mem.FIXED_ALLOC_SIZE /// pub fn initMem(mb_info: BootPayload) Allocator.Error!MemProfile { log.info("Init\n", .{}); defer log.info("Done\n", .{}); log.debug("KERNEL_ADDR_OFFSET: 0x{X}\n", .{@ptrToInt(&KERNEL_ADDR_OFFSET)}); log.debug("KERNEL_STACK_START: 0x{X}\n", .{@ptrToInt(&KERNEL_STACK_START)}); log.debug("KERNEL_STACK_END: 0x{X}\n", .{@ptrToInt(&KERNEL_STACK_END)}); log.debug("KERNEL_VADDR_START: 0x{X}\n", .{@ptrToInt(&KERNEL_VADDR_START)}); log.debug("KERNEL_VADDR_END: 0x{X}\n", .{@ptrToInt(&KERNEL_VADDR_END)}); log.debug("KERNEL_PHYSADDR_START: 0x{X}\n", .{@ptrToInt(&KERNEL_PHYSADDR_START)}); log.debug("KERNEL_PHYSADDR_END: 0x{X}\n", .{@ptrToInt(&KERNEL_PHYSADDR_END)}); const mods_count = mb_info.mods_count; mem.ADDR_OFFSET = @ptrToInt(&KERNEL_ADDR_OFFSET); const mmap_addr = mb_info.mmap_addr; const num_mmap_entries = mb_info.mmap_length / @sizeOf(multiboot.multiboot_memory_map_t); const allocator = mem.fixed_buffer_allocator.allocator(); var reserved_physical_mem = std.ArrayList(mem.Range).init(allocator); var reserved_virtual_mem = std.ArrayList(mem.Map).init(allocator); const mem_map = @intToPtr([*]multiboot.multiboot_memory_map_t, mmap_addr)[0..num_mmap_entries]; // Reserve the unavailable sections from the multiboot memory map for (mem_map) |entry| { if (entry.@"type" != multiboot.MULTIBOOT_MEMORY_AVAILABLE) { // If addr + len is greater than maxInt(usize) just ignore whatever comes after maxInt(usize) since it can't be addressed anyway 
const end: usize = if (entry.addr > std.math.maxInt(usize) - entry.len) std.math.maxInt(usize) else @intCast(usize, entry.addr + entry.len); try reserved_physical_mem.append(.{ .start = @intCast(usize, entry.addr), .end = end, }); } } // Map the kernel code const kernel_virt = mem.Range{ .start = @ptrToInt(&KERNEL_VADDR_START), .end = @ptrToInt(&KERNEL_STACK_START), }; const kernel_phy = mem.Range{ .start = mem.virtToPhys(kernel_virt.start), .end = mem.virtToPhys(kernel_virt.end), }; try reserved_virtual_mem.append(.{ .virtual = kernel_virt, .physical = kernel_phy, }); // Map the multiboot info struct itself const mb_region = mem.Range{ .start = @ptrToInt(mb_info), .end = @ptrToInt(mb_info) + @sizeOf(multiboot.multiboot_info_t), }; const mb_physical = mem.Range{ .start = mem.virtToPhys(mb_region.start), .end = mem.virtToPhys(mb_region.end), }; try reserved_virtual_mem.append(.{ .virtual = mb_region, .physical = mb_physical, }); // Map the tty buffer const tty_addr = mem.virtToPhys(tty.getVideoBufferAddress()); const tty_region = mem.Range{ .start = tty_addr, .end = tty_addr + 32 * 1024, }; try reserved_virtual_mem.append(.{ .physical = tty_region, .virtual = .{ .start = mem.physToVirt(tty_region.start), .end = mem.physToVirt(tty_region.end), }, }); // Map the boot modules const boot_modules = @intToPtr([*]multiboot.multiboot_mod_list, mem.physToVirt(mb_info.mods_addr))[0..mods_count]; var modules = std.ArrayList(mem.Module).init(allocator); for (boot_modules) |module| { const virtual = mem.Range{ .start = mem.physToVirt(module.mod_start), .end = mem.physToVirt(module.mod_end), }; const physical = mem.Range{ .start = module.mod_start, .end = module.mod_end, }; try modules.append(.{ .region = virtual, .name = std.mem.span(mem.physToVirt(@intToPtr([*:0]u8, module.cmdline))), }); try reserved_virtual_mem.append(.{ .physical = physical, .virtual = virtual, }); } // Map the kernel stack const kernel_stack_virt = mem.Range{ .start = @ptrToInt(&KERNEL_STACK_START), .end = 
@ptrToInt(&KERNEL_STACK_END), }; const kernel_stack_phy = mem.Range{ .start = mem.virtToPhys(kernel_stack_virt.start), .end = mem.virtToPhys(kernel_stack_virt.end), }; try reserved_virtual_mem.append(.{ .virtual = kernel_stack_virt, .physical = kernel_stack_phy, }); return MemProfile{ .vaddr_end = @ptrCast([*]u8, &KERNEL_VADDR_END), .vaddr_start = @ptrCast([*]u8, &KERNEL_VADDR_START), .physaddr_end = @ptrCast([*]u8, &KERNEL_PHYSADDR_END), .physaddr_start = @ptrCast([*]u8, &KERNEL_PHYSADDR_START), // Total memory available including the initial 1MiB that grub doesn't include .mem_kb = mb_info.mem_upper + mb_info.mem_lower + 1024, .modules = modules.items, .physical_reserved = reserved_physical_mem.items, .virtual_reserved = reserved_virtual_mem.items, .fixed_allocator = mem.fixed_buffer_allocator, }; } /// /// Initialise the keyboard that may depend on the chipset or architecture in general. /// x86 initialises the keyboard connected to the PS/2 port /// /// Arguments: /// IN allocator: std.mem.Allocator - The allocator to use if necessary /// /// Return: *Keyboard /// The initialised PS/2 keyboard /// /// Error: std.mem.Allocator.Error /// OutOfMemory - There wasn't enough memory to allocate what was needed /// pub fn initKeyboard(allocator: Allocator) Allocator.Error!*Keyboard { return keyboard.init(allocator); } /// /// Initialise a stack and vmm payload used for creating a task. /// Currently only supports fn () noreturn functions for the entry point. /// /// Arguments: /// IN task: *Task - The task to be initialised. The function will only modify whatever /// is required by the architecture. In the case of x86, it will put /// the initial CpuState on the kernel stack. /// IN entry_point: usize - The pointer to the entry point of the function. Functions only /// supported is fn () noreturn /// IN allocator: Allocator - The allocator use for allocating a stack. /// IN set_up_stack: bool - Set up the kernel and user stacks (register values, PC etc.) 
for task entry /// /// Error: Allocator.Error /// OutOfMemory - Unable to allocate space for the stack. /// pub fn initTask(task: *Task, entry_point: usize, allocator: Allocator, set_up_stack: bool) Allocator.Error!void { // TODO Will need to add the exit point // Set up everything as a kernel task task.vmm.payload = &paging.kernel_directory; var stack = &task.kernel_stack; const kernel_stack_bottom = if (!set_up_stack) 0 else if (task.kernel) task.kernel_stack.len - 18 else task.kernel_stack.len - 20; if (set_up_stack) { const data_offset = if (task.kernel) gdt.KERNEL_DATA_OFFSET else gdt.USER_DATA_OFFSET | 0b11; // Setting the bottom two bits of the code offset designates that this is a ring 3 task const code_offset = if (task.kernel) gdt.KERNEL_CODE_OFFSET else gdt.USER_CODE_OFFSET | 0b11; // Ring switches push and pop two extra values on interrupt: user_esp and user_ss stack.*[kernel_stack_bottom] = mem.virtToPhys(@ptrToInt(&paging.kernel_directory)); stack.*[kernel_stack_bottom + 1] = data_offset; // gs stack.*[kernel_stack_bottom + 2] = data_offset; // fs stack.*[kernel_stack_bottom + 3] = data_offset; // es stack.*[kernel_stack_bottom + 4] = data_offset; // ds stack.*[kernel_stack_bottom + 5] = 0; // edi stack.*[kernel_stack_bottom + 6] = 0; // esi // End of the stack stack.*[kernel_stack_bottom + 7] = @ptrToInt(&stack.*[stack.len - 1]); // ebp stack.*[kernel_stack_bottom + 8] = 0; // esp (temp) this won't be popped by popa bc intel is dump XD stack.*[kernel_stack_bottom + 9] = 0; // ebx stack.*[kernel_stack_bottom + 10] = 0; // edx stack.*[kernel_stack_bottom + 11] = 0; // ecx stack.*[kernel_stack_bottom + 12] = 0; // eax stack.*[kernel_stack_bottom + 13] = 0; // int_num stack.*[kernel_stack_bottom + 14] = 0; // error_code stack.*[kernel_stack_bottom + 15] = entry_point; // eip stack.*[kernel_stack_bottom + 16] = code_offset; // cs stack.*[kernel_stack_bottom + 17] = 0x202; // eflags if (!task.kernel) { // Put the extra values on the kernel stack needed 
when chaning privilege levels stack.*[kernel_stack_bottom + 18] = @ptrToInt(&task.user_stack[task.user_stack.len - 1]); // user_esp stack.*[kernel_stack_bottom + 19] = data_offset; // user_ss } task.stack_pointer = @ptrToInt(&stack.*[kernel_stack_bottom]); } if (!task.kernel and !builtin.is_test) { // Create a new page directory for the user task by mirroring the kernel directory // We need kernel mem mapped so we don't get a page fault when entering kernel code from an interrupt task.vmm.payload = &(try allocator.allocAdvanced(paging.Directory, paging.PAGE_SIZE_4KB, 1, .exact))[0]; task.vmm.payload.* = paging.kernel_directory.copy(); if (set_up_stack) { stack.*[kernel_stack_bottom] = vmm.kernel_vmm.virtToPhys(@ptrToInt(task.vmm.payload)) catch |e| { panic(@errorReturnTrace(), "Failed to get the physical address of the user task's page directory: {}\n", .{e}); }; } } } /// /// Get a list of hardware devices attached to the system. /// /// Arguments: /// IN allocator: Allocator - An allocator for getting the devices /// /// Return: []Device /// A list of hardware devices. /// /// Error: Allocator.Error /// OutOfMemory - Unable to allocate space the operation. /// pub fn getDevices(allocator: Allocator) Allocator.Error![]Device { return pci.getDevices(allocator); } /// /// Get the system date and time. /// /// Return: DateTime /// The current date and time. /// pub fn getDateTime() DateTime { return rtc.getDateTime(); } /// /// Initialise the architecture /// /// Arguments: /// IN mem_profile: *const MemProfile - The memory profile of the computer. Used to set up /// paging. 
///
pub fn init(mem_profile: *const MemProfile) void {
    // Order matters: the GDT and IDT must exist before the interrupt
    // controllers and handlers are set up, and paging before the timers.
    gdt.init();
    idt.init();

    pic.init();
    isr.init();
    irq.init();

    paging.init(mem_profile);

    pit.init();

    rtc.init();

    syscalls.init();

    // Initialise the VGA and TTY here since their tests belong to the architecture and so should be a part of the
    // arch init test messages
    vga.init();
    tty.init();
}

///
/// Check the state of the user task used for runtime testing for the expected values. These should mirror those in test/user_program.s
///
/// Arguments:
///     IN ctx: *const CpuState - The task's saved state
///
/// Return: bool
///     True if the expected values were found, else false
///
pub fn runtimeTestCheckUserTaskState(ctx: *const CpuState) bool {
    // Magic values set by the user test program before it traps.
    return ctx.eax == 0xCAFE and ctx.ebx == 0xBEEF;
}

///
/// Trigger a page fault to test paging and its diagnostics
///
/// Arguments:
///     IN the_vmm: The VMM to get an unallocated test address from
///
pub fn runtimeTestChecksMem(the_vmm: *const vmm.VirtualMemoryManager(VmmPayload)) void {
    // Walk forward in BLOCK_SIZE steps until an unmapped address is found.
    var addr = the_vmm.start;
    while (addr < the_vmm.end and (the_vmm.isSet(addr) catch unreachable)) {
        addr += vmm.BLOCK_SIZE;
    }
    // Dereferencing an unmapped address must page fault, so the log line
    // below should never be reached.
    const should_fault = @intToPtr(*usize, addr).*;
    log.debug("This should not be printed: {x}\n", .{should_fault});
}

test "" {
    std.testing.refAllDecls(@This());
}
0
repos/pluto/src/kernel/arch
repos/pluto/src/kernel/arch/x86/serial.zig
const arch = @import("arch.zig");
const panic = @import("../../panic.zig").panic;
const testing = @import("std").testing;

/// The I/O port numbers associated with each serial port
pub const Port = enum(u16) {
    COM1 = 0x3F8,
    COM2 = 0x2F8,
    COM3 = 0x3E8,
    COM4 = 0x2E8,
};

/// Errors thrown by serial functions
pub const SerialError = error{
    /// The given baudrate is outside of the allowed range
    InvalidBaudRate,

    /// The given char len is outside the allowed range.
    InvalidCharacterLength,
};

/// The LCR is the line control register, at this offset from the port base
const LCR: u16 = 3;

/// Maximum baudrate
const BAUD_MAX: u32 = 115200;

/// 8 bits per serial character
const CHAR_LEN: u8 = 8;

/// One stop bit per transmission
const SINGLE_STOP_BIT: bool = true;

/// No parity bit
const PARITY_BIT: bool = false;

/// Default baudrate
pub const DEFAULT_BAUDRATE = 38400;

///
/// Compute a value that encodes the serial properties
/// Used by the line control register
///
/// Arguments:
///     IN char_len: u8 - The number of bits in each individual byte. Must be 0 or between 5 and 8 (inclusive).
///     IN stop_bit: bool - If a stop bit should included in each transmission.
///     IN parity_bit: bool - If a parity bit should be included in each transmission.
///     IN msb: u1 - The most significant bit to use (the divisor-latch access bit).
///
/// Return: u8
///     The computed lcr value.
///
/// Error: SerialError
///     InvalidCharacterLength - If the char_len is less than 5 or greater than 8.
///
fn lcrValue(char_len: u8, stop_bit: bool, parity_bit: bool, msb: u1) SerialError!u8 {
    if (char_len != 0 and (char_len < 5 or char_len > 8))
        return SerialError.InvalidCharacterLength;
    // Set the msb and OR in all arguments passed.
    // NOTE(review): on a 16550-style UART the word-length field is usually
    // encoded as (char_len - 5), whereas `char_len & 0x3` maps 8 -> 0b00.
    // The unit test pins this exact expression, so confirm intent before
    // changing it.
    const val = char_len & 0x3 |
        @intCast(u8, @boolToInt(stop_bit)) << 2 |
        @intCast(u8, @boolToInt(parity_bit)) << 3 |
        @intCast(u8, msb) << 7;
    return val;
}

///
/// The serial controller accepts a divisor rather than a raw baudrate, as that is more space efficient.
/// This function computes the divisor for a desired baudrate.
Note that multiple baudrates can have the same divisor.
///
/// Arguments:
///     baud: u32 - The desired baudrate. Must be greater than 0 and at most BAUD_MAX.
///
/// Return: u16
///     The computed divisor.
///
/// Error: SerialError
///     InvalidBaudRate - If baudrate is 0 or greater than BAUD_MAX.
///
fn baudDivisor(baud: u32) SerialError!u16 {
    if (baud > BAUD_MAX or baud == 0)
        return SerialError.InvalidBaudRate;
    // NOTE(review): for baud 1, BAUD_MAX / baud (115200) exceeds u16 and is
    // silently truncated here; the unit test pins this behaviour — confirm
    // whether such low baudrates should instead be rejected.
    return @truncate(u16, BAUD_MAX / baud);
}

///
/// Checks if the transmission buffer is empty, which means data can be sent.
///
/// Arguments:
///     port: Port - The port to check.
///
/// Return: bool
///     If the transmission buffer is empty.
///
fn transmitIsEmpty(port: Port) bool {
    // Reads the register at base + 5 and tests bit 5 (transmit ready).
    return arch.in(u8, @enumToInt(port) + 5) & 0x20 > 0;
}

///
/// Write a byte to a serial port. Waits until the transmission queue is empty.
///
/// Arguments:
///     char: u8 - The byte to send.
///     port: Port - The port to send the byte to.
///
pub fn write(char: u8, port: Port) void {
    // Busy-wait, halting between polls so the CPU can sleep until the next
    // interrupt instead of spinning hot.
    while (!transmitIsEmpty(port)) {
        arch.halt();
    }
    arch.out(@enumToInt(port), char);
}

///
/// Initialise a serial port to a certain baudrate
///
/// Arguments
///     IN baud: u32 - The baudrate to use. Cannot be more than BAUD_MAX
///     IN port: Port - The port to initialise
///
/// Error: SerialError
///     InvalidBaudRate - The baudrate is 0 or greater than BAUD_MAX.
///
pub fn init(baud: u32, port: Port) SerialError!void {
    // The baudrate is sent as a divisor of the max baud rate
    const divisor: u16 = try baudDivisor(baud);
    const port_int = @enumToInt(port);
    // Send a byte to start setting the baudrate (msb = 1 opens the divisor latch)
    arch.out(port_int + LCR, lcrValue(0, false, false, 1) catch |e| {
        panic(@errorReturnTrace(), "Failed to initialise serial output setup: {}", .{e});
    });
    // Send the divisor's lsb
    arch.out(port_int, @truncate(u8, divisor));
    // Send the divisor's msb
    arch.out(port_int + 1, @truncate(u8, divisor >> 8));
    // Send the properties to use (msb = 0 closes the divisor latch again)
    arch.out(port_int + LCR, lcrValue(CHAR_LEN, SINGLE_STOP_BIT, PARITY_BIT, 0) catch |e| {
        panic(@errorReturnTrace(), "Failed to setup serial properties: {}", .{e});
    });
    // Stop initialisation
    arch.out(port_int + 1, @as(u8, 0));
}

test "lcrValue computes the correct value" {
    // Check valid combinations of char length, stop bit, parity bit and msb.
    inline for ([_]u8{ 0, 5, 6, 7, 8 }) |char_len| {
        inline for ([_]bool{ true, false }) |stop_bit| {
            inline for ([_]bool{ true, false }) |parity_bit| {
                inline for ([_]u1{ 0, 1 }) |msb| {
                    const val = try lcrValue(char_len, stop_bit, parity_bit, msb);
                    const expected = char_len & 0x3 |
                        @boolToInt(stop_bit) << 2 |
                        @boolToInt(parity_bit) << 3 |
                        @intCast(u8, msb) << 7;
                    try testing.expectEqual(val, expected);
                }
            }
        }
    }
    // Check invalid char lengths (4 is below the range, 9 above)
    try testing.expectError(SerialError.InvalidCharacterLength, lcrValue(4, false, false, 0));
    try testing.expectError(SerialError.InvalidCharacterLength, lcrValue(9, false, false, 0));
}

test "baudDivisor" {
    // Check invalid baudrates: zero and just over the maximum
    inline for ([_]u32{ 0, BAUD_MAX + 1 }) |baud| {
        try testing.expectError(SerialError.InvalidBaudRate, baudDivisor(baud));
    }

    // Check every valid baudrate against the reference expression
    var baud: u32 = 1;
    while (baud <= BAUD_MAX) : (baud += 1) {
        const val = try baudDivisor(baud);
        const expected = @truncate(u16, BAUD_MAX / baud);
        try testing.expectEqual(val, expected);
    }
}
0
repos/pluto/src/kernel/arch
repos/pluto/src/kernel/arch/x86/isr.zig
const std = @import("std"); const builtin = @import("builtin"); const is_test = builtin.is_test; const expect = std.testing.expect; const expectEqual = std.testing.expectEqual; const expectError = std.testing.expectError; const log = std.log.scoped(.x86_isr); const build_options = @import("build_options"); const syscalls = @import("syscalls.zig"); const panic = @import("../../panic.zig").panic; const idt = if (is_test) @import("../../../../test/mock/kernel/idt_mock.zig") else @import("idt.zig"); const arch = if (is_test) @import("../../../../test/mock/kernel/arch_mock.zig") else @import("arch.zig"); const interrupts = @import("interrupts.zig"); /// The error set for the ISR. This will be from installing a ISR handler. pub const IsrError = error{ /// The ISR index is invalid. InvalidIsr, /// A ISR handler already exists. IsrExists, }; /// The type of a ISR handler. A function that takes a interrupt context and returns void. const IsrHandler = fn (*arch.CpuState) usize; /// The number of ISR entries. const NUMBER_OF_ENTRIES: u8 = 32; /// The exception messaged that is printed when a exception happens const exception_msg: [NUMBER_OF_ENTRIES][]const u8 = [NUMBER_OF_ENTRIES][]const u8{ "Divide By Zero", "Single Step (Debugger)", "Non Maskable Interrupt", "Breakpoint (Debugger)", "Overflow", "Bound Range Exceeded", "Invalid Opcode", "No Coprocessor, Device Not Available", "Double Fault", "Coprocessor Segment Overrun", "Invalid Task State Segment (TSS)", "Segment Not Present", "Stack Segment Overrun", "General Protection Fault", "Page Fault", "Unknown Interrupt", "x87 FPU Floating Point Error", "Alignment Check", "Machine Check", "SIMD Floating Point", "Virtualization", "Reserved", "Reserved", "Reserved", "Reserved", "Reserved", "Reserved", "Reserved", "Reserved", "Reserved", "Security", "Reserved", }; /// Divide By Zero exception. pub const DIVIDE_BY_ZERO: u8 = 0; /// Single Step (Debugger) exception. 
pub const SINGLE_STEP_DEBUG: u8 = 1; /// Non Maskable Interrupt exception. pub const NON_MASKABLE_INTERRUPT: u8 = 2; /// Breakpoint (Debugger) exception. pub const BREAKPOINT_DEBUG: u8 = 3; /// Overflow exception. pub const OVERFLOW: u8 = 4; /// Bound Range Exceeded exception. pub const BOUND_RANGE_EXCEEDED: u8 = 5; /// Invalid Opcode exception. pub const INVALID_OPCODE: u8 = 6; /// No Coprocessor, Device Not Available exception. pub const DEVICE_NOT_AVAILABLE: u8 = 7; /// Double Fault exception. pub const DOUBLE_FAULT: u8 = 8; /// Coprocessor Segment Overrun exception. pub const COPROCESSOR_SEGMENT_OVERRUN: u8 = 9; /// Invalid Task State Segment (TSS) exception. pub const INVALID_TASK_STATE_SEGMENT: u8 = 10; /// Segment Not Present exception. pub const SEGMENT_NOT_PRESENT: u8 = 11; /// Stack Segment Overrun exception. pub const STACK_SEGMENT_FAULT: u8 = 12; /// General Protection Fault exception. pub const GENERAL_PROTECTION_FAULT: u8 = 13; /// Page Fault exception. pub const PAGE_FAULT: u8 = 14; /// x87 FPU Floating Point Error exception. pub const X87_FLOAT_POINT: u8 = 16; /// Alignment Check exception. pub const ALIGNMENT_CHECK: u8 = 17; /// Machine Check exception. pub const MACHINE_CHECK: u8 = 18; /// SIMD Floating Point exception. pub const SIMD_FLOAT_POINT: u8 = 19; /// Virtualisation exception. pub const VIRTUALISATION: u8 = 20; /// Security exception. pub const SECURITY: u8 = 30; /// The of exception handlers initialised to null. Need to open a ISR for these to be valid. var isr_handlers: [NUMBER_OF_ENTRIES]?IsrHandler = [_]?IsrHandler{null} ** NUMBER_OF_ENTRIES; /// The syscall handler. var syscall_handler: ?IsrHandler = null; /// /// The exception handler that each of the exceptions will call when a exception happens. /// /// Arguments: /// IN ctx: *arch.CpuState - Pointer to the exception context containing the contents /// of the register at the time of the exception. 
///
export fn isrHandler(ctx: *arch.CpuState) usize {
    const interrupt = ctx.int_num;
    // Default to resuming on the same stack the interrupt arrived with; a
    // registered handler may hand back a different stack pointer.
    var next_esp = @ptrToInt(ctx);

    if (!isValidIsr(interrupt)) {
        panic(@errorReturnTrace(), "Invalid ISR index: {}\n", .{interrupt});
    } else if (interrupt == syscalls.INTERRUPT) {
        // Syscalls are dispatched through their own single handler slot.
        if (syscall_handler) |handler| {
            next_esp = handler(ctx);
        } else {
            panic(@errorReturnTrace(), "Syscall handler not registered\n", .{});
        }
    } else if (isr_handlers[interrupt]) |handler| {
        // A CPU exception with a registered handler.
        next_esp = handler(ctx);
    } else {
        // An exception fired that nothing registered for: dump the state and die.
        log.info("State: {X}\n", .{ctx});
        panic(@errorReturnTrace(), "ISR {s} ({}) triggered with error code 0x{X} but not registered\n", .{ exception_msg[interrupt], interrupt, ctx.error_code });
    }

    return next_esp;
}

///
/// Install an IDT interrupt gate for the given index, turning any error from
/// the IDT layer into a panic.
///
/// Arguments:
///     IN index: u8 - The IDT interrupt number.
///     IN handler: idt.InterruptHandler - The IDT handler.
///
fn openIsr(index: u8, handler: idt.InterruptHandler) void {
    idt.openInterruptGate(index, handler) catch |err| switch (err) {
        error.IdtEntryExists => panic(@errorReturnTrace(), "Error opening ISR number: {} exists\n", .{index}),
    };
}

///
/// Whether an interrupt number can be handled by this module: either one of
/// the CPU exception vectors or the syscall vector.
///
/// Arguments:
///     IN isr_num: u32 - The isr number to check
///
/// Return: bool
///     Whether a ISR handler index is valid.
///
pub fn isValidIsr(isr_num: u32) bool {
    if (isr_num == syscalls.INTERRUPT) {
        return true;
    }
    return isr_num < NUMBER_OF_ENTRIES;
}

///
/// Register an exception by setting its exception handler to the given function.
///
/// Arguments:
///     IN isr_num: u16 - The exception number to register.
///     IN handler: IsrHandler - The handler to attach to the exception.
///
/// Errors: IsrError
///     IsrError.InvalidIsr - If the ISR index is invalid (see isValidIsr).
///     IsrError.IsrExists - If the ISR handler has already been registered.
/// pub fn registerIsr(isr_num: u16, handler: IsrHandler) IsrError!void { // Check if a valid ISR index if (isValidIsr(isr_num)) { if (isr_num == syscalls.INTERRUPT) { // Syscall handler if (syscall_handler) |_| { // One already registered return IsrError.IsrExists; } else { // Register a handler syscall_handler = handler; } } else { if (isr_handlers[isr_num]) |_| { // One already registered return IsrError.IsrExists; } else { // Register a handler isr_handlers[isr_num] = handler; } } } else { return IsrError.InvalidIsr; } } /// /// Initialise the exception and opening up all the IDT interrupt gates for each exception. /// pub fn init() void { log.info("Init\n", .{}); defer log.info("Done\n", .{}); comptime var i = 0; inline while (i < 32) : (i += 1) { openIsr(i, interrupts.getInterruptStub(i)); } openIsr(syscalls.INTERRUPT, interrupts.getInterruptStub(syscalls.INTERRUPT)); switch (build_options.test_mode) { .Initialisation => runtimeTests(), else => {}, } } fn testFunction0() callconv(.Naked) void {} fn testFunction1(ctx: *arch.CpuState) u32 { // Suppress unused var warning _ = ctx; return 0; } fn testFunction2(ctx: *arch.CpuState) u32 { // Suppress unused var warning _ = ctx; return 0; } fn testFunction3(ctx: *arch.CpuState) u32 { // Suppress unused var warning _ = ctx; return 0; } fn testFunction4(ctx: *arch.CpuState) u32 { // Suppress unused var warning _ = ctx; return 0; } test "openIsr" { idt.initTest(); defer idt.freeTest(); const index: u8 = 0; const handler = testFunction0; const ret: idt.IdtError!void = {}; idt.addTestParams("openInterruptGate", .{ index, handler, ret }); openIsr(index, handler); } test "isValidIsr" { comptime var i = 0; inline while (i < NUMBER_OF_ENTRIES) : (i += 1) { try expectEqual(true, isValidIsr(i)); } try expect(isValidIsr(syscalls.INTERRUPT)); try expect(!isValidIsr(200)); } test "registerIsr re-register syscall handler" { // Pre testing try expect(null == syscall_handler); // Call function try registerIsr(syscalls.INTERRUPT, 
testFunction3); try expectError(IsrError.IsrExists, registerIsr(syscalls.INTERRUPT, testFunction4)); // Post testing try expectEqual(testFunction3, syscall_handler.?); // Clean up syscall_handler = null; } test "registerIsr register syscall handler" { // Pre testing try expect(null == syscall_handler); // Call function try registerIsr(syscalls.INTERRUPT, testFunction3); // Post testing try expectEqual(testFunction3, syscall_handler.?); // Clean up syscall_handler = null; } test "registerIsr re-register isr handler" { // Pre testing for (isr_handlers) |h| { try expect(null == h); } // Call function try registerIsr(0, testFunction1); try expectError(IsrError.IsrExists, registerIsr(0, testFunction2)); // Post testing for (isr_handlers) |h, i| { if (i != 0) { try expect(null == h); } else { try expectEqual(testFunction1, h.?); } } // Clean up isr_handlers[0] = null; } test "registerIsr register isr handler" { // Pre testing for (isr_handlers) |h| { try expect(null == h); } // Call function try registerIsr(0, testFunction1); // Post testing for (isr_handlers) |h, i| { if (i != 0) { try expect(null == h); } else { try expectEqual(testFunction1, h.?); } } // Clean up isr_handlers[0] = null; } test "registerIsr invalid isr index" { try expectError(IsrError.InvalidIsr, registerIsr(200, testFunction1)); } /// /// Test that all handlers are null at initialisation. /// fn rt_unregisteredHandlers() void { // Ensure all ISR are not registered yet for (isr_handlers) |h, i| { if (h) |_| { panic(@errorReturnTrace(), "FAILURE: Handler found for ISR: {}-{}\n", .{ i, h }); } } if (syscall_handler) |h| { panic(@errorReturnTrace(), "FAILURE: Pre-testing failed for syscall: {}\n", .{h}); } log.info("Tested registered handlers\n", .{}); } /// /// Test that all IDT entries for the ISRs are open. 
///
fn rt_openedIdtEntries() void {
    // Read the IDT register back so we test the table the CPU is actually using,
    // not just the one this module built.
    const loaded_idt = arch.sidt();
    const idt_entries = @intToPtr([*]idt.IdtEntry, loaded_idt.base)[0..idt.NUMBER_OF_ENTRIES];
    for (idt_entries) |entry, i| {
        // Only the exception vectors and the syscall vector are expected to be open.
        if (isValidIsr(i)) {
            if (!idt.isIdtOpen(entry)) {
                panic(@errorReturnTrace(), "FAILURE: IDT entry for {} is not open\n", .{i});
            }
        }
    }
    log.info("Tested opened IDT entries\n", .{});
}

///
/// Run all the runtime tests.
///
pub fn runtimeTests() void {
    rt_unregisteredHandlers();
    rt_openedIdtEntries();
}
0
repos/pluto/src/kernel/arch
repos/pluto/src/kernel/arch/x86/cmos.zig
const std = @import("std");
const builtin = @import("builtin");
const is_test = builtin.is_test;
const expectEqual = std.testing.expectEqual;
const build_options = @import("build_options");
const arch = if (is_test) @import("../../../../test/mock/kernel/arch_mock.zig") else @import("arch.zig");

/// The century base added to the two digit year returned by the CMOS to get the 4 digit year.
/// This must be a u32 (not u8): the value 2000 does not fit in a u8 and would be a
/// compile-time error.
const CURRENT_CENTURY: u32 = 2000;

/// The port address for the CMOS command register.
const ADDRESS: u16 = 0x70;

/// The port address for the CMOS data register.
const DATA: u16 = 0x71;

/// The register location for returning the seconds, (0 - 59).
const REGISTER_SECOND: u8 = 0x00;

/// The register location for returning the minute, (0 - 59).
const REGISTER_MINUTE: u8 = 0x02;

/// The register location for returning the hours, (0 - 23 in 24hr mode, or 1 - 12 in
/// 12hr mode).
const REGISTER_HOUR: u8 = 0x04;

/// The register location for returning the weekday, (1 - 7, Sunday = 1). Very unreliable, so will
/// calculate the day of the week instead.
const REGISTER_WEEKDAY: u8 = 0x06;

/// The register location for returning the day of the month, (1 - 31).
const REGISTER_DAY: u8 = 0x07;

/// The register location for returning the month, (1 - 12).
const REGISTER_MONTH: u8 = 0x08;

/// The register location for returning the year, (0 - 99), the last two digits of the year.
const REGISTER_YEAR: u8 = 0x09;

/// The register location for returning the century.
const REGISTER_CENTURY: u8 = 0x32;

/// The register location for return the status A register.
const STATUS_REGISTER_A: u8 = 0x0A;

/// The register location for return the status B register.
const STATUS_REGISTER_B: u8 = 0x0B;

/// The register location for return the status C register.
const STATUS_REGISTER_C: u8 = 0x0C;

/// The non-maskable interrupt (NMI) disable bit. Port 0x70 doubles as the CMOS register select
/// port and the NMI enable bit (bit 7), so this is ORed into the register index when the NMI
/// should be disabled while selecting CMOS registers.
const NMI_BIT: u8 = 0x80; /// The enum for selecting the status register to read from pub const StatusRegister = enum { /// Status register A A, /// Status register B B, /// Status register C C, /// /// Get the register index for the status registers /// /// Arguments: /// IN reg: StatusRegister - The enum that represents one of the 3 status registers. /// /// Return: u8 /// The register index for one of the 3 status registers. /// pub fn getRegister(reg: StatusRegister) u8 { return switch (reg) { .A => STATUS_REGISTER_A, .B => STATUS_REGISTER_B, .C => STATUS_REGISTER_C, }; } }; /// The enum for selecting the real time clock registers. pub const RtcRegister = enum { /// The seconds register SECOND, /// The minutes register MINUTE, /// The hours register HOUR, /// The days register DAY, /// The months register MONTH, /// The year register YEAR, /// The century register CENTURY, /// /// Get the register index for the RTC registers /// /// Arguments: /// IN reg: RtcRegister - The enum that represents one of the RTC registers. /// /// Return: u8 /// The register index for one of the RTC registers. /// pub fn getRegister(reg: RtcRegister) u8 { return switch (reg) { .SECOND => REGISTER_SECOND, .MINUTE => REGISTER_MINUTE, .HOUR => REGISTER_HOUR, .DAY => REGISTER_DAY, .MONTH => REGISTER_MONTH, .YEAR => REGISTER_YEAR, .CENTURY => REGISTER_CENTURY, }; } }; /// /// Tell the CMOS chip to select the given register ready for read or writing to. This also /// disables the NMI when disable_nmi is true. /// /// Arguments: /// IN reg: u8 - The register index to select in the CMOS chip. /// IN comptime disable_nmi: bool - Whether to disable NMI when selecting a register. /// inline fn selectRegister(reg: u8, comptime disable_nmi: bool) void { if (disable_nmi) { arch.out(ADDRESS, reg | NMI_BIT); } else { arch.out(ADDRESS, reg); } } /// /// Write to the selected register to the CMOS chip. /// /// Arguments: /// IN data: u8 - The data to write to the selected register. 
/// inline fn writeRegister(data: u8) void { arch.out(DATA, data); } /// /// Read the selected register from the CMOS chip. /// /// Return: u8 /// The value in the selected register. /// inline fn readRegister() u8 { return arch.in(u8, DATA); } /// /// Select then read a register from the CMOS chip. This include a I/O wait to ensure the CMOS chip /// has time to select the register. /// /// Arguments: /// IN reg: u8 - The register index to select in the CMOS chip. /// IN comptime disable_nmi: bool - Whether to disable NMI when selecting a register. /// /// Return: u8 /// The value in the selected register. /// inline fn selectAndReadRegister(reg: u8, comptime disable_nmi: bool) u8 { selectRegister(reg, disable_nmi); arch.ioWait(); return readRegister(); } /// /// Select then write to a register to the CMOS chip. This include a I/O wait to ensure the CMOS chip /// has time to select the register. /// /// Arguments: /// IN reg: u8 - The register index to select in the CMOS chip. /// IN data: u8 - The data to write to the selected register. /// IN comptime disable_nmi: bool - Whether to disable NMI when selecting a register. /// inline fn selectAndWriteRegister(reg: u8, data: u8, comptime disable_nmi: bool) void { selectRegister(reg, disable_nmi); arch.ioWait(); writeRegister(data); } /// /// Read a register that corresponds to a real time clock register. /// /// Arguments: /// IN reg: RtcRegister - A RTC register to select in the CMOS chip. /// /// Return: u8 /// The value in the selected register. /// pub fn readRtcRegister(reg: RtcRegister) u8 { return selectAndReadRegister(reg.getRegister(), false); } /// /// Read a status register in the CMOS chip. /// /// Arguments: /// IN reg: StatusRegister - The status register to select. /// IN comptime disable_nmi: bool - Whether to disable NMI when selecting a register. /// /// Return: u8 /// The value in the selected register. 
/// pub fn readStatusRegister(reg: StatusRegister, comptime disable_nmi: bool) u8 { return selectAndReadRegister(reg.getRegister(), disable_nmi); } /// /// Write to a status register in the CMOS chip. /// /// Arguments: /// IN reg: StatusRegister - The status register to select. /// IN data: u8 - The data to write to the selected register. /// IN comptime disable_nmi: bool - Whether to disable NMI when selecting a register. /// pub fn writeStatusRegister(reg: StatusRegister, data: u8, comptime disable_nmi: bool) void { selectAndWriteRegister(reg.getRegister(), data, disable_nmi); } test "selectRegister" { arch.initTest(); defer arch.freeTest(); arch.addTestParams("out", .{ ADDRESS, STATUS_REGISTER_A }); const reg = STATUS_REGISTER_A; selectRegister(reg, false); } test "selectRegister no NMI" { arch.initTest(); defer arch.freeTest(); arch.addTestParams("out", .{ ADDRESS, STATUS_REGISTER_A | NMI_BIT }); const reg = STATUS_REGISTER_A; selectRegister(reg, true); } test "writeRegister" { arch.initTest(); defer arch.freeTest(); arch.addTestParams("out", .{ DATA, @as(u8, 0xAA) }); const data = @as(u8, 0xAA); writeRegister(data); } test "readRegister" { arch.initTest(); defer arch.freeTest(); arch.addTestParams("in", .{ DATA, @as(u8, 0x55) }); const expected = @as(u8, 0x55); const actual = readRegister(); try expectEqual(expected, actual); } test "selectAndReadRegister NMI" { arch.initTest(); defer arch.freeTest(); arch.addTestParams("out", .{ ADDRESS, STATUS_REGISTER_C }); arch.addTestParams("in", .{ DATA, @as(u8, 0x44) }); arch.addConsumeFunction("ioWait", arch.mock_ioWait); const reg = STATUS_REGISTER_C; const expected = @as(u8, 0x44); const actual = selectAndReadRegister(reg, false); try expectEqual(expected, actual); } test "selectAndReadRegister no NMI" { arch.initTest(); defer arch.freeTest(); arch.addTestParams("out", .{ ADDRESS, STATUS_REGISTER_C | NMI_BIT }); arch.addTestParams("in", .{ DATA, @as(u8, 0x44) }); arch.addConsumeFunction("ioWait", arch.mock_ioWait); 
const reg = STATUS_REGISTER_C; const expected = @as(u8, 0x44); const actual = selectAndReadRegister(reg, true); try expectEqual(expected, actual); } test "selectAndWriteRegister NMI" { arch.initTest(); defer arch.freeTest(); arch.addTestParams("out", .{ ADDRESS, STATUS_REGISTER_C, DATA, @as(u8, 0x88) }); arch.addConsumeFunction("ioWait", arch.mock_ioWait); const reg = STATUS_REGISTER_C; const data = @as(u8, 0x88); selectAndWriteRegister(reg, data, false); } test "selectAndWriteRegister no NMI" { arch.initTest(); defer arch.freeTest(); arch.addTestParams("out", .{ ADDRESS, STATUS_REGISTER_C | NMI_BIT, DATA, @as(u8, 0x88) }); arch.addConsumeFunction("ioWait", arch.mock_ioWait); const reg = STATUS_REGISTER_C; const data = @as(u8, 0x88); selectAndWriteRegister(reg, data, true); } test "readRtcRegister" { arch.initTest(); defer arch.freeTest(); arch.addRepeatFunction("ioWait", arch.mock_ioWait); const rtc_regs = [_]RtcRegister{ RtcRegister.SECOND, RtcRegister.MINUTE, RtcRegister.HOUR, RtcRegister.DAY, RtcRegister.MONTH, RtcRegister.YEAR, RtcRegister.CENTURY }; for (rtc_regs) |reg| { const r = switch (reg) { .SECOND => REGISTER_SECOND, .MINUTE => REGISTER_MINUTE, .HOUR => REGISTER_HOUR, .DAY => REGISTER_DAY, .MONTH => REGISTER_MONTH, .YEAR => REGISTER_YEAR, .CENTURY => REGISTER_CENTURY, }; arch.addTestParams("out", .{ ADDRESS, r }); arch.addTestParams("in", .{ DATA, @as(u8, 0x44) }); const expected = @as(u8, 0x44); const actual = readRtcRegister(reg); try expectEqual(expected, actual); } } test "readStatusRegister NMI" { arch.initTest(); defer arch.freeTest(); arch.addRepeatFunction("ioWait", arch.mock_ioWait); const status_regs = [_]StatusRegister{ StatusRegister.A, StatusRegister.B, StatusRegister.C }; for (status_regs) |reg| { const r = switch (reg) { .A => STATUS_REGISTER_A, .B => STATUS_REGISTER_B, .C => STATUS_REGISTER_C, }; arch.addTestParams("out", .{ ADDRESS, r }); arch.addTestParams("in", .{ DATA, @as(u8, 0x78) }); const expected = @as(u8, 0x78); const actual = 
readStatusRegister(reg, false); try expectEqual(expected, actual); } } test "readStatusRegister no NMI" { arch.initTest(); defer arch.freeTest(); arch.addRepeatFunction("ioWait", arch.mock_ioWait); const status_regs = [_]StatusRegister{ StatusRegister.A, StatusRegister.B, StatusRegister.C }; for (status_regs) |reg| { const r = switch (reg) { .A => STATUS_REGISTER_A, .B => STATUS_REGISTER_B, .C => STATUS_REGISTER_C, }; arch.addTestParams("out", .{ ADDRESS, r | NMI_BIT }); arch.addTestParams("in", .{ DATA, @as(u8, 0x78) }); const expected = @as(u8, 0x78); const actual = readStatusRegister(reg, true); try expectEqual(expected, actual); } } test "writeStatusRegister NMI" { arch.initTest(); defer arch.freeTest(); arch.addRepeatFunction("ioWait", arch.mock_ioWait); const status_regs = [_]StatusRegister{ StatusRegister.A, StatusRegister.B, StatusRegister.C }; for (status_regs) |reg| { const r = switch (reg) { .A => STATUS_REGISTER_A, .B => STATUS_REGISTER_B, .C => STATUS_REGISTER_C, }; arch.addTestParams("out", .{ ADDRESS, r, DATA, @as(u8, 0x43) }); const data = @as(u8, 0x43); writeStatusRegister(reg, data, false); } } test "writeStatusRegister no NMI" { arch.initTest(); defer arch.freeTest(); arch.addRepeatFunction("ioWait", arch.mock_ioWait); const status_regs = [_]StatusRegister{ StatusRegister.A, StatusRegister.B, StatusRegister.C }; for (status_regs) |reg| { const r = switch (reg) { .A => STATUS_REGISTER_A, .B => STATUS_REGISTER_B, .C => STATUS_REGISTER_C, }; arch.addTestParams("out", .{ ADDRESS, r | NMI_BIT, DATA, @as(u8, 0x43) }); const data = @as(u8, 0x43); writeStatusRegister(reg, data, true); } }
0
repos/pluto/src/kernel/arch
repos/pluto/src/kernel/arch/x86/irq.zig
const std = @import("std"); const builtin = @import("builtin"); const is_test = builtin.is_test; const expect = std.testing.expect; const expectEqual = std.testing.expectEqual; const expectError = std.testing.expectError; const log = std.log.scoped(.x86_irq); const build_options = @import("build_options"); const panic = @import("../../panic.zig").panic; const idt = if (is_test) @import("../../../../test/mock/kernel/idt_mock.zig") else @import("idt.zig"); const arch = if (is_test) @import("../../../../test/mock/kernel/arch_mock.zig") else @import("arch.zig"); const pic = if (is_test) @import("../../../../test/mock/kernel/pic_mock.zig") else @import("pic.zig"); const interrupts = @import("interrupts.zig"); /// The error set for the IRQ. This will be from installing a IRQ handler. pub const IrqError = error{ /// The IRQ index is invalid. InvalidIrq, /// A IRQ handler already exists. IrqExists, }; /// The total number of IRQ. const NUMBER_OF_ENTRIES: u16 = 16; /// The type of a IRQ handler. A function that takes a interrupt context and returns void. const IrqHandler = fn (*arch.CpuState) usize; // The offset from the interrupt number where the IRQs are. pub const IRQ_OFFSET: u16 = 32; /// The list of IRQ handlers initialised to unhandled. var irq_handlers: [NUMBER_OF_ENTRIES]?IrqHandler = [_]?IrqHandler{null} ** NUMBER_OF_ENTRIES; /// /// The IRQ handler that each of the IRQs will call when a interrupt happens. /// /// Arguments: /// IN ctx: *arch.CpuState - Pointer to the interrupt context containing the contents /// of the register at the time of the interrupt. /// export fn irqHandler(ctx: *arch.CpuState) usize { // Get the IRQ index, by getting the interrupt number and subtracting the offset. 
if (ctx.int_num < IRQ_OFFSET) { panic(@errorReturnTrace(), "Not an IRQ number: {}\n", .{ctx.int_num}); } var ret_esp = @ptrToInt(ctx); const irq_offset = ctx.int_num - IRQ_OFFSET; if (isValidIrq(irq_offset)) { // IRQ index is valid so can truncate const irq_num = @truncate(u8, irq_offset); if (irq_handlers[irq_num]) |handler| { // Make sure it isn't a spurious irq if (!pic.spuriousIrq(irq_num)) { ret_esp = handler(ctx); // Send the end of interrupt command pic.sendEndOfInterrupt(irq_num); } } else { panic(@errorReturnTrace(), "IRQ not registered: {}", .{irq_num}); } } else { panic(@errorReturnTrace(), "Invalid IRQ index: {}", .{irq_offset}); } return ret_esp; } /// /// Open an IDT entry with index and handler. This will also handle the errors. /// /// Arguments: /// IN index: u8 - The IDT interrupt number. /// IN handler: idt.InterruptHandler - The IDT handler. /// fn openIrq(index: u8, handler: idt.InterruptHandler) void { idt.openInterruptGate(index, handler) catch |err| switch (err) { error.IdtEntryExists => { panic(@errorReturnTrace(), "Error opening IRQ number: {} exists", .{index}); }, }; } /// /// Check whether the IRQ index is valid. This will have to be less than NUMBER_OF_ENTRIES. /// /// Arguments: /// IN irq_num: u8 - The IRQ index to test. /// /// Return: bool /// Whether the IRQ index is valid. /// pub fn isValidIrq(irq_num: u32) bool { return irq_num < NUMBER_OF_ENTRIES; } /// /// Register a IRQ by setting its interrupt handler to the given function. This will also clear the /// mask bit in the PIC so interrupts can happen for this IRQ. /// /// Arguments: /// IN irq_num: u8 - The IRQ number to register. /// IN handler: IrqHandler - The IRQ handler to register. This is what will be called when this /// interrupt happens. /// /// Errors: IrqError /// IrqError.InvalidIrq - If the IRQ index is invalid (see isValidIrq). /// IrqError.IrqExists - If the IRQ handler has already been registered. 
/// pub fn registerIrq(irq_num: u8, handler: IrqHandler) IrqError!void { // Check whether the IRQ index is valid. if (isValidIrq(irq_num)) { // Check if a handler has already been registered. if (irq_handlers[irq_num]) |_| { return IrqError.IrqExists; } else { // Register the handler and clear the PIC mask so interrupts can happen. irq_handlers[irq_num] = handler; pic.clearMask(irq_num); } } else { return IrqError.InvalidIrq; } } /// /// Initialise the IRQ interrupts by first remapping the port addresses and then opening up all /// the IDT interrupt gates for each IRQ. /// pub fn init() void { log.info("Init\n", .{}); defer log.info("Done\n", .{}); comptime var i = IRQ_OFFSET; inline while (i < IRQ_OFFSET + 16) : (i += 1) { openIrq(i, interrupts.getInterruptStub(i)); } switch (build_options.test_mode) { .Initialisation => runtimeTests(), else => {}, } } fn testFunction0() callconv(.Naked) void {} fn testFunction1(ctx: *arch.CpuState) u32 { // Suppress unused variable warnings _ = ctx; return 0; } fn testFunction2(ctx: *arch.CpuState) u32 { // Suppress unused variable warnings _ = ctx; return 0; } test "openIrq" { idt.initTest(); defer idt.freeTest(); const index: u8 = 0; const handler = testFunction0; const ret: idt.IdtError!void = {}; idt.addTestParams("openInterruptGate", .{ index, handler, ret }); openIrq(index, handler); } test "isValidIrq" { comptime var i = 0; inline while (i < NUMBER_OF_ENTRIES) : (i += 1) { try expect(isValidIrq(i)); } try expect(!isValidIrq(200)); } test "registerIrq re-register irq handler" { // Set up pic.initTest(); defer pic.freeTest(); pic.addTestParams("clearMask", .{@as(u16, 0)}); // Pre testing for (irq_handlers) |h| { try expect(null == h); } // Call function try registerIrq(0, testFunction1); try expectError(IrqError.IrqExists, registerIrq(0, testFunction2)); // Post testing for (irq_handlers) |h, i| { if (i != 0) { try expect(null == h); } else { try expectEqual(testFunction1, h.?); } } // Clean up irq_handlers[0] = null; } test 
"registerIrq register irq handler" { // Set up pic.initTest(); defer pic.freeTest(); pic.addTestParams("clearMask", .{@as(u16, 0)}); // Pre testing for (irq_handlers) |h| { try expect(null == h); } // Call function try registerIrq(0, testFunction1); // Post testing for (irq_handlers) |h, i| { if (i != 0) { try expect(null == h); } else { try expectEqual(testFunction1, h.?); } } // Clean up irq_handlers[0] = null; } test "registerIrq invalid irq index" { try expectError(IrqError.InvalidIrq, registerIrq(200, testFunction1)); } /// /// Test that all handlers are null at initialisation. /// fn rt_unregisteredHandlers() void { // Ensure all ISR are not registered yet for (irq_handlers) |h, i| { if (h) |_| { panic(@errorReturnTrace(), "FAILURE: Handler found for IRQ: {}-{}\n", .{ i, h }); } } log.info("Tested registered handlers\n", .{}); } /// /// Test that all IDT entries for the IRQs are open. /// fn rt_openedIdtEntries() void { const loaded_idt = arch.sidt(); const idt_entries = @intToPtr([*]idt.IdtEntry, loaded_idt.base)[0..idt.NUMBER_OF_ENTRIES]; for (idt_entries) |entry, i| { if (i >= IRQ_OFFSET and isValidIrq(i - IRQ_OFFSET)) { if (!idt.isIdtOpen(entry)) { panic(@errorReturnTrace(), "FAILURE: IDT entry for {} is not open\n", .{i}); } } } log.info("Tested opened IDT entries\n", .{}); } /// /// Run all the runtime tests. /// pub fn runtimeTests() void { rt_unregisteredHandlers(); rt_openedIdtEntries(); }
0
repos/pluto/src/kernel/arch
repos/pluto/src/kernel/arch/x86/link.ld
ENTRY(_start)

/* Changes to KERNEL_ADDR_OFFSET must also be made to KERNEL_PAGE_NUMBER in boot.zig */
/* The virtual address offset of the higher-half kernel. */
KERNEL_ADDR_OFFSET = 0xC0000000;
/* The virtual address at which the higher-half kernel sections begin. */
KERNEL_VADDR_START = 0xC0100000;

SECTIONS {
    /* The kernel is loaded physically at 1MiB. */
    . = 1M;
    KERNEL_PHYSADDR_START = .;

    /* Boot-time sections run before paging is enabled, so they are linked at
       their physical addresses. */
    .rodata.boot : {
        *(.rodata.boot)
    }

    .text.boot : {
        *(.text.boot)
    }

    /* Everything after this point is linked in the higher half but loaded
       (AT) at its physical address. */
    . += KERNEL_ADDR_OFFSET;

    .text ALIGN(4K) : AT (ADDR(.text) - KERNEL_ADDR_OFFSET) {
        *(.text)
    }

    .rodata ALIGN(4K) : AT (ADDR(.rodata) - KERNEL_ADDR_OFFSET) {
        *(.rodata)
    }

    .data ALIGN(4K) : AT (ADDR(.data) - KERNEL_ADDR_OFFSET) {
        *(.data)
    }

    .bss ALIGN(4K) : AT (ADDR(.bss) - KERNEL_ADDR_OFFSET) {
        *(COMMON)
        *(.bss)
    }

    /* The kernel stack lives in its own section; KEEP prevents it from being
       discarded, and the start/end symbols are exported to the kernel. */
    .bss.stack ALIGN(4K) : AT (ADDR(.bss.stack) - KERNEL_ADDR_OFFSET) {
        KERNEL_STACK_START = .;
        KEEP(*(.bss.stack))
        KERNEL_STACK_END = .;
    }

    KERNEL_VADDR_END = .;
    KERNEL_PHYSADDR_END = . - KERNEL_ADDR_OFFSET;
}
0
repos/pluto/src/kernel/arch
repos/pluto/src/kernel/arch/x86/pit.zig
const std = @import("std"); const maxInt = std.math.maxInt; const builtin = @import("builtin"); const is_test = builtin.is_test; const expect = std.testing.expect; const expectEqual = std.testing.expectEqual; const expectError = std.testing.expectError; const log = std.log.scoped(.x86_pit); const build_options = @import("build_options"); const arch = if (is_test) @import("../../../../test/mock/kernel/arch_mock.zig") else @import("arch.zig"); const panic = @import("../../panic.zig").panic; const irq = @import("irq.zig"); const pic = @import("pic.zig"); /// The enum for selecting the counter const CounterSelect = enum { /// Counter 0. Counter0, /// Counter 1. Counter1, /// Counter 2. Counter2, /// /// Get the register port for the selected counter. /// /// Arguments: /// IN counter: CounterSelect - The counter to get the register port. /// /// Return: u16 /// The register port for the selected counter. /// pub fn getRegister(counter: CounterSelect) u16 { return switch (counter) { .Counter0 => COUNTER_0_REGISTER, .Counter1 => COUNTER_1_REGISTER, .Counter2 => COUNTER_2_REGISTER, }; } /// /// Get the operational control work for the selected counter. /// /// Arguments: /// IN counter: CounterSelect - The counter to get the operational control work. /// /// Return: u16 /// The operational control work for the selected counter. /// pub fn getCounterOCW(counter: CounterSelect) u8 { return switch (counter) { .Counter0 => OCW_SELECT_COUNTER_0, .Counter1 => OCW_SELECT_COUNTER_1, .Counter2 => OCW_SELECT_COUNTER_2, }; } }; /// The error set that can be returned from PIT functions const PitError = error{ /// The frequency to be used for a counter is invalid. This would be if the frequency is less /// than 19 or greater than MAX_FREQUENCY. InvalidFrequency, }; /// The port address for the PIT data register for counter 0. This is going to be used as the /// system clock. const COUNTER_0_REGISTER: u16 = 0x40; /// The port address for the PIT data register for counter 1. 
This was used for refreshing the DRAM /// chips. But now is unused and unknown use, so won't use. const COUNTER_1_REGISTER: u16 = 0x41; /// The port address for the PIT data register for counter 2. Connected to the PC speakers, we'll /// use this for the speakers. const COUNTER_2_REGISTER: u16 = 0x42; /// The port address for the PIT control word register. Used to tell the PIT controller what is /// about to happen. Tell what data is going where, lower or upper part of it's registers. const COMMAND_REGISTER: u16 = 0x43; // The operational command word for the different modes. // // Bit 0: (BCP) Binary Counter. // 0: Binary. // 1: Binary Coded Decimal (BCD). // Bit 1-3: (M0, M1, M2) Operating Mode. See above sections for a description of each. // 000: Mode 0: Interrupt or Terminal Count. // 001: Mode 1: Programmable one-shot. // 010: Mode 2: Rate Generator. // 011: Mode 3: Square Wave Generator. // 100: Mode 4: Software Triggered Strobe. // 101: Mode 5: Hardware Triggered Strobe. // 110: Undefined; Don't use. // 111: Undefined; Don't use. // Bits 4-5: (RL0, RL1) Read/Load Mode. We are going to read or send data to a counter register. // 00: Counter value is latched into an internal control register at the time of the I/O write operation. // 01: Read or Load Least Significant Byte (LSB) only. // 10: Read or Load Most Significant Byte (MSB) only. // 11: Read or Load LSB first then MSB. // Bits 6-7: (SC0-SC1) Select Counter. See above sections for a description of each. // 00: Counter 0. // 01: Counter 1. // 10: Counter 2. // 11: Illegal value. /// Have the counter count in binary (internally?). const OCW_BINARY_COUNT_BINARY: u8 = 0x00; /// Have the counter count in BCD (internally?). const OCW_BINARY_COUNT_BCD: u8 = 0x01; /// The PIT counter will be programmed with an initial COUNT value that counts down at a rate of /// the input clock frequency. When the COUNT reaches 0, and after the control word is written, /// then its OUT pit is set high (1). 
Count down starts then the COUNT is set. The OUT pin remains
/// high until the counter is reloaded with a new COUNT value or a new control word is written.
const OCW_MODE_TERMINAL_COUNT: u8 = 0x00;

/// The counter is programmed to output a pulse every certain number of clock pulses. The OUT pin
/// remains high as soon as a control word is written. When COUNT is written, the counter waits
/// until the rising edge of the GATE pin to start. One clock pulse after the GATE pin, the OUT
/// pin will remain low until COUNT reaches 0.
const OCW_MODE_ONE_SHOT: u8 = 0x02;

/// The counter is initiated with a COUNT value. Counting starts next clock pulse. OUT pin remains
/// high until COUNT reaches 1, then is set low for one clock pulse. Then COUNT is reset back to
/// initial value and OUT pin is set high again.
const OCW_MODE_RATE_GENERATOR: u8 = 0x04;

/// Similar to OCW_MODE_RATE_GENERATOR, but OUT pin will be high for half the time and low for
/// half the time. Good for the speaker when setting a tone.
const OCW_MODE_SQUARE_WAVE_GENERATOR: u8 = 0x06;

/// The counter is initiated with a COUNT value. Counting starts on next clock pulse. OUT pin remains
/// high until count is 0. Then OUT pin is low for one clock pulse. Then resets to high again.
const OCW_MODE_SOFTWARE_TRIGGER: u8 = 0x08;

/// The counter is initiated with a COUNT value. OUT pin remains high until the rising edge of the
/// GATE pin. Then the counting begins. When COUNT reaches 0, OUT pin goes low for one clock pulse.
/// Then COUNT is reset and OUT pin goes high. This cycles for each rising edge of the GATE pin.
const OCW_MODE_HARDWARE_TRIGGER: u8 = 0x0A;

/// The counter value is latched into an internal control register at the time of the I/O write
/// operations.
const OCW_READ_LOAD_LATCH: u8 = 0x00;

/// Read or load the least significant byte (LSB) only. (RL bits = 01; the LSB/MSB comments
/// were previously swapped — see the RL0/RL1 table above.)
const OCW_READ_LOAD_LSB_ONLY: u8 = 0x10;

/// Read or load the most significant byte (MSB) only. (RL bits = 10.)
const OCW_READ_LOAD_MSB_ONLY: u8 = 0x20;

/// Read or load the least significant byte (LSB) first, then the most significant byte (MSB).
const OCW_READ_LOAD_DATA: u8 = 0x30;

/// The OCW bits for selecting counter 0. Used for the system clock.
const OCW_SELECT_COUNTER_0: u8 = 0x00;

/// The OCW bits for selecting counter 1. Was for the memory refreshing.
const OCW_SELECT_COUNTER_1: u8 = 0x40;

/// The OCW bits for selecting counter 2. Channel for the speaker.
const OCW_SELECT_COUNTER_2: u8 = 0x80;

/// The PIT input clock frequency in Hz (~1.193182 MHz). Counter reload values divide this,
/// so it is also the fastest programmable rate.
const MAX_FREQUENCY: u32 = 1193182;

/// The number of ticks that have passed since counter 0 was initially set up.
var ticks: u32 = 0;

/// The number of ticks that have passed since counter 1 was initially set up.
var ram_ticks: u32 = 0;

/// The number of ticks that have passed since counter 2 was initially set up.
var speaker_ticks: u32 = 0;

/// The current frequency of counter 0.
var current_freq_0: u32 = undefined;

/// The current frequency of counter 1.
var current_freq_1: u32 = undefined;

/// The current frequency of counter 2.
var current_freq_2: u32 = undefined;

/// The number of nanoseconds between interrupts.
var time_ns: u32 = undefined;

/// The sub-nanosecond remainder (in picoseconds) to be added to time_ns for the time between
/// interrupts.
var time_under_1_ns: u32 = undefined;

///
/// Send a command to the PIT command register.
///
/// Arguments:
///     IN cmd: u8 - The command to send to the PIT.
///
inline fn sendCommand(cmd: u8) void {
    arch.out(COMMAND_REGISTER, cmd);
}

///
/// Read the current mode of the selected counter.
///
/// Arguments:
///     IN counter: CounterSelect - The counter to read the mode the counter is operating in.
///
/// Return: u8
///     The mode the counter is operating in. Use the masks above to get each part.
///
inline fn readBackCommand(counter: CounterSelect) u8 {
    // 0xC2 is the read-back command (bits 7-6 = 11) with counter 0's select bit set.
    // NOTE(review): only counter 0 is selected here regardless of `counter`, although the
    // read below uses `counter`'s port — confirm this is intended.
    sendCommand(0xC2);
    // Mask off bits 7-6 (OUT pin state and null-count flag), keeping access/mode/BCD bits.
    return 0x3F & arch.in(u8, counter.getRegister());
}

///
/// Send data to a given counter. Will be only one of the 3 counters as is an internal function.
///
/// Arguments:
///     IN counter: CounterSelect - The counter port to send the data to.
///     IN data: u8 - The data to send.
///
inline fn sendDataToCounter(counter: CounterSelect, data: u8) void {
    arch.out(counter.getRegister(), data);
}

///
/// The interrupt handler for the PIT. This will increment a counter for now.
///
/// Arguments:
///     IN ctx: *arch.CpuState - Pointer to the interrupt context containing the contents
///                              of the register at the time of the interrupt.
///
/// Return: usize
///     The stack pointer to return to, unchanged from the incoming context.
///
fn pitHandler(ctx: *arch.CpuState) usize {
    // Wrapping add so a long-running system doesn't trap on overflow.
    ticks +%= 1;
    return @ptrToInt(ctx);
}

///
/// Set up a counter with a tick rate and mode of operation.
///
/// Arguments:
///     IN counter: CounterSelect - Which counter is to be set up.
///     IN freq: u32 - The frequency that the counter operates at. Any frequency that
///                    is between 0..18 (inclusive) or above MAX_FREQUENCY will return
///                    an error.
///     IN mode: u8 - The mode of operation that the counter will operate in. See
///                   the modes definition above to chose which mode the counter is
///                   to run at.
///
/// Error: PitError:
///     PitError.InvalidFrequency - If the given frequency is out of bounds. Less than 19 or
///                                 greater than MAX_FREQUENCY.
///
fn setupCounter(counter: CounterSelect, freq: u32, mode: u8) PitError!void {
    if (freq < 19 or freq > MAX_FREQUENCY) {
        return PitError.InvalidFrequency;
    }

    // The guard above guarantees 19 <= freq <= MAX_FREQUENCY, so the reload value is either
    // the rounded division (which always fits in a u16, as MAX_FREQUENCY / 19 < 65536) or 1
    // for the fastest rate when freq == MAX_FREQUENCY. This replaces a 0x10000 default and a
    // `freq > 18` test that the guard had made unreachable.
    const reload_value: u32 = if (freq < MAX_FREQUENCY)
        // Rounded integer division.
        (MAX_FREQUENCY + (freq / 2)) / freq
    else
        // The fastest possible frequency.
        1;

    // Update the frequency with the actual one that the PIT will be using.
    // Rounded integer division.
    const frequency = (MAX_FREQUENCY + (reload_value / 2)) / reload_value;

    // Calculate the amount of nanoseconds between interrupts.
    time_ns = 1000000000 / frequency;

    // Calculate the number of picoseconds, the left over from nanoseconds.
    time_under_1_ns = ((1000000000 % frequency) * 1000 + (frequency / 2)) / frequency;

    // Set the frequency for the counter being set up.
    switch (counter) {
        .Counter0 => current_freq_0 = frequency,
        .Counter1 => current_freq_1 = frequency,
        .Counter2 => current_freq_2 = frequency,
    }

    // Get the u16 version as this is what will be loaded into the PIT.
    const reload_val_16 = @truncate(u16, reload_value);

    // Send the set up command to the PIT, loading LSB first then MSB.
    sendCommand(mode | OCW_READ_LOAD_DATA | counter.getCounterOCW());
    sendDataToCounter(counter, @truncate(u8, reload_val_16));
    sendDataToCounter(counter, @truncate(u8, reload_val_16 >> 8));

    // Reset the counter ticks.
    switch (counter) {
        .Counter0 => ticks = 0,
        .Counter1 => ram_ticks = 0,
        .Counter2 => speaker_ticks = 0,
    }
}

///
/// A simple wait that uses the PIT to wait a number of ticks.
///
/// Arguments:
///     IN ticks_to_wait: u32 - The number of ticks to wait.
/// pub fn waitTicks(ticks_to_wait: u32) void { if (ticks > maxInt(u32) - ticks_to_wait) { // Integer overflow // Calculate the 2 conditions const wait_ticks1 = maxInt(u32) - ticks; const wait_ticks2 = ticks_to_wait - wait_ticks1; while (ticks > wait_ticks1) { arch.halt(); } while (ticks < wait_ticks2) { arch.halt(); } } else { const wait_ticks = ticks + ticks_to_wait; while (ticks < wait_ticks) { arch.halt(); } } } /// /// Get the number of ticks that have passed when the PIT was initiated. /// /// Return: u32 /// Number of ticks passed. /// pub fn getTicks() u32 { return ticks; } /// /// Get the frequency the PIT is ticking at. /// /// Return: u32 /// The frequency the PIT is running at /// pub fn getFrequency() u32 { return current_freq_0; } /// /// Initialise the PIT with a handler to IRQ 0. /// pub fn init() void { log.info("Init\n", .{}); defer log.info("Done\n", .{}); // Set up counter 0 at 10000hz in a square wave mode counting in binary const freq: u32 = 10000; setupCounter(CounterSelect.Counter0, freq, OCW_MODE_SQUARE_WAVE_GENERATOR | OCW_BINARY_COUNT_BINARY) catch { panic(@errorReturnTrace(), "Invalid frequency: {}\n", .{freq}); }; log.debug("Set frequency at: {}Hz, real frequency: {}Hz\n", .{ freq, getFrequency() }); // Installs 'pitHandler' to IRQ0 (pic.IRQ_PIT) irq.registerIrq(pic.IRQ_PIT, pitHandler) catch |err| switch (err) { error.IrqExists => { panic(@errorReturnTrace(), "IRQ for PIT, IRQ number: {} exists", .{pic.IRQ_PIT}); }, error.InvalidIrq => { panic(@errorReturnTrace(), "IRQ for PIT, IRQ number: {} is invalid", .{pic.IRQ_PIT}); }, }; switch (build_options.test_mode) { .Initialisation => runtimeTests(), else => {}, } } test "sendCommand" { arch.initTest(); defer arch.freeTest(); const cmd: u8 = 10; arch.addTestParams("out", .{ COMMAND_REGISTER, cmd }); sendCommand(cmd); } test "readBackCommand" { arch.initTest(); defer arch.freeTest(); const cmd: u8 = 0xC2; arch.addTestParams("out", .{ COMMAND_REGISTER, cmd }); arch.addTestParams("in", .{ 
COUNTER_0_REGISTER, @as(u8, 0x20) }); const actual = readBackCommand(CounterSelect.Counter0); try expectEqual(@as(u8, 0x20), actual); } test "sendDataToCounter" { arch.initTest(); defer arch.freeTest(); const data: u8 = 10; arch.addTestParams("out", .{ COUNTER_0_REGISTER, data }); sendDataToCounter(CounterSelect.Counter0, data); } test "setupCounter lowest frequency" { arch.initTest(); defer arch.freeTest(); const counter = CounterSelect.Counter0; var freq: u32 = 0; const mode = OCW_MODE_SQUARE_WAVE_GENERATOR | OCW_BINARY_COUNT_BINARY; while (freq <= 18) : (freq += 1) { try expectError(PitError.InvalidFrequency, setupCounter(counter, freq, mode)); } } test "setupCounter highest frequency" { arch.initTest(); defer arch.freeTest(); const counter = CounterSelect.Counter0; // Set the frequency above the maximum const freq = MAX_FREQUENCY + 10; const mode = OCW_MODE_SQUARE_WAVE_GENERATOR | OCW_BINARY_COUNT_BINARY; try expectError(PitError.InvalidFrequency, setupCounter(counter, freq, mode)); } test "setupCounter normal frequency" { arch.initTest(); defer arch.freeTest(); const counter = CounterSelect.Counter0; const port = counter.getRegister(); // Set the frequency to a normal frequency const freq = 10000; const expected_reload_value = 119; const expected_freq: u32 = 10027; const mode = OCW_MODE_SQUARE_WAVE_GENERATOR | OCW_BINARY_COUNT_BINARY; const command = mode | OCW_READ_LOAD_DATA | counter.getCounterOCW(); arch.addTestParams("out", .{ COMMAND_REGISTER, command, port, @truncate(u8, expected_reload_value), port, @truncate(u8, expected_reload_value >> 8) }); setupCounter(counter, freq, mode) catch unreachable; try expectEqual(@as(u32, 0), ticks); try expectEqual(expected_freq, current_freq_0); try expectEqual(expected_freq, getFrequency()); // These are the hard coded expected values. 
Calculated externally to check the internal calculation try expectEqual(@as(u32, 99730), time_ns); try expectEqual(@as(u32, 727), time_under_1_ns); // Reset globals time_ns = 0; current_freq_0 = 0; ticks = 0; } /// /// Test that waiting a number of ticks and then getting the number of ticks match. /// fn rt_waitTicks() void { const waiting = 1000; const epsilon = 2 * getFrequency() / 10000; const previous_count = getTicks(); waitTicks(waiting); const difference = getTicks() - waiting; if (previous_count + epsilon < difference or previous_count > difference + epsilon) { panic(@errorReturnTrace(), "FAILURE: Waiting failed. difference: {}, previous_count: {}. Epsilon: {}\n", .{ difference, previous_count, epsilon }); } log.info("Tested wait ticks\n", .{}); } /// /// Test that waiting a number of ticks and then getting the number of ticks match. This version /// checks for the ticks wrap around. /// fn rt_waitTicks2() void { // Set the ticks to 16 less than the max const waiting = 1000; const epsilon = 2 * getFrequency() / 10000; ticks = 0xFFFFFFF0; const previous_count = getTicks() - 0xFFFFFFF0; waitTicks(waiting); // maxInt(u32) - u32(0xFFFFFFF0) = 15 const difference = getTicks() + 15 - waiting; if (previous_count + epsilon < difference or previous_count > difference + epsilon) { panic(@errorReturnTrace(), "FAILURE: Waiting failed. difference: {}, previous_count: {}. Epsilon: {}\n", .{ difference, previous_count, epsilon }); } // Reset ticks ticks = 0; log.info("Tested wait ticks 2\n", .{}); } /// /// Check that when the PIT is initialised, counter 0 is set up properly. /// fn rt_initCounter_0() void { const expected_ns: u32 = 99730; const expected_ps: u32 = 727; const expected_hz: u32 = 10027; if (time_ns != expected_ns or time_under_1_ns != expected_ps or getFrequency() != expected_hz) { panic(@errorReturnTrace(), "FAILURE: Frequency not set properly. 
Hz: {}!={}, ns: {}!={}, ps: {}!= {}\n", .{ getFrequency(), expected_hz, time_ns, expected_ns, time_under_1_ns, expected_ps, }); } var irq_exists = false; irq.registerIrq(pic.IRQ_PIT, pitHandler) catch |err| switch (err) { error.IrqExists => { // We should get this error irq_exists = true; }, error.InvalidIrq => { panic(@errorReturnTrace(), "FAILURE: IRQ for PIT, IRQ number: {} is invalid", .{pic.IRQ_PIT}); }, }; if (!irq_exists) { panic(@errorReturnTrace(), "FAILURE: IRQ for PIT doesn't exists\n", .{}); } const expected_mode = OCW_READ_LOAD_DATA | OCW_MODE_SQUARE_WAVE_GENERATOR | OCW_SELECT_COUNTER_0 | OCW_BINARY_COUNT_BINARY; const actual_mode = readBackCommand(CounterSelect.Counter0); if (expected_mode != actual_mode) { panic(@errorReturnTrace(), "FAILURE: Operating mode don't not set properly. Found: {}, expecting: {}\n", .{ actual_mode, expected_mode }); } log.info("Tested init\n", .{}); } /// /// Run all the runtime tests. /// pub fn runtimeTests() void { // Interrupts aren't enabled yet, so for the runtime tests, enable it temporary arch.enableInterrupts(); defer arch.disableInterrupts(); rt_initCounter_0(); rt_waitTicks(); rt_waitTicks2(); }
0
repos/pluto/src/kernel/arch
repos/pluto/src/kernel/arch/x86/rtc.zig
const std = @import("std");
const builtin = @import("builtin");
const is_test = builtin.is_test;
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
const expectError = std.testing.expectError;
const log = std.log.scoped(.x86_rtc);
const build_options = @import("build_options");
const arch = if (is_test) @import("../../../../test/mock/kernel/arch_mock.zig") else @import("arch.zig");
const pic = @import("pic.zig");
const pit = @import("pit.zig");
const irq = @import("irq.zig");
const cmos = if (is_test) @import("../../../../test/mock/kernel/cmos_mock.zig") else @import("cmos.zig");
const panic = @import("../../panic.zig").panic;
const scheduler = @import("../../scheduler.zig");

/// The Century register is unreliable. We need a APIC interface to infer if we have a century
/// register. So this is a current TODO.
const CURRENT_CENTURY: u32 = 2000;

/// TODO: To do with the unreliable century register. Once have APIC, can use this as a sanity check
/// if the the century register gives a wild answer then the other RTC values maybe wild. So then
/// could report that the CMOS chip is faulty or the battery is dyeing.
const CENTURY_REGISTER: bool = false;

/// The error set that can be returned from some RTC functions.
const RtcError = error{
    /// If setting the rate for interrupts is less than 3 or greater than 15.
    RateError,
};

/// A structure to hold all the date and time information in the RTC.
pub const DateTime = struct {
    second: u32,
    minute: u32,
    hour: u32,
    day: u32,
    month: u32,
    year: u32,
    century: u32,
    day_of_week: u32,
};

/// The number of ticks that has passed when RTC was initially set up.
var ticks: u32 = 0;

// Whether the RTC interrupt handler should invoke the scheduler (toggled off
// during the runtime tests).
var schedule: bool = true;

///
/// Checks if the CMOS chip isn't updating the RTC registers. Call this before reading any RTC
/// registers so don't get inconsistent values.
///
/// Return: bool
///     Whether the CMOS chip is busy and a update is in progress.
///
fn isBusy() bool {
    // Bit 7 of status register A is the update-in-progress flag.
    return (cmos.readStatusRegister(cmos.StatusRegister.A, false) & 0x80) != 0;
}

///
/// Calculate the day of the week from the given day, month and year. Uses Sakamoto's algorithm.
///
/// Arguments:
///     IN date_time: DateTime - The DateTime structure that holds the current day, month and year.
///
/// Return: u8
///     A number that represents the day of the week. 0 = Sunday, 1 = Monday, ... 6 = Saturday.
///     (The tests below confirm: 2000-01-01 -> 6, a Saturday.)
///
fn calcDayOfWeek(date_time: DateTime) u32 {
    const t = [_]u8{ 0, 3, 2, 5, 0, 3, 5, 1, 4, 6, 2, 4 };
    // January and February are counted as months 13/14 of the previous year.
    const year = date_time.year - @boolToInt(date_time.month < 3);
    const month = date_time.month;
    const day = date_time.day;

    return (year + (year / 4) - (year / 100) + (year / 400) + t[month - 1] + day) % 7;
}

///
/// Check if the RTC is in binary coded decimal mode. If the RTC ic counting in BCD, then the 3rd
/// bit in the status register B will be set.
///
/// Return: bool
///     When the RTC is counting in BCD.
///
fn isBcd() bool {
    const reg_b = cmos.readStatusRegister(cmos.StatusRegister.B, false);
    return reg_b & 0x04 != 0;
}

///
/// Check if the RTC in 12 hour mode. If the RTC is in 12 hour mode, then the 2nd bit in the status
/// register B is set and the most significant bit on the hour field is set.
///
/// Arguments:
///     IN date_time: DateTime - The DateTime structure containing at least the hour field set.
///
/// Return: bool
///     Whether the RTC is in 12 hour mode.
///
fn is12Hr(date_time: DateTime) bool {
    const reg_b = cmos.readStatusRegister(cmos.StatusRegister.B, false);
    return reg_b & 0x02 != 0 and date_time.hour & 0x80 != 0;
}

///
/// Convert BCD to binary.
///
/// Arguments:
///     IN bcd: u32 - The binary coded decimal value to convert
///
/// Return: u32
///     The converted BCD value.
///
fn bcdToBinary(bcd: u32) u32 {
    // high_nibble*8 + high_nibble*2 = high_nibble*10, plus the low nibble.
    return ((bcd & 0xF0) >> 1) + ((bcd & 0xF0) >> 3) + (bcd & 0xf);
}

///
/// Read the real time clock registers and return them in the DateTime structure. This will read
/// the seconds, minutes, hours, days, months, years, century and day of the week.
///
/// Return: DateTime
///     The data from the CMOS RTC registers.
///
fn readRtcRegisters() DateTime {
    // Make sure there isn't a update in progress
    while (isBusy()) {}

    var date_time = DateTime{
        .second = cmos.readRtcRegister(cmos.RtcRegister.SECOND),
        .minute = cmos.readRtcRegister(cmos.RtcRegister.MINUTE),
        .hour = cmos.readRtcRegister(cmos.RtcRegister.HOUR),
        .day = cmos.readRtcRegister(cmos.RtcRegister.DAY),
        .month = cmos.readRtcRegister(cmos.RtcRegister.MONTH),
        .year = cmos.readRtcRegister(cmos.RtcRegister.YEAR),
        .century = if (CENTURY_REGISTER) cmos.readRtcRegister(cmos.RtcRegister.CENTURY) else CURRENT_CENTURY,
        // This will be filled in later
        .day_of_week = 0,
    };

    // The day of the week register is also very unreliable, so is better to calculate it
    date_time.day_of_week = calcDayOfWeek(date_time);

    return date_time;
}

///
/// The interrupt handler for the RTC.
///
/// Arguments:
///     IN ctx: *arch.CpuState - Pointer to the interrupt context containing the contents
///                              of the register at the time of the interrupt.
///
fn rtcHandler(ctx: *arch.CpuState) usize {
    ticks +%= 1;

    var ret_esp: usize = undefined;

    // Call the scheduler
    if (schedule) {
        ret_esp = scheduler.pickNextTask(ctx);
    } else {
        ret_esp = @ptrToInt(ctx);
    }

    // Need to read status register C
    // Might need to disable the NMI bit, set to true
    _ = cmos.readStatusRegister(cmos.StatusRegister.C, false);

    return ret_esp;
}

///
/// Set the rate at which the interrupts will fire. Ranges from 0x3 to 0xF. Where the frequency
/// is determined by: frequency = 32768 >> (rate-1); This will assume the interrupts are disabled.
///
/// Arguments:
///     IN rate: u4 - The rate value to set the frequency to.
///
/// Error: RtcError
///     RtcError.RateError - If the rate is less than 3 or greater than 15.
///
fn setRate(rate: u8) RtcError!void {
    if (rate < 3 or rate > 0xF) {
        return RtcError.RateError;
    }

    // Need to disable the NMI for this process
    const status_a = cmos.readStatusRegister(cmos.StatusRegister.A, true);
    // Keep the upper control bits, replace the low 4 rate bits.
    cmos.writeStatusRegister(cmos.StatusRegister.A, (status_a & 0xF0) | rate, true);
}

///
/// Enable interrupts for the RTC. This will assume the interrupts have been disabled before hand.
///
fn enableInterrupts() void {
    // Need to disable the NMI for this process
    const status_b = cmos.readStatusRegister(cmos.StatusRegister.B, true);

    // Set the 7th bit to enable interrupt
    cmos.writeStatusRegister(cmos.StatusRegister.B, status_b | 0x40, true);
}

///
/// Read a stable time from the real time clock registers on the CMOS chip and return a BCD and
/// 12 hour converted date and time.
///
/// Return: DateTime
///     The data from the CMOS RTC registers with correct BCD conversions, 12 hour conversions and
///     the century added to the year.
///
pub fn getDateTime() DateTime {
    var date_time1 = readRtcRegisters();
    var date_time2 = readRtcRegisters();

    // Use the method: Read the registers twice and check if they are the same so to avoid
    // inconsistent values due to RTC updates

    var compare = false;

    inline for (@typeInfo(DateTime).Struct.fields) |field| {
        compare = compare or @field(date_time1, field.name) != @field(date_time2, field.name);
    }

    while (compare) {
        date_time1 = readRtcRegisters();
        date_time2 = readRtcRegisters();

        compare = false;
        inline for (@typeInfo(DateTime).Struct.fields) |field| {
            compare = compare or @field(date_time1, field.name) != @field(date_time2, field.name);
        }
    }

    // Convert BCD to binary if necessary
    if (isBcd()) {
        date_time1.second = bcdToBinary(date_time1.second);
        date_time1.minute = bcdToBinary(date_time1.minute);
        // Needs a special calculation because the upper bit is set (the 12hr PM flag
        // lives in bit 7 and must be preserved).
        date_time1.hour = ((date_time1.hour & 0x0F) + (((date_time1.hour & 0x70) / 16) * 10)) | (date_time1.hour & 0x80);
        date_time1.day = bcdToBinary(date_time1.day);
        date_time1.month = bcdToBinary(date_time1.month);
        date_time1.year = bcdToBinary(date_time1.year);
        if (CENTURY_REGISTER) {
            date_time1.century = bcdToBinary(date_time1.century);
        }
    }

    // Need to add on the century to the year
    if (CENTURY_REGISTER) {
        date_time1.year += date_time1.century * 100;
    } else {
        date_time1.year += CURRENT_CENTURY;
    }

    // Convert to 24hr time
    if (is12Hr(date_time1)) {
        date_time1.hour = ((date_time1.hour & 0x7F) + 12) % 24;
    }

    return date_time1;
}

///
/// Initialise the RTC.
///
pub fn init() void {
    log.info("Init\n", .{});
    defer log.info("Done\n", .{});

    // Register the interrupt handler
    irq.registerIrq(pic.IRQ_REAL_TIME_CLOCK, rtcHandler) catch |err| switch (err) {
        error.IrqExists => {
            panic(@errorReturnTrace(), "IRQ for RTC, IRQ number: {} exists", .{pic.IRQ_REAL_TIME_CLOCK});
        },
        error.InvalidIrq => {
            panic(@errorReturnTrace(), "IRQ for RTC, IRQ number: {} is invalid", .{pic.IRQ_REAL_TIME_CLOCK});
        },
    };

    // Set the interrupt rate to 512Hz
    setRate(7) catch |err| switch (err) {
        error.RateError => {
            panic(@errorReturnTrace(), "Setting rate error", .{});
        },
    };

    // Enable RTC interrupts
    enableInterrupts();

    // Read status register C to clear any interrupts that may have happened during set up
    _ = cmos.readStatusRegister(cmos.StatusRegister.C, false);

    switch (build_options.test_mode) {
        .Initialisation => runtimeTests(),
        else => {},
    }
}

test "isBusy not busy" {
    cmos.initTest();
    defer cmos.freeTest();

    cmos.addTestParams(
        "readStatusRegister",
        .{ cmos.StatusRegister.A, false, @as(u8, 0x60) },
    );

    try expect(!isBusy());
}

test "isBusy busy" {
    cmos.initTest();
    defer cmos.freeTest();

    cmos.addTestParams(
        "readStatusRegister",
        .{ cmos.StatusRegister.A, false, @as(u8, 0x80) },
    );

    try expect(isBusy());
}

test "calcDayOfWeek" {
    var date_time = DateTime{
        .second = 0,
        .minute = 0,
        .hour = 0,
        .day = 10,
        .month = 1,
        .year = 2020,
        .century = 0,
        .day_of_week = 0,
    };

    var actual = calcDayOfWeek(date_time);
    var expected = @as(u32, 5);

    try expectEqual(expected, actual);

    date_time.day = 20;
    date_time.month = 7;
    date_time.year = 1940;

    actual = calcDayOfWeek(date_time);
    expected = @as(u32, 6);

    try expectEqual(expected, actual);

    date_time.day = 9;
    date_time.month = 11;
    date_time.year = 2043;

    actual = calcDayOfWeek(date_time);
    expected = @as(u32, 1);

    try expectEqual(expected, actual);

    date_time.day = 1;
    date_time.month = 1;
    date_time.year = 2000;

    actual = calcDayOfWeek(date_time);
    expected = @as(u32, 6);

    try expectEqual(expected, actual);
}

test "isBcd not BCD" {
    cmos.initTest();
    defer cmos.freeTest();

    cmos.addTestParams(
        "readStatusRegister",
        .{ cmos.StatusRegister.B, false, @as(u8, 0x00) },
    );

    try expect(!isBcd());
}

test "isBcd BCD" {
    cmos.initTest();
    defer cmos.freeTest();

    cmos.addTestParams(
        "readStatusRegister",
        .{ cmos.StatusRegister.B, false, @as(u8, 0x04) },
    );

    try expect(isBcd());
}

test "is12Hr not 12Hr" {
    const date_time = DateTime{
        .second = 0,
        .minute = 0,
        .hour = 0,
        .day = 0,
        .month = 0,
        .year = 0,
        .century = 0,
        .day_of_week = 0,
    };

    cmos.initTest();
    defer cmos.freeTest();

    cmos.addTestParams(
        "readStatusRegister",
        .{ cmos.StatusRegister.B, false, @as(u8, 0x00) },
    );

    try expect(!is12Hr(date_time));
}

test "is12Hr 12Hr" {
    const date_time = DateTime{
        .second = 0,
        .minute = 0,
        .hour = 0x80,
        .day = 0,
        .month = 0,
        .year = 0,
        .century = 0,
        .day_of_week = 0,
    };

    cmos.initTest();
    defer cmos.freeTest();

    cmos.addTestParams(
        "readStatusRegister",
        .{ cmos.StatusRegister.B, false, @as(u8, 0x02) },
    );

    try expect(is12Hr(date_time));
}

test "bcdToBinary" {
    var expected = @as(u32, 59);
    var actual = bcdToBinary(0x59);

    try expectEqual(expected, actual);

    expected = @as(u32, 48);
    actual = bcdToBinary(0x48);

    try expectEqual(expected, actual);

    expected = @as(u32, 1);
    actual = bcdToBinary(0x01);

    try expectEqual(expected, actual);
}

test "readRtcRegisters" {
    cmos.initTest();
    defer cmos.freeTest();

    // Have 2 busy loops
    cmos.addTestParams(
        "readStatusRegister",
        .{ cmos.StatusRegister.A, false, @as(u8, 0x80), cmos.StatusRegister.A, false, @as(u8, 0x80), cmos.StatusRegister.A, false, @as(u8, 0x00) },
    );

    // All the RTC registers without century as it isn't supported yet
    cmos.addTestParams("readRtcRegister", .{
        cmos.RtcRegister.SECOND, @as(u8, 1),
        cmos.RtcRegister.MINUTE, @as(u8, 2),
        cmos.RtcRegister.HOUR,   @as(u8, 3),
        cmos.RtcRegister.DAY,    @as(u8, 10),
        cmos.RtcRegister.MONTH,  @as(u8, 1),
        cmos.RtcRegister.YEAR,   @as(u8, 20),
    });

    const expected = DateTime{
        .second = 1,
        .minute = 2,
        .hour = 3,
        .day = 10,
        .month = 1,
        .year = 20,
        .century = 2000,
        .day_of_week = 5,
    };
    const actual = readRtcRegisters();

    try expectEqual(expected, actual);
}

test "readRtc unstable read" {
    cmos.initTest();
    defer cmos.freeTest();

    // No busy loop
    cmos.addTestParams(
        "readStatusRegister",
        .{ cmos.StatusRegister.A, false, @as(u8, 0x00), cmos.StatusRegister.A, false, @as(u8, 0x00) },
    );

    // Reading the RTC registers twice, second time is one second ahead
    cmos.addTestParams("readRtcRegister", .{
        cmos.RtcRegister.SECOND, @as(u8, 1),
        cmos.RtcRegister.MINUTE, @as(u8, 2),
        cmos.RtcRegister.HOUR,   @as(u8, 3),
        cmos.RtcRegister.DAY,    @as(u8, 10),
        cmos.RtcRegister.MONTH,  @as(u8, 1),
        cmos.RtcRegister.YEAR,   @as(u8, 20),
        cmos.RtcRegister.SECOND, @as(u8, 2),
        cmos.RtcRegister.MINUTE, @as(u8, 2),
        cmos.RtcRegister.HOUR,   @as(u8, 3),
        cmos.RtcRegister.DAY,    @as(u8, 10),
        cmos.RtcRegister.MONTH,  @as(u8, 1),
        cmos.RtcRegister.YEAR,   @as(u8, 20),
    });

    // Will try again, and now stable
    // No busy loop
    cmos.addTestParams(
        "readStatusRegister",
        .{ cmos.StatusRegister.A, false, @as(u8, 0x00), cmos.StatusRegister.A, false, @as(u8, 0x00) },
    );

    cmos.addTestParams("readRtcRegister", .{
        cmos.RtcRegister.SECOND, @as(u8, 2),
        cmos.RtcRegister.MINUTE, @as(u8, 2),
        cmos.RtcRegister.HOUR,   @as(u8, 3),
        cmos.RtcRegister.DAY,    @as(u8, 10),
        cmos.RtcRegister.MONTH,  @as(u8, 1),
        cmos.RtcRegister.YEAR,   @as(u8, 20),
        cmos.RtcRegister.SECOND, @as(u8, 2),
        cmos.RtcRegister.MINUTE, @as(u8, 2),
        cmos.RtcRegister.HOUR,   @as(u8, 3),
        cmos.RtcRegister.DAY,    @as(u8, 10),
        cmos.RtcRegister.MONTH,  @as(u8, 1),
        cmos.RtcRegister.YEAR,   @as(u8, 20),
    });

    // Not BCD
    cmos.addTestParams(
        "readStatusRegister",
        .{ cmos.StatusRegister.B, false, @as(u8, 0x00) },
    );

    // Not 12hr
    cmos.addTestParams(
        "readStatusRegister",
        .{ cmos.StatusRegister.B, false, @as(u8, 0x00) },
    );

    const expected = DateTime{
        .second = 2,
        .minute = 2,
        .hour = 3,
        .day = 10,
        .month = 1,
        .year = 2020,
        .century = 2000,
        .day_of_week = 5,
    };

    const actual = getDateTime();

    try expectEqual(expected, actual);
}

test "readRtc is BCD" {
    cmos.initTest();
    defer cmos.freeTest();

    // No busy loop
    cmos.addTestParams(
        "readStatusRegister",
        .{ cmos.StatusRegister.A, false, @as(u8, 0x00), cmos.StatusRegister.A, false, @as(u8, 0x00) },
    );

    // Reading the RTC registers once
    cmos.addTestParams("readRtcRegister", .{
        cmos.RtcRegister.SECOND, @as(u8, 0x30),
        cmos.RtcRegister.MINUTE, @as(u8, 0x59),
        cmos.RtcRegister.HOUR,   @as(u8, 0x11),
        cmos.RtcRegister.DAY,    @as(u8, 0x10),
        cmos.RtcRegister.MONTH,  @as(u8, 0x1),
        cmos.RtcRegister.YEAR,   @as(u8, 0x20),
        cmos.RtcRegister.SECOND, @as(u8, 0x30),
        cmos.RtcRegister.MINUTE, @as(u8, 0x59),
        cmos.RtcRegister.HOUR,   @as(u8, 0x11),
        cmos.RtcRegister.DAY,    @as(u8, 0x10),
        cmos.RtcRegister.MONTH,  @as(u8, 0x1),
        cmos.RtcRegister.YEAR,   @as(u8, 0x20),
    });

    // BCD
    cmos.addTestParams(
        "readStatusRegister",
        .{ cmos.StatusRegister.B, false, @as(u8, 0x04) },
    );

    // Not 12hr
    cmos.addTestParams(
        "readStatusRegister",
        .{ cmos.StatusRegister.B, false, @as(u8, 0x00) },
    );

    const expected = DateTime{
        .second = 30,
        .minute = 59,
        .hour = 11,
        .day = 10,
        .month = 1,
        .year = 2020,
        .century = 2000,
        .day_of_week = 5,
    };

    const actual = getDateTime();

    try expectEqual(expected, actual);
}

test "readRtc is 12 hours" {
    cmos.initTest();
    defer cmos.freeTest();

    // No busy loop
    cmos.addTestParams(
        "readStatusRegister",
        .{ cmos.StatusRegister.A, false, @as(u8, 0x00), cmos.StatusRegister.A, false, @as(u8, 0x00) },
    );

    // Reading the RTC registers once
    cmos.addTestParams("readRtcRegister", .{
        cmos.RtcRegister.SECOND, @as(u8, 1),
        cmos.RtcRegister.MINUTE, @as(u8, 2),
        cmos.RtcRegister.HOUR,   @as(u8, 0x83),
        cmos.RtcRegister.DAY,    @as(u8, 10),
        cmos.RtcRegister.MONTH,  @as(u8, 1),
        cmos.RtcRegister.YEAR,   @as(u8, 20),
        cmos.RtcRegister.SECOND, @as(u8, 1),
        cmos.RtcRegister.MINUTE, @as(u8, 2),
        cmos.RtcRegister.HOUR,   @as(u8, 0x83),
        cmos.RtcRegister.DAY,    @as(u8, 10),
        cmos.RtcRegister.MONTH,  @as(u8, 1),
        cmos.RtcRegister.YEAR,   @as(u8, 20),
    });

    // Not BCD
    cmos.addTestParams(
        "readStatusRegister",
        .{ cmos.StatusRegister.B, false, @as(u8, 0x00) },
    );

    // 12hr
    cmos.addTestParams(
        "readStatusRegister",
        .{ cmos.StatusRegister.B, false, @as(u8, 0x02) },
    );

    const expected = DateTime{
        .second = 1,
        .minute = 2,
        .hour = 15,
        .day = 10,
        .month = 1,
        .year = 2020,
        .century = 2000,
        .day_of_week = 5,
    };

    const actual = getDateTime();

    try expectEqual(expected, actual);
}

test "setRate below 3" {
    try expectError(RtcError.RateError, setRate(0));
    try expectError(RtcError.RateError, setRate(1));
    try expectError(RtcError.RateError, setRate(2));
}

test "setRate" {
    cmos.initTest();
    defer cmos.freeTest();

    cmos.addTestParams("readStatusRegister", .{ cmos.StatusRegister.A, true, @as(u8, 0x10) });
    cmos.addTestParams("writeStatusRegister", .{ cmos.StatusRegister.A, @as(u8, 0x17), true });

    const rate = @as(u8, 7);

    try setRate(rate);
}

test "enableInterrupts" {
    cmos.initTest();
    defer cmos.freeTest();

    cmos.addTestParams("readStatusRegister", .{ cmos.StatusRegister.B, true, @as(u8, 0x20) });
    cmos.addTestParams("writeStatusRegister", .{ cmos.StatusRegister.B, @as(u8, 0x60), true });

    enableInterrupts();
}

///
/// Check that the IRQ is registered correctly
///
fn rt_init() void {
    var irq_exists = false;

    irq.registerIrq(pic.IRQ_REAL_TIME_CLOCK, rtcHandler) catch |err| switch (err) {
        error.IrqExists => {
            // We should get this error
            irq_exists = true;
        },
        error.InvalidIrq => {
            panic(@errorReturnTrace(), "FAILURE: IRQ for RTC, IRQ number: {} is invalid\n", .{pic.IRQ_REAL_TIME_CLOCK});
        },
    };

    if (!irq_exists) {
        panic(@errorReturnTrace(), "FAILURE: IRQ for RTC doesn't exists\n", .{});
    }

    // Check the rate
    const status_a = cmos.readStatusRegister(cmos.StatusRegister.A, false);
    if (status_a & @as(u8, 0x0F) != 7) {
        panic(@errorReturnTrace(), "FAILURE: Rate not set properly, got: {}\n", .{status_a & @as(u8, 0x0F)});
    }

    // Check if interrupts are enabled
    // NOTE(review): `~0x40` masks OUT the interrupt-enable bit, so this panics
    // only when every OTHER status B bit is zero. `status_b & 0x40 == 0` looks
    // like the intended "bit 6 not set" check — confirm against MC146818 docs.
    const status_b = cmos.readStatusRegister(cmos.StatusRegister.B, true);
    if (status_b & ~@as(u8, 0x40) == 0) {
        panic(@errorReturnTrace(), "FAILURE: Interrupts not enabled\n", .{});
    }

    log.info("Tested init\n", .{});
}

///
/// Check if the interrupt handler is called after a sleep so check that the RTC interrupts fire.
///
fn rt_interrupts() void {
    const prev_ticks = ticks;

    pit.waitTicks(100);

    if (prev_ticks == ticks) {
        panic(@errorReturnTrace(), "FAILURE: No interrupt happened\n", .{});
    }

    log.info("Tested interrupts\n", .{});
}

///
/// Run all the runtime tests.
///
pub fn runtimeTests() void {
    rt_init();

    // Disable the scheduler temporary
    schedule = false;

    // Interrupts aren't enabled yet, so for the runtime tests, enable it temporary
    arch.enableInterrupts();
    rt_interrupts();
    arch.disableInterrupts();

    // Can enable it back
    schedule = true;
}
0
repos/pluto/src/kernel/arch
repos/pluto/src/kernel/arch/x86/gdt.zig
const std = @import("std");
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
const log = std.log.scoped(.x86_gdt);
const builtin = @import("builtin");
const is_test = builtin.is_test;
const panic = @import("../../panic.zig").panic;
const build_options = @import("build_options");
const arch = if (is_test) @import("../../../../test/mock/kernel/arch_mock.zig") else @import("arch.zig");

/// The access bits for a GDT entry.
const AccessBits = packed struct {
    /// Whether the segment has been access. This shouldn't be set as it is set by the CPU when the
    /// segment is accessed.
    accessed: u1,

    /// For code segments, when set allows the code segment to be readable. Code segments are
    /// always executable. For data segments, when set allows the data segment to be writeable.
    /// Data segments are always readable.
    read_write: u1,

    /// For code segments, when set allows this code segments to be executed from a equal or lower
    /// privilege level. The privilege bits represent the highest privilege level that is allowed
    /// to execute this segment. If not set, then the code segment can only be executed from the
    /// same ring level specified in the privilege level bits. For data segments, when set the data
    /// segment grows downwards. When not set, the data segment grows upwards. So for both code and
    /// data segments, this shouldn't be set.
    direction_conforming: u1,

    /// When set, the segment can be executed, a code segments. When not set, the segment can't be
    /// executed, data segment.
    executable: u1,

    /// Should be set for code and data segments, but not set for TSS.
    descriptor: u1,

    /// Privilege/ring level. Ring 0 is the kernel, the highest privilege (see
    /// KERNEL_SEGMENT_CODE below which uses 0). Ring 3 is user mode, the lowest
    /// privilege (USER_SEGMENT_CODE uses 3).
    privilege: u2,

    /// Whether the segment is present. This must be set for all valid selectors, not the null
    /// segment.
    present: u1,
};

/// The flag bits for a GDT entry.
const FlagBits = packed struct {
    /// The lowest bits must be 0 as this is reserved for future use.
    reserved_zero: u1,

    /// When set indicates the segment is a x86-64 segment. If set, then the IS_32_BIT flag must
    /// not be set. If both are set, then will throw an exception.
    is_64_bit: u1,

    /// When set indicates the segment is a 32 bit protected mode segment. When not set, indicates
    /// the segment is a 16 bit protected mode segment.
    is_32_bit: u1,

    /// The granularity bit. When set the limit is in 4KB blocks (page granularity). When not set,
    /// then limit is in 1B blocks (byte granularity). This should be set as we are doing paging.
    granularity: u1,
};

/// The structure that contains all the information that each GDT entry needs.
const GdtEntry = packed struct {
    /// The lower 16 bits of the limit address. Describes the size of memory that can be addressed.
    limit_low: u16,

    /// The lower 24 bits of the base address. Describes the start of memory for the entry.
    base_low: u24,

    /// The access bits, see AccessBits for all the options. 8 bits.
    access: AccessBits,

    /// The upper 4 bits of the limit address. Describes the size of memory that can be addressed.
    limit_high: u4,

    /// The flag bits, see above for all the options. 4 bits.
    flags: FlagBits,

    /// The upper 8 bits of the base address. Describes the start of memory for the entry.
    base_high: u8,
};

/// The TSS entry structure
const Tss = packed struct {
    /// Pointer to the previous TSS entry
    prev_tss: u16,
    reserved1: u16,

    /// Ring 0 32 bit stack pointer.
    esp0: u32,

    /// Ring 0 32 bit stack pointer.
    ss0: u16,
    reserved2: u16,

    /// Ring 1 32 bit stack pointer.
    esp1: u32,

    /// Ring 1 32 bit stack pointer.
    ss1: u16,
    reserved3: u16,

    /// Ring 2 32 bit stack pointer.
    esp2: u32,

    /// Ring 2 32 bit stack pointer.
    ss2: u16,
    reserved4: u16,

    /// The CR3 control register 3.
    cr3: u32,

    /// 32 bit instruction pointer.
    eip: u32,

    /// 32 bit flags register.
    eflags: u32,

    /// 32 bit accumulator register.
    eax: u32,

    /// 32 bit counter register.
    ecx: u32,

    /// 32 bit data register.
    edx: u32,

    /// 32 bit base register.
    ebx: u32,

    /// 32 bit stack pointer register.
    esp: u32,

    /// 32 bit base pointer register.
    ebp: u32,

    /// 32 bit source register.
    esi: u32,

    /// 32 bit destination register.
    edi: u32,

    /// The extra segment.
    es: u16,
    reserved5: u16,

    /// The code segment.
    cs: u16,
    reserved6: u16,

    /// The stack segment.
    ss: u16,
    reserved7: u16,

    /// The data segment.
    ds: u16,
    reserved8: u16,

    /// A extra segment FS.
    fs: u16,
    reserved9: u16,

    /// A extra segment GS.
    gs: u16,
    reserved10: u16,

    /// The local descriptor table register.
    ldtr: u16,
    reserved11: u16,

    /// NOTE(review): undocumented upstream ("?") — presumably the debug trap (T)
    /// flag of the TSS; confirm against the Intel SDM task-state segment layout.
    trap: u16,

    /// A pointer to a I/O port bitmap for the current task which specifies individual ports the program should have access to.
    io_permissions_base_offset: u16,
};

/// The GDT pointer structure that contains the pointer to the beginning of the GDT and the number
/// of the table (minus 1). Used to load the GDT with LGDT instruction.
pub const GdtPtr = packed struct {
    /// 16bit entry for the number of entries (minus 1).
    limit: u16,

    /// 32bit entry for the base address for the GDT.
    base: u32,
};

/// The total number of entries in the GDT including: null, kernel code, kernel data, user code,
/// user data and the TSS.
const NUMBER_OF_ENTRIES: u16 = 0x06;

/// The size of the GDT in bytes (minus 1).
const TABLE_SIZE: u16 = @sizeOf(GdtEntry) * NUMBER_OF_ENTRIES - 1;

// ----------
// The indexes into the GDT where each segment resides.
// ----------

/// The index of the NULL GDT entry.
const NULL_INDEX: u16 = 0x00;

/// The index of the kernel code GDT entry.
const KERNEL_CODE_INDEX: u16 = 0x01;

/// The index of the kernel data GDT entry.
const KERNEL_DATA_INDEX: u16 = 0x02;

/// The index of the user code GDT entry.
const USER_CODE_INDEX: u16 = 0x03;

/// The index of the user data GDT entry.
const USER_DATA_INDEX: u16 = 0x04;

/// The index of the task state segment GDT entry.
const TSS_INDEX: u16 = 0x05;

/// The null segment, everything is set to zero.
const NULL_SEGMENT: AccessBits = AccessBits{
    .accessed = 0,
    .read_write = 0,
    .direction_conforming = 0,
    .executable = 0,
    .descriptor = 0,
    .privilege = 0,
    .present = 0,
};

/// This bit pattern represents a kernel code segment with bits: readable, executable, descriptor,
/// privilege 0, and present set.
const KERNEL_SEGMENT_CODE: AccessBits = AccessBits{
    .accessed = 0,
    .read_write = 1,
    .direction_conforming = 0,
    .executable = 1,
    .descriptor = 1,
    .privilege = 0,
    .present = 1,
};

/// This bit pattern represents a kernel data segment with bits: writeable, descriptor, privilege 0,
/// and present set.
const KERNEL_SEGMENT_DATA: AccessBits = AccessBits{
    .accessed = 0,
    .read_write = 1,
    .direction_conforming = 0,
    .executable = 0,
    .descriptor = 1,
    .privilege = 0,
    .present = 1,
};

/// This bit pattern represents a user code segment with bits: readable, executable, descriptor,
/// privilege 3, and present set.
const USER_SEGMENT_CODE: AccessBits = AccessBits{
    .accessed = 0,
    .read_write = 1,
    .direction_conforming = 0,
    .executable = 1,
    .descriptor = 1,
    .privilege = 3,
    .present = 1,
};

/// This bit pattern represents a user data segment with bits: writeable, descriptor, privilege 3,
/// and present set.
const USER_SEGMENT_DATA: AccessBits = AccessBits{
    .accessed = 0,
    .read_write = 1,
    .direction_conforming = 0,
    .executable = 0,
    .descriptor = 1,
    .privilege = 3,
    .present = 1,
};

/// This bit pattern represents a TSS segment with bits: accessed, executable and present set.
const TSS_SEGMENT: AccessBits = AccessBits{
    .accessed = 1,
    .read_write = 0,
    .direction_conforming = 0,
    .executable = 1,
    .descriptor = 0,
    .privilege = 0,
    .present = 1,
};

/// The bit pattern for all bits set to zero.
const NULL_FLAGS: FlagBits = FlagBits{
    .reserved_zero = 0,
    .is_64_bit = 0,
    .is_32_bit = 0,
    .granularity = 0,
};

/// The bit pattern for all segments where we are in 32 bit protected mode and paging enabled.
const PAGING_32_BIT: FlagBits = FlagBits{
    .reserved_zero = 0,
    .is_64_bit = 0,
    .is_32_bit = 1,
    .granularity = 1,
};

// ----------
// The offsets into the GDT where each segment resides.
// ----------

/// The offset of the NULL GDT entry.
pub const NULL_OFFSET: u16 = 0x00;

/// The offset of the kernel code GDT entry.
pub const KERNEL_CODE_OFFSET: u16 = 0x08;

/// The offset of the kernel data GDT entry.
pub const KERNEL_DATA_OFFSET: u16 = 0x10;

/// The offset of the user code GDT entry.
pub const USER_CODE_OFFSET: u16 = 0x18;

/// The offset of the user data GDT entry.
pub const USER_DATA_OFFSET: u16 = 0x20;

/// The offset of the TTS GDT entry.
pub const TSS_OFFSET: u16 = 0x28;

/// The GDT entry table of NUMBER_OF_ENTRIES entries.
var gdt_entries: [NUMBER_OF_ENTRIES]GdtEntry = init: {
    var gdt_entries_temp: [NUMBER_OF_ENTRIES]GdtEntry = undefined;

    // Null descriptor
    gdt_entries_temp[0] = makeGdtEntry(0, 0, NULL_SEGMENT, NULL_FLAGS);

    // Kernel code descriptor
    gdt_entries_temp[1] = makeGdtEntry(0, 0xFFFFF, KERNEL_SEGMENT_CODE, PAGING_32_BIT);

    // Kernel data descriptor
    gdt_entries_temp[2] = makeGdtEntry(0, 0xFFFFF, KERNEL_SEGMENT_DATA, PAGING_32_BIT);

    // User code descriptor
    gdt_entries_temp[3] = makeGdtEntry(0, 0xFFFFF, USER_SEGMENT_CODE, PAGING_32_BIT);

    // User data descriptor
    gdt_entries_temp[4] = makeGdtEntry(0, 0xFFFFF, USER_SEGMENT_DATA, PAGING_32_BIT);

    // TSS descriptor, one each for each processor
    // Will initialise the TSS at runtime
    gdt_entries_temp[5] = makeGdtEntry(0, 0, NULL_SEGMENT, NULL_FLAGS);

    break :init gdt_entries_temp;
};

/// The GDT pointer that the CPU is loaded with that contains the base address of the GDT and the
/// size.
var gdt_ptr: GdtPtr = GdtPtr{
    .limit = TABLE_SIZE,
    .base = undefined,
};

/// The main task state segment entry.
pub var main_tss_entry: Tss = init: {
    var tss_temp = std.mem.zeroes(Tss);
    tss_temp.ss0 = KERNEL_DATA_OFFSET;
    tss_temp.io_permissions_base_offset = @sizeOf(Tss);
    break :init tss_temp;
};

///
/// Make a GDT entry.
///
/// Arguments:
///     IN base: u32 - The linear address where the segment begins.
///     IN limit: u20 - The maximum addressable unit whether it is 1B units or page units.
///     IN access: AccessBits - The access bits for the descriptor.
///     IN flags: FlagBits - The flag bits for the descriptor.
///
/// Return: GdtEntry
///     A new GDT entry with the given base, limit, access and flag bits packed into the
///     hardware descriptor layout.
///
fn makeGdtEntry(base: u32, limit: u20, access: AccessBits, flags: FlagBits) GdtEntry {
    return .{
        .limit_low = @truncate(u16, limit),
        .base_low = @truncate(u24, base),
        .access = .{
            .accessed = access.accessed,
            .read_write = access.read_write,
            .direction_conforming = access.direction_conforming,
            .executable = access.executable,
            .descriptor = access.descriptor,
            .privilege = access.privilege,
            .present = access.present,
        },
        .limit_high = @truncate(u4, limit >> 16),
        .flags = .{
            .reserved_zero = flags.reserved_zero,
            .is_64_bit = flags.is_64_bit,
            .is_32_bit = flags.is_32_bit,
            .granularity = flags.granularity,
        },
        .base_high = @truncate(u8, base >> 24),
    };
}

///
/// Initialise the Global Descriptor Table: build the TSS descriptor (its address is only
/// known at runtime), load the GDT with LGDT and the task register with LTR.
///
pub fn init() void {
    log.info("Init\n", .{});
    defer log.info("Done\n", .{});

    // Initiate TSS: byte-granular descriptor over the in-memory Tss struct.
    gdt_entries[TSS_INDEX] = makeGdtEntry(@ptrToInt(&main_tss_entry), @sizeOf(Tss) - 1, TSS_SEGMENT, NULL_FLAGS);

    // Set the base address where all the GDT entries are.
    gdt_ptr.base = @ptrToInt(&gdt_entries[0]);

    // Load the GDT
    arch.lgdt(&gdt_ptr);

    // Load the TSS
    arch.ltr(TSS_OFFSET);

    switch (build_options.test_mode) {
        .Initialisation => runtimeTests(),
        else => {},
    }
}

/// Mock LGDT handler used by the "init" test: asserts the pointer passed to arch.lgdt
/// carries the expected limit and the address of gdt_entries.
fn mock_lgdt(ptr: *const GdtPtr) void {
    expectEqual(TABLE_SIZE, ptr.limit) catch panic(null, "GDT pointer limit was not correct", .{});
    expectEqual(@ptrToInt(&gdt_entries[0]), ptr.base) catch panic(null, "GDT pointer base was not correct", .{});
}

test "GDT entries" {
    try expectEqual(@as(u32, 1), @sizeOf(AccessBits));
    try expectEqual(@as(u32, 1), @sizeOf(FlagBits));
    try expectEqual(@as(u32, 8), @sizeOf(GdtEntry));
    try expectEqual(@as(u32, 104), @sizeOf(Tss));
    try expectEqual(@as(u32, 6), @sizeOf(GdtPtr));

    const null_entry = gdt_entries[NULL_INDEX];
    try expectEqual(@as(u64, 0), @bitCast(u64, null_entry));

    const kernel_code_entry = gdt_entries[KERNEL_CODE_INDEX];
    try expectEqual(@as(u64, 0xCF9A000000FFFF), @bitCast(u64, kernel_code_entry));

    const kernel_data_entry = gdt_entries[KERNEL_DATA_INDEX];
    try expectEqual(@as(u64, 0xCF92000000FFFF), @bitCast(u64, kernel_data_entry));

    const user_code_entry = gdt_entries[USER_CODE_INDEX];
    try expectEqual(@as(u64, 0xCFFA000000FFFF), @bitCast(u64, user_code_entry));

    const user_data_entry = gdt_entries[USER_DATA_INDEX];
    try expectEqual(@as(u64, 0xCFF2000000FFFF), @bitCast(u64, user_data_entry));

    // The TSS descriptor is null until init() runs.
    const tss_entry = gdt_entries[TSS_INDEX];
    try expectEqual(@as(u64, 0), @bitCast(u64, tss_entry));

    try expectEqual(TABLE_SIZE, gdt_ptr.limit);

    try expectEqual(@as(u32, 0), main_tss_entry.prev_tss);
    try expectEqual(@as(u32, 0), main_tss_entry.esp0);
    try expectEqual(@as(u32, KERNEL_DATA_OFFSET), main_tss_entry.ss0);
    try expectEqual(@as(u32, 0), main_tss_entry.esp1);
    try expectEqual(@as(u32, 0), main_tss_entry.ss1);
    try expectEqual(@as(u32, 0), main_tss_entry.esp2);
    try expectEqual(@as(u32, 0), main_tss_entry.ss2);
    try expectEqual(@as(u32, 0), main_tss_entry.cr3);
    try expectEqual(@as(u32, 0), main_tss_entry.eip);
    try expectEqual(@as(u32, 0), main_tss_entry.eflags);
    try expectEqual(@as(u32, 0), main_tss_entry.eax);
    try expectEqual(@as(u32, 0), main_tss_entry.ecx);
    try expectEqual(@as(u32, 0), main_tss_entry.edx);
    try expectEqual(@as(u32, 0), main_tss_entry.ebx);
    try expectEqual(@as(u32, 0), main_tss_entry.esp);
    try expectEqual(@as(u32, 0), main_tss_entry.ebp);
    try expectEqual(@as(u32, 0), main_tss_entry.esi);
    try expectEqual(@as(u32, 0), main_tss_entry.edi);
    try expectEqual(@as(u32, 0), main_tss_entry.es);
    try expectEqual(@as(u32, 0), main_tss_entry.cs);
    try expectEqual(@as(u32, 0), main_tss_entry.ss);
    try expectEqual(@as(u32, 0), main_tss_entry.ds);
    try expectEqual(@as(u32, 0), main_tss_entry.fs);
    try expectEqual(@as(u32, 0), main_tss_entry.gs);
    try expectEqual(@as(u32, 0), main_tss_entry.ldtr);
    try expectEqual(@as(u16, 0), main_tss_entry.trap);

    // Size of Tss will fit in a u16 as 104 < 65535 (2^16)
    try expectEqual(@as(u16, @sizeOf(Tss)), main_tss_entry.io_permissions_base_offset);
}

test "makeGdtEntry NULL" {
    const actual = makeGdtEntry(0, 0, NULL_SEGMENT, NULL_FLAGS);

    const expected: u64 = 0;
    try expectEqual(expected, @bitCast(u64, actual));
}

test "makeGdtEntry alternating bit pattern" {
    const alt_access = AccessBits{
        .accessed = 1,
        .read_write = 0,
        .direction_conforming = 1,
        .executable = 0,
        .descriptor = 1,
        .privilege = 0b10,
        .present = 0,
    };

    try expectEqual(@as(u8, 0b01010101), @bitCast(u8, alt_access));

    const alt_flag = FlagBits{
        .reserved_zero = 1,
        .is_64_bit = 0,
        .is_32_bit = 1,
        .granularity = 0,
    };

    try expectEqual(@as(u4, 0b0101), @bitCast(u4, alt_flag));

    const actual = makeGdtEntry(0b01010101010101010101010101010101, 0b01010101010101010101, alt_access, alt_flag);

    const expected: u64 = 0b0101010101010101010101010101010101010101010101010101010101010101;
    try expectEqual(expected, @bitCast(u64, actual));
}

test "init" {
    // Set up
    arch.initTest();
    defer arch.freeTest();

    arch.addTestParams("ltr", .{TSS_OFFSET});
    arch.addConsumeFunction("lgdt", mock_lgdt);

    // Call function
    init();

    // Post testing: rebuild the expected TSS descriptor bit pattern by hand.
    const tss_entry = gdt_entries[TSS_INDEX];
    const tss_limit = @sizeOf(Tss) - 1;
    const tss_addr = @ptrToInt(&main_tss_entry);

    var expected: u64 = 0;
    expected |= @as(u64, @truncate(u16, tss_limit));
    expected |= @as(u64, @truncate(u24, tss_addr)) << 16;
    // 0x89 == TSS_SEGMENT access byte (accessed | executable | present).
    expected |= @as(u64, 0x89) << (16 + 24);
    expected |= @as(u64, @truncate(u4, tss_limit >> 16)) << (16 + 24 + 8);
    // Flags are zero
    expected |= @as(u64, @truncate(u8, tss_addr >> 24)) << (16 + 24 + 8 + 4 + 4);

    try expectEqual(expected, @bitCast(u64, tss_entry));

    // Reset: restore the globals so other tests see the pre-init state.
    gdt_ptr.base = 0;
    gdt_entries[TSS_INDEX] = makeGdtEntry(0, 0, NULL_SEGMENT, NULL_FLAGS);
}

///
/// Check that the GDT table was loaded properly by getting the previously loaded table and
/// compare the limit and base address.
///
fn rt_loadedGDTSuccess() void {
    const loaded_gdt = arch.sgdt();
    if (gdt_ptr.limit != loaded_gdt.limit) {
        panic(@errorReturnTrace(), "FAILURE: GDT not loaded properly: 0x{X} != 0x{X}\n", .{ gdt_ptr.limit, loaded_gdt.limit });
    }
    if (gdt_ptr.base != loaded_gdt.base) {
        // Fixed: second value was printed without its 0x prefix ("{X}" vs "0x{X}").
        panic(@errorReturnTrace(), "FAILURE: GDT not loaded properly: 0x{X} != 0x{X}\n", .{ gdt_ptr.base, loaded_gdt.base });
    }
    log.info("Tested loading GDT\n", .{});
}

///
/// Run all the runtime tests.
///
pub fn runtimeTests() void {
    rt_loadedGDTSuccess();
}