repo
stringlengths
26
115
file
stringlengths
54
212
language
stringclasses
2 values
license
stringclasses
16 values
content
stringlengths
19
1.07M
https://github.com/MultisampledNight/flow
https://raw.githubusercontent.com/MultisampledNight/flow/main/src/gfx/draw.typ
typst
MIT License
// cetz.draw but with extra utilities which are not needed when not drawing on a canvas #import "maybe-stub.typ": cetz #import cetz.draw: * #import "../palette.typ": * // Only uses the x component of the given coordinate. #let hori(coord) = (coord, "|-", ()) // Only uses the y component of the given coordinate. #let vert(coord) = (coord, "-|", ()) // Convert from Typst alignment to cetz directions. #let to-anchor(it) = { if it == top { "north" } else if it == bottom { "south" } else if it == left { "west" } else if it == right { "east" } } // Returns a point according to the given value // on the edge of the given object. // `object` needs to be a string, the name of a drawn element. #let lerp-edge(object, edge, value) = { // decide what bounds to use let ys = if edge.y == none { (top, bottom) } else { (edge.y,) * 2 } let xs = if edge.x == none { (left, right) } else { (edge.x,) * 2 } let point = xs.zip(ys).map( ((x, y)) => object + "." + to-anchor(y) + "-" + to-anchor(x) ) point.insert(1, value) point } #let over(object, value) = lerp-edge(object, top, value) #let under(object, value) = lerp-edge(object, bottom, value) #let right-to(object, value) = lerp-edge(object, right, value) #let left-to(object, value) = lerp-edge(object, left, value) // Instruction to the `trans` function that a certain tree part has to be handled differently. // The `queue` is the tree part to be placed under the modifier, // the `cfg` specifies additional arguments for the `trans` function. #let _modifier(queue, cfg) = (queue: queue, cfg: cfg) #let _is-modifier(part) = ( type(part) == dictionary and "queue" in part and "cfg" in part ) // Create a new branch. After all coordinates in this branch have been processed, // return to the node before it. // At the end of a branch, an arrow mark is always drawn. #let br(..args) = _modifier(args.pos(), (branch: true, last-is-arrow: true)) // Label the edges created in this call. 
// The label is: // // - Only drawn once, after all edges and states have been drawn. // - Placed in the center of all states in this call. #let tag( ..args, tag: none, offset: (0, 0), ) = _modifier( args.pos(), ( tag: tag, offset: offset, content-args: args.named(), ), ) // Style all edges inside this call. // Use named arguments for doing so, just like any other cetz element. // Styles can be stacked and will be merged. // Note that they are merged shallowly: // If there are multiple styles with the same key, // the deeper one will override the entire value, and // NOT be merged with the previous value. // // The function is suffixed with `d` // to avoid shadowing the builtin Typst `style` function. #let styled(..args) = _modifier(args.pos(), (styles: args.named())) // shallowly replaces () with last #let _make-concrete(coord, last) = if type(coord) == array { if coord.len() == 0 { last } else { coord.map(_make-concrete) } } else if type(coord) == dictionary { for (key, p) in coord { coord.insert(key, _make-concrete(last)) } if "rel" in coord and "to" not in coord { coord.to = last; } coord } else { coord } // Creates an edge denoting transition away from a starting state, // branching out arbitrarily. // The syntax for branching is inspired by the IUPAC nomenclature of organic chemistry: // https://en.wikipedia.org/wiki/IUPAC_nomenclature_of_organic_chemistry // // At least, one starting state and a target state are needed. // Afterwards, any number of branches can follow, and the default one is automatically entered. // A branch is a sequence, where each element can be a coordinate or another branch. // Coordinates can be specified in place. // Branches are specified via the `br` function. // // Essentially, you can think of branches like save and restore points. // Anytime you type `br(`, the current position in the tree is stored on a stack. // Continuing to type more coordinates or branches after do not modify this stored entry. 
// Typing a closing `)` of a `br` call pops the last position from the stack and // continues from there. // This can nest arbitrarily often. #let trans(from, ..args, arrow-mark: (symbol: ">")) = { // ...one day, when typst has proper types, this will be hopefully much cleaner if args.pos().len() == 0 { panic("need at least one target state to transition to") } // essentially an "inverse" depth first search // we already have the tree and the search we want to follow // we just need to repeat it along the plan // basically manual recursion. each array entry is a stack frame // each stack frame is a dictionary with fields: // - `queue` for the coords/branches to next run through // - `cfg` for modifier options (see above functions calling _modifier) // - (optional) `last-is-arrow` if to draw an arrowhead at the end of this frame let depth = ((queue: args.pos().rev(), cfg: (:)),) let last = from // optimization: instead of going through the whole depth // each edge to get the current style, // just combine it at each new `styled` modifier and push it onto here, // popping when the `styled` frame ends let styles-depth = () // collecting them while drawing edges so we can draw them all on the edges // each tag is a dictionary with fields: // - `pos` for where to draw the tag // - `display` for what to show at `pos` let tags = () while depth.len() != 0 { let frame = depth.last() let queue = frame.queue // has this frame has been fully processed? 
if queue.len() == 0 { let frame = depth.pop() // if this was the end of a section to be tagged, note where to draw it if "tag" in frame.cfg { tags.push(( pos: ( to: (frame.last, 50%, last), rel: frame.cfg.offset, ), display: frame.cfg.tag, content-args: frame.cfg.content-args, )) } // if there are styles that end here, remove them if "styles" in frame.cfg { let _ = styles-depth.pop() } // if we should reset due to branching, do so if frame.cfg.at("branch", default: false) { last = frame.last } continue } let part = queue.pop() depth.last().queue = queue let maybe-arrowhead = if ( queue.len() == 0 and frame.at("last-is-arrow", default: true) ) { // oh that means we want to draw an arrowhead // though if this is a modifier, that information needs to be propagated instead if _is-modifier(part) { part.last-is-arrow = true } (mark: (end: arrow-mark)) } if _is-modifier(part) { // advance in depth // can just make it a new frame // the queue is popped at the back to receive the next one // so it is reversed here so it's in the right order again part.queue = part.queue.rev() // some modifiers (e.g. branch, tag) need the last node, // so store that one, too part.last = last // if this is a style modifier, store the style if "styles" in part.cfg { let next-styles = ( styles-depth.at(-1, default: (:)) + part.cfg.styles ) styles-depth.push(next-styles) } depth.push(part) continue } let current = part line( last, current, ..styles-depth.at(-1, default: (:)), ..maybe-arrowhead, ) last = current } // draw all tags we collected // they're drawn afterwards so they're always visible over the edges for (pos, display, content-args) in tags { content( pos, box( fill: bg, inset: 0.25em, radius: 0.1em, display, ), ..content-args, ) } }
https://github.com/Fabian-Heinrich/typst_homework
https://raw.githubusercontent.com/Fabian-Heinrich/typst_homework/main/homework_template.typ
typst
MIT License
/* * based on: * - https://typst.app/docs/tutorial/making-a-template/ */ #let author_info( authors: () ) = { pad( top: 0.5em, x: 2em, grid( columns: (1fr,) * calc.min(3, authors.len()), gutter: 1em, ..authors.map(author => align(center)[ #grid( columns: (auto), rows: 2pt, [*#author.name*], ) #author.student_number \ #author.email ] ) ) ) } #let conf( title: "", subtitle: "", authors: (), date: datetime.today(), date_format: "[day].[month].[year]", language: "de", show_page_numbers: true, bottom_margin: 25mm, doc ) = { // Set the document's basic properties. set document(author: authors.map(a => a.name), title: title) if(show_page_numbers) { bottom_margin = bottom_margin + 10mm } set page( margin: (left: 25mm, right: 25mm, top: 25mm, bottom: bottom_margin), number-align: center, header: [ #set text(fill: luma(15%)) #title - #subtitle #h(1fr) #date.display(date_format) ], footer: [ #set text(fill: luma(15%), weight: 100) #author_info(authors: authors) #if(show_page_numbers) { align(center)[ #set text(fill: black) #counter(page).display( "1/1", both: true ) ] } ] ) set text(font: "New Computer Modern", lang: language) show math.equation: set text(weight: 400) show math.equation: set block(spacing: 0.65em) line(length: 100%, stroke: 1pt) // Title row. pad( bottom: 4pt, top: 4pt, align(center)[ #block(text(weight: 500, 1.75em, title)) #v(1em, weak: true) #block(text(weight: 500, 1.25em, subtitle)) ] ) line(length: 100%, stroke: 1pt) author_info(authors: authors) // Main body. set text(hyphenate: false) doc }
https://github.com/lxl66566/my-college-files
https://raw.githubusercontent.com/lxl66566/my-college-files/main/信息科学与工程学院/嵌入式系统/实验/报告/5/5.typ
typst
The Unlicense
#import "../template.typ": * #show: project.with( title: "实验报告 5", authors: ( "absolutex", ) ) = IIC 实验 == 实验目的 使用 STM32F429 的普通 IO 口,用软件模拟 IIC 时序,实现和 24C02 之间的双向通信(读写),并将结果显示在 LCD 模块上。 == 实验原理 IIC(Inter-Integrated Circuit)总线是一种由 PHILIPS 公司开发的两线式串行总线,用于连接微控制器及其外围设备。它是由数据线 SDA 和时钟 SCL 构成的串行总线,可发送和接收数据。在 CPU 与被控 IC 之间、IC 与 IC 之间进行双向传送,高速 IIC 总线一般可达 400kbps 以上。 I2C 总线在传送数据过程中共有三种类型信号。 - 开始信号:SCL 为高电平时,SDA 由高电平向低电平跳变,开始传送数据。 - 结束信号:SCL 为高电平时,SDA 由低电平向高电平跳变,结束传送数据。 - 应答信号:接收数据的 IC 在接收到 8bit 数据后,向发送数据的 IC 发出特定的低电平脉冲,表示已收到数据。CPU 向受控单元发出一个信号后,等待受控单元发出一个应答信号,CPU 接收到应答信号后,根据实际情况作出是否继续传递信号的判断。若未收到应答信号,由判断为受控单元出现故障。 这些信号中,起始信号是必需的,结束信号和应答信号是可选的。IIC 总线时序图如图所示: #figure( image("5.1.png", width: 70%), caption: [IIC 总线时序图], ) 目前大部分 MCU 都带有 IIC 总线接口,STM32F4 也不例外。但是这里我们不使用 STM32F4的硬件 IIC 来读写 24C02,而是通过软件模拟。软件模拟最大的好处就是方便移植,同一个代码兼容所有 MCU,任何一个单片机。只要有 IO 口,就可以很快的移植过去,而且不需要特定的 IO 口。而硬件 IIC,则换一款 MCU,基本上就得重新搞一次,移植是比较麻烦的。 == 代码修改 未经修改的代码能够显示在按下不同按键时,通过 IIC 总线向 24C02 写入一段固定的字符串,并且将数据读出,显示在 LCD 模块上。 我们小组修改了程序逻辑,使其能够判断当前按下的按键,并将按键的值通过 IIC 总线读写 24C02,并显示在 LCD 模块上。 #include_code_file("../代码/9.c","main.c 片段", "c") 函数 `itoa_1` 将一位整数转为字符,写入 buffer。这一位整数即为 `key.h` 中定义的按键值,例如 `#define KEY0_PRES 1`。 == 实验结果 按下 `WKUP_PRES` 按键,观察到开发板在 LCD 模块上显示按键值,初始时该值并不存在,因此不显示。而按下其他按键后,再次按下 `WKUP_PRES` 按键,观察到 LCD 模块上显示按键值,并且该值与按下按键的值一致,证明按键值写入与读取成功。 断电重启开发板,按下 `WKUP_PRES` 按键,观察到 LCD 上不显示按键值,证明 `24C02` 中的数据被清除,是易失性存储。 == 心得体会 这次实验,我们小组修改了 IIC 总线读写 24C02 的程序,并且成功将按键值写入 24C02,成功读出,并且亲身验证了其数据易失性,启发了我们在实际应用时需要做好断电预防措施。 = SPI 实验 == 实验目的 使用 STM32F429 自带的 SPI 来实现对外部 FLASH(W25Q256)的读写,并将结果显示在 LCD 模块上,并通过一些方法验证数据切实写入了Flash中。 == 实验原理 SPI 是一种高速的,全双工,同步的通信总线,并且在芯片的管脚上只占用四根线,节约了芯片的管脚,同时为 PCB 的布局上节省空间,提供方便,正是出于这种简单易用的特性,现在越来越多的芯片集成了这种通信协议,STM32F4 也有 SPI 接口。SPI 的内部简图如下: #figure( image("5.2.png", width: 70%), caption: [SPI 内部结构简明图], ) 其中,MISO 主设备数据输入,从设备数据输出。MOSI 主设备数据输出,从设备数据输入。SCLK 时钟信号,由主设备产生。CS 从设备片选信号,由主设备控制。STM32F429 的 SPI 功能很强大,SPI 时钟最高可以到 45Mhz,支持 DMA,可以配置为 SPI协议或者 I2S 协议(支持全双工 I2S)。本次实验中,使用 STM32F429 的 SPI 来读取外部 SPI FLASH 
芯片(W25Q256),使用了 STM32F429 的 SPI5 的主模式。 == 修改后的代码 本次实验的修改代码流程与 IIC 实验基本一致,我们小组也使用 `WKUP_PRES` 按键来控制 SPI 读操作,显示其数据在 LCD 模块上。其他按键进行写操作,将按键写入 SPI FLASH 芯片。 #include_code_file("../代码/10.c","main.c 片段", "c") 代码与 IIC 实验的代码主要不同在初始化阶段,使用 `W25QXX_Init` 初始化了不同的模块,并且在读写操作上使用了不同的函数。`W25QXX_Write` 比起 `AT24CXX_Write` 的参数发生了一些变化,需要指定从 FLASH 上读取的起始地址。 == 实验结果 按下 `WKUP_PRES` 按键,观察到开发板在 LCD 模块上显示按键值。而按下其他按键后,再次按下 `WKUP_PRES` 按键,观察到 LCD 模块上显示按键值,并且该值与按下按键的值一致,证明按键值写入与读取成功。 断电重启开发板,按下 `WKUP_PRES` 按键,观察到 LCD 上显示断电前的按键值,证明数据确实写入了 FLASH 芯片,属于非易失性存储。 == 心得体会 实验修改了 SPI 实验的代码,并且成功将按键值写入 SPI FLASH 芯片并读出,验证了其数据非易失性。 FLASH 在实际使用中有寿命的限制,W25Q128 的擦写周期大概有 10W 次,具有 20 年的数据保存期限。因此实际设计时需要考虑均匀在SPI FLASH 芯片上写入数据,并且注意坏块检测与处理,以延长 FLASH 的使用寿命。
https://github.com/EunTilofy/Compiler2024
https://raw.githubusercontent.com/EunTilofy/Compiler2024/main/lab1/Report_of_Lab1.typ
typst
#import "../template.typ": * #show: project.with( course: "编译原理", title: "Compilers Principals - Lab1", date: "2024.4.07", authors: "<NAME>, 3210106357", has_cover: false ) = 实验内容 本次实验,我们利用 Flex 和 Bison 实现了 sysY 语言的词法分析和语法分析。 通过 ``` make compiler ./compiler <input file> ``` 可以对输入的 sy 文件进行语法检查, 如果可以正确解析出语法树,程序将正常退出并返回 0。同时在错误流中显示 ``` Parse success! ``` 否则,程序将汇报错误,一个错误的代码的解析输出如下: ``` Error at Line 5 Mysterious character"@". error: syntax error Failed to parse the file : tests/lab1/error1.sy ``` = 代码实现 == 主接口 ```cpp int main(int argc, char **argv) { yylineno = 1; if (argc < 2) { std::cerr << "Usage: " << argv[0] << "<input file> [output file]" << std::endl; return 1; } if(!(yyin = fopen(argv[1], "r"))) { std::cerr << "Open file error : " << argv[1] << std::endl; return 1; } if(yyparse()) { std::cerr << "Failed to parse the file : " << argv[1] << std::endl; return 1; } // if(!line_error) // 尚未完整实现 // Print_Tree(Root, 0); std::cerr << "\nParse success !" << std::endl; fclose(yyin); return 0; } ``` == Flex 词法分析 报错方法: ```cpp void error_print(int line, std::string text, std::string msg) { if (line_error == line) return; line_error = line; std::cout << "Error at Line " << line << " " << msg << "\"" << text << "\".\n"; } ``` 正则表达式定义: ```cpp digit [0-9] blank [ \t\r\n] letter [a-zA-Z] alpha _|{letter} Comment1 "/*"[^*]*"*"+([^*/][^*]*"*"+)*"/" Comment2 "//"[^\r\n]* ident {alpha}({alpha}|{digit})* oct 0[0-7]+ hex 0[Xx][0-9a-fA-F]+ zero 0 n_zero [1-9]+{digit}* integer {zero}|{n_zero} newline "\n" ws [ \r\t\n]+ ``` 词法解析部分: ```cpp {Comment1} { /* nothing to do */ } {Comment2} { /* nothing to do */ } "int" { return INT; } "void" { return VOID; } "if" { return IF; } "else" { return ELSE; } "while" { return WHILE; } "+" { return ADD; } // ................................ {ident} { return IDENT; } {hex} { return INTCONST; } {oct} { return INTCONST; } {integer} { return INTCONST; } {ws} { /* nothing to do */} . 
{ error_print(yylineno, std::string(yytext), "Mysterious character"); return 0; } ``` == Bison 语法分析 从 Root 开始进行自底向上分析。 部分语法分析如下: 需要适当修改语法,避免二义性。 ```cpp ROOT : CompUnit ; CompUnit : Decl | FuncDef | CompUnit Decl | CompUnit FuncDef ; Decl : VarDecl ; BType : INT ; VarDef : IDENT | IDENT Widths | IDENT ASSIGN InitVal | IDENT Widths ASSIGN InitVal ; VarDecl : BType VarDef VarDefs SEMI ; VarDefs : VarDefs COMMA VarDef | ; Widths : "[" INTCONST "]" | Widths "[" INTCONST "]" ; InitVal : Exp | "{" "}" | "{" InitVals "}" ; InitVals : InitVal | InitVals COMMA InitVal ; FuncHead : BType IDENT | VOID IDENT ; FuncDef : FuncHead "(" FuncParams ")" Block | FuncHead "(" ")" Block ; ``` = 测试结果 ``` python3 test.py ./compiler lab1 ``` tests 下的测试样例全部通过: #figure( image("1.png", width: 50%), caption: [ All tests passed! ], )
https://github.com/jgm/typst-hs
https://raw.githubusercontent.com/jgm/typst-hs/main/test/typ/layout/align-00.typ
typst
Other
#set page(height: 100pt) #stack(dir: ltr, align(left, square(size: 15pt, fill: eastern)), align(center, square(size: 20pt, fill: eastern)), align(right, square(size: 15pt, fill: eastern)), ) #align(center + horizon, rect(fill: eastern, height: 10pt)) #align(bottom, stack( align(center, rect(fill: green, height: 10pt)), rect(fill: red, height: 10pt, width: 100%), ))
https://github.com/coastalwhite/typst-tudelft
https://raw.githubusercontent.com/coastalwhite/typst-tudelft/main/main.typ
typst
#import "tudelft-template.typ": conf, COLORS, bare-slide, title-slide, outline-slide, topic-slide, slide, bare-bg-slide, show_heading #show: doc => conf(doc) #bare-slide[ #align(center + horizon, [ #heading(level: 1, outlined: false, [ WP3 Kick-off Meeting ]) #box(inset: (x: 5cm), grid( columns: (1fr, 1fr, 1fr), column-gutter: 1cm, image("./assets/logo/TUDelft_logo_rgb.svg"), image("./assets/logo/TUDelft_logo_rgb.svg"), image("./assets/logo/TUDelft_logo_rgb.svg"), ) ) ]) ] #title-slide(author: "<NAME>")[Hello!] #outline-slide() #topic-slide(subtitle: "Resilient Trust")[WP3 Kick Off] #slide(title: "Title Placeholder")[ - Point 1 + dsajlsdjaflj - dsafjlka - Point 2 ] #slide(title: "Hello!", rhs_figure: [ #figure(caption: "Caption")[ #image("./assets/logo/TUDelft_logo_rgb.svg") ] ])[ - Point 1 - dsajlsdjaflj ] #bare-bg-slide[ #box(inset: 2cm, width: 100%, height: 100%, show_heading(fill: white, align(right + bottom, stack(dir: ttb, spacing: 1cm, [ = Title Placeholder ], [ == Subtitle Placeholder ] )))) ]
https://github.com/DieracDelta/survey-ML-seminar
https://raw.githubusercontent.com/DieracDelta/survey-ML-seminar/master/main.typ
typst
/// instructions // Preferably related to the chosen paper for presentation // Two page write-up // Describe: problem/challenge, state-of-the-art, top papers // Examples: ML for protein folding (alpha-fold) // ML for financial time-series #import "template.typ": * #show: ieee.with( title: "Automating Interactive Theorem Proving", abstract: [ Preventing software bugs has always been an important problem in software engineering largely due to the usage context. For example, bugs in both mission critical software and large scale deployment settings can be disastrous. We present a literature review of the current research on preventing bugs in an automated fashion. Then, we discuss three current works that apply machine learning to solve this problem: LegoProver, Curriculum Learning, and LeanDojo. Finally, we provide a history of prior approaches to bug prevention. ], authors: ( ( name: "<NAME>", email: "<EMAIL>" ), ), bibliography-file: "refs.bib", ) = The Task The task is to prevent software bugs before software is deployed. Software is used in many important contexts in which bugs can be catastrophic. Real world examples include self driving cars, nuclear @STUXNET, medical devices @THERAC25, and financial systems @DEFIHACK . Furthermore, software bugs are becoming worse with time. With introduction of AI assisted coding practices such as Copilot @COPILOT or Codeium @CODEIUM, studies find that software code quality has gone down @CODEQUALITY, Which increases the liklihood of bugs. = Task History and Related Work There have been many approaches and decades of research devoted to solving the problem of writing correct, bug-free software. We mention several of the most popular approaches. == Programming Languages According to Microsoft, 70% of their bugs were due to errors in manual memory management (denoted memory safety) @MSFT . One part of the solution has been to write application code in memory safe langauges, like Rust, Golang, Java, or Python. 
Even the US government recommends the use of memory safe languages to prevent bugs @GOV . Other common causes of bugs include implicit type coersion and runtime type errors. For example, runtime type errors became so common that languages that allow runtime type errors introduced types. Notably, Javascript has largely been replaced by Typescript. And, Python has introduced optional type annotations and a type checker. Type systems serve as a bandaid by using the compiler to prevent bugs. However, this does not solve the entire problem. There still remains a class of logical bugs that are not caught by the compiler. These bugs must be dealt with in other ways. == Automated Theorem Proving One way to guarantee correct code is to specify the utility of each function. This specification is generally created via pre and post conditions for each function (as well as loop invariants). Then, the function's code can be verified to match the specification. This may be done by converting the code to guarded commands @GC then creating a formula from those commands using hoare logic. If the negation of that formula is satisfiable, then there exists an input such that the specification is violated. That is, a bug has been identified. A popular implementation of this methodology is Dafny @DAFNY . However, there are still several open problems with this approach. Checking if the formula is satisfiable is a NP-complete problem. So, verifying the code may run for an unbounded and unpredictable period of time. One solution to this is to run several SAT solvers with different techniques/flags at the same time (and possibly propagate information between them) @SAT1 @SAT2 . However, this is an imperfect solution because of the higher compute demands for a potential speedup. Another downside is the need to specify loop invariants in order for the hoare logic to be applicable. Large amounts of compute is required. Additionally, reasoning about parallel programs is difficult. 
== Interactive Theorem Proving Another prominent approach is to use an interactive theorem prover such as Lean or Coq. Interactive theorem provers are able to, using logical axioms, work with a programmer to interactively prove facts about code. For example, a programmer could define a palindrome, then prove that the reverse of the palindrome is also a palindrome @LEANPALLY . This scales better from a computation standpoint, since NP-complete problems need not be solved to verify the code. On the other hand, this approach is time consuming in programmer hours. Large scale examples of this approach include: - CompCert, a verified C compiler, which is 30k lines of ocaml code and took 6 person years to verify @COMPCERT - Certikos, a verified kernel, which is 6.5k lines of C/asm code and took 3 person years to verify @CERTIKOS - SEL4, a verified kernel, which is 9k lines of C/asm code and took 11 person years to verify @SEL4 = Interactive Theorem Proving with Machine Learning If there were a way to automate the theorem proving, then the programmer would not need to spend time proving the code. This would make interactive theorem proving much more feasible, thereby making verified code much more common. Although a relatively new field, we survey recent contributions in three recent works: Curriculum Learning, LeanDojo, and LEGO-Prover. == Curriculum Learning Curriculum Learning's primary contribution is the application of expert iteration @EXPERTITERATION to automate the proof generation for theorems in Lean. Though the original usecase for this technique was chess, Karpukhin et al. @EXPERTITERATION applies the same techniques to iteractive theorem proving because of the similarities in search space size and game type. Karpukhin et al. started with GPT-3 pretrained on Commoncrawl (a web dataset) and webmath (a math dataset). They then defined two objective functions. The ProofStep objective function used the theorem name as a heuristic to enforce recall of related data. 
And the ProofSize objective function used the estimated size of the proof to prioritize smaller proofs. The model is then trained iteratively starting with the aforementioned pretrained model, these objective functions, and a tree search over the application of different tactics (branches) to different states (goals to prove). This model may then be used in conjunction with a best-first search using a heuristic called logprob @LOGPROB to determine which branch to traverse. This approach worked reasonablely well, as it was able to prove 36.6% of the theroems in the MiniF2F test benchmark @MINIF2F . However, it used LeanStep for data extraction and contributed lean-gym for data training, respectively. These two frameworks have since gone unmaintained. == LeanDojo LeanDojo reimplemented data extraction and forked lean-gym, both improving these libraries and upgrading them to handle Lean 4. LeanDojo's second contribution was a computationally cheap to train and run machine learning guided interactive theorem prover, denoted ReProver. ReProver reuses the insights developed by Karpukhin et al. both of using a large language model to generate tactics to progress through a proof and a best-first search using logprob. ReProver also emphasized the importance of choosing the right premises (known facts) as arguments to the tactic generation. ReProver applied the idea of dense passage retrieval @DPR to select related premises to the state to prove. It then fed these premises and the state into a large language model to generate the tactics to use in the best-first search. This approach seemed to work really well given the limited resources of the model, proving 26.5% of theorems in MiniF2F. LeanDojo's large language model has an order of magnitude less parameters than Karpukhin et al. and emphasized short training times on consumer-level hardware. LeanDojo also contributed a large dataset including training data (a large improvement over MiniF2F). 
== Lego-Prover Lego-Prover solves a slightly different, but related problem. Lego-Prover takes a paper proof of the theorem, a english version of the theorem in addition to premises and the theorem itself. Using the paper proof, Lego-Prover then splits the theorem into the composition of simpler lemmas. Lego-Prover uses K nearest neighbors to select premises to feed into the llm to prove the lemmas. Lego-Prover constantly adds to this lemma database, and improves each lemma (denoted "evolution") with a large langauge model before choosing the lemmas to use in progressing the state. Notably, unlike LeanDojo and Karpukhin et al. Lego-Prover does not use a best-first search approach. Lego-Prover performed better than ReProver or Karpukhin et al. with a 50% success rate on MiniF2F. However, the large language model used was GPT4, which is significantly larger than both LeanDojo and Reprover. So, better performance is not unexpected due to a higher parameter count. = Conclusion Bugs in code are a serious problem, and researchers have been working for decades on their prevention to varying levels of success. Common bug prevention techniques include automated theorem proving, better type systems, and interactive theorem proving. However, only recently has the approach of interactive theorem proving begun to become automated using machine learning techniques. LeanDojo, LEGO-Prover, and Curriculum Learning all provide encouraging results that in the future may make writing bug free code easier.
https://github.com/Servostar/dhbw-abb-typst-template
https://raw.githubusercontent.com/Servostar/dhbw-abb-typst-template/main/template/appendix.typ
typst
MIT License
= Raw data #label("Anhang-A") #lorem(50) == More raw data #lorem(50) #figure(``` ```)
https://github.com/Geson-anko/vconf24_template_typst
https://raw.githubusercontent.com/Geson-anko/vconf24_template_typst/main/README.md
markdown
# バーチャル学会要旨テンプレート Typst版 本テンプレートはバーチャル学会非運営員によって制作されたものです.本テンプレートを用いてバーチャル学会2024に投稿する要旨データを作成することができますが,本typstテンプレートのフォーマットの正確性は担保されません. よって,投稿後に不備が見つかった際はバーチャル学会の運営員の指示に従い,適切に修正や対応を行ってください. バーチャル学会2024 HP: <https://vconf.org/2024/presentation/#04>
https://github.com/andreasKroepelin/lovelace
https://raw.githubusercontent.com/andreasKroepelin/lovelace/main/examples/number-no-number-low-level.typ
typst
MIT License
#import "../lib.typ": * #set page(width: auto, height: auto, margin: 1em) #set text(font: "TeX Gyre Pagella") #show math.equation: set text(font: "TeX Gyre Pagella Math") #pseudocode( [normal line with a number], no-number[this line has no number], [this one has a number again], )
https://github.com/fenjalien/metro
https://raw.githubusercontent.com/fenjalien/metro/main/tests/unit/sqrt/test.typ
typst
Apache License 2.0
#import "/src/lib.typ": unit, metro-setup #set page(width: auto, height: auto) #metro-setup(power-half-as-sqrt: true, per-mode: "symbol") #unit("hertz per sqrt(kilo watt hour)")
https://github.com/polarkac/MTG-Stories
https://raw.githubusercontent.com/polarkac/MTG-Stories/master/stories/054%20-%20Lost%20Caverns%20of%20Ixalan/003_Episode%203.typ
typst
#import "@local/mtgstory:0.2.0": conf #show: doc => conf( "Episode 3", set_name: "Lost Caverns of Ixalan", story_date: datetime(day: 20, month: 10, year: 2023), author: "<NAME>", doc ) == Wayta If someone had told Wayta a few days earlier that she and a loxodon archaeologist would be chasing a ghost through underground ruins, she would have told them to see a healer. Also, she would have asked what a loxodon was. The ghost—Abuelo, he'd called himself—floated instead of running, his poncho flapping in an invisible breeze as he darted between buildings. Quint raced after him, trunk curled out of the way, and Wayta followed, scanning their surroundings for potential dangers. Unfortunately, being at the back meant she was the last to see what awaited them around a corner, next to an underground river. "Titan!" Abuelo shouted, then vanished in a swirl of purple-pink energy. Wayta skidded to a halt, nearly running into Quint's back. Ahead, a hulking figure loomed, easily twice her height. She might have mistaken it for part of the fungus growing from the walls, until it moved. Its head was a huge, layered mushroom, like those that grew from jungle trees, while its shoulders and chest were clusters of smaller round-topped morels. Jagged, chitinous spikes jutted from the backs of its massive hands and up its forearms. #figure(image("003_Episode 3/01.png", width: 100%), caption: [Art by: Domenico Cava], supplement: none, numbering: none) A low, discordant buzzing, more seen than heard, raised the hair on Wayta's arms. Before she or Quint could do more than stare, the creature charged at them. "Get back," Wayta told Quint. She brandished her sword to attract the creature's attention, circling toward the rushing river and away from Quint. Tilonalli, smite my enemies, she prayed. The titan grabbed half of a crumbling wall and threw it at Wayta. She danced sideways, the huge stone block breezing past and landing with a crash behind her. 
Its impact sent pebbles and splinters flying, slicing her bare skin and rattling off her armor. With a roar, the titan thundered toward her and reared back to strike. Wayta ducked, lunging under a swipe from an arm big as a tree trunk. She rolled into a crouch and sliced at the back of its leg, then hopped back to her feet. A human would have been disabled; the titan was unaffected. It turned and swung again, and again Wayta slipped between its legs, darting toward its back. She hacked with her sword, carving out a chunk of fibrous material to no effect. She might as well have been fighting an ahuehuete tree. The point of a spear drove through the titan's chest. Huatli, Inti, Caparocti, and the other warriors had arrived while Wayta was fighting, and as one they screamed and attacked. They surrounded the creature, taunted it, stabbed it, carved away pieces until mushrooms and chitinous bark littered the ground. The pack dinosaurs were kept away for their own safety, but Pantlaza leaped and clawed with the sharp talons on his feet, leaving long score marks in its back. The longer they fought, the more Wayta's muscles sang with fatigue, her breath burning raggedly in her lungs. None of their strikes slowed the titan down, and it showed no sign that it felt pain. It knocked away their spears, grabbed their swords in its massive hands and tossed them into the ruins. Its wounds oozed black fluid that formed into viscous strands, weaving together until they solidified and burst into new fungal growths. It pulled the spear out of its own chest and swung it at one of the warriors. The weapon missed, but the titan's spiked arm slammed into her like a stegosaurus tail and flung her backward into a wall. She crumpled and lay unmoving. More blackness spilled from the creature's mouth, and it spat at a nearby warrior, who screamed as the tarry substance ate through his armor. 
Wayta ran to his side to help, but the sight of bloody bone through the remains of the man's flesh told her she was too late. Wayta would pray for the spirits of the dead later. For now, she fought. "More incoming!" Quint shouted, pointing deeper into the city. A dozen new creatures surrounded them, crossing the crumbling bridge over the nearby river, creeping through the deserted roads and clambering over half-fallen walls. Smaller versions of the titan, formed of different shapes and sizes of mushrooms. Some were armed with crude weapons, likely taken from the many corpses in this empty place. The Sun Empire warriors were now outnumbered and outflanked. One of the unarmed creatures plucked a mushroom from its own body and threw it at the feet of another warrior. The mushroom glowed an eerie green, bursting into thick black mold that encased the man's boots and spread up his legs. He stumbled, and the mold surged into his mouth. Wayta's heart clenched with the sudden certainty that she would fall in this place, never to see the light of the Threefold Sun again. Then, the tides turned. A wave lashed out of the underground river, knocking two of the creatures away. A moment later, a half-dozen River Heralds leaped onto the shore, joining the battle with jade spears and blades and magic. "This battle is lost," one of the Heralds said. "Come with us to safety." Wayta hesitated. After what happened at Orazca before the war, relations between the Sun Empire and the River Heralds had been tense, even hostile. Could they trust these people? Trust had to start somewhere. Might as well be here. Wayta searched for Quint, finding him weaving a spell from behind a wall nearby. An ancient weapon rose from the ground as if wielded by a spirit, spinning through the air and embedding itself in a mushroom creature's neck. "Quint, let's go!" Wayta shouted. He immediately obeyed, and she cleared them a path to the river. One of the Heralds noticed them and grabbed her free hand. 
They murmured an incantation, rotating their finger around her face. Suddenly, the air tasted different, thick with moisture. A rainbow sheen covered Wayta like she'd been encased in a bubble molded to her form, and she prodded her own arm, feeling nothing unusual. "Swift travels, little sister," the merfolk said, and shoved her into the river. #figure(image("003_Episode 3/02.png", width: 100%), caption: [Art by: <NAME>], supplement: none, numbering: none) #v(0.35em) #line(length: 100%, stroke: rgb(90%, 90%, 90%)) #v(0.35em) == Malcolm The elevator descended deeper into the cenote, cables creaking and wooden supports swaying. Malcolm watched his injured companions out of the corner of his eye, a cold lump of dread in his stomach. Breeches seemed fine, and the ones who hadn't explored the cave with those horrible dinosaurs, but the others … They weren't right. The black markings on their wounds had spread, a delicate filigree of circles and lines along any exposed skin. Worse, they had begun to glow a sickly shade of green. The pirates didn't complain of pain or discomfort, when normally they'd be grumbling and asking to rest. Instead, they alternated between foggy detachment and examining their surroundings with oddly keen interest. In the light of his shoulder lamp, the walls of the cenote gleamed wetly, coated with slick fungal growths that were spreading at an alarming pace. The caves had never been entirely dry, but this was excessive. As the scent of rot and mold strengthened, Malcolm pulled a cloth from his pack and tied it around his face, covering his nose and mouth. Breeches mimicked him, and Malcolm nearly laughed at how comical they must look. Like common thieves or bandits instead of pirates. Not that he'd engaged in much piracy lately. The Brazen Coalition kept him too busy. The elevator lurched as it hit something. One of the pirates leaned over the edge of the guardrail to check. "Looks like a big mushroom," she said. "Can you cut through it?" 
Malcolm asked. She nodded, drawing her sword. After hacking at it a few times, the elevator shifted. The pirate sneezed and stumbled backward. "Gross," she said. "It burst like a sack of flour." She coughed and rubbed her eyes as another pirate thumped her on the back. Where she had stood, a cloud of glittering green spores rose in the still air, thickening like smoke. Malcolm retreated, eyes narrowed. He glanced at the injured pirates, standing impassively in the center of the elevator, their wounds glowing the same color. Was there some connection? As if in response, those pirates lunged at two of the uninjured ones and shoved them into the glowing spore cloud. Cries of surprise turned into wet coughs, then gagging and retching, black fluid splattering against the floor. #figure(image("003_Episode 3/03.png", width: 100%), caption: [Art by: Izzy], supplement: none, numbering: none) As quickly as it had begun, the sudden attack of sickness stopped. The affected pirates rose jerkily and faced the others. Their eyes were glassy and green, and black veins stretched across their faces. Air hissed from their mouths like leaking rubber bladders. Malcolm unsheathed his sword and backed away, adjusting the bandanna that still covered his nose and mouth. The surviving pirates had just enough time to draw their own weapons before their infected allies attacked. The tight confines of the space made dodging nearly impossible; every slice or thrust could hit a friend instead of a foe. "BIG BOOM?" Breeches asked. "No! You'd kill us all," Malcolm yelled. He leaped over the guardrail, the lack of air currents forcing him to rely on his magic to fly. He circled back and clung to the elevator ropes, which pulled him up and away. Below him, the pirates fought desperately, but like the dinosaurs in the cavern, the infected seemed impervious to pain or injury. Breeches joined Malcolm on the ropes as the elevator continued to descend. "ESCAPE?" Breeches yelled. 
At the sound of his voice, the infected looked up in unison with their vile green eyes. "Cut the lines," Malcolm said, his blood cold. "Hurry." Breeches gripped the rope with both feet and his tail. He sliced one cable with his knife, while Malcolm sawed at the other. The lines were thick, intended to hold substantial weight, and they were less than half cut when the infected started clambering up the side of the elevator. Though Malcolm's muscles burned, he sawed faster. The cable in his hands unraveled and thinned, then came apart with a force that knocked him loose. The elevator jerked, the infected pirates inside stumbling. Breeches grimly held on as his line snapped, and with a terrible silence, the elevator fell away into the dark. Closing his eyes, Malcolm tried to calm the hummingbird flutter of his heart. "Come on," he said finally. "We can't stay here." He alternated flying up and climbing, Breeches pacing him on the ropes. He diligently avoided a tendril of fungus on the nearby wall, shuddering as an eerily eyelike mushroom seemed to track their movements. Malcolm would have previously assumed it was a figment of his imagination. Now, he wondered what terrible creature could turn pirates and dinosaurs into mindless puppets—and why. #v(0.35em) #line(length: 100%, stroke: rgb(90%, 90%, 90%)) #v(0.35em) == Bartolomé As unpleasant as a waterfall of fiery molten rock might be, multiple such spouts and even rivers of the stuff were infinitely worse. If the path to Aclazotz continued through such inhospitable and deadly terrain, Bartolomé wondered whether, despite Vito's assertions, a god as powerful as theirs was meant to be found. The goblin-like creatures who had pursued the strange newcomer, Kellan, did not reappear. 
Even so, the farther their pilgrimage led them into the underground, the more signs of habitation they discovered: buildings carved into cliffsides and stalactites, glowing markers covered in unfamiliar glyphs, gardens of pale leafless plants in sandy soil. They never encountered any inhabitants, but scuttling sounds and glimpses of movement suggested they weren't alone. Amalia conversed with Kellan, who examined their surroundings with wonderment and unease. Bartolomé saw his own daughter in the young cartographer, still so innocent despite surviving the deprivations of the recent invasion. To protect that innocence, to guard the future of those like her, he would make any sacrifice necessary. Thus far, that meant capitulating to Vito's orders. When Que<NAME> assigned Bartolomé to join this expedition, he was told to play along and discover Vito's true intentions and loyalties. He hadn't realized how heretical the hierophant's ideas had become, how far he had turned from the church. He also hadn't been able to discover who gave Vito the lance and journal of Venerable Tarrian, but it suggested that the opposition to the queen, the faction supporting Vona de Iedo and other so-called prophets, was larger and more unified than they had hoped. What if Aclazotz himself aligned with the Antifex? Bartolomé shuddered to consider it. The Legion entered a larger, flatter plateau overlooking a pair of lava falls. From one moment to the next, silence yielded to shouting and a clatter of motion. Clavileño's soldiers circled the others protectively, weapons drawn. Two dozen warriors surrounded them, taller than the vampires, broader, with faces like great cats and spotted fur to match. They wore elaborately decorated helmets and armor and wielded bows and vicious-looking serrated obsidian blades and polearms. Bared fangs promised violence, and Bartolomé wasn't eager to test their skills at arms. 
They had sheer numbers on their side, unless one counted the Legion's porters and prisoners. #figure(image("003_Episode 3/04.png", width: 100%), caption: [Art by: <NAME>], supplement: none, numbering: none) "You will come with us," one of the cat-people said, brandishing a polearm covered in glyphs. "Who are you?" Vito asked, his voice cold with authority. "I am Kutzil, champion of the Malamet," was the response. "You will come with us, or you will die." Bartolomé cleared his throat. "We are on a holy pilgrimage," he said. "We seek only safe passage through these lands. We mean you no harm." Vito's glare suggested he resented either Bartolomé's intrusion or his lies. Kutzil shifted her gaze, head tilted. "Your mission is not our concern. Sovereign Okinec Ahau will decide your fates." Clavileño looked to Vito. "Orders?" "We've fought too much already," Bartolomé told Vito quietly. "Morale and supplies are low. Diplomacy may serve our cause better than making enemies." Vito returned his attention to Kutzil. "I would meet your sovereign," he said. "Lead us." The Malamet warriors kept their weapons trained on the Legion. With her polearm, Kutzil gestured for them to follow. Vito leaned closer to Bartolomé. "Do not undermine me again," he said quietly, his voice thick with menace. Bartolomé inclined his head in acknowledgment. They followed Kutzil across more stone bridges, deeper into a city of these people, the Malamet. Now that they were escorted, the denizens of the homes appeared, as well as some of the odd, pale goblins. Bartolomé marveled that an entire culture existed in these caves and tunnels and had never traveled to the surface and made contact. Perhaps unsealing the doors as they had would lead to something fruitful. Or, given how distrustful these people were, perhaps not. Kutzil halted the company. "Behold," she said. "You are the first strangers to see Ban Koj since the time of the Oltec." 
#figure(image("003_Episode 3/05.png", width: 100%), caption: [Art by: <NAME>], supplement: none, numbering: none) Bartolomé stepped back, a hand rising to his mouth in awe. The few huts their group had passed in the hours before were nothing compared to this sight. An entire city—easily the size of Alta Torrezon—was built into a cluster of stalactites so large they might have been inverted mountains. Some buildings looked hewn directly from rough rock, while others featured painted white walls like pottery. Rope bridges and nets extended between buildings, as well as thick cables from which strange carriages hung, their wheels at the top to let them move back and forth. Passengers rode inside, climbing out to join other Malamet as they stalked along the unsettling, suspended streets. Bartolomé hid his nerves behind a placid exterior as the catfolk split the Legion into two groups for transport up into the city. Escaping from this place would be nearly impossible if diplomacy failed. Some of the soldiers could fly, being skymarchers, but everyone else … His gaze slid to Amalia, who was standing close to Kellan, the twitch of her fingers near her sword showing her anxiety. The Malamet warriors continued to guard them as they marched across a wide stone bridge to the largest of the stalactites. Unlike the rest, this one didn't have any buildings carved into the exterior, not even windows. Instead, hundreds of enormous glyphs covered every visible surface, glowing intermittently. They passed through a huge opening at the end of the bridge with a rotating door at the center. More armed guards stood at attention as they passed, silent as the stalking predators they resembled. One of the Legion's porters veered too close, and the nearest guard growled. The interior of the stalactite was filled by an enormous pyramid carved from the rock, hundreds of steps leading to a small room at its peak. A strange susurration echoed in the cavernous space, its source invisible. 
Thankfully, they were not forced to ascend the staircase, and were instead led inside the pyramid to a long room flanked by carved pillars, between which Malamet crouched on woven mats. Their elaborate headdresses and collars suggested some form of nobility or priesthood, and they all stared at the Legion members as they passed, some showing fangs that made vampiric teeth look tame by comparison. Seated on a throne on a raised dais at the end of the room, a large, armored Malamet idly toyed with a massive, serrated sword. This, presumably, was Sovereign Okinec Ahau. "What have you brought us, Kutzil?" he asked. Kutzil bared her throat deferentially as she spoke. "Invaders from the surface, Sovereign," she replied. "We are only passing through," Vito said, bowing politely. "You will speak when spoken to," Kutzil growled, pointing her polearm at Vito. With a sneer, Vito ignored her. Sovereign Okinec Ahau eyed Vito curiously. "What is your purpose in my realm?" "We are pilgrims," Vito replied. "We are on a journey to the land of Aclazotz, our god." "There are no gods here but me," Okinec Ahau said, gripping his sword. "Poq," he said, looking to a group of robed advisers to his right. A tawny-furred, burly Malamet stepped forward, his arms crossed behind his back. They wore a simple silver harness, richly etched with glyphs and pictographs. His hair hung down in locks, weighted at the end with small, flashing silver medallions. "Poq is my mythweaver," Okinec Ahau said, introducing the Malamet. "He will speak. With our words, he will see through yours." Poq nodded. He raised his arms in front of his chest and uttered one small, soft word. The scent of rain, scouring lightning, and the heat of a dry summer day filled the air. Swirling green mist appeared between his claws, coalescing into shapes that built upon themselves, revealing a cloudy, but discernible image: A snarling face, its fangs bared and growing long. The face twisted, rippling, as it noticed the Malamet around it. 
With a screech, it snapped forward, biting at Mythweaver Poq like a beast snapping at food. Mythweaver Poq dropped his hands, dismissing the image. He looked to Okinec Ahau, shook his head, then walked back to his place among the assembled advisers. Okinec Ahau stood and addressed the assembled Malamet, speaking above the Legion soldiers. "One invasion begets another," Okinec Ahau said. "We will not allow it." The Legion soldiers shifted to defensive postures. Bartolomé rested a hand on Amalia's arm, and they exchanged a worried glance. Sovereign Okinec Ahau gestured at the seated figures. "I sentence these trespassers to be given to the sand. Let my justice be done." A few of the vampires drew their swords, and Vito aimed his lance at the sovereign. Before they could attack or defend themselves, however, the Malamet flanking them growled and raised their arms. Glowing glyphs seared the air, mimicking the spots on the Malamets' fur. The magic lashed out and wrapped around the Legion members like chains, forcing them to kneel. Vito struggled, but his lance was pressed uselessly against his chest. He glared at Bartolomé so venomously that if looks could have killed, Bartolomé would already be dead. "This isn't fair!" Kellan shouted from the back of their group. "We haven't done anything!" Sovereign Okinec Ahau bared his fangs. "The fire does not concern itself with fairness. It simply burns." #figure(image("003_Episode 3/06.png", width: 100%), caption: [Art by: <NAME>], supplement: none, numbering: none) One by one, warriors carried the Legion to a large fountain, a jaguar head at the top. But instead of water, sand flowed from the carved mouth, pooling below and draining into a large hole. Vito was the first of their party to reach the structure, his eyes blazing with wrath. "Though I am beset by enemies," Vito intoned, "my god will grant me strength and vengeance. His will be done." 
Bartolomé watched as the Malamet threw his comrades into the basin of sand, some succumbing silently while others screamed or fought. Vito went in head first, the lance still pinned to him by the magic chains that kept him bound. Clavileño followed, hissing and baring his fangs. Amalia stayed still and eerily calm as she sank down, first her legs and then her arms disappearing below the surface. She murmured something to herself that he couldn't read on her lips, eyes wide and unseeing. Kellan, across from her, struggled and flailed at the sand, panic obvious on his face as he slid closer to the gaping drain. Just before Amalia vanished, she told Kellan, "Hold your breath." Had she been granted a vision? Bartolomé hoped so, because otherwise their mission was at an end, and he was to blame. He followed her lead and let the Malamet lift him in their large coarse-furred arms, fighting his own fear as he was unceremoniously tossed into the fountain's basin. His only consolation as the sands claimed him was that Vito wouldn't have the chance to do more mischief in Torrezon. He only wished that goal hadn't come at so high a cost. #figure(image("003_Episode 3/07.png", width: 100%), caption: [Art by: L.A draws], supplement: none, numbering: none) #v(0.35em) #line(length: 100%, stroke: rgb(90%, 90%, 90%)) #v(0.35em) == Wayta Wayta tumbled through the cold dark of the river. An impossibly fast current pulled her forward, the light of the Threefold Sun affixed to her belt casting wild shadows around her. She realized that she could breathe, no doubt thanks to the spell the merfolk had cast. Even so, she had little control over where she went, and could only do her best not to be dashed against the walls or the riverbed. Sometimes she glimpsed a wider cavern above her, or the ground opened below like a lake or cenote. Sometimes flashes of glowing green teased at the edges of her vision, or the tunnel forked, and she glimpsed some unreachable alternate path. 
Through it all, the current carried her toward its unknown destination. Finally, after an age, a new light appeared in the distance. With a brief increase in pressure, she burst out into open water, cold and clear. Wayta kicked her way to the surface, searching for the others, finding some of them already swimming toward the nearest shore, including the scout who had pushed her in. Merfolk guards lingered nearby, their fins rippling as they bobbed in place, watching her progress but not moving to help or hinder her. She soon flopped onto the land, glad to see Quint beside her already sitting up and peering at their surroundings with undisguised awe. A massive stone city rose from an underground ocean—freshwater, not salt—and extended down into the depths even farther, stepped in a manner like temples to the Threefold Sun. Bright lamps burned above low buildings, while long strings of bioluminescent baubles and baskets holding firebugs lit the visible streets and alleys. Everywhere she looked, River Heralds walked or swam or rested, watching the newcomers warily and talking among themselves. Huatli wrung out her clothes, surveying the area. "There must be thousands of Heralds here," she said, wincing as Pantlaza shook himself and flung water everywhere. The young merfolk scout flared her gills. "My mother says it's the greatest assembly of merfolk bands Ixalan has ever known." She held out a hand to Wayta, who accepted it and stood. "I am Nicanzil," she said. "Be welcome." #figure(image("003_Episode 3/08.png", width: 100%), caption: [Art by: <NAME>], supplement: none, numbering: none) Huatli furrowed her brow. "Why are you here, if I am not too bold in asking?" "We await the opening of the final gate to the source," Nicanzil said, gesturing at the corroded door at the top of the temple's staircase. "My mother, the Great Shaper Pashona, can tell you more. She found this place after the death of the Deeproot Tree." 
Huatli stared at the door, squinting as if to see it better from a distance. "Could this be Matzalantli?" she murmured. "Have we truly discovered the door that leads to the birthplace of humankind, the home of the gods, as the poem claimed?" "If it is," Quint said, "I wonder what else they've found here. Did the Coin Empire make it this far, or did they remain closer to the surface? And imagine what historically significant artifacts might be lurking in some ancient cupboard." He checked the seals on his scrolls, apparently satisfied that everything was intact. Wayta regarded him with curiosity and faint unease. "What would you do with artifacts if you found them?" "I'd love to establish a proper excavation site," he said. "To be sure everything is treated as carefully as possible." "And then what?" Wayta asked. "You dig things up and leave them there?" "Not necessarily leave them there," Quint said. "I suppose it depends on what all of you want to do. Whether you want to keep everything here, or take some items back to Orazca, perhaps even set up a museum." "So, people would come here to … ogle things?" Wayta frowned. "How strange." Quint laughed. "I suppose it does seem odd. It's one way of making sure the stories of the past aren't forgotten." "Ah, like the warrior-poet," Wayta said, glancing at Huatli. "Right, yes!" Quint exclaimed. "I'd make notes over the course of the excavation, then write up a detailed account for my colleagues on Arcavios, to share what we found." His gaze grew distant. "Maybe I could publish it across the Multiverse somehow …" Wayta's frown deepened. "But it is not your story." Quint's ears flared slightly. "Well, no, I would just be the one telling it." "Why you?" Wayta pressed. "You are not from Ixalan. You are not of the Sun Empire, or the River Heralds. You shouldn't be telling our stories for us." "Perhaps not telling, then," Quint said, looking away over the water. "Recording. Disseminating. 
I am trained in this, you know," he added with a hint of ire. "I won't trample all over your history." He pulled his goggles off his head with his trunk and began to clean them with a cloth. Wayta huffed a breath, wondering why she cared so much. Why this bothered her. Sure, she had once dreamed of following Huatli's example and earning the mantle of warrior-poet herself someday. But the war had shattered those dreams like the pottery they'd found in these caverns. She'd been a soldier, and a pirate, sometimes helping the Brazen Coalition steal and sell not only artifacts from Torrezon but from her own people as well. Those thefts had paid her passage and kept her fed, never giving her more than an occasional hint of guilt. She'd deserved more than she'd gotten from the army, hadn't she? But she'd left all that behind. And now she was—what? An explorer? Could she also be a collector of stories? Those of her homeland, and her people? Huatli, who had sat quietly nearby as the conversation proceeded, now offered Wayta a soft smile. "Not everyone understands the power of words," she said. "The control it can give over others." She began to recite part of a poem Wayta didn't recognize. #emph[When my bones sleep in the earth, Who will share memories of me? Friends may raise monuments while enemies profane my grave. When they, too, have passed on what will their children remember?] "Sad, to think of how so much is lost," Wayta murmured, looking up at the strange door atop the pyramid in the distance. Huatli squeezed Wayta's shoulder companionably. "And yet, such joy when something lost is found, and when what is found is shared." Wayta glanced at Quint, then at Inti and Caparocti, organizing the other soldiers and issuing quiet orders. Perhaps it was best if some things remained buried, depending on what people intended to do with them. Perhaps some monuments deserved to be brought down. 
She hoped whatever was behind that mysterious golden door to the so-called home of the gods was a blessing and not a curse. #v(0.35em) #line(length: 100%, stroke: rgb(90%, 90%, 90%)) #v(0.35em) == Malcolm The tunnel where Malcolm and Breeches settled to catch their breath smelled dry and disused, possibly abandoned after the veins had been exhausted, or because nothing was found to warrant further exploration. He and the tunnel's creators had that much in common: He had no desire to delve any deeper. His muscles ached, and his head throbbed from using too much magic. The mystery of Downtown's mass disappearances almost certainly related to all the mold and fungus. He didn't fully understand how it worked, but he'd seen enough of what it could do to be afraid. Their supplies were at the bottom of the elevator shaft, and of the eight people he'd brought down here with him, only he and Breeches had survived. "All dead, no gold," Breeches muttered morosely. "Too true," Malcolm agreed. Should they continue or turn back? If they left now, Malcolm would have to report to Vance that he'd found few answers and even more questions. If there were survivors from Downtown or his Sunray Bay squad somewhere down in the dark, he'd be abandoning them to their fates—and some of those people were his friends, and none of them deserved to be left behind. Moreover, Downtown would remain empty, and recruiting new miners would be difficult, if not impossible; who wanted to work in a place where all the previous denizens had disappeared? And if no one was mining, then no money was flowing to the Brazen Coalition, and it was only a matter of time before the fragile economy fractured into the old feuding pirate fleets. Malcolm had loved his time sailing, navigating, flying free over the rolling waves through sun and storm. Feeling the thrill of divesting pampered merchants of their goods, and rival pirates of their lives. 
But after the war, he'd been almost relieved to settle into a more stable existence. To lose that now … It wasn't something to consider lightly, not when he might still solve the problem. Not when he might save people if he chose not to give up yet. "What do you think, Breeches?" Malcolm asked, leaning against the tunnel wall. "Do we head back up and live to fail another day? Or do we keep going down into the unknown?" Breeches removed his hat and scratched his head, then shrugged. "NO MINE, NO GOLD." "There are other mines," Malcolm said. But none as large or productive as Downtown, if he were honest. "And anyway, the dead can't spend coin, can they?" That was it, then. He'd all but talked himself into leaving. Vance could send someone else—a lot of someones, Malcolm would recommend. Though that might end with many more people turned into … whatever had become of his companions. A faint glow in the shaft caught his attention. Malcolm struggled to his feet and peered over the edge of the tunnel, a hand on the hilt of his sword. Fungus climbed the wall, growing at an impossible speed. Black tendrils formed networks of circles that bloomed into various mushrooms, some small and feathery, others stepped like stairs, still others ridged like coral. The effect was chaotic and eerily beautiful even as it turned his stomach. Some of the tendrils moved like ink on a page. As Malcolm watched, he realized the fungus was forming words, too dark to make out. Slowly, those words began to give off the same sickly green glow that had overtaken his lost people. SAFE, the first word read. Then, DOWN. Was this a truce, or a trap? Malcolm couldn't be sure. But now he knew that whatever had done all of this was sentient. If that was the case, perhaps diplomacy wasn't out of the question. Perhaps the residents of Downtown really were alive somewhere below, and he could get them out safely. 
Hope was the most dangerous weapon of all, and Malcolm felt it slide between his ribs to his heart, sharp as a blade. #figure(image("003_Episode 3/09.png", width: 100%), caption: [Art by: Daarken], supplement: none, numbering: none) #v(0.35em) #line(length: 100%, stroke: rgb(90%, 90%, 90%)) #v(0.35em) == Amalia The sand pressed against Amalia's body, dense and heavy, worse than water. The grit seeped into her clothes, her nose, even her mouth and eyes, though she closed them as tightly as she could. Distantly she remembered telling Kellan to hold his breath, and she had done so, too. The longer she held it, the more she wondered whether her vampiric abilities would shield her from suffocation, whether she would spend eternity trapped in this river of sand, unable to die, unable to drink life-giving blood. As if summoned by her fear, visions once again consumed her. The mysterious door, round and covered in glyphs, clearer than before. It was set into the stone of a cavern wall, its coppery surface tinged with green corrosion. A sky filled with faintly purple-tinged clouds, only beyond the sky was … ground? As if someone held a vast map somewhere above her, painted with all the colors of the land it was meant to represent, greens and browns and blues and snowy white. A sphere burning brightly as the sun—was it the sun? It couldn't be. Strange metal scraps floated around it, reminding Amalia of broken plate armor. More pieces trailed behind like debris from a shipwreck, glowing a purple-pink. #emph[Come to me …] The pressure around Amalia suddenly eased, becoming a sensation of falling. Without warning, she hit water, her eyes flying open. How had they reached the ocean? No, this was freshwater. Disorientation sent her swimming in the wrong direction, toward what looked like a city, before she realized the buildings were underwater. She turned and kicked the other way, finally breaking the surface with a gasp. 
Around her, others did the same, including Kellan, to her relief. They weren't dead. She had been so sure the Malamet were going to kill them all, then that the quicksand would do that work, yet once again they had survived. Simple luck? Or the will of Aclazotz? Before she could feel more than a momentary pang of relief, a flurry of motion in the water surrounded them. River Heralds, dozens, all armed with their strange jade weapons and elemental magics. "Do not provoke us," one of the merfolk said. "Come quietly or you will be subdued by force." Vito snarled and Bartolomé cast a worried look at him. Certainly, trying to fight Heralds in their element seemed a fool's errand. Kellan coughed and swam closer to Amalia. "I can't believe this is my third time being ambushed in one day," he said mournfully. Amalia snorted a laugh. "Careful that you don't make it a habit. Those can be hard to break." Kellan grinned and playfully splashed her, then they followed the other vampires as the merfolk herded them toward the shore to an unknown fate.
https://github.com/typst/packages
https://raw.githubusercontent.com/typst/packages/main/packages/preview/tiaoma/0.1.0/README.md
markdown
Apache License 2.0
# tiaoma [tiaoma(条码)](https://github.com/enter-tainer/zint-wasi) is a barcode generator for typst. It compiles [zint](https://github.com/zint/zint) to wasm and uses it to generate barcodes. It supports nearly all common barcode types. For a complete list of supported barcode types, see [zint's documentation](https://zint.org.uk/). ## Example ```typ #import "@preview/tiaoma:0.1.0" #set page(width: auto, height: auto) = tiáo mǎ #tiaoma.ean("1234567890128") ``` ![example](./example.svg) ## Manual Please refer to [manual](./manual.pdf) for more details. ## Comparison There are multiple barcode/qrcode libraries for typst such as 1. https://github.com/jneug/typst-codetastic 2. https://github.com/Midbin/cades Here is a comparison of them. Pros of this package: 1. Supports more barcode types 2. Might be faster because zint is written in C and compiled to wasm. These libraries are written in typst and javascript. Cons of this package: 1. Doesn't provide enough customization options, although it can be improved in the future.
https://github.com/dashuai009/dashuai009.github.io
https://raw.githubusercontent.com/dashuai009/dashuai009.github.io/main/src/content/blog/020.typ
typst
#let date = datetime( year: 2022, month: 3, day: 14, ) #metadata(( "title": "卡特兰数", "author": "dashuai009", description: "简单介绍卡特兰数", pubDate: "'Jul 08 2022'", subtitle: [Catalan,math], ))<frontmatter> #import "../__template/style.typ": conf #show: conf 以下搬运自#link("https://zh.wikipedia.org/wiki/%E5%8D%A1%E5%A1%94%E5%85%B0%E6%95%B0")[wikipedia] == 卡特兰数 <卡特兰数> $C_n= 1 / (n+1) binom(2n, n)= (2n)! / ((n+1)!n!),n=1,2,3,...$ == 另一种表示形式 <另一种表示形式> $C_n= binom(2n, n) - binom(2n, n+1)$ 所以,Cn是一个自然数;这一点在先前的通项公式中并不显而易见。 == 递推关系 <递推关系> $ C_0 & = 1\ C_(n + 1) & = sum_(i = 0)^n C_i C_(n - i) $ $ C_0 & = 1\ C_(n + 1) & = frac(2 (2 n + 1), n + 1) C_n $ == 卡塔兰数的渐近增长 <卡塔兰数的渐近增长> $C_n tilde.op frac(4^n, n^(3 \/ 2) sqrt(pi))$ == 所有的奇卡塔兰数$C_n$都满足$n = 2^k - 1$。所有其他的卡塔兰数都是偶数。 <所有的奇卡塔兰数c_n都满足displaystyle-n2k-1所有其他的卡塔兰数都是偶数> == emm <emm> $C_n = integral_0^4 x^n frac(1, 2 pi) sqrt(4 / x - 1)$ == 母函数 <母函数> $ M (x) & = sum_(n gt.eq 0) C_n X^n\ & = 1 + sum_(n gt.eq 1) sum_(i = 0)^(n - 1) C_i x^i C_(n - i - 1) x^(n - i - 1) x\ & = 1 + x sum_(i gt.eq 0) C_i x^i sum_(n gt.eq 0) C_n x^n\ & = 1 + x M^2 (x)\ M (x) & = 1 + x M (x)^2\ M (x) & = frac(2, 1 + sqrt(1 - 4 x)) = frac(1 - sqrt(1 - 4 x), 2 x) $ 生成式的另一个解可以用M(0)特判掉。 == 广义二项式定理 <广义二项式定理> 二项式定理: $(x + y)^n = sum_(k = 0)^n (binom(n, k) x^(n - k) y^k)$, 其中$binom(n, k)$是组合数. 当$n$不是正整数时, $k$无法正好求和到$n$, 因此将一直求和至正无穷, 这样形式上就得到了广义二项式定理: $(x + y)^alpha = sum_(k = 0)^oo binom(alpha, k) x^(alpha - k) y^k$, 其中$binom(alpha, k) = frac(alpha (alpha - 1) . . . (alpha - k + 1), k !) = frac((alpha)^(underline(k)), k !)$是形式上的组合数. == 展开形式 <展开形式> 先展开$sqrt(1 - 4 x)$, $ (1 - 4 x)^(1 / 2) & = sum_(n gt.eq 0) binom(1 / 2, n) (- 4 x)^n\ & = 1 + sum_(n gt.eq 1) frac(1 / 2^(underline(n)), n !) (- 4 x)^n\ $ 其中, $ (1 / 2)^(underline(n)) & = 1 / 2 frac(- 1, 2) frac(- 3, 2) . . . frac(- (2 n - 3), 2)\ & = frac((- 1)^(n - 1) (2 n - 3) ! !, 2^n)\ & = frac((- 1)^(n - 1) (2 n - 2) !, 2^n (2 n - 2) ! 
!)\ & = frac((- 1)^(n - 1) (2 n - 2) !, 2^(2 n - 1) (n - 1) !)\ $ 则 $ (1 - 4 x)^(1 / 2) & = 1 + sum_(n gt.eq 1) frac((- 1)^(n - 1) (2 n - 2) !, 2^(2 n - 1) (n - 1) ! n !) (- 4 x)^n\ & = 1 - sum_(n gt.eq 1) frac((2 n - 2) !, (n - 1) ! n !) 2 x^n\ & = 1 - sum_(n gt.eq 1) binom(2 n - 1, n) frac(1, 2 n - 1) 2 x^n\ $ 带回 $ M (x) & = frac(1 - sqrt(1 - 4 x), 2 x)\ & = frac(1, 2 x) sum_(n gt.eq 1) binom(2 n - 1, n) frac(1, 2 n - 1) 2 x^n\ & = sum_(n gt.eq 1) binom(2 n - 1, n) frac(1, 2 n - 1) x^(n - 1)\ & = sum_(n gt.eq 0) binom(2 n + 1, n + 1) frac(1, 2 n + 1) x^n\ & = sum_(n gt.eq 0) binom(2 n, n) frac(1, n + 1) x^n $ 即可得到通项公式 == 应用 <应用> - Cn表示长度2n的dyck word\[7\]的个数。Dyck词是一个有n个X和n个Y组成的字串,且所有的前缀字串皆满足X的个数大于等于Y的个数。 - 将上例的X换成左括号,Y换成右括号,Cn表示所有包含n组括号的合法运算式的个数: - Cn表示有n个节点组成不同构二叉树的方案数。 - Cn表示有2n+1个节点组成不同构满二叉树的方案数。 #strong[证明];: 令1表示进栈,0表示出栈,则可转化为求一个2n位、含n个1、n个0的二进制数,满足从左往右扫描到任意一位时,经过的0数不多于1数。显然含n个1、n个0的2n位二进制数共有$binom(2 n, n)$个,下面考虑不满足要求的数目。 考虑一个含n个1、n个0的2n位二进制数,扫描到第2m+1位上时有m+1个0和m个1(容易证明一定存在这样的情况),则后面的0-1排列中必有n-m个1和n-m-1个0。将2m+2及其以后的部分0变成1、1变成0,则对应一个n+1个0和n-1个1的二进制数。反之亦然(相似的思路证明两者一一对应)。 从而$C_n = binom(2 n, n) - binom(2 n, n + 1) = frac(1, n + 1) binom(2 n, n)$。证毕。 - Cn表示所有在n × n格点中不越过对角线的单调路径的个数。 - Cn表示通过连结顶点而将n + 2边的凸多边形分成三角形的方法个数。 - Cn表示对{1, …, n}依序进出栈的置换个数。一个置换w是依序进出栈的当S(w) = (1, …, n),其中S(w)递归定义如下:令w = unv,其中n为w的最大元素,u和v为更短的数列;再令S(w) = S(u)S(v)n,其中S 为所有含一个元素的数列的单位元。 - Cn表示集合{1, …, n}的不交叉划分的个数。那么, Cn永远不大于第n项贝尔数. 
Cn也表示集合{1, …, 2n}的不交叉划分的个数,其中每个段落的长度为2。综合这两个结论,可以用数学归纳法证明:在 魏格纳半圆分布定律 中度数大于2的情形下,所有 自由的 累积量s 为零。 该定律在自由概率论和随机矩阵理论中非常重要。 - Cn表示用n个长方形填充一个高度为n的阶梯状图形的方法个数。 - Cn表示表为2×n的矩阵的标准杨氏矩阵的数量。 也就是说,它是数字 1, 2, …, 2n 被放置在一个2×n的矩形中并保证每行每列的数字升序排列的方案数。同样的,该式可由勾长公式的一个特殊情形推导得出。 - Cn表示n个无标号物品的半序的个数。 == 汉克尔矩阵 <汗科尔矩阵> == 超级卡特兰数(大施罗德数) <超级卡特兰数大施罗德数> === 递推式 <递推式> $ & S_1 = 1\ & forall n gt.eq 2 , S_n = S_(n - 1) + sum_(i = 1)^(n - 1) S_i S_(n - i) $ === 生成函数 <生成函数> $ S (x) = x S (x) + S^2 (x) + x $ 解得 $ S (x) = frac(1 - x - sqrt(x^2 - 6 x + 1), 2) $ === 化简求通项公式 <化简求通向公式> 广义二项式 === 通项公式 <通项公式> $ S_n = sum_(i = 1)^n binom(n + i - 1, 2 i) C_i $ === 一种方法 <一种方法> 接下来给出的一种方法,可以在$O (n k)$的复杂度快速求出$k$次多项式开方后前$n$项的值 设要开方的多项式为$P (x)$,开方后的多项式为$F (x)$. $ F (x) = P (x)^(1 / 2) = sum_(i = 0)^oo f_i x^i $ 两边求导,可得 $ F prime (x) = 1 / 2 P (x)^(- 1 / 2) P prime (x) $ 得 $ F (x) P prime (x) = 2 F prime (x) P (x) $ 对比每一项系数,不难得到$k + 1$项的递推式. === 递推式 <递推式-1> $ (n + 1) f_(n + 1) = (6 n - 3) f_n - (n - 2) f_(n - 1) $ 不过要注意这个递推公式除了第一项其他的项都是超级卡特兰数的$1 / 2$ === 组合意义 <组合意义> - $n + 1$边形的任意剖分方案数。#link("http://noi.openjudge.cn/ch0206/9282/")[例题链接] 还是考虑枚举一条多边形上的边,那么有可能这条边仍然与另一个点拉成一个三角形,也有可能这条边上没有,那么把这两种加起来就是定义的递推式 - 括号序列,每个位置可能左括号,右括号和0.括号对数与0的个数之和为$n - 1$, 问合法的括号序列数. 同样枚举第一个是左括号还是0. 值得一提的是,可以看成先插括号再插0,那么枚举括号对数i,0有$n - i - 1$个,需要放入$2 i + 1$个位置中,那么不难得到上面通过广义二项式推出来的式子. - 从$(0 , 0)$到$(n - 1 , n - 1)$,每次能往右,往上,往右上走,求不超过$y = x$这条直线的方案数. 枚举第一次是走右上还是走右. 同样使用容斥思想,把答案转化为从$(0 , 0)$到$(n - 1 , n - 1)$减去(-1,1)到$(n - 1 , n - 1)$. 那么从$(0 , 0)$到$(n , m)$的路径条数怎么求呢.可以考虑枚举往右上的次数,然后还是考虑先走右和上,然后再把走右上的插入即可.
https://github.com/polarkac/MTG-Stories
https://raw.githubusercontent.com/polarkac/MTG-Stories/master/stories/041%20-%20Kaldheim/004_Aim%20Through%20the%20Target.typ
typst
#import "@local/mtgstory:0.2.0": conf #show: doc => conf( "Aim Through the Target", set_name: "Kaldheim", story_date: datetime(day: 15, month: 01, year: 2021), author: "<NAME>", doc ) #emph[Note: This is Part 2 of a two-part story. Make sure to check out Part 1 before reading on.] For a tight, breathless moment, there was no ground. Niko's boot came down through shimmering unrealities and landed on a plank of wood with a light thump. It was much smoother than the first time they traveled. Shorter, easier, the momentum carrying them forward on a tiny boat from a lonely outpost on an ice-strewn shore, through the skin between worlds, to an entirely different port. The skiff bumped against a wooden pylon, and Niko hauled themself up to get their bearings. Everything was so bright after the spate of gloomy weather that dogged the Kannah on Bretagard. The vast network of docks twined lazily over a mirror-black lake, with no land in any direction except the dock, barely visible in the cool mist. #figure(image("004_Aim Through the Target/01.jpg", width: 100%), caption: [<NAME> | Art by: <NAME>], supplement: none, numbering: none) Niko's breath fogged, but they weren't cold. They breathed in through their nose, waiting for the icy bite of winter to prick their lungs as it had the moment they'd fled from Theros to Kannah lands—but there was no sudden chill this time. The air was cool and bracing, alive, the perfect temperature for a tournament. They pressed on, the docks crossing and recrossing themselves like the strata of ages. Among the planks were carvings of all manner of beasts—great bears and dragons, boars and rabbits, squirrels, fish, and whales. Niko stepped and hopped around these symbols with the grace of a dancer, amused and unsurprised that Kaldheimrs set their boasts and stories into the very ground. Squinting, Niko twitched the silver-violet lock of hair out of their eyes and took it all in. 
Proud as a palace, strong as a fortress, the hall was a curving a-frame, like the interlacing fingers of a line of dancers. The hall itself was tented under towering branches that pulsed with magic. This was the absolute pinnacle of the World Tree, a living ornament at the height of the realm. Niko had seen it tooled into Kannah armor as a trinity of stars or hanging in the sky as a triple diamond—the only light in a starless expanse of undulating color. Up close, it was so much more. Stone menhirs stood sentry at the base of the stairway, their concentric carvings lovely and unreadable as Niko began the climb. With each step, understanding and longing bloomed in their chest. They knew why Kjell had spoken of it as he did. The Kannah described Starnheim as paradise; the Omenseekers, as a puzzle to be unlocked. Niko thought that meant riches and rest, but the place soothed something far deeper. Every step was like the last stretch of road to safety. The warmth radiating from its heart carried the same promise as a dozen hands sharing the work of a feast. Music and chatter hummed through the air, ready at any moment to burst into welcome. Blue and violet light swam over Niko's brown skin as their throat tightened. Tears of relief squeezed behind their eyes, and like an embrace, the doors opened. The moment Niko crossed the threshold, they understood what every mortal on Bretagard yearned for. It was more than the relief of journey's end, more than celebration—Starnheim was home. #figure(image("004_Aim Through the Target/02.jpg", width: 100%), caption: [Art by: <NAME>], supplement: none, numbering: none) The hall's architecture was both built and grown, like a sweeping ruin restored to the height of its glory. Niko saw faces like and unlike theirs, tattooed, pierced, bare—and others made of pure obsidian stone. Warriors and poets, humans, dwarves, elves, and giants that glittered with frost or glowed with lava. 
Also among them were those who looked like farmers or soft-handed scholars, those whose bravery and cunning dared them to greatness for glory, love, or justice. Every tale floated on a sea of raucous laughter and the rich smells of roasting meat, seasoned vegetables, and crackling logs. Mindful of their mission, Niko glanced up. Above the long table, the feasting, the honored dead and their endless stories, layers of clouds glowed bright within the canopy of the World Tree's branches. Some white clouds remained, but behind them deep blue-gray clouds began to emerge, threatening a storm. Only Niko seemed to notice. "More drink!" shouted a wiry warrior with a flame-red beard and rust-colored tattoos covering their arms and chest. "Say it into your horn, idiot," said another warrior in splint mail, laugh lines like deep ruts in her leathery face. She shoved a drinking horn as long as her arm into Flamebeard's hand. "Mjød!" Flamebeard shouted into the horn. The horn filled instantly, spilling down their front. "The first sip of wedding mead, a gift from my wife's family," said the woman in splint mail. Her horn filled with gold liquid, redolent of wildflower fields where the honey was made. "Drøss!" said an Omenseeker with a long scar slashed across the map of his face like a destination marker. His horn overflowed with white foam, flecked with black. Flamebeard belched. "What in all the realms is drøss?" Mapmark wiped foam from his mouth with relish. "Dragon egg whites whipped into cream, infused with herbs and sap resin." "Ugh!" Flamebeard swore. "Is that how you died?" Splintmail grimaced. "How can you let such filth touch your mouth?" Niko slipped in among them. "I asked your wife the same thing when I left her on shore." Mapmark howled with laughter, slapped Niko on the back and held out his horn. "Drink, Thura!" "Damn it." Splintmail laughed, took a heroic swig of the strange foam, chased it with her own drink, and handed the horn back. "Who are you, Steelhair?" 
asked Flamebeard. The stylist who had dyed Niko's black locks would have been mortified to hear their mirror-like hues compared to something as crude as steel. Niko grabbed a horn and thought of home. It bubbled with something citrus, sweet, and strong, carrying with it the memory of night swimming in a summer sea. Niko put the drink to their lips but didn't drink. "Greatest name first." Splintmail grinned, pushing a plate of roast boar toward Niko. "<NAME>-Rend, she of the Beskir clan." Mapmark slurped another swig of foam. "<NAME>-breath, he of the Omenseeker ship Icecutter. Repelled an entire Skelle raid to protect my grandson's family." "Vígniút!" shouted Flamebeard, thumping their mjød-soaked chest. "They of the Tuskeri~and damn Brokenbrow's pet mages for running away rather than getting dust on their pretty little boots!" "Regroup or be routed," said Thura. "Warriors and berserkers out front, mages at a distance." Vígniút scoffed, spraying the table directly in front of them. "Raiders, trolls, dragons—if you're not in spitting distance, you're not in the fight." Niko flinched back, slapping a hand over their own horn's opening. "You're a mess, Viggy—drink with your mouth closed." No-breath tossed a cloth at the flame-bearded youth. "Sail-Rend~I've heard of you!" Niko said to Thura. The story had been tragic and triumphant, how she had fought her traitorous brother and killed him with his own sword rather than defiling her own. Niko might have found the subject painful in Thura's place and finished on a more diplomatic note. "Birgi told your story to a room full of Kannah and Omenseekers. Your name has spread far." Thura pounded the table. "Ha! See? The Storyteller herself tells Beskir tales to your people! Fermented shark's blood! Drink!" "Joke's on you, I like this stuff." No-breath took the horn from Thura and threw back a long draught. "But I'm not listening to the song of your death again—I want to hear something new. Great names have spoken, Steelhair. 
Tell us how you earned the glory of Starnheim!" "<NAME>, they of Meletis," they began. "And I am here because I never miss." The other three listened while Niko told their tale. The powerful oracle who decreed that Niko would become an unbeatable champion, never miss, and never lose. Ceaseless training brought victory, and victory, fame—but it was meaningless. What was the point of fate without purpose? At the last Akroan games, where the jewels of every polis gathered to compete, Niko threw their javelin, spat in the face of destiny, and intentionally lost. #figure(image("004_Aim Through the Target/03.jpg", width: 100%), caption: [Niko Defies Destiny | Art by: <NAME>], supplement: none, numbering: none) "Fate itself sent an agent to punish and push me back, to #emph[correct] the weave I had unraveled," said Niko. "What happened then?" asked Thura. "Did you kill the assassin?" Vígniút asked. "We fought," said Niko, vaguely. They had been terrified. Desperate. Trapping an agent of fate in a mirror shard was like a child stomping an adult's toe—more surprise than strategy. Niko's entire being lit up, a lightning rod for something buried deep within. Their destiny was a lie. And they could run anywhere, go anywhere, with a thought. No-breath watched Niko put the horn to their mouth and not drink. "That's sticking it to the gods, eh?" he said. "Knives in the dark won't change the fact you proved 'em wrong." Thura waved the Omenseeker off. "The gods aren't always right, the boats in the black harbor say as much. They've got to earn their place in Starnheim same as anyone." Over Thura's shoulder, Niko caught sight of a huge, fluffy cat whose fur mirrored the storm clouds that hung over the feast. It was at least twice the size little Threat had been, and like Starnheim itself, the cat's eyes and the edges of its fur shimmered with polar light. The same light that sparked off Valkyries' wings. 
#figure(image("004_Aim Through the Target/04.jpg", width: 100%), caption: [Gods' Hall Guardian | Art by: <NAME>vedi], supplement: none, numbering: none) It was also the first creature Niko had seen in the home of the Valkyrie that wasn't obviously a person, and the cat seemed to watch Niko with the same interest. "I'll be back," said Niko, "I have friends to see." They handed their full horn of Theran sparkling blue to Vígniút. The cat trotted off and Niko followed, leaving Vígniút and the others to taste what they would of another world. The cat glanced back at Niko, twitched its ear, then took off again through the boasting crowd. It took a sharp left and disappeared through a gap in the wall and into a crawlspace. Niko followed, emerging into a quiet hall, the stone floor as black as the lake, lit overhead by a silent storm. For about half a mile, Niko followed the cat's green and violet light to another small gap, the echoing arguments beyond suggesting a much larger space. "~Tremor in the World Tree. If travel continues to be this challenging, how are we to collect the dead and bring them here unharmed?" The cat slowed, stretched, lashed its fluffy tail, and disappeared through the hole. Niko knew how to make a good impression on Theros, but Kjell had taught them much since then, including how to make a good impression on Kaldheim. #emph[Kick the door in. Punch one in the face.] Niko slid through, stood tall, and gaped. There were dozens of them, scattered among the World Tree's branches like a flock of raptors. Uniformly tall and striking in appearance, the Valkyries wore armor of all types in silver, gold, and burnished black and bronze. Layers of fur and stone amulets on chains distinguished some, while others swanned through the hall in tooled belts and harnesses plated with the most finely crafted metal Niko had seen since leaving home. 
Their long braids were ringed with cuffs like the bands of snakes, and many drank from long horns like the flame-bearded berserker. Those with white wings radiated the pale hues of dawn, and their black-winged counterparts, like the one Niko had captured, ribboned with the strange greens and blues of a deep winter night. What could possibly threaten these gods? A honeyed voice came from a pale-winged Valkyrie with deep brown skin. "Aggressor! Did you find a little friend? Are you lost, my dear? You should return to the feast." It took a moment for Niko to realize the Valkyrie was now addressing them, rather than the cat. "Valkyrie of Starnheim, I am <NAME>, they of—" "Yes, go on, back to the hall with you," said another. Niko scanned the room, looking for the dark-skinned Valkyrie with yellow hair and dove-gray wings—a witness to what had happened—but there were so many of them. "I don't belong in the hall. I'm not supposed to be here—" Another cut them off, dark wings flashing amethyst. "Be brave, little one. You're safe. I assure you." Niko ground their teeth. These beings were no different than Klothys, or the agent, or the oracle who used fate as a cage to keep people controlled. Niko addressed them with a voice trained for stadiums. "I am from Theros, a land that has never heard of you. My name is <NAME>, and I captured one of you to stop a meaningless death and find a way here. Two clans of Bretagard pooled their power to bring you a warning—the Cosmos Serpent is coming for you. It will see your hall destroyed, your dead obliterated, and your lake drained to nothing. There will be nothing left of your home but the dregs of a pig's trough!" Another Valkyrie, white-gold hair and pale skin in sharp relief against the black clouds roiling among the World Tree's branches, rested her chin in her hand. "Evocative, and impossible," she said. "The Valkmir lake and everything on it is our blood and bone. We cannot be taken unawares, here." 
"And yet no one greeted me at the door. Your cat has better manners," Niko shot back. The gray cat leapt onto the blonde's shoulder, nuzzling her snowy wing. #figure(image("004_Aim Through the Target/05.jpg", width: 100%), caption: [Stalwart Valkyrie | Art by: <NAME>], supplement: none, numbering: none) "Must have taken you for a nosy little squirrel, didn't you, Aggro?" said the blonde. The cat accepted a scritch, shimmering with polar light—then perked his ears, spooked. He launched a vertical twenty feet, vaulted off a rafter, and disappeared into the interlacing branches above. The open archway that overlooked the lake filled with black wings. "Found you, mortal!" Niko recognized him the moment he spoke. Avtyr, the missing reaper, glided into a hard landing. The agate-green glow off his wings blazed, erasing his shadow and turning his brown skin pale, brown eyes almost yellow with fury. All the Valkyries watched with confusion. Avtyr looked a little worse for wear, his long black braids a bit less lustrous and his wings ungroomed, like a crow in a downpour. He shook out his wings, tugged irritably at the tight bindings circling his ribs beneath his armor, and stalked toward the center of the aerie. He jabbed a finger at Niko. "This mortal interrupted our judgement and showed no respect for the laws by which all of Kaldheim lives and dies. Not even the upstart Skoti gods would show such audacity!" Avtyr didn't draw his sword, but rage crackled off him as though he might do so at any moment. His blackened armor glinted as he pointed at different Valkyries, some huddled close as siblings, others sneering at the mention of their counterpart. "Evot, Tove, will you let this affront go unpunished? What about you, Gisla? If Alsig had been attacked by a mortal under the auspices of #emph[keeping frith] in the middle of a pitched battle, would you have abandoned her? Of course not—you would have fought! 
This #emph[vision] they speak of is nothing but <NAME> chasing his lost youth. Could you imagine that blowhard flailing against a creature whose body encircles the World Tree itself? Outrageous." Avtyr rounded on Niko with the sweep of his wings. Niko had to adjust their stance to keep from being blown back. "That little stoat forced me to negotiate my freedom with a Vedrune rather than face me themself! Not only an invasion, but a craven, deceptive—" Another Valkyrie glided down from the branches on dove-gray wings that shimmered blue as a winter moon. Their yellow hair framed a brown face with severe gray eyes. "Avtyr," they said, reaching for him, "are you hurt? What happened to your wings?" Niko recognized them. The other one from the battlefield. Their presence seemed to blunt Avtyr's anger. "The ways through the Cosmos are"—Avtyr struggled for the word—"crowded. If I had carried a mortal with me, I might have lost them. Why did you leave me there, Rytva?" Rytva turned up toward the others. "I told you something was wrong—look at the clouds. They boil with the violence of the lower realms!" "Your journey did that to you, and not I?" Niko asked, innocently. "You don't have the strength," Avtyr said over Rytva's shoulder. "A child's trick at best." Niko had to convince all the Valkyries, and quickly. They recalled a flamboyant gesture of honesty in Akros—drawing one's xiphoi, touching the tip of the sword to one's own belly, and offering the hilt to the offended party. Instead of alarming everyone by conjuring a weapon, Niko revealed its secrets. "If something cracks or shatters the mirror, you're free. The more traps that require my attention, the less time I can hold them, and if I forget about you—since I don't have to concentrate on just one—the magic wears off on its own. A few hours at most. You were never in danger from me." The other Valkyries all looked to Avtyr, his accusations and experiences seeming to help rather than hurt Niko's case. 
Avtyr blew air from his nose, beaten but unwilling to concede. He muttered a long string of curses Niko couldn't understand and swept out of the aerie with Rytva at his elbow. Then he froze. Rytva touched his arm, staring at the sky in horror. "Mother of us all~!" High above them, past the soft serenity of the cloud line, the smooth expanse of twilight began to boil with infection. As if seen through thin ice, glimpses of other realms appeared, sharpened, faded—as though dozens of Doomskars pressed in on the edges of Starnheim. Land and sky met at orthogonal gravities, showing lakes of fire flowing uphill. A long drop onto broken boulders tufted with moss and lichen, and a familiar land under strange skies. This last image swelled, rippled, and tore. At first, it looked like a festering puncture, a congealing length of black blood oozing into reality, but the rivulet tensed and drew back into itself, trunk thickening, skin splitting and flaking off into the Valkmir like falling leaves of pure iridescence—each the size of a village. What began as a featureless slug coiled and tensed in the air, bulging and bristling with scales and spikes. The creature solidified into a massive, armored eel, born from the interstices of the Cosmos itself. Then there was the sound. The jaw opened. Dislocated. Venom-laced teeth like towering spikes glistened against the cyanotic flesh of its maw. Its shriek ripped through the sky, a tortured cacophony of twisted metal, toppled cities, whole worlds ground to rubble. Niko's hands over their ears went numb from fear. "Koma," Avtyr breathed, "The Cosmos Serpent." #figure(image("004_Aim Through the Target/06.jpg", width: 100%), caption: [Koma, Cosmos Serpent | Art by: <NAME>], supplement: none, numbering: none) If an Omenpath was an opening between worlds, the gash this creature caused was a violation. Arcs of magical energy skittered and crackled like a parasite's acid to weaken the soft skin of the world. 
Niko looked to the Valkyries for structure, for leadership; but there was none. They were as scared as Niko was. "This shouldn't be possible!" Rytva murmured. "Someone must have released it—sent it—but who would attack us?#emph[ Why?] " Avtyr stammered. Rytva swallowed. "We—we have to fight it. We can't let it harm the people." "We have to run," said Avtyr. The serpent thrashed, and the black lake roiled in its wake. Clouds churned, and the serpent lunged at the sight of movement—the snap of its jaw resounding like a boulder split by lightning. "With the way between worlds unstable? Absolutely not. I won't abandon our home—our blood—not without a fight!" Rytva cried. A wild, hysterical memory of Threat the ratcatcher crossed Niko's mind, the way it chased Niko's mirrors wherever Niko threw them. "If we can't fight, and we can't run, then we have to herd it back out," said Niko. "Fly in close, right side or left side—make it chase you the way cats chase toys." "And if we can't outfly that thing?" Avtyr asked. "Mirror. Safe, too small to see. Animals follow what looks alive, right? We tug its attention like tugging on reins and drive it back out through one of those holes." Rytva and Avtyr exchanged a glance, then looked up at the creature. "What about the dead?" Rytva asked. "Can they fly?" Niko asked. "If not, keep them inside. If that thing gets distracted, the plan's finished." Rytva spoke softly to Avtyr. "You see what I see in them, dear one. Don't be stubborn." Avtyr swallowed. "It should go back through the path it came from—I won't risk sending it to some random undeserving place." "Done," said Niko. "If we can follow it home, then maybe we can find the one who sent it." With grim resolve, Avtyr followed Rytva's lead. The two Valkyries pulled their horns from their belts and blew, assembling all their kin from the endless hall. They gathered spears, swords, shields, war hammers, and axes; made final adjustments to their armor, and formed up.
Niko swept one arm across their chest, then the other, stretching their shoulders. They tried to notice the fear without getting lost in it, the same way they managed their nerves in the last moments of darkness before entering the blazing sun and a packed arena full of faceless strangers screaming their name. Moving targets from a moving platform they had trained for, but this~this time they might die. This time, immortals might die. Niko had only meant to bring a warning, not lead the charge. This place meant so much to so many. To Thura, No-breath, the berserker youth. Kjell. They deserved to come home at the end of their time. Niko wanted to see them again. The athlete, the professional, bounced on the balls of their feet, tempering the rush of adrenaline into a steady reserve for the marathon to come. Forty Valkyries took to the air in waves, including Rytva who carried Niko. Niko's stomach dropped as they watched the dock shrink to a narrow line between the hall and the endless black lake. A thin, flimsy defense. The sky bubbled, other realms still stretching and pressing inward. Visions of primordial forests, and the charred remains of villages dilated and winked out on all sides. Rytva and Niko detached from the main group and headed for the monster. Rytva pulled up to avoid being blown off course by Koma's wake as it swam through the sky. Avtyr beat his wings, green light blazing through black feathers, and flew ahead to find breakers they could ride toward the serpent's head. "Ready?" Rytva called. Niko tried to call ready back, but their mouth had gone bone-dry from fear. They spoke with their weapon instead, muscle memory taking over where conscious thought had fled. Deep azure light radiated from the length of liquid silver Niko formed into a javelin with a hooked end. Niko forced focus into their grip. They pointed at the base of the serpent's skull, their first target. That was enough for the Valkyries. 
Rytva and Avtyr carried Niko together and dived. No sooner than Niko had angled their legs for the drop were they airborne, the serpent's undulating body within reach. They landed, rolled, and used their momentum against the wind to land in a crouch, getting a feel for the way Koma moved. Niko bear-crawled across scales thick as boulders in some places and smooth as ice in others; each bearing an uncanny resemblance to Fynn's shield. They slid down the last few feet of neck to bury their spear between the scales of Koma's skull. Gritting their teeth, Niko's power flowed down through the javelin to make an anchor, extending three prongs off the base that curled deeper into the beast's flesh. It stank of charred metal and acid. Niko angled their feet at either side of the wound, hoping their boots would save them from the worst of the burns. Niko raised their left hand, and Avtyr, far to their left, sounded his horn. A squad of five Valkyries pressed forward, howling war cries and radiating storm light. They beat their swords against shields, taunting Koma to come after them. Koma took the bait, flying onward after the light and the thunder, opening its jaws to snap up the nearest. The Valkyries scattered in all directions, and just as Koma's teeth closed on the slowest Valkyrie, Niko hurled a mirror and the Valkyrie appeared to shatter into a thousand shards of glass while her true body—trapped in the still-hurtling shard—sailed harmlessly out of Koma's reach. The serpent's jaws clamped down on nothing but clouds. When the trap released, the Valkyrie emerged from the glass as though falling from a midair trapdoor. She beat her wings, righted herself, and rejoined her unit out of Koma's periphery. "It's working!" shouted Rytva, far to Niko's right. Niko scanned the sky, looking for a pattern, and signaled for the next sets of Valkyries to ready themselves. Another path opened, arcing with electricity but not yet fully formed.
Niko raised their right hand, and Rytva blew their horn. Valkyries swarmed Koma's right side, hurling insults and taunts, waving their weapons but never striking. The second Koma's eyes were damaged, the plan would fall apart. The snake pounced, striking out at the Valkyries, and Niko threw mirror traps, disappearing the beast's prey. Each time the Valkyries fell out of Koma's periphery, Niko summoned another mirror, shattering the first. Koma flew onward, and Niko's holstered mirrors hovered nearby as Niko scanned for the right Omenpath. Koma's head jerked up, and it shrieked. Niko felt the world tilt and nearly lost their balance. Koma must have felt the spear as an itch. Niko dropped to their knees and shifted forward, letting their first javelin shatter. They dug their gloved fingers under two of Koma's scales and heaved, holding them up with their forearms and driving two thicker, shorter spears into the soft flesh beneath. Koma howled, thrashed its head back and forth, and spun around in the air. Every inch of Niko's body clung to Koma's scales, even as the serpent's acid blood sizzled and flecked against their armor. Kannah armor. Bretagard armor. Gifted without expectation of anything in return because whoever you find in the snow is either foe or family; there is nothing in between. Niko righted themself, knees dug in, two spears dug deep, mirrors circling as more Omenpaths opened, belching detritus, or storm winds, or desert dust. None of them were right. It had opened before—where was it? Which one? Niko, Rytva, and Avtyr called out commands, and Koma flew where they told it to. The Valkyries were down to their last two squads. Niko's arms felt leaden, their core and lungs burned. They had to keep going. If not for the Valkyries and Starnheim, then for all Kaldheimr who lived under its light and the promise of home. Koma pitched to the left, and Niko crouched down into the centrifugal force that held them in place. 
Out of the corner of their vision, another Omenpath opened just over the Valkmir. "Niko!" shouted Avtyr. He'd seen it, too. If Koma didn't destroy this place, the portals still might. Eyes bleary from the relentless wind, Niko blinked to clear them. Then they saw a portal open unlike any others, no glittering waterfalls or mossy mountains; but billowing clouds glowing with flame, a crush of bodies roiling in a battle like the end of the world. Without safety or certainty, Niko chose. "THERE!" Niko shouted, raising their left fist. "FAST!" Avtyr blew the horn, and the last squad of Valkyries mobilized, roaring for battle, blazing with light, and drawing the serpent to follow. #figure(image("004_Aim Through the Target/07.jpg", width: 100%), caption: [Starnheim Unleashed | Art by: <NAME>oss], supplement: none, numbering: none) As they hurtled toward the ground, to the space between the dock and the blood-black water, the hole in the world began to close. Niko could barely lift their arms. If they threw a mirror now, they knew they'd miss. They almost smiled. It made their choice simpler. Niko roared, funneling every ounce of their magic, every drop of strength into the two anchor spikes in Koma's skull. The javelins lengthened, and Koma felt it. The last Valkyrie dived out of the way as Koma's head jerked back, trying to shake free of the needles that crept deeper into its flesh. Koma whipped back to right itself and bashed its face on the edge of the hole. The dock exploded into a mass of planks and torn metal bands. Niko was thrown off. The serpent, stunned, slipped through. Koma's weight and momentum carried it the rest of the way down through the hole, the Valkmir's black waters spilling in after it, hissing over the smears of Koma's acid blood as it fell down through the hole in the world. Niko raised themself on hands and knees to crawl away, but the broken dock collapsed under their weight, toppling them into the hole. 
At the last moment, Niko caught a section of pylon, clinging with burnt-out arms and exhausted legs. The wood above them creaked. They panted. Sweating. Shaky. Silvery hair matted to their face, and ears still ringing from the Cosmos Serpent's horrible screams. They were in over their head. Past fear. Past bravery. This was exactly where Niko had aimed, and Niko never missed. Depleted, weary, Niko turned their eyes up to the lights of Starnheim, the short path to journey's end, all Kaldheim's hope of home~ And let go. Whether it was the wind or the fading magic, Niko felt colder. Heart-rending panic gnawed at the edges of Starnheim's ease. They raised one hand toward the light, every muscle burning, and reached for a mirror. Avtyr's hand clamped around their wrist. The light from his wings was soft as fireflies, his brown eyes gray in the strange glow. "You done watching, reaper?" Niko murmured. Avtyr looked at Niko the way Orhaft had, suspicion and hope all tangled together, hope a little stronger. "Your fate isn't decided yet," he answered. A wheezy laugh cracked Niko's lips. "Fate's just someone else telling you who to be." Niko righted themself at the Valkyrie's side as the two plummeted together toward the battle. Avtyr flapped his wings, holding tight and speeding down the path between worlds. A silver javelin materialized in Niko's free hand, mirror-bright and trailing a deep azure glow as a horde of Valkyries followed, lighting the way in gold and green, purple and orange, silver, scarlet, and blue—a new rainbow born of a darker world. In this winter sky, the fall became flight.
https://github.com/luiswirth/bsc-thesis
https://raw.githubusercontent.com/luiswirth/bsc-thesis/main/README.md
markdown
# Rust Implementation of Finite Element Exterior Calculus on Coordinate-Free Simplicial Complexes This repository contains the [Typst](https://typst.app/) source files for the Bachelor's Thesis of <NAME> under the supervision of Prof. Dr. <NAME>, for Computational Science and Engineering at ETH Zürich. For the implementation see [Formoniq](https://github.com/luiswirth/formoniq). ## Objective The project aims to develop a finite element library in Rust based on the principles of Finite Element Exterior Calculus (FEEC) to solve partial differential equations (PDEs) formulated in terms of differential forms over simplicial complexes, using an intrinsic, coordinate-free approach. The focus will be on solving elliptic Hodge-Laplace problems on simplicial meshes, where geometric properties are defined by the Regge metric, and linear (first-order) Whitney forms are used as basis functions. ## Background Finite Element Exterior Calculus (FEEC) provides a unified framework that extends the finite element method using the language of differential geometry and algebraic topology. @hiptmair1999canonical By employing differential forms and (co-)chain complexes, FEEC offers a robust approach for preserving key topological and structural features in the solution of PDEs. @arnold2006 This framework is particularly well-suited for problems such as the Hodge-Laplace equation and Maxwell’s equations. @femster Traditional finite element methods rely on explicit coordinate representations of the computational domain. However, a coordinate-free formulation aligns more naturally with the intrinsic nature of differential geometry. By representing the computational domain as a simplicial complex with an associated Riemannian metric, we can define geometric quantities (such as lengths, areas, and volumes) intrinsically, without explicit coordinates. 
This metric is an inner product on the tangent spaces and defines operators like the Hodge star, which are essential in the formulation of the Hodge-Laplace operator.

Rust was chosen for its strong guarantees in memory safety, performance, and modern language features, making it ideal for high-performance computing tasks like finite elements. The Rust ownership model, borrow checker, and type system act as a proof system to ensure there are no memory bugs, race conditions, or similar undefined behaviors in any program, while achieving performance levels comparable to C/C++. @klabnik2018rust @jung2017rustbelt

## Approach

+ _Coordinate-Free Simplicial Complex Data Structure_: Develop a mesh data structure that represents the computational domain as a simplicial complex without explicit coordinates for vertices. Instead, the mesh will store topological information (incidence and adjacency) and associate a metric tensor (geometry) on individual simplices.
+ _Finite Element Spaces and Basis Functions_: Utilize Whitney forms as basis functions for discretizing differential forms on the simplicial complex. These forms are naturally defined on all simplices.
+ _Weak Formulation of the Hodge-Laplace Operator_: Derive the weak formulation of the Hodge-Laplace operator in the coordinate-free, intrinsic setting. This involves a generalization of integration by parts to differential forms. We should consider both the primal and mixed formulation.
+ _Element Matrices for Hodge-Laplace Problem_: Derive the formulas for the element matrix of a Hodge-Laplace problem. This involves explicit calculations of the exterior derivative, the codifferential, the Hodge star operator and inner products on differential forms.
+ _Assembly_: Assemble the Galerkin matrix from the element matrices. Using Rust's fearless concurrency feature, we can have a parallel implementation of the assembly process.
+ _Solving the system_: Due to the possibly high-dimensionality of the computational problem and the curse of dimensionality it would be beneficial to not only use a direct solver, but also matrix-free iterative solvers. + _Testing and Validation_: Test the implementation across multiple dimensions (e.g., 2D, 3D) to assess accuracy, convergence rates, and performance. Compare results to existing methods, including traditional finite element methods that use explicit coordinates and no FEEC. ## Significance This project will result in a versatile finite element library that generalizes across dimensions, manifold geometries, and forms, broadening the applicability of finite element methods to a wider class of PDEs. In theory, such a PDE library could be used to solve PDEs relativistically on 4D spacetime. The project will also showcase Rust’s potential as a modern language for scientific computing, particularly in developing high-performance numerical tools that can handle complex mathematical structures. ## Prior Work In the Julia ecosystem, there are several tools for exterior algebra and exterior calculus. Notable implementations of FEEC for arbitrary dimensional simplicial meshes and first-order Whitney forms already exist. @ddfjl However, these implementations typically rely on explicit coordinate representations. ## Possible Extensions - Support for Higher-Order Basis Functions: Extend the library to support higher-order Whitney forms and other basis functions, allowing for increased accuracy and flexibility. - Solving Maxwell's Equations on non-trivial manifolds, such as 4D spacetime to showcase a real-life application. - Variable Coefficient Functions: Implement parametric formulation of finite element spaces, enabling the inclusion of functor-like coefficient functions within the PDEs. - Hodge Decomposition (generalization of Helmholtz Decomposition) and determining Betti numbers. - Evolution Problems such as Heat and Wave Equation.
https://github.com/eneoli/kit-thesis-template
https://raw.githubusercontent.com/eneoli/kit-thesis-template/main/common/statutory-declaration.typ
typst
#import "../meta.typ": * #v(15cm) #align(center)[*Statutory Declaration*] #line(length: 100%, stroke: (thickness: 0.5pt)) I hereby declare that I have developed and written the enclosed thesis completely by myself. I have not used any other than the aids that I have mentioned. I have marked all parts of the thesis that I have included from referenced literature, either in their original wording or paraphrasing their contents. I have followed the by-laws to implement scientific integrity at KIT. #pad(x: 1em, [*#statutoryDeclarationPlaceAndDate*]) #v(2cm) #box( width: 175pt, [ #line(length: 100%, stroke: (thickness: 1pt, dash: "dotted")) #align(center)[(#author)] ] )
https://github.com/alberto-lazari/computer-science
https://raw.githubusercontent.com/alberto-lazari/computer-science/main/type-theory/theory-exercises/exercises/equality.typ
typst
#import "/common.typ": * #exercise( section: (num: "3.6", title: "Martin-Löf's Intensional Propositional Equality"), ex: 7, [Prove that there exists a proof-term $pf$ such that. $ pf in Id(N1, star, w) ctx(w in N1) $ is derivable.] ) There exists a proof-term $pf = ElN1(w, id(star)), (x). id(x)$, such that $ pf in Id(N1, star, w) ctx(w in N1) $ is derivable, in fact $ElN1(w, id(star)) in Id(N1, star, w) ctx(Gamma)$ is derivable: #align(center, box[ #set text(8pt) #prooftree( axiom($Gamma cont$), rule(label: var, $w in N1 ctx(Gamma)$), axiom($Gamma cont$), rule(label: FS, $N1 type ctx(Gamma)$), axiom($Gamma cont$), rule(label: IS, $star in N1 ctx(Gamma)$), axiom($Gamma cont$), rule(label: var, $w in N1 ctx(Gamma)$), rule(n: 3, label: FId, $Id(N1, star, w) type ctx(Gamma)$), axiom($Gamma cont$), rule(label: IS, $star in N1 ctx(Gamma)$), rule(label: IId, $id(star) in Id(N1, star, star) ctx(Gamma)$), rule(n: 3, label: ES, $ElN1(w, id(star)) in Id(N1, star, w) ctx(Gamma)$) ) ]) Where $Gamma cont$ derivable, because: - $Gamma = w in N1$ - $w in N1 cont$ derivable: $ #prooftree( axiom($ctx() cont$), rule(label: $FS$, $N1 type ctx()$), rule(label: $Fc$, $w in N1 cont$), ) $ #exercise( section: (num: "3.6", title: "Martin-Löf's Intensional Propositional Equality"), ex: 8, [Prove that there exists a proof-term $pf$ such that. $ pf in Id(N1, x, w) ctx(x in N1, w in N1) $ is derivable.] ) There exists a proof-term $pf = ElN1(x, ElN1(w, id(star))), (y). 
id(y)$, such that $ pf in Id(N1, x, w) ctx(x in N1, w in N1) $ is derivable, in fact $ElN1(x, ElN1(w, id(star))) in Id(N1, x, w) ctx(Gamma)$ is derivable: #align(center, box[ #set text(8pt) #prooftree( axiom(label: $pi_1$, $Gamma cont$), rule(label: var, $x in N1 ctx(Gamma)$), axiom(label: $pi_1$, $Gamma cont$), rule(label: FS, $N1 type ctx(Gamma)$), axiom(label: $pi_1$, $Gamma cont$), rule(label: var, $x in N1 ctx(Gamma)$), axiom(label: $pi_1$, $Gamma cont$), rule(label: var, $w in N1 ctx(Gamma)$), rule(n: 3, label: FId, $Id(N1, x, w) type ctx(Gamma)$), axiom(label: $pi_2$, $ElN1(w, id(star)) in Id(N1, star, w) ctx(Gamma)$), rule(n: 3, label: ES, $ElN1(x, ElN1(w, id(star))) in Id(N1, x, w) ctx(Gamma)$) ) ]) Where: #pi-enum[ + $Gamma cont$ derivable, because: - $Gamma = x in N1, w in N1$ - $x in N1, w in N1 cont$ derivable: $ #prooftree( axiom($ctx() cont$), rule(label: $FS$, $N1 type ctx()$), rule(label: $Fc$, $x in N1 cont$), rule(label: $FS$, $N1 type ctx(x in N1)$), rule(label: $Fc$, $x in N1, w in N1 cont$), ) $ + $ElN1(w, id(star)) in Id(N1, star, w) ctx(Gamma)$ derivable: #align(center, box[ #set text(8pt) #prooftree( axiom(label: $pi_1$, $Gamma cont$), rule(label: var, $w in N1 ctx(Gamma)$), axiom(label: $pi_1$, $Gamma cont$), rule(label: FS, $N1 type ctx(Gamma)$), axiom(label: $pi_1$, $Gamma cont$), rule(label: IS, $star in N1 ctx(Gamma)$), axiom(label: $pi_1$, $Gamma cont$), rule(label: var, $w in N1 ctx(Gamma)$), rule(n: 3, label: FId, $Id(N1, star, w) type ctx(Gamma)$), axiom(label: $pi_1$, $Gamma cont$), rule(label: IS, $star in N1 ctx(Gamma)$), rule(label: IId, $id(star) in Id(N1, star, star) ctx(Gamma)$), rule(n: 3, label: ES, $ElN1(w, id(star)) in Id(N1, star, w) ctx(Gamma)$) ) ]) ]
https://github.com/pku-typst/PKU-typst-template
https://raw.githubusercontent.com/pku-typst/PKU-typst-template/main/lib.typ
typst
MIT License
#import "templates/思政课/课程论文/lib.typ" as 思政课课程论文 #import "templates/通用/作业/lib.typ" as 通用作业
https://github.com/katamyra/Notes
https://raw.githubusercontent.com/katamyra/Notes/main/Compiled%20School%20Notes/CS3001/Modules/Utilitarianism.typ
typst
#import "../../../template.typ": * = Utilitarianism #theorem[ In utilitarianism, an action is good if the benefits exceed its harms, and an action is bad if its harms exceed its benefits ] #definition[ *Act Utilitarianism* is the ethical theory that an action is good if the net effect (over anyone and everyone who is affected) is to produce more happiness than unhappiness ] *Case For*: - It focuses on happiness - It is practical - It is generally comprehensive *Case Against*: - Where to draw line in calculations? How bad is too bad if the end justifies the means? - Takes a lot of time for each moral calculation - Ignores innate sense of #definition[ *Rule Utilitarianism* is the ethical theory that holds that we ought to adopt those moral rules that, if followed by everyone, lead to the greatest increase in total happiness over all affected parties ] A _rule utilitarian_ chooses to follow a moral rule because its universal adoption would result in the greatest net increase in happiness. _This differs from kantianism because a rule utilitarian is still concerned with he consequences and not the motive_. *Case For*: - Not every moral decision requires performing utilitarian calculus - Exceptional situations don't overthrow moral rules - Reduces problem of *bias*(?) - idk what this means *Case Against*: - It forces us to use a single scale or measure to evaluate completely different kinds of consequences - It ignores the problem of an unjust distribution of good consequences
https://github.com/HPDell/touying-brandred-uobristol
https://raw.githubusercontent.com/HPDell/touying-brandred-uobristol/main/lib.typ
typst
MIT License
// This theme is inspired by the brand guidelines of University of Bristol // This theme is revised from https://github.com/touying-typ/touying/blob/main/themes/metropolis.typ // The original theme was written by https://github.com/Enivex // The code was revised by https://github.com/HPDell #import "@preview/touying:0.5.2": * /// Use to replace the default composer #let multicolumns(columns: auto, alignment: top, gutter: 1em, ..bodies) = { let bodies = bodies.pos() if bodies.len() == 1 { return bodies.first() } let columns = if columns == auto { (1fr,) * bodies.len() } else { columns } grid(columns: columns, gutter: gutter, align: alignment, ..bodies) } #let _typst-builtin-align = align /// Default slide function for the presentation. /// /// - `title` is the title of the slide. Default is `auto`. /// /// - `config` is the configuration of the slide. You can use `config-xxx` to set the configuration of the slide. For more several configurations, you can use `utils.merge-dicts` to merge them. /// /// - `repeat` is the number of subslides. Default is `auto`,which means touying will automatically calculate the number of subslides. /// /// The `repeat` argument is necessary when you use `#slide(repeat: 3, self => [ .. ])` style code to create a slide. The callback-style `uncover` and `only` cannot be detected by touying automatically. /// /// - `setting` is the setting of the slide. You can use it to add some set/show rules for the slide. /// /// - `composer` is the composer of the slide. You can use it to set the layout of the slide. /// /// For example, `#slide(composer: (1fr, 2fr, 1fr))[A][B][C]` to split the slide into three parts. The first and the last parts will take 1/4 of the slide, and the second part will take 1/2 of the slide. /// /// If you pass a non-function value like `(1fr, 2fr, 1fr)`, it will be assumed to be the first argument of the `components.side-by-side` function. 
/// /// The `components.side-by-side` function is a simple wrapper of the `grid` function. It means you can use the `grid.cell(colspan: 2, ..)` to make the cell take 2 columns. /// /// For example, `#slide(composer: 2)[A][B][#grid.cell(colspan: 2)[Footer]] will make the `Footer` cell take 2 columns. /// /// If you want to customize the composer, you can pass a function to the `composer` argument. The function should receive the contents of the slide and return the content of the slide, like `#slide(composer: grid.with(columns: 2))[A][B]`. /// /// - `..bodies` is the contents of the slide. You can call the `slide` function with syntax like `#slide[A][B][C]` to create a slide. #let slide( title: auto, align: auto, config: (:), repeat: auto, setting: body => body, composer: auto, ..bodies, ) = touying-slide-wrapper(self => { if align != auto { self.store.align = align } // restore typst builtin align function let align = _typst-builtin-align let header(self) = { set align(top) show: components.cell.with(fill: self.colors.primary, inset: (x: 2em)) set align(horizon) set text(fill: self.colors.neutral-lightest, weight: "bold", size: 1.2em) if title != auto { utils.fit-to-width.with(grow: false, 100%, title) } else { utils.call-or-display(self, self.store.header) } } let footer(self) = { set align(bottom) set text(size: 0.8em) block(height: 1.5em, width: 100%, stroke: (top: self.colors.primary + 2pt), pad( y: .4em, x: 2em, components.left-and-right( text(fill: self.colors.neutral-darkest, utils.call-or-display(self, self.store.footer)), text(fill: self.colors.neutral-darkest, utils.call-or-display(self, self.store.footer-right)), ), )) } let self = utils.merge-dicts( self, config-page( fill: self.colors.neutral-lightest, header: header, footer: footer, ), ) let new-setting = body => { show: align.with(self.store.align) set text(fill: self.colors.neutral-darkest) show heading.where(level: self.slide-level + 1): it => { stack( dir: ltr, spacing: .4em, 
image("uob-bullet.svg", height: .8em), text(fill: self.colors.primary, it.body) ) } set enum(numbering: (nums) => { text(fill: self.colors.primary, weight: "bold", str(nums) + ".") }) set list(marker: (level) => { text(fill: self.colors.primary, weight: "bold", sym.triangle.r.filled) }) set table(stroke: self.colors.primary) show: setting body } touying-slide(self: self, config: config, repeat: repeat, setting: new-setting, composer: multicolumns, ..bodies) }) /// Title slide for the presentation. You should update the information in the `config-info` function. You can also pass the information directly to the `title-slide` function. /// /// Example: /// /// ```typst /// #show: metropolis-theme.with( /// config-info( /// title: [Title], /// logo: emoji.city, /// ), /// ) /// /// #title-slide(subtitle: [Subtitle], extra: [Extra information]) /// ``` /// /// - `extra` is the extra information you want to display on the title slide. #let title-slide( extra: none, ..args, ) = touying-slide-wrapper(self => { let info = self.info + args.named() let body = { set text(fill: self.colors.neutral-darkest) set align(horizon) grid( rows: (auto, 1fr), pad(y: 2em, x: 2em, image("./uob-logo.svg", height: 2.4em)), block(width: 100%, height: 100%, { place(bottom + right, polygon( stroke: none, fill: gray.transparentize(60%), (0pt, 0pt), (2cm, -8cm), (14cm, -8cm), (14cm, 0pt) )) place(top + left, polygon( fill: self.colors.primary.transparentize(10%), stroke: none, (0cm, 0cm), (0cm, 8cm), (22cm, 8cm), (24cm, 0cm) )) place(top + left, { set text(fill: self.colors.neutral-lightest) grid( rows: (4cm, 4cm), columns: (24cm), block(inset: (x: 2em, y: 1em), width: 100%, height: 100%, { set align(bottom + left) set text(size: 32pt, weight: "bold") info.title }), block(inset: (left: 2em, right: 8em, y: 1em), width: 100%, height: 100%, { set align(top + left) set text(size: 24pt) info.subtitle }) ) }) place(bottom + left, block( width: 100%, height: 8cm, inset: (x: 2em, top: 4em), { set 
align(left + horizon) set text(16pt) stack( dir: ttb, spacing: 8pt, self.info.date.display(self.datetime-format), self.info.institution ) } )) }) ) } self = utils.merge-dicts( self, config-common(freeze-slide-counter: true), config-page( fill: self.colors.neutral-lightest, margin: 0em ), ) touying-slide(self: self, body) }) /// Show the outline slide /// /// - `title` is the title shown on top of the outline. Default: `Outline` /// - `column` is the number of columns to show the outline. Default: 2. /// - `marker` is something to mark the items before each heading. Default to the `uob-bullet.svg`. /// - `args` are additional args passing to `touying-slide-wrapper`. /// /// Example: /// /// ``` /// #outline-slide() /// ``` #let outline-slide(title: [Outline], column: 2, marker: auto, ..args) = touying-slide-wrapper(self => { let info = self.info + args.named() let header = { set align(center + bottom) block( fill: self.colors.neutral-lightest, outset: (x: 2.4em, y: .8em), stroke: (bottom: self.colors.primary + 3.2pt), text(self.colors.primary, weight: "bold", size: 1.6em, title) ) } let body = { set align(horizon) show outline.entry: it => { let mark = if ( marker == auto ) { image("uob-bullet.svg", height: .8em) } else if type(marker) == image { set image(height: .8em) image } else if type(marker) == symbol { text(fill: self.colors.primary, marker) } block(stack(dir: ltr, spacing: .8em, mark, it.body), below: 0pt) } show: pad.with(x: 1.6em) columns(column, outline(title: none, indent: 1em, depth: 1)) } self = utils.merge-dicts( self, config-page( header: header, margin: (top: 4.8em, bottom: 1.6em), fill: self.colors.neutral-lightest ) ) touying-slide(self: self, body) }) /// New section slide for the presentation. You can update it by updating the `new-section-slide-fn` argument for `config-common` function. /// /// Example: `config-common(new-section-slide-fn: new-section-slide.with(numbered: false))` /// /// - `level` is the level of the heading. 
/// /// - `numbered` is whether the heading is numbered. /// /// - `title` is the title of the section. It will be pass by touying automatically. #let new-section-slide(level: 1, numbered: true, title) = touying-slide-wrapper(self => { let header = { components.progress-bar(height: 8pt, self.colors.primary, self.colors.primary.lighten(40%)) } let footer = { set align(bottom) set text(size: 0.8em, fill: self.colors.neutral-lightest) block(height: 1.5em, width: 100%, fill: self.colors.primary, pad( y: .4em, x: 2em, components.left-and-right( text(utils.call-or-display(self, self.store.footer)), text(utils.call-or-display(self, self.store.footer-right)), ), )) } let body = { set align(horizon + center) show: pad.with(20%) set text(size: 1.5em, fill: self.colors.neutral-lightest, weight: "bold") block( // outset: (right: 2pt, bottom: 2pt), fill: self.colors.neutral-light, radius: 8pt, move(dx: -4pt, dy: -4pt, block( width: 100%, fill: self.colors.primary, inset: (x: 1em, y: .8em), radius: 8pt, utils.display-current-heading(level: level, numbered: numbered) )) ) } self = utils.merge-dicts( self, config-page( fill: self.colors.neutral-lightest, header: header, footer: footer, margin: 0em, ), ) touying-slide(self: self, body) }) /// Focus on some content. /// /// Example: `#focus-slide[Wake up!]` /// /// - `align` is the alignment of the content. Default is `horizon + center`. #let focus-slide(align: horizon + center, body) = touying-slide-wrapper(self => { let _align = align let align = _typst-builtin-align self = utils.merge-dicts( self, config-common(freeze-slide-counter: true), config-page(fill: self.colors.neutral-lightest, margin: 2em), ) set text(fill: self.colors.primary, size: 1.5em) touying-slide(self: self, align(_align, body)) }) /// Touying metropolis theme. 
/// /// Example: /// /// ```typst /// #show: metropolis-theme.with(aspect-ratio: "16-9", config-colors(primary: blue))` /// ``` /// /// Consider using: /// /// ```typst /// #set text(font: "Fira Sans", weight: "light", size: 20pt)` /// #show math.equation: set text(font: "Fira Math") /// #set strong(delta: 100) /// #set par(justify: true) /// ``` /// /// - `aspect-ratio` is the aspect ratio of the slides. Default is `16-9`. /// /// - `align` is the alignment of the content. Default is `horizon`. /// /// - `header` is the header of the slide. Default is `self => utils.display-current-heading(setting: utils.fit-to-width.with(grow: false, 100%), depth: self.slide-level)`. /// /// - `header-right` is the right part of the header. Default is `self => self.info.logo`. /// /// - `footer` is the footer of the slide. Default is `none`. /// /// - `footer-right` is the right part of the footer. Default is `context utils.slide-counter.display() + " / " + utils.last-slide-number`. /// /// - `footer-progress` is whether to show the progress bar in the footer. Default is `true`. 
/// /// ---------------------------------------- /// /// The default colors: /// /// ```typ /// config-colors( /// primary: rgb("#eb811b"), /// primary-light: rgb("#d6c6b7"), /// secondary: rgb("#23373b"), /// neutral-lightest: rgb("#fafafa"), /// neutral-dark: rgb("#23373b"), /// neutral-darkest: rgb("#23373b"), /// ) /// ``` #let uobristol-theme( aspect-ratio: "16-9", align: horizon, header: self => utils.display-current-heading(setting: utils.fit-to-width.with(grow: false, 100%), depth: self.slide-level), header-right: self => self.info.logo, footer: none, footer-right: context utils.slide-counter.display(), footer-progress: true, ..args, body, ) = { show: touying-slides.with( config-page( paper: "presentation-" + aspect-ratio, header-ascent: 30%, footer-descent: 30%, margin: (top: 3em, bottom: 1.5em, x: 2em), ), config-common( slide-fn: slide, new-section-slide-fn: new-section-slide, ), config-methods( init: (self: none, body) => { set text(size: 20pt, font: "Lato") show highlight: body => text(fill: self.colors.primary, strong(body.body)) body }, alert: utils.alert-with-primary-color ), config-colors( neutral-lightest: rgb("#fafafa"), primary: rgb("#ab1f2d"), secondary: rgb("#ea6719") ), // save the variables for later use config-store( align: align, header: header, header-right: header-right, footer: footer, footer-right: footer-right, footer-progress: footer-progress, ), config-info( datetime-format: "[day] [month repr:short] [year]" ), ..args, ) body }
https://github.com/TheLukeGuy/backtrack
https://raw.githubusercontent.com/TheLukeGuy/backtrack/main/src/checks.typ
typst
Apache License 2.0
// Copyright © 2023 <NAME> // This file is part of Backtrack. // // Licensed under the Apache License, Version 2.0 (the "License"); you may not // use this file except in compliance with the License. You may obtain a copy of // the License at <http://www.apache.org/licenses/LICENSE-2.0>. // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the // License for the specific language governing permissions and limitations under // the License. // From the changelog: // > Exposed content representation: Can be observed in hover tooltips or with // > repr #let v2023-03-21-supported = repr[a] != "[...]" // Commit 50b0318: // > Mark 3 symbols as combining accents (#2218) #let v0-9-0-supported = if v2023-03-21-supported { math.accent("a", "↔").accent != "↔" } else { false } // From the changelog: // > Scripting: Types are now first-class values #let v0-8-0-supported() = str(type(type(0))) == "type" // From the changelog: // > Math: Improved display of multi-primes (e.g. in $a''$) #let v0-7-0-supported() = $a''$.body.has("base") // From the changelog: // > Math: Increased precedence of factorials in math mode ($1/n!$ works // > correctly now) #let v0-6-0-supported() = $0/a!$.body.has("num") // From the changelog: // > Math: The syntax rules for mathematical attachments were improved #let v0-5-0-supported() = $a^b(0)$.body.has("base") // From the changelog: // > Scripting: Fixed replacement strings: They are now inserted completely // > verbatim instead of supporting the previous (unintended) magic dollar // > syntax for capture groups #let v0-4-0-supported() = "a".replace(regex("a"), "$0") == "$0" // From the changelog: // > Added support for attachments (sub-, superscripts) that precede the base // > symbol. The top and bottom arguments have been renamed to t and b. 
#let v0-3-0-supported() = $a^b$.body.has("t") // From the changelog: // > Dictionaries now iterate in insertion order instead of alphabetical order. #let v0-2-0-supported() = (b: 0, a: 0).keys().first() == "b" // From the changelog: // > Miscellaneous improvements: Fixed invalid parsing of language tag in raw // > block with a single backtick #let v0-1-0-supported() = not [`rust let`].has("lang") // From the changelog: // > Enumerations now require a space after their marker, that is, 1.ok must now // > be written as 1. ok #let v2023-03-28-supported() = [1.a].has("text") // TODO: Add a check for v2023-02-25. This may be impossible. // From the changelog: // > The eval function now expects code instead of markup and returns an // > arbitrary value. Markup can still be evaluated by surrounding the string // > with brackets. #let v2023-02-15-supported() = eval("0") == 0 // From the changelog: // > Fixed parsing of not in operation #let v2023-02-12-supported() = -1 not in (1, 2, 3) // TODO: Add a check for v2023-02-02. This may be impossible.
https://github.com/Myriad-Dreamin/tinymist
https://raw.githubusercontent.com/Myriad-Dreamin/tinymist/main/crates/tinymist-query/src/fixtures/completion/modify_string.typ
typst
Apache License 2.0
// contains: "New Computer Modern" #set text(font: ""/* range -2..0 */)
https://github.com/NyxAlexandra/ib-internal-asessment
https://raw.githubusercontent.com/NyxAlexandra/ib-internal-asessment/main/src/rough-draft/paper.typ
typst
#set page(numbering: "1.", number-align: right) #set table(align: left) // --- #align(horizon)[ #heading(outlined: false)[Hashing: How do servers store passwords?] <NAME> (2023-2024) / Topic: Cryptography / Stimulus: Computer Science / Source: https://github.com/NyxAlexandra/ib-internal-asessment #pagebreak() #outline(depth: 2, indent: 1em) ] #pagebreak() = Introduction How do servers store passwords? The natural answer is a structure like this: #figure( table( columns: (auto, auto), [`username`], [`password`], [`<EMAIL>`], [`<PASSWORD>`], ), caption: [A snippet of a hypothetical database], ) <naive-database> When a user attempts to log in, they transmit their credentials. If the provided password matches the password for the requested username, authentication succeeds. While this works, there are glaring security issues with this model. Consider the consequences of a data leak: anyone with the data can now access every leaked user's accounts and the user's accounts on other servers, given how common non-unique passwords are. To solve this conundrum, break the question into it's parts: - Authentication involves the user inputting their username and password - The server checks whether the received password is correct for the requested user We know that we can compare the value of the sent password and the password in the database, but that involves the server knowing the password. What if the value of the password could be stored without knowing the original password? To avoid the security problems, this encoded version would have to be non-reversible. A function like this would have a very useful property: #figure( [\ if $f(a) = f(b)$, then $a = b$ and vice versa\ \ ], caption: [The fundamental property of crytographic hash functions] ) <hash-function-property> where $f$ is the encoding function. = Hashing A _Hash Function_ is a function that maps inputs of any size to a fixed-size value. This output is called the "Digest". 
#figure( table( columns: (auto, auto), [`username`], [`password`], [`<EMAIL>`], [`<PASSWORD>`], ), caption: [@naive-database using SHA-256-encoded passwords], ) Via hashing, the server never receives raw passwords from users. When the user inputs their password, the password is hashed with the same algorithm used by the server and is only then transmitted. While there are many hash functions, the overall structure is as so: #figure( [\ ``` hash(bytes: Bytes): Digest; ```\ ], caption: [Psuedocode signature of a hash function], ) A _Cryptographic Hash Function_ is a hash function which has additional security properties. For any length $n$, these include: - The probability of any one digest being output by the function decreases exponentially as $n$ increases (at least $2^(-n)$) - It is unfeasible to derive the input string given a digest - It is unfeasible to find 2 inputs that produce the same digest Non-cryptographic hash functions are widely used for non-security-critical infrastructure. Common use cases include unique identifiers in a "Hash Map", a datastructure that maps hashes of keys to values. Cryptographic hash functions are widely used for digital security. Besides passwords, they are used to verify integrity. When a server sends a request to another computer, the response usually contains the hash of the response. If the hashed response does not match the provided hash, there is an issue with the integrity of the message. = Attacks #quote(block: true)[ *NOTICE* I'm not sure how to work this part. I have done a lot of experimenting and have read the paper on MD5 collision attacks @colision-attacks-md5 but don't know the best way to connect that to this paper. I'm considering using a simple algorithm to show the importance of cryptographic algorithms, but I feel like putting focus on the security of algorithms like SHA-256 helps reinforce the above sections. 
Some keywords: - collision attack - brute-force attack - birthday attack - dictionary/rainbow table attack ] #pagebreak() = Definitions / Bit: The fundamental component of computer memory. A Bit can hold two values: on or off. This is represented numerically as `1` or `0` or logically as `true` or `false`. / Byte: A byte is a collection of 8 bits. It is the most common way of measuring an amount of bits. / Psuedocode: In lieu of actual code, psuedocode describes in language and common programming paradigms what the actual code would be like. = Bibliography #bibliography( "sources.yml", title: none, full: true, )
https://github.com/maucejo/cnam_templates
https://raw.githubusercontent.com/maucejo/cnam_templates/main/template/main_lettre.typ
typst
MIT License
#import "../src/cnam-templates.typ": * #show: cnam-lettre.with( // type: "lettre-officielle", // type: "courrier-interne", // type: "note-service", // type: "note-cadrage", destinataire: ( nom: "<NAME>", adresse: [1 rue de la Paix \ 75000 Paris], ), expediteur: ( nom: "<NAME>", adresse: "1 rue de la Paix", telephone: "01 02 03 04 05", mail: "<EMAIL>" ), objet: "Objet de la lettre", lieu: "Paris", date: "01 janvier 2025", ) #lorem(100)
https://github.com/Myriad-Dreamin/typst.ts
https://raw.githubusercontent.com/Myriad-Dreamin/typst.ts/main/fuzzers/corpora/text/em_01.typ
typst
Apache License 2.0
#import "/contrib/templates/std-tests/preset.typ": * #show: test-page // Test using ems in arbitrary places. #set text(size: 5pt) #set text(size: 2em) #set square(fill: red) #let size = { let size = 0.25em + 1pt for _ in range(3) { size *= 2 } size - 3pt } #stack(dir: ltr, spacing: 1fr, square(size: size), square(size: 25pt))
https://github.com/ClazyChen/Table-Tennis-Rankings
https://raw.githubusercontent.com/ClazyChen/Table-Tennis-Rankings/main/history_CN/2015/MS-09.typ
typst
#set text(font: ("Courier New", "NSimSun")) #figure( caption: "Men's Singles (1 - 32)", table( columns: 4, [排名], [运动员], [国家/地区], [积分], [1], [马龙], [CHN], [3593], [2], [许昕], [CHN], [3367], [3], [樊振东], [CHN], [3294], [4], [张继科], [CHN], [3258], [5], [方博], [CHN], [3180], [6], [#text(gray, "王皓")], [CHN], [3147], [7], [闫安], [CHN], [3124], [8], [水谷隼], [JPN], [3099], [9], [迪米特里 奥恰洛夫], [GER], [3085], [10], [黄镇廷], [HKG], [3011], [11], [庄智渊], [TPE], [3004], [12], [弗拉基米尔 萨姆索诺夫], [BLR], [2996], [13], [吉村真晴], [JPN], [2996], [14], [朱世赫], [KOR], [2992], [15], [马克斯 弗雷塔斯], [POR], [2986], [16], [蒂姆 波尔], [GER], [2975], [17], [唐鹏], [HKG], [2964], [18], [郑荣植], [KOR], [2947], [19], [高宁], [SGP], [2939], [20], [于子洋], [CHN], [2932], [21], [丹羽孝希], [JPN], [2924], [22], [周雨], [CHN], [2922], [23], [帕特里克 弗朗西斯卡], [GER], [2911], [24], [陈卫星], [AUT], [2899], [25], [梁靖崑], [CHN], [2888], [26], [大岛祐哉], [JPN], [2884], [27], [帕特里克 鲍姆], [GER], [2863], [28], [李尚洙], [KOR], [2862], [29], [KOU Lei], [UKR], [2858], [30], [帕纳吉奥迪斯 吉奥尼斯], [GRE], [2855], [31], [塩野真人], [JPN], [2852], [32], [徐晨皓], [CHN], [2844], ) )#pagebreak() #set text(font: ("Courier New", "NSimSun")) #figure( caption: "Men's Singles (33 - 64)", table( columns: 4, [排名], [运动员], [国家/地区], [积分], [33], [西蒙 高兹], [FRA], [2838], [34], [斯特凡 菲格尔], [AUT], [2837], [35], [GERELL Par], [SWE], [2837], [36], [森园政崇], [JPN], [2831], [37], [吉田海伟], [JPN], [2825], [38], [DRINKHALL Paul], [ENG], [2803], [39], [利亚姆 皮切福德], [ENG], [2801], [40], [李廷佑], [KOR], [2800], [41], [MONTEIRO Joao], [POR], [2799], [42], [KIM Donghyun], [KOR], [2794], [43], [蒂亚戈 阿波罗尼亚], [POR], [2788], [44], [LI Hu], [SGP], [2786], [45], [GERALDO Joao], [POR], [2784], [46], [江天一], [HKG], [2780], [47], [雨果 卡尔德拉诺], [BRA], [2780], [48], [尚坤], [CHN], [2774], [49], [安德烈 加奇尼], [CRO], [2772], [50], [CHEN Feng], [SGP], [2772], [51], [卢文 菲鲁斯], [GER], [2771], [52], [#text(gray, "LIU Yi")], [CHN], [2763], [53], [村松雄斗], [JPN], [2763], [54], [汪洋], [SVK], [2758], [55], [奥马尔 阿萨尔], [EGY], [2757], [56], [HABESOHN 
Daniel], [AUT], [2751], [57], [罗伯特 加尔多斯], [AUT], [2744], [58], [LI Ping], [QAT], [2743], [59], [沙拉特 卡马尔 阿昌塔], [IND], [2742], [60], [周恺], [CHN], [2733], [61], [夸德里 阿鲁纳], [NGR], [2729], [62], [松平健太], [JPN], [2729], [63], [<NAME>], [POL], [2729], [64], [<NAME>], [FRA], [2725], ) )#pagebreak() #set text(font: ("Courier New", "NSimSun")) #figure( caption: "Men's Singles (65 - 96)", table( columns: 4, [排名], [运动员], [国家/地区], [积分], [65], [克里斯坦 卡尔松], [SWE], [2723], [66], [林高远], [CHN], [2722], [67], [周启豪], [CHN], [2722], [68], [丁祥恩], [KOR], [2722], [69], [巴斯蒂安 斯蒂格], [GER], [2718], [70], [HE Zhiwen], [ESP], [2717], [71], [MACHI Asuka], [JPN], [2717], [72], [王臻], [CAN], [2716], [73], [HO Kwan Kit], [HKG], [2716], [74], [PERSSON Jon], [SWE], [2710], [75], [金珉锡], [KOR], [2709], [76], [朴申赫], [PRK], [2709], [77], [HACHARD Antoine], [FRA], [2708], [78], [张禹珍], [KOR], [2703], [79], [TSUBOI Gustavo], [BRA], [2699], [80], [KARAKASEVIC Aleksandar], [SRB], [2691], [81], [PROKOPCOV Dmitrij], [CZE], [2690], [82], [及川瑞基], [JPN], [2689], [83], [陈建安], [TPE], [2689], [84], [吉田雅己], [JPN], [2683], [85], [VLASOV Grigory], [RUS], [2682], [86], [上田仁], [JPN], [2682], [87], [吴尚垠], [KOR], [2679], [88], [博扬 托基奇], [SLO], [2676], [89], [HIELSCHER Lars], [GER], [2667], [90], [#text(gray, "KIM Hyok Bong")], [PRK], [2664], [91], [BOBOCICA Mihai], [ITA], [2663], [92], [<NAME>], [FRA], [2663], [93], [MATSUDAIRA Kenji], [JPN], [2660], [94], [艾曼纽 莱贝松], [FRA], [2658], [95], [KANG Dongsoo], [KOR], [2657], [96], [马蒂亚斯 法尔克], [SWE], [2655], ) )#pagebreak() #set text(font: ("Courier New", "NSimSun")) #figure( caption: "Men's Singles (97 - 128)", table( columns: 4, [排名], [运动员], [国家/地区], [积分], [97], [#text(gray, "约尔根 佩尔森")], [SWE], [2652], [98], [#text(gray, "KIM Nam Chol")], [PRK], [2646], [99], [斯蒂芬 门格尔], [GER], [2646], [100], [WU Zhikang], [SGP], [2646], [101], [#text(gray, "张一博")], [JPN], [2645], [102], [赵胜敏], [KOR], [2639], [103], [CIOTI Constantin], [ROU], [2639], [104], [维尔纳 施拉格], [AUT], [2637], [105], 
[PATTANTYUS Adam], [HUN], [2637], [106], [SHIBAEV Alexander], [RUS], [2635], [107], [KIM Minhyeok], [KOR], [2632], [108], [奥维迪乌 伊奥内斯库], [ROU], [2632], [109], [薛飞], [CHN], [2631], [110], [SEO Hyundeok], [KOR], [2630], [111], [SAKAI Asuka], [JPN], [2629], [112], [安东 卡尔伯格], [SWE], [2627], [113], [雅克布 迪亚斯], [POL], [2625], [114], [LIVENTSOV Alexey], [RUS], [2623], [115], [TAN Ruiwu], [CRO], [2622], [116], [米凯尔 梅兹], [DEN], [2618], [117], [CHOE Il], [PRK], [2618], [118], [WANG Zengyi], [POL], [2617], [119], [#text(gray, "OYA Hidetoshi")], [JPN], [2616], [120], [HUANG Sheng-Sheng], [TPE], [2615], [121], [LAMBI<NAME>], [BEL], [2615], [122], [诺沙迪 阿拉米扬], [IRI], [2613], [123], [TAKAKIWA Taku], [JPN], [2611], [124], [<NAME>], [GER], [2607], [125], [阿列克谢 斯米尔诺夫], [RUS], [2606], [126], [<NAME>], [SVK], [2605], [127], [<NAME>], [JPN], [2603], [128], [<NAME>], [KOR], [2603], ) )
https://github.com/yonatanmgr/university-notes
https://raw.githubusercontent.com/yonatanmgr/university-notes/main/0366-%5BMath%5D/03661111-%5BLinear%20Algebra%201A%5D/src/lectures/03661111_merged.typ
typst
#import "/0366-[Math]/globals/template.typ": * #show: project.with( title: "אלגברה לינארית 1א׳", authors: ("<NAME>",), date: "סמסטר א׳ - 2024", ) ֿ #place(center)[עדכני נכון ל-#datetime.today().display("[day]/[month]/[year]")] #set enum(numbering: "(1.א)") #include "/0366-[Math]/globals/toc.typ" #include "03661111_lecture_3.typ" #pagebreak() #include "03661111_lecture_4.typ" #pagebreak() #include "03661111_lecture_5.typ" #pagebreak() #include "03661111_lecture_6.typ" #pagebreak() #include "03661111_lecture_7.typ" #pagebreak() #include "03661111_lecture_8.typ" #pagebreak() #include "03661111_lecture_9.typ" #pagebreak() #include "03661111_lecture_10.typ" #pagebreak() #include "03661111_lecture_11.typ" #pagebreak() #include "03661111_lecture_12.typ" #pagebreak() #include "03661111_lecture_13.typ" #pagebreak() #include "03661111_lecture_14.typ"
https://github.com/glocq/typst-forthright-cv
https://raw.githubusercontent.com/glocq/typst-forthright-cv/master/src/cv.typ
typst
MIT License
#import "template.typ": name, description, contactDetails, sectionTitle, cvEntry, miscEntry, layout #let header = [ #name([Jane], [Doedenaj]) #description("Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod") #contactDetails( phone: "+123 456 789", email: "<EMAIL>", github: "janedoe", homepage: "janedoe.com", location: "Doe City" ) ] #let body = [ #sectionTitle([Experience]) #cvEntry( [Chief Executive Officer], [Doe International], [Doe City], [Jan. 2050 -- Dec. 2099], [This is a standard entry, feel free to make bullet lists by prefixing your paragraphs with dashes: - You can also make text *stronger* or _oblique_ - Links are detected automatically https://www.wikipedia.org/ but you can also make #link("https://www.wikipedia.org/")[named links].] ) #cvEntry( [Lorem Ipsum], [Dolor sit amet, Consectetur], [Earth], [2000 -- 2049], [- Lorem ipsum dolor sit amet, *consectetur* adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna *aliqua*. - Ut enim ad minim *veniam*, quis nostrud *exercitation* ullamco laboris nisi ut *aliquip* ex ea #link("https://www.wikipedia.org/")[commodo] consequat. Duis aute irure dolor ] ) #cvEntry(short: true, [Short Entry], [Some Organisation], [Earth], [1950 -- 1999], [- Here all the info above appears on one compact line] ) #cvEntry( [Lorem Ipsum], [Dolor sit amet, Consectetur], [Earth], [2000 -- 2049], [- Lorem ipsum dolor sit amet, *consectetur* adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna *aliqua*.] ) #sectionTitle([Education]) #cvEntry(short: true, [Degree], [Bigbrain University], [Braintown], [1949], [Lorem ipsum dolor sit amet, *consectetur* incididunt ut labore et dolore magna *aliquip* ex ea #link("https://www.wikipedia.org/")[commodo] consequat, duis dolor.] 
) #cvEntry(short: true, [Degree], [Bigbrain University], [Braintown], [1949], [Lorem ipsum dolor sit amet, *consectetur* incididunt ut labore et dolore magna *aliquip* ex ea #link("https://www.wikipedia.org/")[commodo] consequat, duis dolor.] ) #cvEntry(short: true, [Another Degree], [Some Uni], [Some Place], [1948], [] // You can also leave the description empty ) #cvEntry(short: true, [Degree], [Bigbrain University], [Braintown], [1949], [Lorem ipsum dolor sit amet, *consectetur* incididunt ut labore et dolore magna *aliquip* ex ea #link("https://www.wikipedia.org/")[commodo] consequat, duis dolor.] ) ] #let sidebar = [ #sectionTitle([Skills]) #miscEntry( [Languages], [- *Klingon* : Native - *Sumerian* : Fluent - *English* : Basic proficiency] ) #miscEntry( [Programming Languages], [Brain\*\*\*k, Assembly, Scratch] ) #sectionTitle([Personal Projects]) #miscEntry( [Rocket Ship Building], [Just building a couple rocket ships in my backyard when I have free time] ) #miscEntry( [Nuclear Plant Building], [Just building a couple nuclear plant in my backyard when I have free time] ) #miscEntry( [Archaeological Space Exploration], [Going to see what ancient civilizations were up to in our star system] ) ] #layout( header: header, photo: "images/photo.png", body: body, sidebar: sidebar )
https://github.com/MrToWy/Bachelorarbeit
https://raw.githubusercontent.com/MrToWy/Bachelorarbeit/master/Diagrams/simple_ER.typ
typst
```pintora erDiagram User ||--o{ User_Translation : "translations" Module }o--|| DegreeProgram : "degreeProgram" Module }o--o{ SubModule : "subModules" Module ||--o{ Module_Translation : "translations" Module }o--|| ModuleGroup : "group" Module }o--o| User : "responsible" SubModule }o--|| DegreeProgram : "degreeProgram" SubModule }o--o| User : "responsible" SubModule ||--o{ SubModule_Translation : "translations" DegreeProgram ||--o{ DegreeProgram_Translation : "translations" DegreeProgram |o--o| User : "responsible" ModuleGroup ||--o{ ModuleGroup_Translation : "translations" ```
https://github.com/mem-courses/linear-algebra
https://raw.githubusercontent.com/mem-courses/linear-algebra/main/homework/linear-algebra-homework1.typ
typst
#import "../template.typ": * #show: project.with( title: "Linear Algebra Homework #1", authors: ( (name: "<NAME>", email: "<EMAIL>", phone: "3230104585"), ), date: "September 18, 2023", ) = P14 习题一 1(3) #prob[ 使用 Gauss 消元法解线性方程组: #let tmp = mem_equations( (5, -1, 1, -2, 3), (8, -1, 1, -1, 1), (7, 2, -2, 3, 0), (5, -3, 3, -6, 6), ) $ tmp $ ] 对原线性方程组实施初等变换: #let tmp1 = mem_equations( (5, -1, 1, -2, 3), (8, -1, 1, -1, 1), (7, 2, -2, 3, 0), (5, -3, 3, -6, 6), ) #let tmp2 = mem_equations( (5, -1, 1, -2, 3), (3, 0, 0, 1, -2), (17, 0, 0, -1, 6), (-10, 0, 0, 0, -3), ) $ tmp1 dxarrow(R_2 - R_1 \ R_3 + 2R_1 \ R_4 - 3R_1) tmp2 $ 可得 $x_1 = 0.3$. 由 $R_2$ 知 $x_4 = -2 - 3x_1 = -2.9$;由 $R_3$ 知 $x_4 = 17x_1 - 6 = -0.9$. 矛盾!故原线性方程组无解. = P14 习题一 2 #prob[ 证明:任意一个形如 $ cases( a_11 x_1 + a_12 x_2 + &dots.c + a_(1n) x_n &= 0, a_21 x_1 + a_22 x_2 + &dots.c + a_(2n) x_n &= 0, &dots.c, a_(m 1) x_1 + a_(m 2) x_2 + &dots.c + a_(m n) x_n &= 0, ) $ 的齐次线性方程组一定有解:它或有唯一解 $x_1 = 0, x_2 = 0, dots, x_n = 0$,或有无穷多解. ] 代入可知 $x_i$ 全为 0 一定是合法解,现只需证:如果存在一组非零解,则一定存在无穷多解. 若存在一组非零解 $x_1, x_2, dots, x_n$,则 $forall c in RR$,一定存在 ${x'_i = x_i dot c}_(i=1)^n$ 也是合法解.因为实数有无穷多个,所以此时有无穷多解. 
= P14 习题一 3(1) #prob[ 使用 Gauss 消元法解齐次线性方程组: #let tmp = mem_equations( (2, 2, -4, 3, 0), (2, 4, -11, 10, 0), (4, 2, -1, -8, 0), (1, 1, -3, 3, 0), ) $ tmp $ ] 对原线性方程组实施初等变换: #let t0 = mem_equations( (1, 1, -3, 3, 0), (2, 4, -11, 10, 0), (4, 2, -1, -8, 0), (2, 2, -4, 3, 0), ) #let t1 = mem_equations( (2, 2, -4, 3, 0), (2, 4, -11, 10, 0), (4, 2, -1, -8, 0), (1, 1, -3, 3, 0), ) #let t2 = mem_equations( (1, 1, -3, 3, 0), (0, 2, -5, 4, 0), (0, -2, 11, -20, 0), (0, 0, 2, -3, 0), ) #let t3 = mem_equations( (1, 1, -3, 3, 0), (0, 2, -5, 4, 0), (0, 0, 6, -16, 0), (0, 0, 6, -9, 0), ) #let t4 = mem_equations( (1, 1, -3, 3, 0), (0, 2, -5, 4, 0), (0, 0, 6, -16, 0), (0, 0, 0, 7, 0), ) $ t0 dxarrow(R_14) t1 \ dxarrow(R_2 - 2R_1 \ R_3 - 4R_1 \ R_4 - 2R_1) t2 dxarrow(R_3 + R_2 \ 3 R_4) t3 \ dxarrow(R_4 - R_3) t4 $ 可解得 $display(cases(x_1 = 0, x_2 = 0, x_3 = 0, x_4 = 0))$. = P14 习题一 6 #prob[ 设矩阵 $bold(A)$ 通过初等变换 $R_12$ 换成矩阵 $bold(B)$,请写出仅用行的倍加和倍乘变换将 $bold(A)$ 换成 $bold(B)$ 的过程. ] 设 $bold(A) = display(mat(bold(R_1); bold(R_2); bold(C)))$,则有 $bold(B) = display(mat(bold(R_2); bold(R_1); bold(C)))$. 对 $bold(A)$ 实施初等行变换得 $ bold(A) dxarrow(R_2+R_1) mat(bold(R_1); bold(R_1) + bold(R_2); bold(C)) dxarrow(-1 times R_1) mat(-bold(R_1); bold(R_1) + bold(R_2); bold(C)) dxarrow(R_1 + R_2) mat(bold(R_2); bold(R_1) + bold(R_2); bold(C)) dxarrow(R_2 - R_1) mat(bold(R_2); bold(R_1); bold(C)) = bold(B) $ = P15 习题一 9 · 3(1) #prob[ 使用 Gauss 消元法解线性方程组: #let tmp = mem_equations( (5, -1, 1, -2, 3), (8, -1, 1, -1, 1), (7, 2, -2, 3, 0), (5, -3, 3, -6, 6), ) $ tmp $ ] 对原线性方程组实施初等变换: #let tmp1 = mem_equations( (5, -1, 1, -2, 3), (8, -1, 1, -1, 1), (7, 2, -2, 3, 0), (5, -3, 3, -6, 6), ) #let tmp2 = mem_equations( (5, -1, 1, -2, 3), (3, 0, 0, 1, -2), (17, 0, 0, -1, 6), (-10, 0, 0, 0, -3), ) $ tmp1 dxarrow(R_2 - R_1 \ R_3 + 2R_1 \ R_4 - 3R_1) tmp2 $ 可得 $x_1 = 0.3$. 由 $R_2$ 知 $x_4 = -2 - 3x_1 = -2.9$;由 $R_3$ 知 $x_4 = 17x_1 - 6 = -0.9$. 矛盾!故原线性方程组无解. 
= P15 习题一 10(2) #prob[ 问线性方程组中的参数取何值时,线性方程组无解,有唯一解,有无穷多个解?当线性方程组有解时,求其(通)解: $ cases( lambda x_1 &+ x_2 &+ x_3 &+ x_4 &= 1, x_1 &+ lambda x_2 &+ x_3 &+ x_4 &= lambda, x_1 &+ x_2 &+ lambda x_3 &+ lambda x_4 &= lambda^2, ) $ ] 对原线性方程组的系数矩阵的增广矩阵实施初等行变换得 $ overline(bold(A)) = mat( lambda, 1, 1, 1, 1; 1, lambda, 1, 1, lambda; 1, 1, lambda, lambda, lambda^2; ) dxarrow(lambda R_2 \ lambda R_3) mat( 𝜆, 1, 1, 1, 1; 𝜆, 𝜆^2, 𝜆, 𝜆, 𝜆^2; 𝜆, 𝜆, 𝜆^2, 𝜆^2, 𝜆^3; ) dxarrow(R_2 - R_1 \ R_3 - R_1) mat( 𝜆, 1, 1, 1, 1; 0, 𝜆^2-1, 𝜆-1, 𝜆-1, 𝜆^2-1; 0, 𝜆-1, 𝜆^2-1, 𝜆^2-1, 𝜆^3-1; ) \ dxarrow((𝜆+1) R_3) mat( 𝜆, 1, 1, 1, 1; 0, 𝜆^2-1, 𝜆-1, 𝜆-1, 𝜆^2-1; 0, 𝜆^2-1, 𝜆^3+𝜆^2-𝜆-1, 𝜆^3+𝜆^2-𝜆-1, 𝜆^4+𝜆^3-𝜆-1; ) \ dxarrow(R_3 - R_2) mat( 𝜆, 1, 1, 1, 1; 0, 𝜆^2-1, 𝜆-1, 𝜆-1, 𝜆^2-1; 0, 0, 𝜆^3+𝜆^2-2𝜆, 𝜆^3+𝜆^2-2𝜆, 𝜆^4+𝜆^3-𝜆^2-𝜆; ) $ 当 $lambda=0$ 时,有 $display(cases( x_2 + x_3 + x_4 = 1, -x_2 - x_3 - x_4 = -1, x_3 + x_4 = 0 ))$ 故原线性方程组有通解 $ display(cases( x_1 = t_1, x_2 = 1, x_3 = t_2, x_4 = -t_2, )) quad quad t_1,t_2 in RR $ 当 $lambda=1$ 时,有 $display(cases( x_1 + x_2 + x_3 + x_4 = 1, ))$,故原线性方程组有通解 $ display(cases( x_1 = t_1, x_2 = t_2, x_3 = t_3, x_4 = 1 - t_1 - t_2 - t_3, )) quad quad t_1,t_2,t_3 in RR $ 当 $lambda=-1$ 时,有 $display(cases( -x_1 + x_2 + x_3 + x_4 = 1, x_3 + x_4 = 0, ))$,故原线性方程组有通解 $ display(cases( x_1 = t_1 - 1, x_2 = t_1, x_3 = t_2, x_4 = -t_2, )) quad quad t_1, t_2 in RR $ 当 $lambda = -2$ 时,有 $display(cases( -2x_1 + x_2 + x_3 + x_4 = 0, x_2 - x_3 - x_4 = 1, 0 = 6, ))$,故此时原线性方程组无解. 
否则,当 $lambda in.not {-2,-1,0,1}$ 时,原线性方程组有通解 $ cases( x_1 = display(-(lambda+1) / (lambda+2)), x_2 = display(1 / (lambda+2)), x_3 = t, x_4 = display((lambda+1)^2 / (lambda+2)) - t ) quad quad t in RR $ = P15 习题一 10(4) #prob[ 问线性方程组中的参数取何值时,线性方程组无解,有唯一解,有无穷多个解?当线性方程组有解时,求其(通)解: $ cases( x_1 + 2x_2 + 4x_3 + 2x_4 &= 2, 3x_1 + 5x_2 + 10x_3 + 7x_4 &= 5, x_1 - 3x_2 - a x_3 + 9x_4 &= 1, 2x_1 - 3x_2 - 6x_3 + 14x_4 &= b, ) $ ] 对原线性方程组的系数矩阵的增广矩阵实施初等行变换得 $ overline(bold(A)) = mat( 1,2,4,2,2; 3,5,10,7,5; 1,-3,-a,9,1; 2,-3,-6,14,b; ) dxarrow(R_2 - 3R_1 \ R_3 - R_1 \ R_4 - 2R_1) mat( 1,2,4,2,2; 0,-1,-2,1,-1; 0,-5,-a-4,7,-1; 0,-7,-14,10,b-4; ) dxarrow(-R_2 \ -R_3 \ -R_4) \ mat( 1,2,4,2,2; 0,1,2,-1,1; 0,5,a+4,-7,1; 0,7,14,-10,-b+4; ) dxarrow(R_3-5R_1 \ R_4-7R_1) mat( 1,2,4,2,2; 0,1,2,-1,1; 0,0,a-6,-2,-4; 0,0,0,-3,-b-3; ) $ 当 $a=6$ 时: - 若 $x_4 != 2$ 即 $b!=3$,则原方程无解; - 若 $x_4 = 2$ 即 $b=3$,则原方程有无数解: $ cases( x_1 = -8, x_2 = 3-2t, x_3 = t, x_4 = 2, )quad quad t in RR $ 当 $a!=6$ 时,原方程有唯一解: $ cases( x_1 = display(-(4a b+12a-24b-72)/(3a-18)), x_2 = display((a b+6a-10b-24)/(3a-18)), x_3 = display((2b-6)/(3a-18)), x_4 = display((b+3)/3), ) $ = P15 习题一 11 #prob[ 设线性方程组 $ cases( x_1 &+ x_2 &+ x_3 &= 0, x_1 &+ 2x_2 &+ a x_3 &= 0, x_1 &+ 4x_2 &+ a^2 x_3 &= 0, ) $ 与方程 $ x_1 + 2x_2 + x_3 = a - 1 $ 有公共解,求 $a$ 的值和所有公共解. 
] 对其系数矩阵的增广矩阵应用初等变换得 $ overline(bold(A)) = mat( 1, 1, 1, 0; 1, 2, a, 0; 1, 4, a^2, 0; 1, 2, 1, a-1; ) dxarrow(R_2-R_1 \ R_3-R_1 \ R_4-R_1) mat( 1, 1, 1, 0; 0, 1, a-1, 0; 0, 3, a^2-1, 0; 0, 1, 0, a-1; ) dxarrow(R_3-3R_2 \ R_4-R_1) \ mat( 1, 1, 1, 0; 0, 1, a-1, 0; 0, 0, a^2-3a+2, 0; 0, 0, -(a-1), a-1; ) $ 若 $a=1$,代入得: $ overline(bold(A)) arrow.r mat( 1,1,1,0; 0,1,0,0; 0,0,0,0; 0,0,0,0; ) $ 此时有公共解: $ cases(x_1 = -t, x_2 = 0, x_3 = t) quad quad t in RR $ 若 $a != 1$,则需满足 $x_3 = -1$ 且 $(a^2-3a+2) x_3 = 0$,可知 $a=2$.此时有公共解: $ cases(x_1 = 0, x_2 = 1, x_3 = -1) $ 综上所述,$a$ 的值为 $1$ 或 $2$,公共解为 $ &cases(x_1 = -t, x_2 = 0, x_3 = t) quad quad t in RR quad quad &(a=1)\ &cases(x_1 = 0, x_2 = 1, x_3 = -1) quad quad &(a=2) $ = P15 习题一 12 #prob[ 证明线性方程组 $ cases( x_1 &- x_2 &= a_1, x_2 &- x_3 &= a_2, x_3 &- x_4 &= a_3, x_4 &- x_5 &= a_4, x_5 &- x_1 &= a_5, ) $ 有解的充分必要条件是 $a_1+a_2+a_3+a_4+a_5 = 0$,并在有解时求其(通)解. ] 必要性:令 $R_1+R_2+R_3+R_4+R_5$ 得: $ 0 = a_1 + a_2 + a_3 + a_4 + a_5 $ 充分性:对原线性方程组实施初等变换得 $ cases( x_1 &- x_2 &= a_1, x_1 &- x_3 &= a_1 + a_2, x_1 &- x_4 &= a_1 + a_2 + a_3, x_1 &- x_5 &= a_1 + a_2 + a_3 + a_4, ) $ 故原线性方程组的通解为 $ cases( x_1 &= t, x_2 &= t - a_1, x_3 &= t - a_1 - a_2, x_4 &= t - a_1 - a_2 - a_3, x_5 &= t - a_1 - a_2 - a_3 - a_4, ) quad quad t in RR $ 可以说明充分性. = P16 习题一 13 #prob[ 判断下列论断是否成立.若成立,请给出证明;若不成立,请给出一个反例: (1) 方程个数小于未知量个数的齐次线性方程组必有非零解; (2) 方程个数小于未知量个数的线性方程组必有无穷多个解. ] (1) 成立.通过重排标号可使得应用 Gauss 后线性方程组形如 $ cases( x_1 + a_(1,2)x_2 + a_(1,3)x_3 + dots.c + a_(1,m)x_m &= b_1, x_2 + a_(2,3)x_3 + a_(2,4)x_4 + dots.c + a_(2,m)x_m &= b_2, &dots.c, x_r + a_() ) $ 其中 $r<=m<n$.那么必存在 $i in (r,n] sect ZZ$.令 $x_i$ 取非零常数并代入.得到新的线性方程组有 $r=n$.由定理可知此时必有唯一解. 且由于 $exists r<i<=n "st." x_i != 0$,这组解是非零解.即原线性方程组必有非零解. (2) 成立.同 (1) 理转化后可知随意调整 $r<i<=n$ 的 $x_i$ 后一定有唯一解.由于 $x_n in RR$,实数域有无穷多元素,故原线性方程组有无穷多解. 
= P16 补充题一 1 #prob[ 设空间中三张平面的方程为 $ pi_1 &:space 2x_1 + a x_2 + b x_3 = 0,\ pi_2 &:space 2x_1 + x_2 + x_3 = 0,\ pi_3 &:space x_1 + a x_2 - b x_3 = -1.\ $ 若该三张平面有一公共点 $(-1,1,1)$,试求 $a$ 和 $b$ 的值以及这三张平面的所有公共点. ] 代入 $display(mat(x_1;x_2;x_3) = mat(-1;1;1))$ 得: $ cases( -2 + a + b = 0, -2 + 1 + 1 = 0, -1 + a - b = -1, ) $ 解得 $display(cases(a = 1, b = 1))$.代入方程组得 $ cases( 2x_1 + x_2 + x_3 = 0, 2x_1 + x_2 + x_3 = 0, x_1 + x_2 - x_3 = -1, ) $ 对其应用 Gauss 消元可知原线性方程组同解于 $ cases( x_1 + x_2 - x_3 = -1, x_2 - 3 x_3 = -2, ) $ 故原线性方程组的通解为 $ cases( x_1 = -2t + 1, x_2 = 3t - 2, x_3 = t, ) quad quad t in RR $ 可知三张平面的所有公共点为 ${(-2t + 1, 3t - 2, t) | t in RR}$. = P16 补充题一 3 #prob[ 解齐次线性方程组 $ cas( quad &+ x_2 &+ x_3 &+ &dots.c &+ x_(n-1) &+ x_n &= 0, x_1 &+ quad &+ x_3 &+ &dots.c &+ x_(n-1) &+ x_n &= 0, x_1 &+ x_2 &+ quad &+ &dots.c &+ x_(n-1) &+ x_n &= 0, &&&&&&&dots.c, x_1 &+ x_2 &+ quad &+ &dots.c &+ x_(n-1) &+ quad &= 0, ) $ 其中 $n>1$ ] 对原线性方程组实施初等变换得 $ dxarrow(display(R_(i mod n + 1) - R_i "," i in [1,n] sect ZZ)) cases( x_1 - x_2 = 0, x_2 - x_3 = 0, dots.c, x_(n-1) - x_n = 0, x_n - x_1 = 0, ) $ 故 $x_1 = x_2 = dots.c = x_n$,则有 $(n-1) x_1 = 0$. 由于 $n>1$,只能有 $x_1 = 0$,故原线性方程组的解为 $x_1 = x_2 = dots.c = x_n = 0$.
https://github.com/VisualFP/docs
https://raw.githubusercontent.com/VisualFP/docs/main/SA/project_documentation/content/risk_management.typ
typst
#import "@preview/tablex:0.0.5": tablex, cellx #import "../../acronyms.typ": ac #let lightgreen = rgb(119, 221, 119) #let lightyellow = rgb(255, 250, 160) #let lightred = rgb(250, 160, 160) = Risk Management The following section describes the risks we identified for our project and actions to prevent or correct them. Risks are color-coded according to @risk_matrix. == Initial Risk Assessment In @initial_risk_assessment all risks identified at the start of the project are documented. #let risk_head(name) = cellx(align: center + horizon)[*#name*] #let initial_risk(id, level, risk: [-], prevention: [-], correction: [-]) = ( cellx(fill: level, id), cellx()[*#risk*], cellx(prevention), cellx(correction), ) #show figure: set block(breakable: true) #figure( tablex( columns: (auto, 1.3fr, 2fr, 2fr), risk_head[ID], risk_head[Risk], risk_head[Preventive actions], risk_head[Corrective actions], ..initial_risk([001], lightgreen, risk: [Difficulties with Typst], correction: [Switch to LaTeX in case typst lacks necessary features]), ..initial_risk([002], lightyellow, risk: [Absence of team member], correction: [Trying to catch up, adjust project scope]), ..initial_risk([003], lightyellow, risk: [Too little time for prototype], prevention: [Include buffer in project plan, limit prototype to most important features], correction: [Focus on functionality instead of UI design, cut features if necessary/possible]), ..initial_risk([004], lightyellow, risk: [Block Model cannot be simplified for PoC], correction: [Change block model where appropriate to make it easier to implement]), ..initial_risk([005], lightred, risk: [UI design is too difficult for certain features], prevention: [Limit amount of features included in design to minimize time impact], correction: [Try to simplify feature, remove feature from project scope]), ..initial_risk([006], lightred, risk: [Translation between blocks and Haskell too difficult for PoC], correction: [Implement alternative execution model]), ), 
kind: "table", supplement: "Table", caption: "Initial Risk Assessment" )<initial_risk_assessment> == Risk Matrix This risk matrix describes the color-coding used above using "probability" and "severity". #figure( tablex( columns: (1fr, auto, auto, auto, auto, auto), align: center + horizon, risk_head()[Probability / Severity], risk_head()[1 Unlikely], risk_head()[2 Seldom], risk_head()[3 Occasional], risk_head()[4 Probable], risk_head()[5 Frequent], risk_head()[4 Catastrophic], cellx(fill: lightyellow)[], cellx(fill: lightred)[], cellx(fill: lightred)[], cellx(fill: lightred)[], cellx(fill: lightred)[], risk_head()[3 Critical], cellx(fill: lightgreen)[], cellx(fill: lightyellow)[], cellx(fill: lightyellow)[], cellx(fill: lightred)[], cellx(fill: lightred)[], risk_head()[2 Major], cellx(fill: lightgreen)[], cellx(fill: lightgreen)[], cellx(fill: lightyellow)[], cellx(fill: lightyellow)[], cellx(fill: lightred)[], risk_head()[1 Minor], cellx(fill: lightgreen)[], cellx(fill: lightgreen)[], cellx(fill: lightgreen)[], cellx(fill: lightyellow)[], cellx(fill: lightyellow)[] ), supplement: "Table", kind: "table", caption: "Risk matrix" )<risk_matrix> == Risk Assessment Retrospection @risk_assessment_retrospection lists all previously identified risks, their initial assessments and adds a retrospection assessment to it. #let retrospective_risk(id, risk: [-], initial: red, quantitative: red, qualitative: [-]) = ( cellx(id), cellx()[*#risk*], cellx(fill: initial, []), cellx(fill: quantitative, qualitative), ) #figure( tablex( columns: (auto, 1fr, 0.4fr, 2fr), risk_head[ID], risk_head[Risk], risk_head[Initial], risk_head[Retrospection], ..retrospective_risk([001], risk: [Difficulties with Typst], initial: lightgreen, quantitative: lightyellow, qualitative: [ Most use cases could be implemented with little effort, but some were not supported. \ For example, there is no Typst equivalent to the 'part' system of the popular LaTeX article document class. 
Although possible, rebuilding such features is time-consuming. ]), ..retrospective_risk([002], risk: [Absence of team member], initial: lightyellow, quantitative: lightgreen, qualitative: [No team member was absent for the duration of the project.]), ..retrospective_risk([003], risk: [Too little time for prototype], initial: lightyellow, quantitative: lightgreen, qualitative: [ The concept took a considerable amount of time from the project. Implementing the PoC has indeed been a challenge but progressed surprisingly fast.]), ..retrospective_risk([004], risk: [Block Model cannot be simplified for PoC], initial: lightyellow, quantitative: lightgreen, qualitative: [ We've found a concept that satisfied the requirements and was proven by a simple PoC. ]), ..retrospective_risk([005], risk: [UI design is too difficult for certain features], initial: lightred, quantitative: lightyellow, qualitative: [ Although we could realize everything planned for the PoC, Threepenny did not allow for a transition to #ac("FRP") because of a lack of features. ]), ..retrospective_risk([006], risk: [Translation between blocks and Haskell too difficult for PoC], initial: lightred, quantitative: white, qualitative: [ The PoC does not offer a two-way translation between the block-language and Haskell, as it was deemed out of scope during the project.]), ), kind: "table", supplement: "Table", caption: "Risk Assessment Retrospection" )<risk_assessment_retrospection>
https://github.com/jgm/typst-hs
https://raw.githubusercontent.com/jgm/typst-hs/main/test/typ/text/lang-with-region-02.typ
typst
Other
// with region configured #set text(lang: "zh", region: "TW") #outline()
https://github.com/Tweoss/math_scratch
https://raw.githubusercontent.com/Tweoss/math_scratch/main/template.typ
typst
#let script-size = 7.97224pt #let footnote-size = 8.50012pt #let small-size = 9.24994pt #let normal-size = 10.00002pt #let large-size = 11.74988pt // This function gets your whole document as its `body` and formats #let homework( class: none, // The article's title. number: 0, // An array of authors. For each author you can specify a name, // department, organization, location, and email. Everything but // but the name is optional. name: "<NAME>", // The article's paper size. Also affects the margins. paper-size: "us-letter", // the current day date: none, // The document's content. body, ) = { let title = class + " Homework " + str(number) if date == none { date = strong(text(red, "NO DATE")) } // Set document metdata. set document(title: title, author: name) // Set the body font. AMS uses the LaTeX font. set text(size: normal-size, font: "New Computer Modern") // Configure the page. set page( paper: paper-size, // The margins depend on the paper size. margin: if paper-size != "a4-paper" { ( top: (116pt / 279mm) * 100%, left: (126pt / 216mm) * 100%, right: (128pt / 216mm) * 100%, bottom: (94pt / 279mm) * 100%, ) } else { ( top: 117pt, left: 118pt, right: 119pt, bottom: 96pt, ) }, // The page header should show the page number and list of // authors, except on the first page. The page number is on // the left for even pages and on the right for odd pages. header-ascent: 14pt, header: locate(loc => { let i = counter(page).at(loc).first() if i == 1 { return } set text(size: script-size) grid( columns: (6em, 1fr, 6em), if calc.even(i) [#i], align(center, upper( if calc.odd(i) { title } else { name } )), if calc.odd(i) { align(right)[#i] } ) }), // On the first page, the footer should contain the page number. footer-descent: 12pt, footer: locate(loc => { let i = counter(page).at(loc).first() if i == 1 { align(center, text(size: script-size, [#i])) } }) ) // Configure lists and links. 
set list(indent: 24pt, body-indent: 5pt) set enum(indent: 24pt, body-indent: 5pt) show link: set text(font: "New Computer Modern Mono") // Configure equations. show math.equation: set block(below: 8pt, above: 9pt) show math.equation: set text(weight: 400) show figure: it => { show: pad.with(x: 23pt) set align(center) v(12.5pt, weak: true) // Display the figure's body. it.body // Display the figure's caption. if it.has("caption") { // Gap defaults to 17pt. v(if it.has("gap") { it.gap } else { 17pt }, weak: true) smallcaps[Figure] if it.numbering != none { [ #counter(figure).display(it.numbering)] } [. ] it.caption } v(15pt, weak: true) } // Display the title and author. v(35pt, weak: true) align(center, { upper(text(size: large-size, weight: 700, title)) v(15pt, weak: true) text(size: normal-size, weight: 500, name) v(10pt, weak: true) text(size: small-size, date) }) // Configure paragraph properties. set par(first-line-indent: 1.2em, justify: true, leading: 0.58em) show par: set block(spacing: 0.58em) // Display the article's contents. 
v(29pt, weak: true) body } #let problem_counter = counter("problem") #let problem(body, number: none, format: "a.a)") = locate(location => { problem_counter.step() if number != none { problem_counter.update(number + 1) } show: block.with(spacing: 11.5pt) // Problem # { set text(size: 1.4em, weight: 800) v(11pt, weak: true) [Problem #problem_counter.display()] } { // body inside a block set enum(numbering: format, indent: 0pt) block(width: 100%, fill: rgb("#cccccc"), stroke: black, inset: 10pt, radius: 2pt)[#body] } }) #let solution(body, level: 0, format: "1.a") = locate(location => { // Only display Solution # for more nested solutions if level != 0 { problem_counter.step(level: level + 1) // let top_number = problem_counter.at(location).first() // Heading { set text(size: 1.2em, weight: 800) set par(first-line-indent: 0pt) emph([Solution #problem_counter.display((..nums) => numbering(format, ..nums))]) } linebreak() } [#body] // should go to next page for a one part solution that is not the last one if level == 0 { if problem_counter.at(location) != problem_counter.final(location) { pagebreak(weak: true) } } else { v(normal-size) } }) #let sq(a) = [$#a^2$]
https://github.com/crystalsolenoid/typst-resume-template
https://raw.githubusercontent.com/crystalsolenoid/typst-resume-template/main/README.md
markdown
# A Typst Resume Template A [Typst](https://typst.app/) resume template for anyone who wants to use or modify it. I got started with Typst a couple of days ago and threw this together based on my old LaTeX resume. ## Usage Define individual items in `items.typ`, add these items to the section lists in `sections.typ`, and customize the order of the sections in `main.typ`. If you open this in the [Typst web editor](https://typst.app/) and double-click on any of the text in the preview output, it will jump you to the point in the code to edit it. I recommend making variables at the top of `items.typ` for things like schools and cities, if any of those repeat often in your resume data. ## Limitations The positioning of the (optional) links in the projects section is a little weird. If your project titles are too long, you'll have to adjust the position manually. Suggestions welcome. ## Other Information SVG's are from the free pack at [Font Awesome](https://fontawesome.com/download). I only included the ones I used.
https://github.com/avonmoll/bamdone-rebuttal
https://raw.githubusercontent.com/avonmoll/bamdone-rebuttal/main/template/main.typ
typst
MIT No Attribution
#import "@preview/bamdone-rebuttal:0.1.0": * // Configure text colors for points, responses, and new text #let (point, response, new) = configure( point-color: blue.darken(30%), response-color: black, new-color: green.darken(30%) ) // Setup the rebuttal #show: rebuttal.with( authors: [First A. Author and Second B. Author], // date: , // paper-size: , ) We thank the reviewers... #lorem(60) We hope it is now suitable for inclusion in... #reviewer() This reviewers' feedback was... #point[ There appears to be an error... ]<p1> #response[ #lorem(20). The revised text now reads: #quote[ #lorem(10) #new[#lorem(2)]. ] ] #point[ #lorem(10). ] #response[ See response to @pt-p1. Similar to the `i-figured` package, references to labeled `point`s must be prefixed by `pt-` as in `@pt-p1` which refers to the `point` labeled `<p1>`. ] #reviewer() We generally agree with this reviewer... #point[ Have you considered... ] #response[ We will address this in a future work... ]
https://github.com/satshi/typst-jp-template
https://raw.githubusercontent.com/satshi/typst-jp-template/main/template.typ
typst
#let jarticle( fontsize: 11pt, title: none, authors: (), abstract: [], date: none, doc, ) = { let roman = "STIX Two Text" let mincho = "<NAME>" let kakugothic = "<NAME>" let math_font = "STIX Two Math" set text(lang:"ja", font: (roman,mincho), fontsize) // Use A4 paper set page( paper: "a4", margin: auto, ) set par(justify: true) // 行間の調整 set par( leading: 1.2em, justify: true, first-line-indent: 1.1em, ) show par: set block(spacing: 1.2em) show heading: set block(above: 1.6em, below: 0.6em) set heading(numbering: "1.1 ") // 見出しの下の段落を字下げするため show heading: it =>{ it par(text(size: 0pt, "")) } // 様々な場所でのフォント show heading: set text(font: kakugothic) show strong: set text(font: kakugothic) show emph: set text(font: (roman, kakugothic)) show math.equation: set text(font: (math_font,roman,mincho)) // 数式番号 set math.equation(numbering: "(1)") show ref: it => { let eq = math.equation let el = it.element if el != none and el.func() == eq { // Override equation references. link( el.label, numbering( el.numbering, ..counter(eq).at(el.location()) ) ) } else { // Other references as usual. it } } // 目次 show outline.entry.where( level: 1 ): it => { v(1.2*fontsize, weak: true) it } set outline(indent: auto) // 図のキャプション set figure(gap: 1.6em) show figure.caption: it => [ #block(width: 90%, [#it]) #v(1em) ] show figure.caption: set text(font: kakugothic, 0.9*fontsize) show figure.caption: set align(left) // タイトル { set align(center) text(1.5*fontsize, font:kakugothic, strong(title)) par(for a in authors {a}) par(date) if abstract != [] { par(text(0.9*fontsize,[ *概要* #block(width: 90%)[#align(left, abstract)] ])) } } doc } #let appendix(app) = [ #counter(heading).update(0) #set heading(numbering: "A.1 ") #app ] #let 年月日 = "[year]年[month repr:numerical padding:none]月[day padding:none]日" #let 年月 = "[year]年[month repr:numerical padding:none]月"
https://github.com/Mc-Zen/quill
https://raw.githubusercontent.com/Mc-Zen/quill/main/src/quill.typ
typst
MIT License
#import "utility.typ" #import "decorations.typ": lstick, rstick, midstick, nwire, annotate, slice, setwire, gategroup #import "gates.typ": gate, mqgate, ctrl, swap, targ, meter, phantom, permute, phase, targX, draw-functions #import "quantum-circuit.typ": quantum-circuit #import "tequila.typ" #let help(..args) = { import "@preview/tidy:0.3.0" let namespace = ( ".": ( read.with("/src/quantum-circuit.typ"), read.with("/src/gates.typ"), read.with("/src/decorations.typ"), ), "gates": read.with("/src/gates.typ"), ) tidy.generate-help(namespace: namespace, package-name: "quill")(..args) }
https://github.com/grnin/Zusammenfassungen
https://raw.githubusercontent.com/grnin/Zusammenfassungen/main/template_zusammenf.typ
typst
// Template Zusammenfassung // (C) 2024, <NAME>, <NAME> #import "helpers.typ": * // Global variables #let colors = ( hellblau: rgb("#29769E"), dunkelblau: rgb("#1a4e69"), grün: rgb("#8B9654"), hellgrün: rgb("#BFBC8A"), gelb: rgb("#F2C12E"), rot: rgb("#A6460F"), orange: rgb("#D98825"), comment: rgb("#2D9428"), ) #let languages = ( de: (page: "Seite"), en: (page: "Page") ) #let dateformat = "[day].[month].[year]" // Main template #let project( authors: (), fach: "", fach-long: "", semester: "", date: datetime.today(), landscape: false, column-count: 1, tableofcontents: (enabled: false, depth: "", columns: ""), // (depth: none, columns: 1) language: "de", font-size: 11pt, body, ) = { // == Document Configuration == // PDF Metadata set document( author: authors, title: fach + " Zusammenfassung " + semester, date: date, ) let font-default = (font: "Calibri", lang: language, region: "ch", size: font-size) let font-special = ( ..font-default, font: "JetBrains Mono", weight: "bold", fill: colors.hellblau, ) let footer = [ #set text(font: font-special.font, size: 0.9em) #fach | #semester | #authors.join(" & ") #h(1fr) #languages.at(language).page #counter(page).display() ] set page( flipped: landscape, columns: column-count, footer: footer, margin: if (column-count < 2) { (top: 2cm, left: 1.5cm, right: 1.5cm, bottom: 2cm) } else { 0.5cm } ) set columns(column-count, gutter: 2em) // Default document font set text(..font-default) // Style built-in functions // Headings formatting set heading(numbering: "1.1.") show heading: hd => block({ if hd.numbering != none and hd.level <= 3 { context counter(heading).display() h(1.3em) } hd.body }) show heading.where(level: 1): h => { set text(..font-special, top-edge: 0.18em) line(length: 100%, stroke: 0.18em + colors.hellblau) upper(h) v(0.45em) } show heading.where(level: 2): h => { set text(size: 0.9em) upper(h) } // Remove space above H4, fixes spacing between H3 & H4 show heading.where(level: 4): h => { v(-0.4em) h } // Table 
formatting set table( stroke: (x, y) => (left: if x > 0 { 0.07em }, top: if y > 0 { 0.07em }), inset: 0.5em, ) // Recommended workaround in Typst 0.11 until table.header is styleable show table.cell.where(y: 0): emph // Unordered list, use with "- " or #list[] show list: set list(marker: "-", body-indent: 0.45em) // "Important" template, use with "_text_" or #emph[] show emph: set text(fill: font-special.fill, weight: font-special.weight) // Code, use with ```python print("Hello World")``` show raw: set text(font: font-special.font, size: 1em) // Quotes set quote(block: true, quotes: true) show quote: q => { set align(left) set text(style: "italic") q } // Table of contents, header level 1 show outline.entry.where(level: 1): entry => { v(1.1em, weak: true) strong(entry) } // Title page configuration let subtitle(subt) = [ #set text(..font-special, size: 1.2em) #pad(bottom: 1.3em, subt) ] // == Page Content == // title row align(left)[ #text(..font-special, size: 1.8em, fach-long + " | " + fach) #v(1em, weak: true) #subtitle[Zusammenfassung] ] if (tableofcontents.enabled) { columns(tableofcontents.at("columns", default: 1), outline(depth: tableofcontents.at("depth", default: none))) pagebreak() } // Main body set par(justify: true) body } // Additional formatting templates // "Zusätzlicher Hinweis"-Vorlage #let hinweis(t) = { set text(style: "italic", size: 0.8em) show raw: set text(font: "JetBrains Mono", size: 1.05em) t } // "Definition"-Vorlage #let definition(t) = { rect(stroke: 0.13em + colors.hellblau, inset: 0.73em, columns(1, t)) } // Kommentar #let comment(t) = { set text(style: "italic", weight: "bold", fill: colors.comment) t } // Text added by Jannis #let jannis(t) = { set text(weight: "bold", fill: colors.orange) t } // Set a text color from the color dict for a math formula #let fxcolor(subcolor, x) = { text(fill: colors.at(subcolor), $bold(#x)$) } #let tcolor(subcolor, x) = { text(fill: colors.at(subcolor), style: "italic", strong(x)) }
https://github.com/HellOwhatAs/sfbooks2typst
https://raw.githubusercontent.com/HellOwhatAs/sfbooks2typst/raw/template.typ
typst
#let project(title: "", abstract: none, authors: (), logo: none, body) = { // Set the document's basic properties. set document(author: authors, title: title) set page(paper: "a5") set text(font: "Noto Serif CJK SC", lang: "zh") // set heading(numbering: "1.") show heading.where(level: 1): it => [ #pagebreak(weak: true) #align(center, pad(y: 1.5em, it)) ] show heading.where(level: 2): it => pad(y: 1.5em, it) show heading: it => { if it.level > 2 { parbreak() fake-italic(it.body) } else { it } } // Title page. // The page can contain a logo if you pass one with `logo: "logo.png"`. v(0.6fr) if logo != none { align(right, image(logo, width: 26%)) } v(9.6fr) text(2em, weight: 700, title) // Author information. pad(top: 0.7em, right: 20%, grid( columns: (1fr,) * calc.min(3, authors.len()), gutter: 1em, ..authors.map(author => align(start, strong(author))), )) v(2.4fr) pagebreak() // Abstract page. if abstract != none { v(1fr) align(center)[ #abstract ] v(1.618fr) pagebreak() } // Table of contents. outline(depth: 2, indent: 1.5em) pagebreak() // Main body. set par(justify: true) set page(numbering: "1", number-align: center) counter(page).update(1) body }
https://github.com/typst/packages
https://raw.githubusercontent.com/typst/packages/main/packages/preview/board-n-pieces/0.2.0/README.md
markdown
Apache License 2.0
# Board & Pieces Display chessboards in Typst. ## Displaying chessboards The main function of this package is `board`. It lets you display a specific position on a board. ```typ #board(starting-position) ``` ![image](examples/example-1.svg) `starting-position` is a position that is provided by the package. It represents the initial position of a chess game. You can create a different position using the `position` function. It accepts strings representing each rank. Use upper-case letters for white pieces, and lower-case letters for black pieces. Dots and spaces correspond to empty squares. ```typ #board(position( "....r...", "........", "..p..PPk", ".p.r....", "pP..p.R.", "P.B.....", "..P..K..", "........", )) ``` ![image](examples/example-2.svg) Alternatively, you can use the `fen` function to create a position using [Forsyth–Edwards Notation](https://en.wikipedia.org/wiki/Forsyth%E2%80%93Edwards_Notation): ```typ #board(fen("r1bk3r/p2pBpNp/n4n2/1p1NP2P/6P1/3P4/P1P1K3/q5b1 b - - 1 23")) ``` ![image](examples/example-3.svg) Note that you can also specify only the first part of the FEN string: ```typ #board(fen("r4rk1/pp2Bpbp/1qp3p1/8/2BP2b1/Q1n2N2/P4PPP/3RK2R")) ``` ![image](examples/example-4.svg) ## Using the `game` function The `game` function creates an array of positions from a full chess game. A game is described by a series of turns written using [standard algebraic notation](https://en.wikipedia.org/wiki/Algebraic_notation_(chess)). Those turns can be specified as an array of strings, or as a single string containing whitespace-separated moves. ```typ The scholar's mate: #let positions = game("e4 e5 Qh5 Nc6 Bc4 Nf6 Qxf7") #grid( columns: (auto, ) * 4, gutter: 0.2cm, ..positions.map(board.with(square-size: 0.5cm)), ) ``` ![image](examples/example-5.svg) You can specify an alternative starting position to the `game` function with the `starting-position` named argument. 
## Using the `pgn` function to import PGN files Similarly to the `game` function, the `pgn` function creates an array of positions. It accepts a single argument, which is a string containing [portable game notation](https://en.wikipedia.org/wiki/Portable_Game_Notation). To read a game from a PGN file, you can use this function in combination with Typst's native [`read`](https://typst.app/docs/reference/data-loading/read/) function. ```typ #let positions = pgn(read("game.pgn")) ``` Note that the argument to `pgn` must describe a single game. If you have a PGN file containing multiple games, you will need to split them using other means. ## Customizing a chessboard The `board` function lets you customize the appearance of the board with multiple arguments. They are described below. - `highlighted-squares` is a list of squares to highlight (e.g. `("d3", "d2", "e3")`). It can also be specified as a single string containing whitespace-separated squares (e.g. `"d3 d2 e3"`). - `reverse` is a boolean indicating whether to reverse the board, displaying it from black's point of view. This is `false` by default, meaning the board is displayed from white's point of view. - `display-numbers` is a boolean indicating whether ranks and files should be numbered. This is `false` by default. - `rank-numbering` and `file-numbering` are functions describing how ranks and files should be numbered. By default they are respectively `numbering.with("1")` and `numbering.with("a")`. - `square-size` is a length describing the size of each square. By default, this is `1cm`. - `white-square-color` and `black-square-color` correspond to the background color of squares. - `highlighted-white-square-color` and `highlighted-black-square-color` correspond to the background color of highlighted squares. - `pieces` is a dictionary containing images representing each piece. If specified, the dictionary must contain an entry for every piece kind in the displayed position. 
Keys are single upper-case letters for white pieces and single lower-case letters for black pieces. The default images are taken from [Wikimedia Commons](https://commons.wikimedia.org/wiki/Category:SVG_chess_pieces). Please refer to [the section on licensing](#licensing) for information on how you can use them in your documents. ## Chess symbols This package also exports chess-related symbols under `chess-sym.{pawn,knight,bishop,rook,queen,king}.{filled,stroked,white,black}`. `filled` and `black` variants are equivalent, and `stroked` and `white` as well. ## Licensing The default images for chess pieces used by the `board` function come from [Wikimedia Commons](https://commons.wikimedia.org/wiki/Category:SVG_chess_pieces). They are all licensed the [GNU General Public License, version 2](https://www.gnu.org/licenses/old-licenses/gpl-2.0.html) by their original author: [Cburnett](https://en.wikipedia.org/wiki/User:Cburnett). ## Changelog ### Version 0.2.0 - Allow using dashes for empty squares in `position` function. - Allow passing highlighted squares as a single string of whitespace-separated squares. - Describe entire games using algebraic notation with the `game` function. - Initial PGN support through the `pgn` function. ### Version 0.1.0 - Display a chess position on a chessboard with the `board` function. - Get the starting position with `starting-position`. - Use chess-related symbols with the `chess-sym` module.
https://github.com/kdog3682/mathematical
https://raw.githubusercontent.com/kdog3682/mathematical/main/0.1.0/src/patterns/recursive-diamond.typ
typst
#import "@preview/cetz:0.2.2" #import cetz.draw #let offsetf(offset) = { let callback(p) = { return p.map((x) => x + offset) } return callback } #let recursive_diamond(size, depth) = { let runner(size, offset) = { let half = size / 2 let x = 0 let y = 0 let callback = offsetf(offset) let rect-points = ((x, y), (size, size)).map(callback) let diamond-points = ((half, y), (size, half), (half, size), (x, half)).map(callback) return (rect: rect-points, diamond: diamond-points) } let offset = 0 let store = () while depth > 0 { let points = runner(size, offset) store.push(points) let new_size = size / calc.sqrt(2) offset = (size - new_size) / 2 size = new_size depth -= 1 } cetz.canvas(length: 1pt, { for arg in store { draw.rect(..arg.rect) draw.line(..arg.diamond, close: true) } }) } // have to make it smaller // have to make it fit TE // this kind of works // #let diamond = recursive_diamond(30, 2) // #let pat = pattern(size: (30pt, 30pt), diamond) // #rect(fill: pat, width: 60pt, height: 60pt) // #let pat = pattern(size: (5pt, 5pt), place(rect(width: 5pt, height: 5pt))) // #rect(fill: pat, width: 60pt, height: 60pt, stroke: none)
https://github.com/typst/packages
https://raw.githubusercontent.com/typst/packages/main/packages/preview/scholarly-tauthesis/0.4.0/template/content/glossary.typ
typst
Apache License 2.0
/** glossary.typ * * Write the glossary (sanasto) of your work here, into the typst * [dictionary] glossary_words. Each entry in the dictionary * needs to contain the keys name and description. The glossary * will be sorted according to the entry keys. * * [dictionary]: https://typst.app/docs/reference/foundations/dictionary/ * ***/ #import "../preamble.typ": * #let glossary_words = ( scalar: ( name: $s$, description: [ Lower-case italic letters denote scalars. ] ), vector: ( name: $vector(v)$, description: [ Bold upright lower-case letters denote vectors. ] ), matrix: ( name: $upright(M)$, description: [ Upright bold capital letters denote matrices. ] ), tut: ( name: "TUT", description: "Tampere University of Technology" ), tuni: ( name: "TUNI", description: "Tampere University" ), julia: ( name: "Julia", description: [ A high-level, dynamically typed general-purpose programming language. Julia is compiled via LLVM into native code which makes it fast. ] ), )
https://github.com/TypstApp-team/typst
https://raw.githubusercontent.com/TypstApp-team/typst/master/tests/typ/visualize/gradient-dir.typ
typst
Apache License 2.0
// Test gradients with direction. --- #set page(width: 900pt) #for i in range(0, 360, step: 15){ box( height: 100pt, width: 100pt, fill: gradient.linear(angle: i * 1deg, (red, 0%), (blue, 100%)), align(center + horizon)[Angle: #i degrees], ) h(30pt) }
https://github.com/Yzx7/public_study_files
https://raw.githubusercontent.com/Yzx7/public_study_files/main/Monografía FIEE/chapters/objetivos.typ
typst
== Objetivos - Comprobar experimentalmente la ley de reflexión en un espejo plano. - Verificar que la distancia del objeto al espejo es igual a la distancia de su imagen reflejada. #pagebreak()
https://github.com/jgm/typst-hs
https://raw.githubusercontent.com/jgm/typst-hs/main/test/typ/meta/ref-02.typ
typst
Other
= First <foo> = Second <foo> // Error: 1-5 label occurs multiple times in the document @foo
https://github.com/typst/packages
https://raw.githubusercontent.com/typst/packages/main/packages/preview/unichar/0.1.0/ucd/block-0980.typ
typst
Apache License 2.0
#let data = ( ("<NAME>", "Lo", 0), ("BENGALI SIGN CANDRABINDU", "Mn", 0), ("BENGALI SIGN ANUSVARA", "Mc", 0), ("BENGALI SIGN VISARGA", "Mc", 0), (), ("BENGALI LETTER A", "Lo", 0), ("BENGALI LETTER AA", "Lo", 0), ("BENGALI LETTER I", "Lo", 0), ("BENGALI LETTER II", "Lo", 0), ("BENGALI LETTER U", "Lo", 0), ("BENGALI LETTER UU", "Lo", 0), ("BENGALI LETTER VOCALIC R", "Lo", 0), ("BENGALI LETTER VOCALIC L", "Lo", 0), (), (), ("BENGALI LETTER E", "Lo", 0), ("BENGALI LETTER AI", "Lo", 0), (), (), ("BENGALI LETTER O", "Lo", 0), ("BENGALI LETTER AU", "Lo", 0), ("BENGALI LETTER KA", "Lo", 0), ("BENGALI LETTER KHA", "Lo", 0), ("BENGALI LETTER GA", "Lo", 0), ("BENGALI LETTER GHA", "Lo", 0), ("BENGALI LETTER NGA", "Lo", 0), ("BENGALI LETTER CA", "Lo", 0), ("BENGALI LETTER CHA", "Lo", 0), ("BENGALI LETTER JA", "Lo", 0), ("BENGALI LETTER JHA", "Lo", 0), ("BENGALI LETTER NYA", "Lo", 0), ("BENGALI LETTER TTA", "Lo", 0), ("BENGALI LETTER TTHA", "Lo", 0), ("BENGALI LETTER DDA", "Lo", 0), ("BENGALI LETTER DDHA", "Lo", 0), ("BENGALI LETTER NNA", "Lo", 0), ("BENGALI LETTER TA", "Lo", 0), ("BENGALI LETTER THA", "Lo", 0), ("BENGALI LETTER DA", "Lo", 0), ("BENGALI LETTER DHA", "Lo", 0), ("BENGALI LETTER NA", "Lo", 0), (), ("BENGALI LETTER PA", "Lo", 0), ("BENGALI LETTER PHA", "Lo", 0), ("BENGALI LETTER BA", "Lo", 0), ("BENGALI LETTER BHA", "Lo", 0), ("BENGALI LETTER MA", "Lo", 0), ("BENGALI LETTER YA", "Lo", 0), ("BENGALI LETTER RA", "Lo", 0), (), ("BENGALI LETTER LA", "Lo", 0), (), (), (), ("BENGALI LETTER SHA", "Lo", 0), ("BENGALI LETTER SSA", "Lo", 0), ("BENGALI LETTER SA", "Lo", 0), ("BENGALI LETTER HA", "Lo", 0), (), (), ("BENGALI SIGN NUKTA", "Mn", 7), ("BENGALI SIGN AVAGRAHA", "Lo", 0), ("BENGALI VOWEL SIGN AA", "Mc", 0), ("BENGALI VOWEL SIGN I", "Mc", 0), ("BENGALI VOWEL SIGN II", "Mc", 0), ("BENGALI VOWEL SIGN U", "Mn", 0), ("BENGALI VOWEL SIGN UU", "Mn", 0), ("BENGALI VOWEL SIGN VOCALIC R", "Mn", 0), ("BENGALI VOWEL SIGN VOCALIC RR", "Mn", 0), (), (), ("BENGALI VOWEL SIGN E", 
"Mc", 0), ("BENGALI VOWEL SIGN AI", "Mc", 0), (), (), ("BENGALI VOWEL SIGN O", "Mc", 0), ("BENGALI VOWEL SIGN AU", "Mc", 0), ("BENGALI SIGN VIRAMA", "Mn", 9), ("BENGALI LETTER KHANDA TA", "Lo", 0), (), (), (), (), (), (), (), (), ("BENGALI AU LENGTH MARK", "Mc", 0), (), (), (), (), ("BENGALI LETTER RRA", "Lo", 0), ("BENGALI LETTER RHA", "Lo", 0), (), ("BENGALI LETTER YYA", "Lo", 0), ("BENGALI LETTER VOCALIC RR", "Lo", 0), ("BENGALI LETTER VOCALIC LL", "Lo", 0), ("BENGALI VOWEL SIGN VOCALIC L", "Mn", 0), ("BENGALI VOWEL SIGN VOCALIC LL", "Mn", 0), (), (), ("BENGALI DIGIT ZERO", "Nd", 0), ("BENGALI DIGIT ONE", "Nd", 0), ("BENGALI DIGIT TWO", "Nd", 0), ("BENGALI DIGIT THREE", "Nd", 0), ("BENGALI DIGIT FOUR", "Nd", 0), ("BENGALI DIGIT FIVE", "Nd", 0), ("BENGALI DIGIT SIX", "Nd", 0), ("BENGALI DIGIT SEVEN", "Nd", 0), ("BENGALI DIGIT EIGHT", "Nd", 0), ("BENGALI DIGIT NINE", "Nd", 0), ("BENGALI LETTER RA WITH MIDDLE DIAGONAL", "Lo", 0), ("BENGALI LETTER RA WITH LOWER DIAGONAL", "Lo", 0), ("BENGALI RUPEE MARK", "Sc", 0), ("BENGALI RUPEE SIGN", "Sc", 0), ("BENGALI CURRENCY NUMERATOR ONE", "No", 0), ("BENGALI CURRENCY NUMERATOR TWO", "No", 0), ("BENGALI CURRENCY NUMERATOR THREE", "No", 0), ("BENGALI CURRENCY NUMERATOR FOUR", "No", 0), ("BENGALI CURRENCY NUMERATOR ONE LESS THAN THE DENOMINATOR", "No", 0), ("BENGALI CURRENCY DENOMINATOR SIXTEEN", "No", 0), ("<NAME>", "So", 0), ("<NAME>", "Sc", 0), ("BENGALI LETTER VEDIC ANUSVARA", "Lo", 0), ("BENGALI ABBREVIATION SIGN", "Po", 0), ("<NAME>", "Mn", 230), )
https://github.com/Myriad-Dreamin/typst.ts
https://raw.githubusercontent.com/Myriad-Dreamin/typst.ts/main/docs/cookery/guide/all-in-one.typ
typst
Apache License 2.0
#import "/docs/cookery/book.typ": * #show: book-page.with(title: "All-in-one (Simplified) Library for Browsers") #include "claim.typ" Note: This is suitable for running in browser, but not very fit in node.js applications. This is because: - The compiler for browsers is in wasm module and slower than running compiler as native code. - You must carefully maintain the bundle size of your browser applications, there for the components are split for better tree-shaking. - The default fonts to load in browser are for network. In other words: - The node.js library runs compiler as native code, thus native performance. - The compiler and renderer are integrated into a same node library for simpler and cleaner APIs. - You can simply use system fonts lazily with the compiler for node but not that for web. If you want to run the compiler or renderer in Node.js, please see #cross-link("/guide/all-in-one-node.typ")[All-in-one Library for Node.js]. #let snippet-source = "https://github.com/Myriad-Dreamin/typst.ts/blob/main/packages/typst.ts/src/contrib/snippet.mts" #let snippet-lib = link(snippet-source)[`snippet.mts`] The most simple examples always work with #snippet-lib utility library, an all-in-one library with simplified API interfaces: ```ts import { $typst } from '@myriaddreamin/typst.ts/dist/esm/contrib/snippet.mjs'; console.log((await $typst.svg({ mainContent: 'Hello, typst!' })).length); // :-> 7317 ``` However, it is less flexible and stable than the underlying interfaces, the `TypstCompiler` and `TypstRenderer`. If you've become more familar with typst.ts, we recommend you rewrite your library with underlying interfaces according to example usage shown by the #snippet-lib library. Note: If your script targets to *CommonJS*, you should import it in *CommonJS* path instead of In *ES Module* path: ```ts const { createTypstCompiler } = require( '@myriaddreamin/typst.ts/dist/cjs/compiler.cjs'); ``` == Examples Here are some examples for the #snippet-lib utility library. 
=== Example: Use the _global shared_ compiler instance: ```typescript import { $typst } from '@myriaddreamin/typst.ts/dist/esm/contrib/snippet.mjs'; ``` === Example: Create an instance of the utility class: ```typescript const $typst = new TypstSnippet({ // optional renderer instance renderer: enableRendering ?? (() => { return createGlobalRenderer( createTypstRenderer, initOptions); }), compiler() => { return createGlobalCompiler(createTypstCompiler, initOptions); } }); ``` #include "all-in-one-inputs.typ" === Example: reuse compilation result The compilation result could be stored in an artifact in #link("https://github.com/Myriad-Dreamin/typst.ts/blob/main/docs/proposals/8-vector-representation-for-rendering.typ")[_Vector Format_], so that you could decouple compilation from rendering or make high-level cache compilation. ```ts const vectorData = await $typst.vector({ mainContent }); // or load vector data from remote const remoteData = await (fetch( './main.sir.in').then(resp => resp.arrayBuffer())); const vectorData = new Uint8Array(remoteData); // into svg format await $typst.svg({ vectorData }); // into canvas operations await $typst.canvas(div, { vectorData }); ``` Note: the compilation is already cached by typst's `comemo` implicitly. == Specify extra init options Ideally, you don't have to specify any options. But if necessary, the extra init options must be at the start of the main routine, or accurately before all invocations. ```ts // Example: cache default fonts to file system $typst.setCompilerInitOptions(await cachedFontInitOptoins()); // specify init options to renderer $typst.setRendererInitOptions(rendererInitOptions); // The compiler instance is initialized in this call. await $typst.svg({ mainContent }); ``` Note: There are more documentation about initialization in the *Import typst.ts to your project* section of #link("https://myriad-dreamin.github.io/typst.ts/cookery/get-started.html")[Get started with Typst.ts]. 
== Configure snippet by the `use` API Specify address to a http server for filesystem backend (shadowed by the `addSource` and `mapShadow` api): ```js const cm = window.TypstCompileModule; const fetchBackend = new cm.FetchAccessModel( 'http://localhost:20810', ); $typst.use( TypstSnippet.withAccessModel(fetchBackend), ); ``` Specify a memory filesystem backend (shadowed by the `addSource` and `mapShadow` api): ```js const memoryAccessModel = new cm.MemoryAccessModel(); $typst.use( TypstSnippet.withAccessModel(memoryAccessModel), ); ``` Fetch package from remote registry: ```js const acessModel = cm.FetchAccessModel() or cm.MemoryAccessModel() or others; $typst.use( TypstSnippet.fetchPackageRegistry(fetchBackend), ); ``` == Specify extra render options See #link(snippet-source)[comments on source] for more details. === Sample application: real-time preview document See #link("https://github.com/Myriad-Dreamin/typst.ts/blob/main/packages/typst.ts/examples/all-in-one.html")[Preview by all-in-one Library] by a single included file (`all-in-one.bundle.js`). See #link("https://github.com/Myriad-Dreamin/typst.ts/blob/main/packages/typst.ts/examples/all-in-one-lite.html")[Preview by all-in-one-lite Library] by the more pratical single included file (`all-in-one-lite.bundle.js`), which needs configure your frontend to have access to wasm module files: ```js $typst.setCompilerInitOptions({ getModule: () => '/path/to/typst_ts_web_compiler_bg.wasm', }); $typst.setRendererInitOptions({ getModule: () => '/path/to/typst_ts_renderer_bg.wasm', }); ```
https://github.com/0x1B05/nju_os
https://raw.githubusercontent.com/0x1B05/nju_os/main/book_notes/content/03_concurrency.typ
typst
#import "../template.typ": * #pagebreak() = Concurrency == Introduction to Concurrency - threads: own PC, private registers, private stack, shared address space - process control block (PCB): to store the state of a processes - thread control blocks (TCBs): to store the state of each thread of a process. if there are two threads that are running on a single processor, when switching from running one (T1) to running the other (T2), *a context switch* must take place #image("images/2023-10-30-21-38-29.png", width: 60%) === Why Use Threads - Parallelism(尽可能多用 CPU) - To avoid blocking program progress due to slow I/O === An Example: Thread Creation ```c #include <assert.h> #include <pthread.h> #include <stdio.h> void *mythread(void *arg) { printf("%s\n", (char *)arg); return NULL; } int main() { pthread_t p1, p2; int rc; printf("main: begin\n"); pthread_create(&p1, NULL, mythread, "A"); pthread_create(&p2, NULL, mythread, "B"); // wait for the thread to complete pthread_join(p1, NULL); pthread_join(p2, NULL); printf("main: begin\n"); return 0; } ``` #image("images/2023-10-30-21-42-03.png", width: 60%) #image("images/2023-10-30-21-42-14.png", width: 60%) #image("images/2023-10-30-21-42-22.png", width: 60%) ```c #include <assert.h> #include <pthread.h> #include <stdio.h> static volatile int counter = 0; void *mythread(void *arg) { printf("%s: begin\n", (char *)arg); int i; for (i = 0; i < 1e7; i++) { counter = counter + 1; } printf("%s: done\n", (char *)arg); return NULL; } int main() { pthread_t p1, p2; int rc; printf("main: begin (counter = %d)\n", counter); pthread_create(&p1, NULL, mythread, "A"); pthread_create(&p2, NULL, mythread, "B"); pthread_join(p1, NULL); pthread_join(p2, NULL); printf("main: done with both (counter = %d)\n", counter); return 0; } ``` output: ```sh ❯ ./test main: begin (counter = 0) A: begin B: begin A: done B: done main: done with both (counter = 10921243) ❯ ./test main: begin (counter = 0) A: begin B: begin B: done A: done main: done with 
both (counter = 10472806) ``` *Threads make life complicated* ==== The Heart Of The Problem: Uncontrolled Scheduling Disassemble `counter = counter + 1;` ```asm 100 mov 0x8049a1c, %eax // %eax = counter 105 add $0x1, %eax // %eax = %eax + 1 108 mov %eax, 0x8049a1c // counter = %eax ``` > suppose that the variable `counter` is located at address `0x8049a1c`. > x86 has variable-length instructions; this `mov` instruction takes up 5 bytes of memory, and the `add` only 3 #image("images/2023-10-31-21-07-52.png") *Synchronization primitives(hardware support) + some help from the operating system*, we will be able to build multi-threaded code that accesses critical sections in a synchronized and controlled manner. === key concurrency terms - A *critical section* is a piece of code that accesses a shared resource, usually a variable or data structure. - A *race condition* (or *data race* [NM92]) arises if multiple threads of execution enter the critical section at roughly the same time; both attempt to update the shared data structure, leading to a surprising outcome. - An *indeterminate* program consists of one or more race conditions; the output of the program varies from run to run, depending on which threads ran when. The outcome is thus *not deterministic*, something we usually expect from computer systems. - To avoid these problems, threads should use some kind of *mutual exclusion* primitives; doing so guarantees that only a single thread ever enters a critical section, thus avoiding races, and resulting in deterministic program outputs. == Interface: Thread API *better used as a reference* === Thread Creation ```c #include <pthread.h> int pthread_create( // init object pthread_t *thread, // stack size/scheduling priority/... 
const pthread_attr_t *attr,
  // function
  void * (*start_routine)(void*),
  // args for function
  void * arg
);
```

`void *` allows "any"

=== Thread Completion

```c
int pthread_join(
  // which one to wait
  pthread_t thread,
  // return value
  void *value_ptr
);
```

example:

```c
typedef struct __myarg_t {
  int a;
  int b;
} myarg_t;

typedef struct __myret_t {
  int x;
  int y;
} myret_t;

void *mythread(void *arg) {
  myarg_t *m = (myarg_t *) arg;
  printf("%d %d\n", m->a, m->b);
  myret_t *r = malloc(sizeof(myret_t));
  r->x = 1;
  r->y = 2;
  return (void *) r;
}

int main(int argc, char *argv[]) {
  pthread_t p;
  myret_t *m;
  myarg_t args = {10, 20};
  Pthread_create(&p, NULL, mythread, &args);
  // (void *) &m = (void *) r
  Pthread_join(p, (void *) &m);
  printf("returned %d %d\n", m->x, m->y);
  free(m);
  return 0;
}
```

> 不能返回函数栈中的地址, 因为栈弹出后, 地址会被"清理".

=== Locks

locks<->critical section

```c
int pthread_mutex_lock(pthread_mutex_t *mutex);
int pthread_mutex_unlock(pthread_mutex_t *mutex);
```

example:

```c
pthread_mutex_t lock;
pthread_mutex_lock(&lock);
x = x + 1; // or whatever your critical section is
pthread_mutex_unlock(&lock);
```

But *No init! No check error code!*

`pthread_mutex_t` must be initialized:

- `PTHREAD_MUTEX_INITIALIZER`
- `pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;`
- `pthread_mutex_init(..)`(runtime)

```c
int rc = pthread_mutex_init(&lock, NULL);
assert(rc == 0); // always check success!

// an example wrapper
// Use this to keep your code clean but check for failures
// Only use if exiting program is OK upon failure
void Pthread_mutex_lock(pthread_mutex_t *mutex) {
  int rc = pthread_mutex_lock(mutex);
  assert(rc == 0);
}
```

> Note that a corresponding call to `pthread_mutex_destroy()` should also be made, when you are done with the lock

When does a thread acquire the lock?

- No other thread holds the lock: acquire the lock.
- Another thread holds the lock: wait until it can acquire the lock.
```c
// returns after a timeout or after acquiring the lock, whichever happens first
int pthread_mutex_timedlock(pthread_mutex_t *mutex, struct timespec *abs_timeout);

// returns failure if the lock is already held;
// special version with a timeout of zero
int pthread_mutex_trylock(pthread_mutex_t *mutex);
```

> Both of these versions should generally be avoided; however, there are a few cases where avoiding getting stuck (perhaps indefinitely) in a lock acquisition routine can be useful.

=== Condition Variables

```c
// put the calling thread to sleep
// and thus waits for some other thread to signal it
int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex);
int pthread_cond_signal(pthread_cond_t *cond);
```

example:

```c
// T1
pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

Pthread_mutex_lock(&lock);
while (ready == 0)
  // release the lock when sleeping
  Pthread_cond_wait(&cond, &lock);
Pthread_mutex_unlock(&lock);

// T2
// when signaling, the thread must be locked
Pthread_mutex_lock(&lock);
ready = 1;
Pthread_cond_signal(&cond);
Pthread_mutex_unlock(&lock);
```

t2(has the lock held, signaling) --wake up--> t1(waiting) --> t1 reacquire the lock

A lazy way?

```c
// waiting
while (ready == 0)
  ; // spin

// signaling
ready = 1;
```

*Don't ever do this!!!*

=== Compiling and Running

To compile them, you must include the header `pthread.h` in your code.
On the link line, you must also explicitly link with the `pthreads` library, by adding the `-pthread` flag.

```sh
prompt> gcc -o main main.c -Wall -pthread
```

=== THREAD API GUIDELINES

- Keep it simple. As simple as possible.
- Minimize thread interactions.
- Initialize locks and condition variables.
- Check your return codes.
- Be careful with how you pass arguments to, and return values from, threads.
- Each thread has its own stack.
- Always use condition variables to signal between threads.
- Use the manual pages.

== Locks

=== Locks: The Basic Idea

A lock is just a variable.
It is either unlocked and thus no thread holds the lock, or locked, and thus exactly one thread holds the lock and presumably is in a critical section. > We could store other information in the data type as well. (Such as which thread holds the lock, or a queue for ordering lock acquisition) The semantics of the `lock()` and `unlock()` routines are simple. - Calling the routine `lock()` tries to acquire the lock; if no other thread holds the lock, the thread will acquire the lock and enter the critical section > this thread is sometimes said to be *the owner of the lock*. - If another thread then calls `lock()` on that same lock variable, it will not return while the lock is held by another thread; in this way, other threads are prevented from entering the critical section while the first thread that holds the lock is in there. - Once the owner of the lock calls `unlock()`, the lock is now available (free) again. If no other threads are waiting for the lock, the state of the lock is simply changed to free. If there are waiting threads, one of them will notice (or be informed of) this change of the lock's state, acquire the lock, and enter the critical section. Thus locks help transform the chaos that is traditional OS scheduling into a more controlled activity. === Pthread Locks The name that the POSIX library uses for a lock is a `mutex`, as it is used to provide *mutual exclusion* between threads. We use our wrappers that check for errors upon lock and unlock. ```c pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; Pthread_mutex_lock(&lock); // wrapper; exits on failure balance = balance + 1; Pthread_mutex_unlock(&lock); ``` We may be using different locks to protect different variables. 
Doing so can increase concurrency: - instead of one big lock that is used any time any critical section is accessed (*a coarse-grained locking strategy*), one will often protect different data and data structures with different locks, thus allowing more threads to be in locked code at once (*a more fine-grained approach*) === Building A Lock How should we build a lock? What hardware support is needed? What OS support? === Evaluating Locks We should first understand what our goals are, and thus we ask how to evaluate the efficacy of a particular lock implementation. To evaluate whether a lock works (and works well), we should first establish some basic criteria. - The first is whether the lock does its basic task, which is *to provide mutual exclusion*. - Basically, does the lock work, preventing multiple threads from entering a critical section? - The second is *fairness*. - Does each thread contending for the lock get a fair shot at acquiring it once it is free? - Does any thread contending for the lock starve while doing so? - The final criterion is *performance*, specifically the time overheads added by using the lock. There are a few different cases that are worth considering here. - One is the case of no contention; when a single thread is running and grabs and releases the lock, what is the overhead of doing so? - Another is the case where multiple threads are contending for the lock on a single CPU; in this case, are there performance concerns? - Finally, how does the lock perform when there are multiple CPUs involved, and threads on each contending for the lock? === Controlling Interrupts One of the earliest solutions used to provide mutual exclusion was to *disable interrupts for critical sections;* this solution was invented for single-processor systems. The code would look like this: ```c void lock() { DisableInterrupts(); } void unlock() { EnableInterrupts(); } ``` The main positive of this approach is its *simplicity*. 
Without interruption, a thread can be sure that the code it executes will execute and that no other thread will interfere with it.

The negatives, unfortunately, are many.

- First, this approach requires us to allow any calling thread to perform *a privileged operation*, and thus trust that this facility is not abused. Here, the trouble manifests in numerous ways:
  - a greedy program could call `lock()` at the beginning of its execution and thus monopolize the processor;
  - worse, an errant or malicious program could call `lock()` and go into an endless loop. In this latter case, the OS never regains control of the system, and there is only one recourse: reboot.
  > Using interrupt disabling as a general purpose synchronization solution requires *too much trust* in applications.
- Second, the approach *does not work on multiprocessors*. If multiple threads are running on different CPUs, and each try to enter the same critical section, it does not matter whether interrupts are disabled; threads will be able to run on other processors, and thus could enter the critical section.
- Third, turning off interrupts for extended periods of time can *lead to interrupts becoming lost*, which can lead to serious systems problems.
  - For example, if the CPU missed the fact that a disk device has finished a read request. How will the OS know to wake the process waiting for said read?
- Finally, and probably least important, this approach can be *inefficient*. Compared to normal instruction execution, code that masks or unmasks interrupts tends to be executed slowly by modern CPUs.

For these reasons, turning off interrupts is only used in limited contexts as a mutual-exclusion primitive.

For example, in some cases an operating system itself will use interrupt masking to guarantee atomicity when accessing its own data structures, or at least to prevent certain messy interrupt handling situations from arising.
This usage makes sense, as the trust issue disappears inside the OS, which always trusts itself to perform privileged operations anyhow. === A Failed Attempt: Just Using Loads/Stores We will have to rely on CPU hardware and the instructions it provides us to build a proper lock. Let's first try to build a simple lock by using a single flag variable. ```c typedef struct __lock_t { int flag; } lock_t; void init(lock_t *mutex) { // 0 -> lock is available, 1 -> held mutex->flag = 0; } void lock(lock_t *mutex) { while (mutex->flag == 1) // TEST the flag ; // spin-wait (do nothing) mutex->flag = 1; // now SET it! } void unlock(lock_t *mutex) { mutex->flag = 0; } ``` In our imagination: The first thread that enters the critical section will call `lock()`, which tests whether the flag is equal to 1 (in this case, it is not), and then sets the flag to 1 to indicate that the thread now holds the lock. When finished with the critical section, the thread calls `unlock()` and clears the flag, thus indicating that the lock is no longer held. If another thread happens to call `lock()` while that first thread is in the critical section, it will simply spin-wait in the while loop for that thread to call `unlock()` and clear the flag. Once that first thread does so, the waiting thread will fall out of the while loop, set the flag to 1 for itself, and proceed into the critical section. Unfortunately, the code has two problems: one of `correctness`, and another of `performance`. - The correctness problem is simple to see once you get used to thinking about concurrent programming. Imagine the code interleaving below; assume `flag=0` to begin. #image("images/2023-12-23-20-36-40.png", width: 70%) > we have obviously failed to provide the most basic requirement: providing *mutual exclusion*. > *Remember this situation in mind. 
Spin locks below just make the interrupt after "while" disappear to solve the problem.*

- The performance problem, is the fact that the way a thread waits to acquire a lock that is already held: it endlessly checks the value of flag, a technique known as *spin-waiting*.
  - Spin-waiting wastes time waiting for another thread to release a lock. The waste is exceptionally high on a uniprocessor, where the thread that the waiter is waiting for cannot even run (at least, until a context switch occurs)!

=== Peterson's algorithm

```c
int flag[2];
int turn;

void init(){
  // indicate you intend to hold the lock w/ 'flag'
  flag[0] = flag[1] = 0;
  // whose turn is it? (thread 0 or 1)
  turn = 0;
}

void lock(){
  // 'self' is the thread ID of caller
  flag[self] = 1;
  // make it other thread's turn
  turn = 1 - self;
  while((flag[1-self] == 1) && (turn == 1 - self))
    ; // spin-wait while it's not your turn
}

void unlock(){
  // simply undo your intent
  flag[self] = 0;
}
```

No hardware support. Don't work on modern hardware (due to relaxed memory consistency models)

=== Building Working Spin Locks with Test-And-Set

System designers started to invent hardware support for locking.

The simplest bit of hardware support to understand is known as a *test-and-set (or atomic exchange)* instruction.

We define what the test-and-set instruction does via the following C code snippet:

```c
TestAndSet(int *old_ptr, int new) {
  int old = *old_ptr; // fetch old value at old_ptr
  *old_ptr = new; // store 'new' into old_ptr
  return old; // return the old value
}
```

The reason it is called "test and set" is that

- it enables you to "test" the old value (which is what is returned)
- simultaneously "setting" the memory location to a new value

As it turns out, this slightly more powerful instruction is enough to build a simple *spin lock*.
```c typedef struct __lock_t { int flag; } lock_t; void init(lock_t *lock) { // 0: lock is available, 1: lock is held lock->flag = 0; } void lock(lock_t *lock) { while (TestAndSet(&lock->flag, 1) == 1) ; // spin-wait (do nothing) } void unlock(lock_t *lock) { lock->flag = 0; } ``` It is the simplest type of lock to build, and simply spins, using CPU cycles, until the lock becomes available. To work correctly on a single processor, it requires *a preemptive scheduler* (i.e., one that will interrupt a thread via a timer, in order to run a different thread, from time to time). ==== Evaluating Spin Locks - correctness: does it provide mutual exclusion? - The answer here is yes: the spin lock only allows a single thread to enter the critical section at a time. - fairness. How fair is a spin lock to a waiting thread? - The answer here, unfortunately, is bad news: spin locks don’t provide any fairness guarantees. - performance. What are the costs of using a spin lock? - imagine threads competing for the lock on a single processor; - *painful*. The scheduler might then run every other thread (imagine there are N − 1 others), each of which tries to acquire the lock. In this case, each of those threads will spin for the duration of a time slice before giving up the CPU, a waste of CPU cycles. - consider threads spread out across many CPUs. - spin locks work reasonably well (if the number of threads roughly equals the number of CPUs). ==== THINK ABOUT CONCURRENCY AS A MALICIOUS SCHEDULER What you should try to do is to pretend you are a malicious scheduler, one that *interrupts threads at the most inopportune of times* in order to foil their feeble attempts at building synchronization primitives. 
=== Compare-And-Swap(or compare-and-exchange) ```c int CompareAndSwap(int *ptr, int expected, int new) { int actual = *ptr; if (actual == expected) *ptr = new; return actual; } void lock(lock_t *lock) { while (CompareAndSwap(&lock->flag, 0, 1) == 1) ; // spin } ``` Compare-and-swap is a more powerful instruction than test-and-set(when we briefly delve into topics such as *lock-free synchronization*.) However, if we just build a simple spin lock with it, its behavior is identical to the spin lock we analyzed above. === Load-Linked and Store-Conditional The *load-linked* and *store-conditional* instructions can be used in tandem to build locks and other concurrent structures. ```c int LoadLinked(int *ptr) { return *ptr; } int StoreConditional(int *ptr, int value) { if (no update to *ptr since LoadLinked to this address) { *ptr = value; return 1; // success! } else { return 0; // failed to update } } void lock(lock_t *lock) { while (1) { while (LoadLinked(&lock->flag) == 1) ; // spin until it’s zero if (StoreConditional(&lock->flag, 1) == 1) return; // if set-it-to-1 was a success: all done // otherwise: try it all over again } } void lock(lock_t *lock) { while (LoadLinked(&lock->flag) || !StoreConditional(&lock->flag, 1)) ; // spin } void unlock(lock_t *lock) { lock->flag = 0; } ``` === Fetch-And-Add One final hardware primitive is the fetch-and-add instruction, which atomically increments a value while returning the old value at a particular address. 
we’ll use fetch-and-add to build a more interesting *ticket lock* ```c int FetchAndAdd(int *ptr) { int old = *ptr; *ptr = old + 1; return old; } typedef struct __lock_t { int ticket; int turn; } lock_t; void lock_init(lock_t *lock) { lock->ticket = 0; lock->turn = 0; } void lock(lock_t *lock) { int myturn = FetchAndAdd(&lock->ticket); while (lock->turn != myturn) ; // spin } void unlock(lock_t *lock) { lock->turn = lock->turn + 1; } ``` When a thread wishes to acquire a lock, it first does an atomic fetch-and-add on the ticket value; that value is now considered this thread’s “turn” (myturn). The globally shared lock->turn is then used to determine which thread’s turn it is; when (myturn == turn) for a given thread, it is that thread’s turn to enter the critical section. Unlock is accomplished simply by incrementing the turn such that the next waiting thread (if there is one) can now enter the critical section. Note one important difference with this solution versus our previous attempts: it ensures progress for all threads. Once a thread is assigned its ticket value, it will be scheduled at some point in the future (once those in front of it have passed through the critical section and released the lock). In our previous attempts, no such guarantee existed; a thread spinning on test-and-set (for example) could spin forever even as other threads acquire and release the lock. > LESS CODE IS BETTER CODE (LAUER’S LAW) === Too much Spinning: What Now? These solutions can be quite inefficient. Imagine you are running two threads on a single processor. Now imagine that one thread (thread 0) is in a critical section and thus has a lock held, and unfortunately gets interrupted. The second thread (thread 1) now tries to acquire the lock, but finds that it is held. Thus, it begins to spin. And spin. Then it spins some more. 
And finally, a timer interrupt goes off, thread 0 is run again, which releases the lock, and finally (the next time it runs, say), thread 1 won’t have to spin so much and will be able to acquire the lock. Thus it wastes an entire time slice doing nothing but checking a value that isn’t going to change! The problem gets worse with N threads contending for a lock; N − 1 time slices may be wasted in a similar manner, simply spinning and waiting for a single thread to release the lock. === A Simple Approach: Just Yield, Baby How can we develop a lock that doesn’t needlessly waste time spinning on the CPU? What to do when a context switch occurs in a critical section, and threads start to spin endlessly, waiting for the interrupted (lock-holding) thread to be run again? ```c void init() { flag = 0; } void lock() { while (TestAndSet(&flag, 1) == 1) yield(); // give up the CPU } void unlock() { flag = 0; } ``` We assume an operating system primitive `yield()` which a thread can call when it wants to give up the CPU and let another thread run. #tip("Tip")[ A thread can be in one of three states (running, ready, or blocked); yield is simply a system call that moves the caller from the *running* state to the *ready* state, and thus promotes another thread to running. Thus, the yielding process essentially *deschedules* itself. ] The example with two threads on one CPU; in this case, our yield-based approach works quite well. If a thread happens to call `lock()` and find a lock held, it will simply yield the CPU, and thus the other thread will run and finish its critical section. Consider the case where there are many threads (say 100) contending for a lock repeatedly. In this case, if one thread acquires the lock and is preempted before releasing it, the other 99 will each call `lock()`, find the lock held, and yield the CPU. 
#tip("Tip")[ While better than our spinning approach (which would waste 99 time slices spinning), this approach is still costly; the cost of a context switch can be substantial, and there is thus plenty of waste. ] Worse, we have not tackled the *starvation* problem at all. A thread may get caught in an endless yield loop while other threads repeatedly enter and exit the critical section.
https://github.com/arakur/typst-to-mathlog
https://raw.githubusercontent.com/arakur/typst-to-mathlog/master/example/example.typ
typst
MIT License
// set mathlog style #import "../style/mathlog_style.typ": * // = Gröbner 基底 == 単項式順序 $K$ を体,$R = K[X_1, ..., X_n]$ を $K$-上 $n$ 変数多項式環とする. $R$ の単項式全体の集合を $cal(M)_R$ とおく. $cal(M)_R$ は乗法に関して可換モノイドをなす. #def(title: "単項式順序")[ 多項式環 $R$ の *単項式順序* (_monomial order_) とは,$cal(M)_R$ 上の全順序 $prec.eq$ であって,任意の $mu, mu', nu in cal(M)_R$ に対して以下を満たすもののことである: 1. $1 prec.eq mu$; 2. $mu prec.eq mu' ==> mu dot nu prec.eq mu' dot nu$. ] #prop[ 任意の単項式順序は整礎である. ] #prf[ 略. ] == 先頭イデアル 以下,多項式環 $R$ の単項式順序 $prec.eq$ を固定する. #def[ 多項式 $f in R$ を $ f = sum_(mu in cal(M)_R) c_mu dot mu $ と表すとき,$c_mu eq.not 0$ となる $mu in cal(M)_R$ 全体の集合を $ "supp"_R f := {mu in cal(M)_R | c_mu eq.not 0} $ と書き,$f$ の *台* (_support_) と呼ぶ. 多項式の台は有限集合であることに注意する. $f$ の台の,$prec.eq$ に関する最大元 $mu$ を $prec.eq$ に関する $f$ の *先頭単項式* (_initial monomial_) と呼び,$"in"_prec.eq f$ と書く. $c_mu$ を $prec.eq$ に関する $f$ の *先頭項係数* (_initial coefficient_),$c_mu dot mu$ を $prec.eq$ に関する $f$ の *先頭項* (_initial term_) と呼び,それぞれ $"inic"_prec.eq f, space "init"_prec.eq f$ と書く. ] #def[ 多項式環 $R$ のイデアル $I$ に対し,イデアル $ "in"_prec.eq I := angle.l "in"_prec.eq f | f in I angle.r $ を $I$ の *先頭イデアル* (_initial ideal_) と呼ぶ. ] #rem[ $f_1, ..., f_n in I$ が $I$ を生成するとき,$"in"_prec.eq f_1, ..., "in"_prec.eq f_n in "in"_prec.eq I$ は $"in"_prec.eq I$ を生成するとは限らない. ] #def[ $R$ のイデアル $I$ の生成元 $f_1, ..., f_n in I$ が $I$ の *Gröbner 基底* であるとは,先頭単項式 $"in"_prec.eq f_1, ..., "in"_prec.eq f_n in "in"_prec.eq I$ が先頭イデアル $"in"_prec.eq I$ を生成することをいう. ]
https://github.com/1sSay/USPTU_conspects
https://raw.githubusercontent.com/1sSay/USPTU_conspects/main/src/math/Matrixes.typ
typst
// Global settings and templates #set text(14pt) #let def(term, color: black) = { box(stroke: color, inset: 7pt, text()[ #term ]) } // Lecture header and date #let subject = text()[Математика] #let lecture_header = text()[Матрицы] #let date = text()[03.09.2024] // Header #align(center, heading(level: 1)[#subject. \ #lecture_header ]) #align(center, text(weight: "thin")[#date]) #align(center, text(weight: "thin")[Конспект Сайфуллина Искандара БПО09-01-24]) // Content #box(stroke: black, inset: 7pt, text(weight: "black", fill: red)[Я ПРОПУСТИЛ ЧАСТЬ ЛЕКЦИИ. ЕСЛИ ЧЕГО-ТО НЕ ХВАТАЕТ, ТО МОЖЕТЕ ОТПРАВИТЬ СВОЙ КОНСПЕКТ, Я ДОПИШУ]) #heading(level: 1)[Матрицы] #heading(level: 2)[Диагональная матрица] $ A = mat( a_1, 0, ..., 0; 0, a_2, ..., 0; dots.v, dots.v, dots.down, dots.v; 0, 0, ..., a_n; ) $ #heading(level: 2)[Единичная матрица] $ E = mat( 1, 0, ..., 0; 0, 1, ..., 0; dots.v, dots.v, dots.down, dots.v; 0, 0, ..., 1; ) $ \ #heading(level: 2)[Операция транспонирования] $ A^T = mat( a_11, a_12, ..., a_(1n); a_21, a_22, ..., a_(2n); dots.v, dots.v, dots.down, dots.v; a_(m 1), a_(m 2), ..., a_(m n); )^T = mat( a_11, a_21, ..., a_(m 1); a_21, a_22, ..., a_(m 2); dots.v, dots.v, dots.down, dots.v; a_(1 n), a_(2 n), ..., a_(n m); ) $ \ \ \ \ \ \ \ \ \ \ \ #heading(level: 2)[Сумма] #box(stroke: black, inset: 7pt, text(weight: "black", fill: red)[Размерность матриц должна совпадать]) $ A_"m*n" + B_"m*n" = C_"m*n" $ $ mat( a_11, a_12, ..., a_(1 n); a_21, a_22, ..., a_(2 n); dots.v, dots.v, dots.down, dots.v; a_(m 1), a_(m 2), ..., a_(m n) ) + mat( b_11, b_12, ..., b_(1 p); b_21, b_22, ..., b_(2 p); dots.v, dots.v, dots.down, dots.v; b_(n 1), b_(n 2), ..., b_(n p) ) = \ mat( a_11 + b_11, a_12, ..., a_(1 n); a_21, a_22, ..., a_(2 n); dots.v, dots.v, dots.down, dots.v; a_(m 1), a_(m 2), ..., a_(m n) ) $ #heading(level: 3)[Свойства сложения матриц:] - $A + B = B + A$ - $(A + B) + C = A + (B + C)$ - $А + 0 = А$ - $А + (-А) = 0$ \ #heading(level: 2)[Умножение матриц на число] $ k * 
mat( a_11, a_12, ..., a_(1n); a_21, a_22, ..., a_(2n); dots.v, dots.v, dots.down, dots.v; a_(m 1), a_(m 2), ..., a_(m n); ) = mat( k * a_11, k * a_12, ..., k * a_(1n); k * a_21, k * a_22, ..., k * a_(2n); dots.v, dots.v, dots.down, dots.v; k * a_(m 1), k * a_(m 2), ..., k * a_(m n); ) $ #heading(level: 3)[Свойства умножения матрицы на число:] - $1 * A = A$ - $(alpha beta)A = alpha(beta A)$ - $(alpha + beta)A = alpha A + beta A$ - $alpha (A + B) = alpha A + alpha B$ \ \ \ \ #heading(level: 2)[Перемепножение матриц] \ $ A_(m * n) * B_(n * p) = C_(m x p) $ $ mat( a_11, a_12, ..., a_(1 n); a_21, a_22, ..., a_(2 n); dots.v, dots.v, dots.down, dots.v; a_(m 1), a_(m 2), ..., a_(m n) ) * mat( b_11, b_12, ..., b_(1 p); b_21, b_22, ..., b_(2 p); dots.v, dots.v, dots.down, dots.v; b_(n 1), b_(n 2), ..., b_(n p) ) = \ mat( (a_11 * b_11 + ... + a_(1 n) * b_(n 1)), (a_11 * b_12 + ... + a_(1 n) * b_(n 2)), ..., (a_11 * b_(1 p) + ... + a_(1 n) * b_(n p)); dots.v, dots.v, dots.down, dots.v; (a_(m 1) * b_11 + ... + a_(m n) * b_(n 1)), (a_(m 1) * b_12 + ... + a_(m n) * b_(n 2)), ..., (a_(m 1) * b_(1 p) + ... 
+ a_(m n) * b_(n p)); ) $ #heading(level: 2)[Свойства произведения матриц:] - $(A B)C = A(B C)$ - $A B eq.not B A$ - $A E = E A$ - $(A + B)C = A C + B C; A(B + C) = A B + A C$ - $k(A B) = (k A)B = A(k B)$ \ \ #heading(level: 2)[Определитель матрицы] #box(stroke: black, inset: 7pt, text(weight: "black", fill: red)[Применяется только к матрицам с равным количеством строк и столбцов]) #heading(level: 3)[Определитель матрицы $2*2$:] $ A_(2 * 2) = mat( a_11, a_12; a_21, a_22; ) $ $ Delta A = a_11 * a_22 - a_12 * a_21; $ \ \ \ \ \ #heading(level: 3)[Определитель матрицы $3*3$] #heading(level: 4)[Правило треугольника:] #image("images/ПравилоТреугольника.png") #heading(level: 4)[Правило Саррюса:] #image("images/ПравилоСаррюса.png") #heading(level: 3)[Свойства определителя] #set text(18pt) - $Delta A = Delta A^T$\ - $mat(delim: "|", a_11, a_12; a_21, a_22; ) = -mat(delim: "|", a_12, a_11; a_22, a_21; )$ - $k * mat(delim: "|", a_11, a_12; a_21, a_22; ) = mat(delim: "|", k * a_11, k * a_12; a_21, a_22; )$ - $mat(delim: "|", k * a_11, k * a_12; a_11, a_12; ) =$ *0* \ \ \ \ #heading(level: 3)[Минор матрицы] #set text(14pt) #box(stroke: black, inset: 7pt, text(weight: "black")[Минор - это определитель матрицы, из которой вычеркнут n-ая столбец и m-ая строка]) #box(stroke: black, inset: 7pt, text(weight: "black")[Алгебраическим дополнением элемента $a_(i j)$ матрицы $A$ называется число $A_(i j) = (-1)^(i + j) * M_(i j)$, где $M_(i j)$ - дополнительным минор матрицы]) #heading(level: 2)[Разложение определителя] \ #image("images/Разложение определителя.png")
https://github.com/jrihon/cv
https://raw.githubusercontent.com/jrihon/cv/main/brilliant-template/template.typ
typst
// awesomeCV-Typst 2023-07-05 mintyfrankie // Github Repo: https://github.com/mintyfrankie/brilliant-CV // Typst version: 0.6.0 /* Packages */ #import "../metadata.typ": * #import "@preview/fontawesome:0.1.0": * // all the fa-FUNCTION calls /* Styles */ #let awesomeColors = ( skyblue: rgb("#0395DE"), red: rgb("#DC3522"), nephritis: rgb("#27AE60"), concrete: rgb("#95A5A6"), darknight: rgb("#131A28"), babyblue: rgb("#669DF2"), coral: rgb("#FF4252"), nightblue: rgb("#2C63B8"), darkrose: rgb("#851545"), greenish: rgb("#35CC56"), kahki: rgb("#CCB832"), brown: rgb("#CC8139"), blood: rgb("#CC485D"), anthracite: rgb("#767F94"), ) #let regularColors = ( lightaccent: rgb("#A9B5D490"), // anthracite ish rgb accent and alpha: 90% alpha: rgb("#A9B5D410"), // anthracite ish rgb accent and alpha: 90% lightgray: rgb("#343a40"), darkgray: rgb("#212529"), linkblue: rgb("#48A1D9") ) #let accentColor = awesomeColors.at(awesomeColor) /* Layout */ #let layout(doc) = { set text( font: ("Source Sans Pro", "Font Awesome 6 Brands", "Font Awesome 6 Free"), weight: "regular", size: 9pt, ) set align(left) set page( paper: "a4", margin: ( left: 1.4cm, right: 1.4cm, top: .8cm, bottom: .4cm, ), ) doc } /* Utility Functions */ #let hBar() = [ #h(5pt) | #h(5pt) ] #let autoImport(file) = { include {"../sections/" + file + ".typ"} // string concatenation } #let languageSwitch(dict) = { // if multiple files in different languages for (k, v) in dict { if k == varLanguage { return v break } } panic("i18n: language value not matching any key in the array") } #let headerFont = "Roboto" #let beforeSectionSkip = 1pt #let beforeEntrySkip = 1pt #let beforeEntryDescriptionSkip = 1pt #let headerFirstNameStyle(str) = {text( font: headerFont, size: 32pt, weight: "light", fill: regularColors.darkgray, str )} #let headerLastNameStyle(str) = {text( font: headerFont, size: 32pt, weight: "bold", str )} #let headerInfoStyle(str) = {text( size: 10pt, fill: accentColor, str )} #let headerQuoteStyle(str) = {text( size: 
10pt, weight: "medium", style: "italic", fill: accentColor, str )} #let sectionTitleStyle(str, color:black, size:16pt) = {text( size: size, weight: "bold", fill: color, str )} #let entryA1Style(str) = {text( size: 10pt, weight: "bold", str )} #let entryA2Style(str) = {align(right, text( weight: "medium", fill: accentColor, style: "oblique", str ))} #let entryB1Style(str) = {text( size: 8pt, fill: accentColor, weight: "medium", smallcaps(str) )} #let entryB2Style(str) = {align(right, text( size: 8pt, weight: "medium", fill: gray, style: "oblique", str ))} #let entryDescriptionStyle(str) = {text( fill: regularColors.lightgray, { v(beforeEntryDescriptionSkip) str } )} #let entryKeywords(str) = { text( fill: regularColors.lightgray, style: "italic", v(-4pt) + h(8pt) + "Keywords : " + str ) } #let skillTypeStyle(str) = {align(right, text( size: 10pt, weight: "bold", str)) } #let skillInfoStyle(str) = {text( str )} #let honorDateStyle(str) = {align(right, text( str)) } #let honorTitleStyle(str) = {text( weight: "bold", str )} #let honorIssuerStyle(str) = {text( str )} #let honorLocationStyle(str) = {align(right, text( weight: "medium", fill: accentColor, style: "oblique", str ))} #let publicationStyle(str) = {text( str )} #let footerStyle(str) = {text( size: 8pt, fill: rgb("#999999"), smallcaps(str) )} #let letterHeaderNameStyle(str) = {text( fill: accentColor, weight: "bold", str )} #let letterHeaderAddressStyle(str) = {text( fill: gray, size: 0.9em, smallcaps(str) )} #let letterDateStyle(str) = {text( size: 0.9em, style: "italic", str )} #let letterSubjectStyle(str) = {text( fill: accentColor, weight: "bold", underline(str) )} #let graytext(str) = { text( fill: regularColors.lightgray, str ) } /* Functions */ #let makeHeaderInfo() = { let personalInfoIcons = ( // fa = font awesome; https://fontawesome.com phone: fa-phone(), email: fa-envelope(), linkedin: fa-linkedin(), homepage: fa-pager(), github: fa-square-github(), gitlab: fa-gitlab(), orcid: fa-orcid(), 
researchgate: fa-researchgate(), address: fa-location-dot(), extraInfo: "", ) // just add "fa-" + the name of the icon you want -> https://fontawesome.com let n = 1 for (k, v) in personalInfo { // personalInfo initialised in metadata file, which is imported if v != "" { // Adds hBar if n != 1 { hBar() } if n == 5 { //after the linkedin call, better inline linebreak() } // Adds icons personalInfoIcons.at(k) + h(5pt) // get icon and at 5pt space // Adds hyperlinks if k == "email" { link("mailto:" + v)[#v] } else if k == "linkedin" { link("https://www.linkedin.com/in/" + v)[#v] } else if k == "github" { link("https://github.com/" + v)[#v] } else if k == "gitlab" { link("https://gitlab.com/" + v)[#v] } else if k == "homepage" { link("https://" + v)[#v] } else if k == "orcid" { link("https://orcid.org/" + v)[#v] } else if k == "researchgate" { link("https://www.researchgate.net/profile/" + v)[#v] } else { v } } n = n + 1 } } #let makeHeaderNameSection() = table( columns: 1fr, inset: 0pt, stroke: none, row-gutter: 6mm, [#headerFirstNameStyle(firstName) #h(5pt) #headerLastNameStyle(lastName)], [#headerInfoStyle(makeHeaderInfo())], [#headerQuoteStyle(languageSwitch(headerQuoteInternational))] ) #let makeHeaderPhotoSection() = { if profilePhoto != "" { image(profilePhoto, height: 3.6cm) place(dx: 0cm, dy: -3.6cm, circle(radius: 1.795cm, stroke: (paint: accentColor, thickness: 2pt))) // add circle around pp } else { v(3.6cm) } } #let cvHeader( align: left, hasPhoto: true ) = { let makeHeader(leftComp, rightComp, columns, align) = table( columns: columns, inset: 0pt, stroke: none, column-gutter: 15pt, align: align + horizon, {leftComp}, {rightComp} ) if hasPhoto { makeHeader(makeHeaderNameSection(), makeHeaderPhotoSection(), (auto, 20%), align) } else { makeHeader(makeHeaderNameSection(), makeHeaderPhotoSection(), (auto, 0%), align) } } #let cvSection(title) = { let highlightText = title.slice(0,3) let normalText = title.slice(3) v(beforeSectionSkip) 
sectionTitleStyle(highlightText, color: accentColor) sectionTitleStyle(normalText, color: black) h(2pt) box(width: 1fr, line(stroke: 0.9pt, length: 100%)) } #let cvSubSection(title) = { let highlightText = title.slice(0,3) let normalText = title.slice(3) // let c_test = rgb("#C0C0C0") v(beforeSectionSkip) h(2pt) sectionTitleStyle(highlightText, color: accentColor, size: 12pt) sectionTitleStyle(normalText, color: regularColors.lightaccent, size: 12pt) h(2pt) box(width: 1fr, line(stroke: (thickness: 0.9pt, paint: regularColors.lightaccent), length: 100%)) linebreak() } #let cvEntry( title: "Title", society: "Society", date: "Date", location: "Location", description: "", // optional value logo: "", keywords: "", ) = { let ifSocietyFirst(condition, field1, field2) = { return if condition {field1} else {field2} } let ifLogo(path, ifTrue, ifFalse) = { return if varDisplayLogo { if path == "" { ifFalse } else { ifTrue } } else { ifFalse } } let setLogoLength(path) = { return if path == "" { 0% } else { 4% } } let setLogoContent(path) = { return if logo == "" [] else {image(path, width: 100%)} } v(beforeEntrySkip) table( columns: (ifLogo(logo, 4%, 0%), 1fr), inset: 0pt, stroke: none, align: horizon, column-gutter: ifLogo(logo, 4pt, 0pt), setLogoContent(logo), table( columns: (1fr, auto), inset: 0pt, stroke: none, row-gutter: 6pt, align: auto, {entryA1Style(ifSocietyFirst(varEntrySocietyFirst, society, title))}, // var is in metadata, boolean {entryA2Style(date)}, {entryB1Style(ifSocietyFirst(varEntrySocietyFirst, title, society))}, {entryB2Style(location)}, ) ) entryDescriptionStyle(description) if keywords != "" {entryKeywords(keywords)} else [] } #let cvSkill( type: "Type", info: "Info", ) = { table( columns: (16%, 1fr), inset: 0pt, column-gutter: 10pt, stroke: none, skillTypeStyle(type), skillInfoStyle(info), ) v(-6pt) } #let cvHonor( date: "1990", title: "Title", issuer: "", location: "" ) = { table( columns: (16%, 1fr, 15%), inset: 0pt, column-gutter: 10pt, align: 
horizon, stroke: none, honorDateStyle(date), if issuer == "" { honorTitleStyle(title) } else [ #honorTitleStyle(title), #honorIssuerStyle(issuer) ], honorLocationStyle(location) ) v(-6pt) } #let cvPublication( bibPath: "", keyList: list(), refStyle: "apa", ) = { show cite: it => hide(it) show bibliography: it => publicationStyle(it) bibliography(bibPath, title: none, style: refStyle) for key in keyList { cite(key) } v(-15pt) } #let cvFooter() = { place( bottom, table( columns: (1fr, auto), inset: 0pt, stroke: none, footerStyle([#firstName #lastName]), footerStyle(languageSwitch(cvFooterInternational)), ) ) } #let letterHeader( myAddress: "Your Address Here", recipientName: "Company Name Here", recipientAddress: "Company Address Here", date: "Today's Date", subject: "Subject: Hey!" ) = { letterHeaderNameStyle(firstName + " " + lastName) v(1pt) letterHeaderAddressStyle(myAddress) v(1pt) align(right, letterHeaderNameStyle(recipientName)) v(1pt) align(right, letterHeaderAddressStyle(recipientAddress)) v(1pt) letterDateStyle(date) v(1pt) letterSubjectStyle(subject) linebreak(); linebreak() } #let letterSignature(path) = { linebreak() place(right, dx:-5%, dy:0%, image(path, width: 25%)) } #let letterFooter() = { place( bottom, table( columns: (1fr, auto), inset: 0pt, stroke: none, footerStyle([#firstName #lastName]), footerStyle(languageSwitch(letterFooterInternational)), ) ) } // add skill menu #let skill(name, rating) = { let max_rating = 5 let done = false let i = 1 name // put name h(1fr) let colour = accentColor while (not done){ if (i > rating){ colour = regularColors.lightaccent } box(circle( radius: 4pt, fill: colour)) if (max_rating == i){ done = true } else { h(2pt) } i += 1 } [\ ] // newline } // make a box environment to write text int #let boxEnvironment(header, align_header, body) = { let highlightText = header.slice(0,3) let normalText = header.slice(3) if header.at(2) == " " { // account for space in at the third letter. 
Can substitute for any index highlightText = header.slice(0,4) normalText = header.slice(4) } v(beforeSectionSkip) let h = sectionTitleStyle(highlightText, color: accentColor) + sectionTitleStyle(normalText, color: regularColors.lightaccent) align( // header of the box align_header, text(size: 16pt, h) ) v(-5mm) align( // align_header, rect( width: 100%, fill: regularColors.alpha, radius: 10%, stroke : ( top : regularColors.lightaccent, bottom : regularColors.lightaccent, right : regularColors.lightaccent, left : regularColors.lightaccent, rest : rgb("#FFFFFF") ), par(justify: true, body) // justify the body of content to take up the full width ) ) } #let manual_cite(dict, match_name, is_first : false) = { for (k, v) in dict { if k == "authors" { let auth_list = v.split(",") let auth_len = auth_list.len() let c = 1 for auth in auth_list { if auth == match_name and c == 1 { // if first author is my name text(auth, weight: "bold") } else if c == 1 { // if it is someone else's name as first author text(auth) } else if auth == match_name { // if my name is somewhere in the list ", " + text(auth, weight: "bold") if is_first { // if I am also joint first-author super("◆") // diamond to signify joint first } } else if c == auth_len { // if we have arrived to the last one in the list, prefix with ampersand " & " + text(auth) } else { ", " + text(auth) // if all else, just regular print the name } c += 1 } } else if k == "title" { "\"" + text(v) + "\". " } else if k == "date" { " (" + text(v) + ") " } else if k == "journal" { text(v, style: "italic") + ". " } else if k == "doi" { link("https://doi.org/" + v)[#text(v, fill: regularColors.linkblue)] } else if k == "url" { link(v)[#text("Download poster", fill: regularColors.linkblue)] linebreak() // last thing to print from the citation } else if k == "volume" { text(v) + " " } else if k == "number" { "(" + text(v) + "), " } else if k == "page_range" { "pp. " + text(v) + ". 
" } } v(1pt) box(width: 1fr, line(stroke: (thickness: 0.9pt, paint: regularColors.lightaccent), length: 100%)) // just a line across the page v(1pt) }
https://github.com/alberto-lazari/computer-science
https://raw.githubusercontent.com/alberto-lazari/computer-science/main/lcd/project-presentation/sections/encoder.typ
typst
#import "/common.typ": * #new-section-slide[Encoder] #slide(title: [Trivial cases])[ #set text(.9em) #let grid = grid.with(columns: (4fr, 3fr)) #box(stroke: (bottom: 1pt), inset: (y: 15pt), grid( $encodepi() : "Prog"_"vCCS" to "Prog"$, $encode() : "Proc"_"vCCS" to "Proc"$ )) #set align(top) #grid( $encodepi(P) = encode(P) \ encodepi(k = P ";" pi) = ( k = encode(P); "encode"(pi) ) $, pause + $encode(0) = 0 \ encode(tau. P) = tau. encode(P) \ encode(k) = k \ encode(P + Q) = encode(P) + encode(Q) \ encode(P | Q) = encode(P) | encode(Q) $ ) ] #slide(title: [Evaluation -- expressions])[ #box(width: 100%, stroke: (bottom: 1pt), inset: (y: 15pt), $evale : "expr" to NN$ ) #pause $evale (n) = n \ evale (e_1 "op" e_2) = evale (e_1) "op" evale (e_2) \ #uncover("3-")[ // Fix indent #h(0pt) $evale (x) = thin ? #uncover(4)[#to `error: unbound variable x`]$ ] $ #uncover("2-")[ #box(stroke: 1.5pt, inset: .5em, inline-rule("op", $+$, $-$, $*$, $"/"$, )) ] ] #slide(title: [Evaluation -- booleans])[ #box(width: 100%, stroke: (bottom: 1pt), inset: (y: 15pt), $evalb : "boolean" to {"true", "false"}$ ) #pause $evalb ("true") = "true" & evalb ("false") = "false" \ evalb ("not" b) = not b \ evalb (b_1 "or" b_2) = b_1 or b_2 #h(3em) & evalb (b_1 "and" b_2) = b_1 and b_2 \ evalb (e_1 "op" e_2) = evale (e_1) "op" evale (e_2) \ $ #align(center, box(stroke: 1.5pt, inset: .5em, inline-rule("op", $=$, $!=$, $<$, $>$, $lt.eq.slant$, $gt.eq.slant$, ))) ] #slide(title: [Evaluation])[ #let grid = grid.with(columns: (4fr, 3fr)) #grid($encode(tick a(e). P) = tick a_n. encode(P)$, $n = evale(e)$) #grid($encode(k(e_1, ..., e_h)) = k_(n_1, ..., n_h)$, $n_i = evale(e_i)$) #grid(inset: (y: 5pt), $encode("if" b "then" P) = display(cases( encode(P) \ 0 )) $, $evalb(b) = "true" \ evalb(b) = "false"$ ) ] #focus-slide[ Let's start a small digression... 
] #include "expansion.typ" #focus-slide[ Now, back to the encoder #utils.register-section[Encoder] ] #slide(title: [Expansion -- constants])[ Given a finite domain $D subset.eq NN$ $encodepi(k(x_1, ..., x_h) = P ";" pi) = encodepi(expandk(D, k(x_1, ..., x_h) = P) ";" pi)$ #pause $--> encodepi(k_n_1 (x_2, ..., x_h) = P ";" k_n_2 (x_2, ..., x_h) = P ";" ... ";" pi) \ --> encodepi(k_(n_1, m_1) (x_3, ..., x_h) = P ";" k_(n_1, m_2) (x_3, ..., x_h) = P ";" ... ";" pi) \ ... \ --> encodepi(k_(n_1, m_1, ...) = P ";" ... ";" pi) $ ] #slide(title: [Expansion -- input])[ Given a finite domain $D subset.eq NN$ $encode(a(x). P) = encode(expanda(D, a(x). P))$ $encode(a_n (x). P) = a_n. encode(P)$ ] #slide(title: [Expansion -- redirection])[ Given a finite domain $D subset.eq NN$ $encode(P[f]) = encode(expandf(D, P[f]))$ $encode(P[f_n_1, ..., f_n_h]) = encode(P)[f_n_1, ..., f_n_h]$ ] #slide(title: [Expansion -- restriction])[ Given a finite domain $D subset.eq NN$ $encode(P \\ L) = encode(expandL(D, P \\ L))$ $encode(P \\ (L_n_1 union L_n_2 union ... union L_n_h)) = encode(P) \\ (L_n_1 union L_n_2 union ... union L_n_h) $ ] #slide(title: [Bounded evaluation])[ Given a finite domain $D subset.eq NN$ #box(width: 100%, stroke: (bottom: 1pt), inset: (y: 15pt), $Devale : 2^NN to "expr" to NN$ ) #pause $Devale (e) = evale(e), quad evale(e) in D$ $Devale (e) = evale(e), quad evale(e) in.not D$ #uncover(3)[#to `error: out of bounds value evaluated`] ]
https://github.com/jamesrswift/ionio-illustrate
https://raw.githubusercontent.com/jamesrswift/ionio-illustrate/main/docs/manual.typ
typst
MIT License
#import "../src/lib.typ": * // -------------------------------------------- // Setup: tidy style // -------------------------------------------- #import "@preview/tidy:0.1.0" #let show-type(type) = tidy.styles.default.show-type(type) // -------------------------------------------- // Setup: gentle-clues style // -------------------------------------------- #import "@preview/gentle-clues:0.3.0": info, success, warning, error, clue #let example = clue.with(title: "Example", _color: teal,icon: emoji.page) // -------------------------------------------- // Setup: Page styling // -------------------------------------------- #set page( numbering: "1/1", header: align(right)[The `ionio-illustrate` package], ) #set heading(numbering: "1.") #set terms(indent: 1em) #show link: set text(blue) #set text(font: "Fira Sans", size: 10pt) // Example code setup #show raw.where(lang:"typ"): it => block( fill: rgb("#F6F4EB"), inset: 8pt, radius: 5pt, width: 100%, text(font:"Consolas", it), ) // -------------------------------------------- // Setup: Example data // -------------------------------------------- #let data = csv("../assets/isobutelene_epoxide.csv") #let massspec = data.slice(1) // -------------------------------------------- // Title page(s) // -------------------------------------------- #align(center, text(16pt)[*The `ionio-illustrate` package*]) #align(center)[Version 0.3.0] #set par(justify: true, leading: 0.618em) #v(3em) = Introduction This package implements a Cetz chart-like object for displaying mass spectrometric data in Typst documents. It allows for individually styled mass peaks, callouts, titles, and mass callipers. 
= Usage This is the minimal starting point: #example[```typ #import "@preview/ionio-illustrate:0.3.0": * #let data = csv("isobutelene_epoxide.csv") #let ms = mass-spectrum(massspec, args: ( size: (12,6), range: (0,100), )) #figure((ms.display)()) ```] The above code produces the following content: #let ms = mass-spectrum(massspec, args: ( size: (12,6), range: (0,100), ) ) #v(1em) #figure((ms.display)()) It is important to note at this point that the syntax for interacting with mass spectrum objects will certainly change with the introduction of a native type system. This document will be updated to reflect this upon implementation of those changes. // -------------------------------------------- // Table of Contents // -------------------------------------------- #pagebreak() #outline(indent: auto) // -------------------------------------------- // Documentation Pages // -------------------------------------------- #pagebreak() = Documentation This documentation is generated automatically for each package release, and is guaranteed to be an acurate representation of the API in the strictest of terms, but may lack the additional explanations and examples that make for a good documentation. For a more approachable documentation (at the cost of potentially incorrect descriptions due to oversight), please see the hand-written documentation in @humandoc. #{ let module = tidy.parse-module(read("../src/lib.typ")) tidy.show-module(module, style: tidy.styles.default) } // -------------------------------------------- // Humanist Documentation // -------------------------------------------- #pagebreak() = Humanist Documentation <humandoc> This documentation is hand-written, and therefore may sometimes be incorrect if it hasn't been updated to a recent API change (though hopefully those are few). If you see an issue in this documentation, please put in an issue or a pull request on the GitHub repository. 
That being said, a best effort is made to ensure that this section is useful. == `mass-spectrum()` The `mass-spectrum()` function takes two positional arguments: / `data1` (#show-type("array") or #show-type("dictionary")): This is a 2-dimensional array relating mass-charge ratios to their intensities. By default, the first column is the mass-charge ratio and the second column is the intensity. Data for a second mass spectrum is stored within the `args` parameter below. / `data2` (#show-type("array") or #show-type("dictionary")): An optional second mass spectrum to display. Data is in the same format as in `data1`. / `args` (#show-type("dictionary")): This contains suplemental data that can be used to change the style of the mass spectrum, or to add additional content using provided functions (see @extra-content). The defaults for the `args` dictionary are shown below: ```typ data1: none, data2: none, keys: ( mz: 0, intensity: 1 ), size: (14,5), range: (40, 400), style: mass-spectrum-default-style, labels: ( x: [Mass-Charge Ratio], y: [Relative Intensity (%)] ), linestyle: (this, idx)=>{}, plot-extras: (this)=>{}, plot-extras-bottom: (this)=>{}, ``` // -------------------------------------------- // Humanist Documentation: Keys // -------------------------------------------- === `keys` The `keys` entry in the `args` positional argument is a #show-type("dictionary") that can be used to change which fields in the provided `data` #show-type("array")/#show-type("dictionary") are to be used to plot the mass spectrum. An example usage of this may be to store several mass spectra within a single datafile. #info[Note that arrays are 0-index based.] #info[When two mass spectra are provided, both must use the same keys.] 
#example[ ```typ #let ms = mass-spectrum(massspec, args: ( keys: ( mz: 0, // mass-charge is contained in the first column intensity: 1 // intensity is contained in the second column ) )) ```] // -------------------------------------------- // Humanist Documentation: Size // -------------------------------------------- === `size` The `keys` entry in the `args` positional argument is a tuple specifying the size of the mass spectrum on the page, in `Cetz` units. #example[```typ #let ms = mass-spectrum(massspec, args: ( size: (12,6) )) ```] // -------------------------------------------- // Humanist Documentation: Range // -------------------------------------------- === `range` The `range` entry in the `args` positional argument is a tuple specifying the min and the max of the mass-charge axis. #example[ ```typ #let ms = mass-spectrum(massspec, args: ( range: (0,100) // Show mass spectrum between 0 m/z and 100 m/z )) ``` ] // -------------------------------------------- // Humanist Documentation: Style // -------------------------------------------- === `style` The `style` entry in the `args` positional argument is a cetz #show-type("style") dictionary. This dictionary accepts 5 entrys, each affecting a different part of the mass spectrum plot: / `axes`: This is a style dictionary that is passed to `cetz.axes.scientific` after expansion. Please refer to the Cetz documentation for the subentries that are available (at the time of writing, these include `tick`, `frame`, and `label` among other things)/ / `callouts`: This is passed directly to `cetz.draw.content` after expansion. Please refer to the Cetz documentation for the subentries that are available (at the time of writing, these include `stroke`, `fill`, and `frame`, and `padding`) / `peaks`: This is the style passed to all peaks being drawn(overriden by the `linestyle` function). Internally, this is passed to `cetz.draw.line`. 
Please refer to the Cetz documentation for the subentries that are available (at the time of writing, this include `stroke`) / `title`: This is passed directly to `cetz.draw.content` after expansion. Please refer to the Cetz documentation for the subentries that are available (at the time of writing, these include `stroke`, `fill`, and `frame`, and `padding`) / `callipers`: This dictionary entry itself is a dictionary that takes `line` (which is passed directly to `cetz.draw.line` after expansion) which allows customisation of the calliper's lines, and `content` (which is passed directly to `cetz.draw.content`) which allows for customizing the content placed above the callipers. / `peaks`: #text(fill: red, weight: "bold")[TO DO] / `data1`: #text(fill: red, weight: "bold")[TO DO] / `data2`: #text(fill: red, weight: "bold")[TO DO] / `shift-amount`: #text(fill: red, weight: "bold")[TO DO] // -------------------------------------------- // Humanist Documentation: Labels // -------------------------------------------- === `labels` The `labels` entry in the `args` positional argument is a dictionary specifying the labels to be used on each axis. #example[ ```typ #let ms = mass-spectrum(massspec, args: ( labels: ( x: [Mass-Charge Ratio], y: [Relative Intensity \[%\]] ) )) ```] #warning[Note that if you provide this entry, you must provide both child entries.] // -------------------------------------------- // Humanist Documentation: Linestyle // -------------------------------------------- === `linestyle` The `linestyle` entry in the `args` positional argument is a function taking two parameters: `this` (refering to the `#ms` object), and `idx` which is an #show-type("integer") representing the mass-charge ratio of the peak being drawn. Returning a cetz style dictionary will change the appearence of the peaks. This may be used to draw the reader's attention to a particular mass spectrum peak by colouring it in red, for example. 
#example[```typ #let ms = mass-spectrum(massspec, args: ( linestyle: (this, idx)=>{ if idx in (41,) {return (stroke: red)} } )) ```] // -------------------------------------------- // Humanist Documentation: Plot Extras // -------------------------------------------- === `plot-extras` and `plot-extras-bottom` <extra-content> The `plot-extras` entry in the `args` positional argument is a function taking one parameter, `this`, which refers to the `#ms` object. It can be used to add additional content to a mass spectrum using provided functions #example[```typ #let ms = mass-spectrum(massspec, args: ( range: (0,150), plot-extras: (this) => { (this.callout-above)(136, content: MolecularIon()) (this.callout-above)(121) (this.callout-above)(93) (this.callout-above)(80) (this.callout-above)(71) (this.callipers)(41, 55, content: [-CH#sub[2]]) (this.title)([Linalool, +70eV]) }, linestyle: (this, mz)=>{ if mz in (93,) { return (stroke: red) } if mz in (71,) { return (stroke: blue) } } )) #(ms.display)() ```] #[ #set text(font: "Fira Sans", size: 7pt) #let data1 = csv("../assets/linalool.csv") #let massspec1 = data1.slice(1) #let ms2 = mass-spectrum(massspec1, args: ( range: (0,150), plot-extras: (this) => { (this.callout-above)(136, content: MolecularIon()) (this.callout-above)(121) (this.callout-above)(93) (this.callout-above)(80) (this.callout-above)(71) (this.callipers)(41, 55, content: [-CH#sub[2]]) (this.title)([Linalool, +70eV]) }, linestyle: (this, mz)=>{ if mz in (93,) { return (stroke: red) } if mz in (71,) { return (stroke: blue) } } )) #v(1em) #figure((ms2.display)()) ] // -------------------------------------------- // Humanist Documentation: Methods on mass spec object // -------------------------------------------- // #pagebreak() == Method functions This section briefly outlines method functions and where/why they might be used. 
// -------------------------------------------- // Humanist Documentation: Display functions // -------------------------------------------- === Display function(s) These are the functions that will render the mass spectrum. For the moment there is only one, though as there are several desirable ways to render a mass spectrum, I envision adding more functions to this. #warning[Display functions *must not* be called within the context of a `plot-extras(this)` function.] ==== `#ms.display(mode)` The `#ms.display` method is used to place a single mass spectrum within a document. It can be called several times. `mode` can be any of / `single`: (default) Displays a single mass spectrum from the first dataset. / `dual-reflection`: displays both given mass spectra, with one reflected about the mass axis. / `dual-shift`: Displays both mass spectra on the same axis, offset from one another. // -------------------------------------------- // Humanist Documentation: Plot extras // -------------------------------------------- //#pagebreak() === `plot-extras` Functions #warning[The behaviour of `plot-extra` functions is *undefined* when called outside of the context of a `plot-extras(this)` function.] ==== `#ms.title(content)` The `#ms.title` method allows the addition of a title to a mass spectrum. It takes one positional argument, content (#show-type("content") or #show-type("string")). ==== `#ms.callout-above(mz, content: [], inset)` The `#ms.callout-above` method places a callout slightly above the intensity peak for a given mass-charge ratio. It takes one positional argument `mz` (#show-type("integer"), #show-type("float"), or #show-type("string")) and two named arguments, `content` (#show-type("content"), #show-type("string"), or #show-type("none")) to be displayed above the mass peak, and `y-offset` (#show-type("length")), which is the distance above the mass peak at which the content is displayed. 
- If `content` is #show-type("none"), the default value is that which is provided as `mz`. - If `inset` is #show-type("none"), the default value is `0.3em`. - If `mz` is outside of the mass spectrums rang x-axis range, it will not be shown ==== `#ms.callout-aside(mz, position, height, content, anchor, inset)` Behaves similarly to `#ms.calloutbove`, however, content is instead rendered at a specified position, with a faint line connecting the content to the mass peak at the specified height. If `height` is not specified, it is #show-type("auto"). If it is auto, it is set to `100%`. If `height` is a ratio, the height is set to that ratio of the mass-peak intensity at `mz`. If `content` is not provided, it defaults to `mz`. ==== `#ms.callipers(mz1, mz2, content: none, height: none, arrow-width: 1, inset: 0.5em)` The `#ms.callipers` method places a mass callipers between two mass spectrum peaks, along with any desired content centered above the callipers. It takes two positional arguments `mz1` and `mz2` (either of which are #show-type("integer"), #show-type("float"), or #show-type("string")) which represent the start and end of the callipers respectively, and two named arguments, `content` (#show-type("content"), #show-type("string"), or #show-type("none")) which is displayed centered above the callipers, and `height` (#show-type("length")), which is the distance at which the content floats above the mass peak. - If `content` is #show-type("none"), it is set automatically to represent the loss of mass between the specified peaks. - If `height` is #show-type("none"), the default value is `0.3em`. #warning[The behaviour is *undefined* when either `mz1` or `mz2` are outside the x-axis range.]
https://github.com/ilsubyeega/circuits-dalaby
https://raw.githubusercontent.com/ilsubyeega/circuits-dalaby/master/Type%201/1/14.typ
typst
// Exercise 1.14: given a piecewise current waveform i(t) (in mA), plot the
// current and the accumulated charge q(t) over time.
#set enum(numbering: "(a)")
#import "@preview/cetz:0.2.2": *
#import "../common.typ": answer

1.14 전선을 통해 흐르는 전류(단위는 $"mA"$)가 다음과 같을 때, 물음에 답하라.

$display(
  i(t) = cases(
    0 (t < 0),
    6t (0 <= t <= 5 s),
    30e^(-0.6 (t-5)) (t >= 5s)
  )
)$

+ 시간에 대한 전류 $i(t)$를 그려라.
+ 시간에 대한 전하 $q(t)$를 그려라.

#answer[
  + $i(t)$ 그래프는 다음과 같다.
    // Current plot: linear ramp 6t on [0, 5], then exponential decay
    // 30·e^(-0.6(t-5)) for t >= 5.
    #canvas({
      import draw: *
      plot.plot(size: (3, 3), x-grid: true, x-label: [t], y-label: [i(t)],
        x-tick-step: 4, y-tick-step: 4, {
        plot.add(domain: (0, 5), x => 6 * x, style: (stroke: red))
        plot.add(domain: (5, 15), x => 30*calc.exp(-0.6*(x - 5)), style: (stroke: red))
      })
    })
  + $q(t)$ 그래프는 다음과 같다. $I_1(t) = 3t^2, I_2(t)=75 + 50(1 - e ^(-0.6(t-5)))$
    // Charge is the running integral of the current: q(t) = 3t^2 on [0, 5]
    // (so q(5) = 75 mC), then 75 + 50(1 - e^(-0.6(t-5))) for t >= 5.
    #canvas({
      import draw: *
      plot.plot(size: (3, 3), x-grid: true, x-label: [t], y-label: [q(t)],
        x-tick-step: 4, y-tick-step: 25, {
        plot.add(domain: (0, 5), x => 3 * x * x, style: (stroke: red))
        plot.add(domain: (5, 15), x => 75 + 50*(1 - calc.exp(-0.6*(x - 5))), style: (stroke: red))
      })
    })
]
https://github.com/gongke6642/tuling
https://raw.githubusercontent.com/gongke6642/tuling/main/布局/measure/measure.typ
typst
= 测量内容的布局大小

该函数允许您确定内容的布局大小。请注意,测量时假设可用空间是无限的,因此测量得到的高度/宽度不一定与该内容最终布局后的高度/宽度相匹配。如果要在当前布局尺寸下进行测量,可以将 `measure` 与 `layout` 组合使用。

== 例

相同的内容(`content`)可以具有不同的大小,具体取决于其所放置的上下文。例如,在下面的示例中,当我们增加字体大小时,字体当然会更大。

#image("屏幕截图 2024-04-16 161711.png")
https://github.com/TypstApp-team/typst
https://raw.githubusercontent.com/TypstApp-team/typst/master/tests/typ/meta/heading.typ
typst
Apache License 2.0
// Test headings. --- // Different number of equals signs. = Level 1 == Level 2 === Level 3 // After three, it stops shrinking. =========== Level 11 --- // Heading vs. no heading. // Parsed as headings if at start of the context. /**/ = Level 1 #[== Level 2] #box[=== Level 3] // Not at the start of the context. No = heading // Escaped. \= No heading --- // Blocks can continue the heading. = #[This is multiline. ] = This is not. --- // Test styling. #show heading.where(level: 5): it => block( text(font: "Roboto", fill: eastern, it.body + [!]) ) = Heading ===== Heading 🌍 #heading(level: 5)[Heading] --- // Edge cases. #set heading(numbering: "1.") = Not in heading =Nope
https://github.com/0x6e66/hbrs-typst
https://raw.githubusercontent.com/0x6e66/hbrs-typst/main/template/declaration.typ
typst
#import "../ads/meta.typ": * #import "utils.typ": * #let declaration = { set page(background: none) align( left, text( size: 20pt, if language == "de" { [Eigenständigkeiserklärung] } else if language == "en" { [Declaration] } ) ) align( left, text( if language == "de" { [Ich versichere hiermit, dass ich meine #language_switch(thesis_subject_type) mit dem Titel] } else if language == "en" { [I hereby affirm that my #language_switch(thesis_subject_type) entitled] } ) ) v(0.3cm) align( left, text( smallcaps[#language_switch(thesis_title)] ) ) v(0.3cm) align( left, text( if language == "de" { [selbstständig verfasst und keine anderen als die angegebenen Quellen und Hilfsmittel benutzt habe. Ich versichere zudem, dass die eingereichte elektronische Fassung mit der gedruckten Fassung übereinstimmt, falls beide Fassungen gefordert sind.] } else if language == "en" { [was authored independently and does not use any sources and aids other than those indicated. I also affirm that the submitted electronic version is identical with the printed version, if both versions are required.] } ) ) v(5cm) line(length: 40%, stroke: 0.5pt) v(-0.3cm) align( left, text( if language == "de" { [<NAME>, der #today] } else if language == "en" { [<NAME>, the #today] } ) ) }
https://github.com/leesum1/brilliant-cv
https://raw.githubusercontent.com/leesum1/brilliant-cv/master/modules_zh/certificates.typ
typst
// Imports #import "@preview/brilliant-cv:2.0.2": cvSection, cvHonor #let metadata = toml("../metadata.toml") #let cvSection = cvSection.with(metadata: metadata) #let cvHonor = cvHonor.with(metadata: metadata) #cvSection("比赛经历") #cvHonor( date: [2024], title: [华为软件精英挑战赛], issuer: [杭厦赛区二等奖], location: [浙江-杭州], ) #cvHonor( date: [2024], title: [华为嵌入式大赛-算法组], issuer: [杭厦赛区复赛], location: [浙江-杭州], )
https://github.com/a-mhamdi/graduation-report
https://raw.githubusercontent.com/a-mhamdi/graduation-report/main/Typst/en-Report/chaps/outro.typ
typst
MIT License
/* --------------------------------- DO NOT EDIT -------------------------------- */ #import "../Class.typ": * #show: report.with(isAbstract: false) #set page(header: none) #figure(chap("General Conclusion"), supplement: [Chapter], numbering: none) // GC #set page(header: smallcaps(title) + h(1fr) + emph("General Conclusion") + line(length: 100%)) #set heading(level: 2, outlined: false) /* ------------------------------------------------------------------------------ */ *Discussions* #lorem(64) *Future Work* #lorem(32)
https://github.com/floriandejonckheere/utu-thesis
https://raw.githubusercontent.com/floriandejonckheere/utu-thesis/master/thesis/figures/06-automated-modularization/artifacts.typ
typst
#import "@preview/cetz:0.2.2" #let artifacts = yaml("/bibliography/literature-review.yml").at("categories").at("artifacts") #let total = artifacts.values().sum().len() #let data = ( ([Codebase#h(2em)], artifacts.at("codebase").len()), ([#h(4em)Execution data], artifacts.at("execution").len()), ([#h(8em)Requirements documents\ #h(11em)and models], artifacts.at("requirements").len()), ([#h(5em)Design documents], artifacts.at("design").len()), ) #cetz.canvas(length: .75cm, { import cetz.chart import cetz.draw: * let colors = ( cmyk(0%, 75%, 79%, 0%), cmyk(29%, 26%, 0%, 28%), cmyk(65%, 0%, 2%, 35%), cmyk(34%, 0%, 60%, 18%), ) chart.piechart( data, clockwise: false, value-key: 1, label-key: 0, radius: 3, slice-style: colors, inner-radius: 1, inner-label: (content: (value, label) => [#text(size: 10pt, white, str(calc.round(100 * value / total, digits: 0)) + "%")], radius: 110%), outer-label: (content: (value, label) => [#label], radius: 130%)) })
https://github.com/alisa101rs/resume
https://raw.githubusercontent.com/alisa101rs/resume/master/README.md
markdown
Based on https://github.com/bamboovir/typst-resume-template/blob/main/resume.typ
https://github.com/arthurcadore/eng-telecom-workbook
https://raw.githubusercontent.com/arthurcadore/eng-telecom-workbook/main/README.md
markdown
MIT License
# Telecom Engineering Workbook ## Author: <NAME> - IFSC - São José #### This repository is dedicated to all homeworks and projects developed during the Telecom Engineering course at IFSC. The image below illustrates all the subjects present in the telecommunications engineering course at IFSC SJ - (PPC 2022) ![main](./pictures/main.png) --- ### Current progress on course: At this point, my progress on engineering course is ilustrated below: ![progress](./pictures/progress.png) ``` Percentage of course completed: 64% ``` #### How to Use the Repository: Browse the repository to find any homework or project you are interested in. Each folder contains the necessary files to replicate the results. You can download the files individually or clone the entire repository to your local machine using Git by running the following command in your terminal: ``` git clone https://github.com/arthurcadore/eng-telecom-workbook ``` --- #### How to Compile the Editable Files: This repository uses `typst` to format all homeworks and projects. You can find more information about `typst` [here](https://github.com/typst/typst). But, if you want to recompile or edit the files, you can simply do it by running the `.devcontainer` specifications. ---
https://github.com/Lslightly/TypstTemplates
https://raw.githubusercontent.com/Lslightly/TypstTemplates/main/templates/mycmd.typ
typst
MIT License
// 证毕 #let finishProof = align( $square.stroked.medium$, right )
https://github.com/pedrofp4444/BD
https://raw.githubusercontent.com/pedrofp4444/BD/main/report/content/[2] Levantamento e Análise de Requisitos/organizacao.typ
typst
#let organizacao = { [ == Organização dos Requisitos Levantados Após a definição do método de levantamento e análise de requisitos, e consequente aplicação do mesmo, procede-se à disposição tabelada e organizada de acordo com a informação explícita nos anexos ao presente relatório. === Requisitos de descrição #figure( kind: "null", supplement: [Null], block( table( columns: (21pt, 15pt, 55pt, 215pt, 1.2fr, 1.2fr), stroke: (thickness: 0.5pt), align: horizon, fill: (x, y) => if y == 0 { gray.lighten(50%) }, table.header([*Tipo*], [*Nº*], [*Data*], [*Descrição do requisito*], [*Fonte*],[*Analista*]), table.cell( rowspan: 14, align: horizon, rotate(-90deg, reflow: true)[ Descrição ], ), /* Requisito */ [1], [04/03/2024], [Um funcionário é identificado pelo seu identificador único e sequencial, nome, data de nascimento, salário, número de identificação fiscal, fotografia, se aplicável, e números de telemóvel.], [Representantes <NAME> e <NAME>], [<NAME> e <NAME>], /* Requisito */ [2], [04/03/2024], [Um funcionário tem 1 ou mais números de telemóvel.], [Representante <NAME>], [<NAME> e <NAME>], /* Requisito */ [3], [04/03/2024], [Uma função é identificada pelo seu identificador único e designação, que pode tomar os valores de Operacional, Detetive e Representante.], [Representante <NAME>], [<NAME> e <NAME>], /* Requisito */ [4], [04/03/2024], [Um funcionário desempenha uma única função e uma função é desempenhada por 1 ou mais funcionários.], [Representante Ana Sofia], [<NAME> e <NAME>], /* Requisito */ [5], [04/03/2024], [Um funcionário Operacional trabalha em 1 ou mais terrenos e num terreno trabalham 1 ou mais funcionários.], [Representante Ana Sofia], [<NAME> e <NAME>], /* Requisito */ [6], [04/03/2024], [Um terreno é identificado pelo seu identificador único e sequencial, minério previsto e minério coletado.], [Representantes da Lusium], [<NAME> e <NAME>], /* Requisito */ [7], [04/03/2024], [O minério previsto corresponde à quantidade mínima estimada de minério a 
coletar por dia.], [Representantes da Lusium], [<NAME> e <NAME>], /* Requisito */ [8], [04/03/2024], [O minério coletado corresponde à quantidade de minério efetivamente coletado por dia.], [Representantes da Lusium], [<NAME> e <NAME>], /* Requisito */ [9], [04/03/2024], [Um funcionário que desempenha a função de Representante gere os funcionários que desempenham a função de Operacional.], [Representantes da Lusium], [<NAME> e <NAME>], /* Requisito */ [10], [08/03/2024], [Um caso é identificado pelo seu identificador único e sequencial, data de abertura, data de encerramento, se aplicável, estado e estimativa de roubo.], [Detetive <NAME>], [<NAME> e <NAME>], /* Requisito */ [11], [08/03/2024], [A estimativa de roubo de um caso corresponde à diferença entre a quantidade de minério prevista a ser coletada e a quantidade de minério efetivamente obtida.], [Detetive <NAME>], [<NAME> e <NAME>], /* Requisito */ [12], [08/03/2024], [Um terreno pode ter casos e cada caso está associado a um só terreno.], [Detetive <NAME>], [<NAME> e <NAME>], /* Requisito */ [13], [08/03/2024], [O estado de um caso pode ser aberto ou fechado.], [Detetive Américo Costa], [<NAME> e <NAME>], /* Requisito */ [14], [08/03/2024], [Caso a quantidade de minério coletada de um determinado dia seja inferior à quantidade de minério mínima prevista, um novo caso é aberto no terreno e todos os funcionários associados tornam-se suspeitos.], [Representante <NAME>], [<NAME> e <NAME>], ) ) ) #figure( caption: "Requisitos de descrição.", kind: table, block( table( columns: (21pt, 15pt, 55pt, 215pt, 1.2fr, 1.2fr), stroke: (thickness: 0.5pt), align: horizon, table.cell( rowspan: 5, align: horizon, rotate(-90deg, reflow: true)[ Descrição ], ), /* Requisito */ [15], [08/03/2024], [Um funcionário pode ou não pertencer a um ou mais casos, enquanto que a um caso estão associados um ou mais funcionários, obrigatoriamente.], [Representante <NAME>], [<NAME> e <NAME>], /* Requisito */ [16], [08/03/2024], [Um funcionário 
associado a um caso é considerado suspeito.], [Representante <NAME>], [<NAME> e <NAME>], /* Requisito */ [17], [08/03/2024], [Um suspeito é caracterizado pelo seu estado, nível de envolvimento e notas, se aplicável.], [Detetive <NAME>], [<NAME> e <NAME>], /* Requisito */ [18], [08/03/2024], [O nível de envolvimento é caracterizado por um inteiro de 1 a 10, onde 1 corresponde a pouco envolvimento e 10 a extremamente envolvido.], [Detetive <NAME>], [<NAME> e <NAME>], /* Requisito */ [19], [08/03/2024], [O estado de um suspeito pode admitir os valores de inocente, em investigação ou culpado.], [Detetive <NAME>], [<NAME> e <NAME>], ) ) ) #label("Tabela1") === Requisitos de manipulação #figure( caption: "Requisitos de manipulação.", kind: table, table( columns: (21pt, 15pt, 55pt, 6fr, 71pt, 71pt), stroke: (thickness: 0.5pt), align: horizon, fill: (x, y) => if y == 0 { gray.lighten(50%) }, table.header([*Tipo*], [*Nº*], [*Data*], [*Descrição do requisito*], [*Fonte*],[*Analista*]), table.cell( rowspan: 7, align: horizon, rotate(-90deg, reflow: true)[ Manipulação ], ), /* Requisito */ [1], [04/03/2024], [Listar o prejuízo de um terreno.], [Representantes da Lusium], [<NAME> e <NAME>], /* Requisito */ [2], [08/03/2024], [Ver quando é que um funcionário se tornou suspeito de um determinado caso.], [Representantes da Lusium], [<NAME> e <NAME>], /* Requisito */ [3], [08/03/2024], [Listar os suspeitos de um determinado caso.], [Representantes da Lusium], [<NAME> e <NAME>], /* Requisito */ [4], [08/03/2024], [Ver a data do último caso de um determinado funcionário.], [Representantes da Lusium], [<NAME> e <NAME>], /* Requisito */ [5], [08/03/2024], [Listar os casos a que um determinado funcionário está associado.], [Detetives], [<NAME> e <NAME>], /* Requisito */ [6], [08/03/2024], [Ver o dia em que mais casos foram abertos.], [Detetives], [<NAME> e <NAME>], /* Requisito */ [7], [08/03/2024], [Listar os top 5 funcionários por quantidade de casos.], [Detetive Améric<NAME>], 
[<NAME> e <NAME>], ) ) #label("Tabela2") === Requisitos de controlo #figure( caption: "Requisitos de controlo.", kind: table, table( columns: (21pt, 15pt, 55pt, 6fr, 71pt, 71pt), stroke: (thickness: 0.5pt), align: horizon, fill: (x, y) => if y == 0 { gray.lighten(50%) }, table.header([*Tipo*], [*Nº*], [*Data*], [*Descrição do requisito*], [*Fonte*],[*Analista*]), table.cell( rowspan: 5, align: horizon, rotate(-90deg, reflow: true)[ Controlo ], ), /* Requisito */ [1], [04/03/2024], [Só um funcionário Representante deve conseguir criar um terreno e alterar os campos de minério previsto e minério coletado de um terreno.], [Representantes da Lusium], [<NAME> e <NAME>], /* Requisito */ [2], [04/03/2024], [Só um funcionário Representante deve conseguir criar um funcionário e alterar os campos nome, data de nascimento, função, salário, número de identificação fiscal, fotografia e telemóvel de um funcionário.], [Representantes da Lusium], [<NAME> e <NAME>], /* Requisito */ [3], [04/03/2024], [Funcionários Operacionais não têm acesso a qualquer informação da base de dados.], [Representante <NAME>], [<NAME> e <NAME>], /* Requisito */ [4], [08/03/2024], [Só um funcionário Detetive deve conseguir criar um caso e alterar o seu estado e estimativa de roubo.], [Detetives], [<NAME> e <NAME>], /* Requisito */ [5], [08/03/2024], [Só um funcionário Detetive deve conseguir tornar um funcionário suspeito num caso e mudar o estado de um suspeito.], [Detetives], [<NAME> e <NAME>], ) ) ] }
https://github.com/Myriad-Dreamin/typst.ts
https://raw.githubusercontent.com/Myriad-Dreamin/typst.ts/main/fuzzers/corpora/visualize/gradient-stroke_00.typ
typst
Apache License 2.0
#import "/contrib/templates/std-tests/preset.typ": * #show: test-page #align(center + top, square(size: 50pt, fill: black, stroke: 5pt + gradient.linear(red, blue)))
https://github.com/mhspradlin/wilson-2024
https://raw.githubusercontent.com/mhspradlin/wilson-2024/main/understanding-ai/day-4-exercises.typ
typst
MIT License
#set page( paper: "us-letter" ) #set document(author: "<NAME>", title: "Understanding AI Day 4 Exercises") #align(center)[ = Image Synthesis: Exercises ] #v(2em) Using Stable Diffusion web ( #link("https://stablediffusionweb.com/#demo") ) or another image synthesis tool: + Generate images related to an occupation such as `doctor` or `teacher`. - Are people of certain genders, ethnicities, and ages represented more than others? - Based on what you know about how image synthesis models work, why might this happen? + The style of particular artists can be emulated using particular prompt inputs. This lists many artists represented in Stable Diffusion: #link("https://supagruen.github.io/StableDiffusion-CheatSheet/") - Choose an artist and craft your own prompt to create images in their visual style. - How do you think that artist would feel about the image you've created? Would they likely be enthusiastic, upset, indifferent, or something else? #v(1fr) #figure(image("figures/warhol-coffee-cup.jpeg", height: 25%), caption: [`coffee cup in the style of andy warhol`]) #figure(image("figures/picasso-dog.jpeg", height: 25%), caption: [`dog in the style of pablo picasso`])
https://github.com/noahjutz/CV
https://raw.githubusercontent.com/noahjutz/CV/main/theme.typ
typst
#let theme = ( primary: purple, primary_light: purple.lighten(80%) )
https://github.com/j10ccc/algorithm-analysis-homework-template-typst
https://raw.githubusercontent.com/j10ccc/algorithm-analysis-homework-template-typst/main/main.typ
typst
#import "constants/fonts.typ": font_family #import "config.typ": frontmatter #import "layout/divider.typ": divider #import "layout/headers/index.typ": headers #import "layout/question.typ": question #let template(doc, config) = [ // globally override style #set text( font: font_family.default, size: 12pt ) #set par( leading: 1em ) #set page( header: [ #set text(size: 12pt) #let page_num = [ #locate(loc => [ #loc.page() ]) ] #let title = [ #text(font: font_family.kaiti)[习题] #emph[#config.homework_id] ] #locate(loc => [ #if loc.page() != 1 [ #grid( columns: (1fr, 1fr), align(left, [ #locate(loc => [ #if calc.odd(loc.page()) [ #title ] else [ #page_num ] ]) ]), align(right, [ #locate(loc => [ #if calc.odd(loc.page()) [ #page_num ] else [ #title ] ]) ]), ) ] ]) ], ) #show strong: set text(font: font_family.songti, weight: "bold") #show raw: set text(font: font_family.monospace) // document declaration #headers( homework_id: config.homework_id, date: config.date, deadline: config.deadline, time_consume: config.time_consume, intro: config.intro ) #doc ]
https://github.com/Nrosa01/TFG-2023-2024-UCM
https://raw.githubusercontent.com/Nrosa01/TFG-2023-2024-UCM/main/Memoria%20Typst/capitulos/tecnologiasWeb.typ
typst
Existe una gran diversidad de tecnologías webss para desarrollar y alojar sitios webs. En este capítulo se verán tecnologías específicas relacionadas al desarrollo de sitios web. Para poder crear una web, es necesario conocer su funcionamiento. Una página web se compone de 3 elementos principales: HTML, CSS Y código JavaScript @baxley-2002. Mediante HTML se define la estructura de la página y sus contenidos. CSS es un lenguaje de estilos que permite definir la apariencia de la web. Por último, JavaScript es un lenguaje de programación que permite añadir interactividad a la web. Aunque existen diferentes herramientas para crear webs, frameworks y librerías, al final los archivos que generan son estos 3 mencionados. == Frameworks de desarrollo Para diseñar un sitio web lo ideal es primero maquetar y conceptualizar. Existen herramientas especializadas para esto como Figma @staiano-2023 u otras más generales como Canva @k-2020. Este proceso es importante, pues ponerse a programar sin una idea clara puede dar lugar a problemas durante el desarrollo. Posteriormente a este proceso se debe pensar en el diseño de la web para finalmente, poder implementarlo en código. Los sitios webs a menudo presentan contenido interactivo que provoca que la página cambie de acorde a las acciones del usuario. Por ejemplo, un botón contador, cada vez que el usuario clicke este botón, cambiará un texto en la web que indica cuantas veces se ha pulsado. Tradicionalmente esto implica usar JavaScript para definir la función a ejecutar cuando el usuario pulsa el botón y actualizar el texto de la web manualmente. Sin embargo, este proceso tan manual es tedioso y su coste de implementación aumenta con la complejidad de la web. Por ejemplo, tener una lista de objetos que representar y poder reordenar. Tener que manipular los elementos de la web para sincronizar la vista con los datos resulta complejo. Para solucionar este y otros problemas, se han creado frameworks de desarrollo web. 
Existen muchos frameworks, pero los más populares son React, Angular y Vue. Cada uno de estos tiene sus particularidades, pero existen ciertos conceptos comunes entre ellos. El más importantes es la `reactividad` @macrae-2018. Cada framework implementa reactividad de un modo ligeramente diferente, pero el concepto es el mismo, usar variables JavaScript en el código HTML. El framework se encarga de detectar los cambios en estas variables y actualizar la vista automáticamente. Por ejemplo, si se tiene una lista de objetos y se añade un nuevo objeto a la lista, el framework se encargará de añadir un nuevo elemento a la vista. A pesar de que los framework de desarrollo aportan muchas ventajas, tienen también inconvenientes. El primero es que la mayoría requiere de un `bundler` @macrae-2018, esto es un programa que se encarga de unir todos los archivos de la web en uno solo. Esto puede ser un problema si se quiere hacer una web sencilla, pues añade complejidad innecesaria. Los bundler requieren archivos de configuración y un proceso de `build` para poder generar la web. Otro problema que puede surgir es el rendimiento, esto se da en mayor o menor medida en todos los frameworks, esto se debe a que los frameworks añaden una capa de abstracción que puede ralentizar la web. Al no tener tanto control sobre como se actualiza la vista, el framework puede hacer más operaciones de las necesarias. == Hosting y CI Una vez que se tiene la web, es necesario alojarla en un servidor para que pueda ser accesible desde cualquier parte del mundo. Existen muchos servicios de hosting, pero los más populares son Netlify y Vercel. Sin embargo, estos servicios son de pago. Para poder alojar webs de forma gratuita, la opción más popular es GitHub Pages. GitHub Pages es un servicio de GitHub que permite alojar webs estáticas de forma gratuita. 
Para poder alojar una web en GitHub Pages @uzayr-2022, es necesario subir los archivos de la web a un repositorio de GitHub y activar GitHub Pages en la configuración del repositorio. Como se vio antes, usar un framework suele implicar un proceso de build para poder generar la página final. Tener que hacer una build cada vez que se quiera subir una página a GitHub Pages u otro servicio resulta molesto. Para solucionar este problema, existen soluciones de integración continua (CI) específicas para webs. Los más populares son GitHub Actions y Netlify CI. Estos servicios permiten automatizar el proceso de build y subida de la web a GitHub Pages. En el caso de GitHub, GitHub Actions permite definir la acción de compilación que se ejecutará en los servidores de GitHub y se desplegará en GitHub Pages. Este proceso puede configurarse para que se realize automáticamente en cada subida de código (push) al respositorio. De esta forma, los desarrolladores no tienen que preocuparse de hacer la build manualmente.
https://github.com/Origami404/kaoyan-shuxueyi
https://raw.githubusercontent.com/Origami404/kaoyan-shuxueyi/main/微积分/06-多元微分学.typ
typst
#import "../template.typ": sectionline, gray_table, colored #let dx = $dif x$ #let dy = $dif y$ = 多元微积分 == 多元微分 === 概念与基础 #set list(marker: ([★], [⤥], [›])) - 二重极限证不存在 - 取常数 $lambda$ 后令 $y = lambda x$,代入原极限证明 $lambda$ 不同时极限不同即可 - 证连续 - 极限存在且等于函数值即可 - 求给定点的偏导数 - 根据定义求: $f'_x(x_0, y_0) = lim_(Delta x -> 0) (f(x_0 + Delta x, y_0) - f(x_0, y_0)) / (Delta x)$ - 全微分存在性 - 定义: 函数的差 = *所有*变量的差的线性组合 + 一个无穷小 - 证明存在常数使得可以通过变量的线性组合组出函数差即可 - 或者证明 $Delta z - (diff z) / (diff x) Delta x - (diff z) / (diff y) Delta y$ 是 $sqrt((Delta x)^2 + (Delta y)^2)$ 的高阶无穷小 - 相互关系 - 偏导连续 $->$ 可微 $->$ 连续 / 偏导存在 - 隐函数 / 复合函数 求导 - 使用微分形式不变性嗯做即可 - 隐函数在给定点上的偏导存在性 (隐函数存在定理) - 直接两边求全微分, 求完剩下什么变量的微分就能确定什么偏导 === 无条件极值 (Hessin 矩阵) - 必要条件: 所有偏导都是 $0$ - 充分条件: $f''_(x x) f''_(y y) - (f''_(x y))^2 > 0$ - 若式子是负则必不是极值, 是零则不能确定 - $f''_(x x)$ 是正则极小, 是负则极大 - 常记作 $A C - B^2 > 0$ 上面的条件是 Hessin 矩阵的行列式, 即函数梯度的雅可比行列式. === 有条件极值 (拉格朗日定理) 对于函数 $f(x, y)$ 和条件 $phi(x, y) = 0$, 构造拉格朗日函数 $L(x, y, lambda) = f(x, y) + lambda phi(x, y)$, 然后解方程组即可. $ cases( (diff L) / (diff x) = (diff L) / (diff y) = 0, phi(x, y) = 0, ) $ 如果让确定是最大还是最小, 那随便带几个数字进 $f$, 比较一下它们和求出来的值的大小即可. #pagebreak() == 多元积分 === 极座标 换极座标记得 $x = r cos theta$, 不要把三角函数对应错, 也不要随随便便换 $x - 1 = r cos theta$ 之类的. #let pp(f, v) = $(diff #f) / (diff #v)$ $ dx dy = mat(delim: "|", pp(x, r), pp(x, theta); pp(y, r), pp(y, theta)) dif r dif theta = r dif r dif theta $ 对三角函数积分时, 一定要注意函数在区间内的正负性, 应该先看积分区间判断积分是不是 $0$ 或者是否需要对半切, 然后再积. 点火公式要看准 $[0, pi / 2]$ 套, 不能乱套. === 看区域 先确定区域, 再积分; 多重积分中区域本身比积分函数还要重要一点, 从区域到积分限本身就是一个考点. 直接给出两个积分上下限的, 可能需要识别出区域然后换序积分. 区域的对称性也必须在积分之前优先考虑. - 函数对某个变量为奇函数可以直接让积分为 $0$ - 函数对某个变量为偶函数可以只在一半区域上积分 - 函数对两个变量具有轮换对称性, 可以在两倍的区域上积分 - 区间 $D$ 对 $y = x$ 对称, 则可以修改积分内的函数为 $1/2 integral.double_D f(x, y) + f(y, x) dx dy$ 使其强制具有轮换对称性 #pagebreak()
https://github.com/fredguth/abnt-typst
https://raw.githubusercontent.com/fredguth/abnt-typst/main/example/manual.typ
typst
manual.typ - introdução -- motivação -- citação bib - conformidade às normas - personalização - exemplos
https://github.com/LeptusHe/LeptusHe.github.io
https://raw.githubusercontent.com/LeptusHe/LeptusHe.github.io/main/source/_posts/temporal-antialiasing/temporal-antialiasing-01.typ
typst
#import "../typst-inc/blog-inc.typc": * #show: blog_setting.with( title: "Temporal Antialiasing - 01", author: ("<NAME>"), paper: "a1" ) #metadata("Temporal Antialiasing") <tags> #metadata("图形渲染") <categories> #metadata("2019-03-09") <date> 走样问题是渲染领域中经常遇到的一个问题。尤其是近几年,随着PBR(physically based rendering)技术不断地被应用在游戏中,实时渲染中的走样问题就变得严重。在PBR技术被应用以前,走样问题的主要来源是三角形光栅化所生成的锯齿问题,即几何走样问题。然而,在PBR技术被应用后,shading产生的高频的颜色信息成为了走样问题的另一个来源。 对于几何走样问题,实时渲染算法中已经存在的一系列算法,比如MSAA,FXAA等,都能够有效地解决该问题。然而,这些算法对于shading走样问题没有很好的效果。在离线渲染中,super sampling一直都是被普遍应用的一种反走样算法。该算法能够有效地解决几何走样和shading走样问题。然而,由于实时渲染对于算法效率的要求,这导致在实时渲染中应用super sampling算法变得不现实。 Super sampling算法的主要缺点在于该算法的效率不高,如何能够提高super sampling算法的效率成为了一个需要考虑的问题。通常而言,如果需要提高一个算法的效率,人们的主要考虑是否能够利用某些还没有利用的信息来减少算法中的计算量。对于实时渲染而言,一个可以被利用的信息是帧与帧之间的数据的重用。由于实时渲染每秒至少渲染30帧,从而导致帧与帧之间有大部分的像素信息基本是相同的。因此,如何重用这些帧与帧基本相同的像素信息是解决super sampling效率低下的一个方向。 Temporal antialiasing可以说就是在这种思想下出现的一种反走样算法,与其他利用空间信息来进行反走样的算法不同,该算法利用了帧与帧之间的信息来实现反走样。 = 静态场景下的TAA Super sampling技术通过在一个像素中分布多个样本(sample)的方式来进行反走样,而TAA(Temporal antialiasing)的基本思想是将一个像素中的多个样本分布在多帧中,然后通过将多帧中的样本信息进行加权平均来得到与super sampling相同的效果。 在静态场景下,即场景中的所有物体(几何物体、相机、灯光等)的属性都不变的情况下,TAA能够取得与super sampling相同的效果,而且TAA的效率相比较super sampling而言具有很大的提升。 因为TAA的基本思想是将多个样本分布到多帧中,然后进行加权平均。假设每个像素具有n个样本,则我们需要考虑以下几个问题: - 如何分布(生成)样本 - 如何为每个样本生成对应的投影矩阵 - 如何对每个样本的采样结果进行加权平均 == 样本的生成 假设我们需要在一个像素中生成n个样本,则样本的分布成为了一个需要考虑的问题。对于该问题,已经存在了许多的解决方法。这里,我们使用Halton低差异序列来生成样本。 == 样本的投影矩阵 如果我们继续采用原来的投影矩阵进行渲染,则我们的样本点是位于像素中心的。如果我们需要使得我们渲染的样本点不位于像素中心,则我们需要为每一个样本生成一个对应的投影矩阵,从而使得该帧的像素中心对应于我们所需要的样本。 为了达到这样的目的,我们需要根据样本点来对投影矩阵进行某些修改。 以OpenGL为例,对OpenGL的投影矩阵的修改如下 ```cpp ProjMatrix[2][0] += (Halton(2, N) * 2.0f – 1.0f ) / WindowWidth; ProjMatrix[2][1] += (Halton(3, N) * -2.0f + 1.0f ) / WindowHeight; ``` == 样本结果的加权平均 当获得了每个样本的结果以后,我们需要考虑如何对这些样本结果($X_i$)进行加权平均,主要考虑以下两个问题: - 平均的样本结果数量$N$ - 每个样本的权重$W_i$ 一个简单的方法是采用前$N$帧样本结果的平均值来作为当前帧的结果, $ S_t = 1/N sum_(i=0)^(N-1) X_(t-i) $ 
如果使用这种方式来对样本进行加权平均,则我们需要在存储前$N$帧的样本结果。然而,这将消耗大量的显存。如果是在非静态场景下,还会存在另一个需要解决的问题——如何寻找在第$t-i$帧中与当前帧(第t帧)的某个像素$P(x_t,y_t)$所对应的像素$P(x_(t-i)^('), y_(t-i)^('))​$。 为了解决需要存储前$N​$帧历史样本的问题,我们可以采用一种称为#im[Exponent Moving Average]的方法。 $ S_t = alpha X_t + (1 - alpha) S_(t-1) $ 当$alpha$值很小时, $ S_t approx 1/N sum_(i=1)^N X_i $ 使用Exponent Moving Average方法后,我们不再需要存储前$N$帧的历史样本,而只需存储前一帧的加权平均结果$S_(t-1)$即可。一般而言,$alpha$的值设置为$0.05$。 = 动态场景下的TAA 在静态场景下,不同帧中处于相同位置$(x, y)$处的像素$X_t(x, y)$都是同一个像素中的不同采样点的采样结果,将它们进行加权平均后相当于进行了super sampling。然而,在动态场景(非静态场景)下,当相机运动时,不同帧中同一位置$(x, y)$处的像素$X_t(x, y)$可能不再是同一个像素中的样本点的采样结果。此时,如果将它们的采样结果进行加权平均,则会产生 #im[ghost现象]。 == Motion Vector 假设当前帧为第$t$帧,现在我们需要得到位于位置$(x, y)$处的加权平均后的结果$S(x_t, y_t)$,由于 $ S_t(x, y)= alpha X_t(x, y) + (1 - alpha)S_(t-1)(x^', y^') $ 则我们需要找到存储在history buffer $S_{t-1}$中$(x^', y^')$位置的像素点$S_(t-1)(x^', y^')$。 为了寻找对应的像素位置$(x^', y^')​$,我们只需要将当前帧中处于位置$(x, y)​$处的像素的世界坐标$ "pos"(x, y)​$投影到上一帧中即可获得对应的坐标$(x^', y^')​$,公式如下: $ (x^', y^') = "Proj"_(t-1) times "View"_(t-1) times "Pos"_w (x, y)​ $ 为了获取与像素$S_t(x, y)$相对应的像素$S_(t-1)(x^', y^')​$,在GBuffer Pass中,我们可以将两个像素位置之间的差值存储到一个运动向量(motion vector)缓存中。 $ arrow(Delta(x, y)) = arrow((x, y)) - arrow((x^', y^')) $ 通过运动向量缓存,我们可以直接获取到与像素$S_t (x, y)$相对应的像素$S_(t-1)(x^', y^')$,然后即可将它们加权获得最终的结果$S_t (x, y)$。 == 几何物体边缘的Motion Vector 当相机运动或者场景中的物体运动时,场景中物体的边缘会失去反走样效果。其原因在于物体边缘的pixel丢失了history color。我们通过对一种简单的情形进行分析,来对物体边缘pixel丢失history color的情况进行说明。 #figure( grid(columns: 2, row-gutter: 2mm, column-gutter: 1mm, image("./images/motion-vector-before.png"), image("./images/motion-vector-after.png"), "a) previous frame", "b) current frame"), caption: "物体边缘的pixel" ) <fig-pixel-history> 如 @fig-pixel-history 所示,蓝色表示物体,白色表背景颜色。每个小矩形代表一个pixel,其中含有四个sample。包围在圆中的sample为当前帧所采样的sample。图a)为前一帧的情况,图b)为当前帧的情况。物体相对于前一帧而言向右运动了一段距离。 在当前帧中,对图b)中被标记为红色的sample进行分析。该sample所属的pixel位于物体的左边缘,且该红色的sample没有被物体边缘覆盖,所以该sample的motion vector为背景的motion vector。假设背景没有运动,则该motion vector为0,从而导致该sample的history 
color为图1中的绿色sample的颜色,即背景颜色。则在当前帧中,该pixel的颜色为背景颜色。然而,在当前帧中,如果我们需要物体边缘的pixel具有反走样的效果,其history color应该使用图a)中的红色sample位置的历史颜色。通过将图a)中的红色sample位置的历史颜色和图b)中红色sample的当前颜色进行blending后才能够得到反走样效果。 #figure( image("./images/motion-vector-pattern.png", width: 70%), caption: "dilate pattern" ) <fig:dilate-pattern> 为了解决在运动状态下,物体边缘的pixel失去反走样效果的问题。一般而言,对于每个像素,在计算该像素的motion vector时,我们使用该像素邻域范围中深度最小的那个pixel的motion vector作为该pixel的motion vector。如 @fig:dilate-pattern 所示,当前pixel为图片中心的黑色pixel,我们使用中心的pixel和其邻域中的其他四个黑色pixel的深度进行比较,得到一个最小的深度,该具有最小深度的pixel的motion vector则为中心pixel的motion vector。该方法的主要目的在于,对于某些被多个物体(前景和背景)覆盖的pixel,使用前景的motion vector作为该pixel的motion vector,从而避免出现上述物体边缘的pixel丢失反走样效果的现象。 = References 1. #link("http://advances.realtimerendering.com/s2014/")[High-Quality Temporal Super Sampling[Siggraph 2014]] 2. #link("https://bartwronski.com/2014/03/15/temporal-supersampling-and-antialiasing/")[Temporal Super Sampling and Antialiasing[2014]] 3. #link("http://twvideo01.ubm-us.net/o1/vault/gdc2016/Presentations/Pedersen_LasseJonFuglsang_TemporalReprojectionAntiAliasing.pdf")[Temporal Reprojection Anti-Aliasing in INSIDE[GDC][2016]] 4. #link("https://developer.download.nvidia.com/gameworks/events/GDC2016/msalvi_temporal_supersampling.pdf")[An Excursion in Temporal Super Sampling[GDC][2016]] 5. #link("http://hhoppe.com/supersample.pdf")[Amortized Super Sampling[Siggraph Asia 2009]]
https://github.com/Myriad-Dreamin/typst.ts
https://raw.githubusercontent.com/Myriad-Dreamin/typst.ts/main/fuzzers/corpora/layout/par_00.typ
typst
Apache License 2.0
#import "/contrib/templates/std-tests/preset.typ": * #show: test-page // Test ragged-left. #set align(right) To the right! Where the sunlight peeks behind the mountain.
https://github.com/TypstApp-team/typst
https://raw.githubusercontent.com/TypstApp-team/typst/master/tests/typ/compiler/comment.typ
typst
Apache License 2.0
// Test line and block comments. --- // Line comment acts as spacing. A// you B // Block comment does not act as spacing, nested block comments. C/* /* */ */D // Works in code. #test(type(/*1*/ 1) // , int) // End of block comment in line comment. // Hello */ // Nested line comment. /*//*/ Still comment. */ E --- // End should not appear without start. // Error: 7-9 unexpected end of block comment /* */ */ // Unterminated is okay. /*
https://github.com/TeunSpithoven/Signals-And-Embedded-Systems
https://raw.githubusercontent.com/TeunSpithoven/Signals-And-Embedded-Systems/main/template/fhict-template.typ
typst
// FHICT document template: colors, helper functions, and the `fhict_doc`
// page/style wrapper applied to a whole document.
#import "@preview/codly:0.2.0": *
#import "@preview/colorful-boxes:1.2.0": *
#import "@preview/showybox:2.0.1": *
#import "@preview/glossarium:0.2.6": make-glossary, print-glossary, gls, glspl

// Fontys house-style colors.
#let fontys_purple_1 = rgb("663366")
#let fontys_purple_2 = rgb("B59DB5")
#let fontys_pink_1 = rgb("E4047C")
#let fontys_blue_1 = rgb("1F3763")
#let fontys_blue_2 = rgb("2F5496")
#let code_name_color = fontys_blue_2.lighten(35%)

// States
// Whether sensitive text should be censored; set from the `censored`
// argument of `fhict_doc` and read by `sensitive`.
#let censored_state = state("style", "0")

// Misc functions
// Hyperlink rendered underlined in the house blue. `content` overrides the
// displayed text; by default the URL itself is shown.
#let hlink(url, content: none) = {
  link(url)[
    #underline[#text([
      #if content == none { url } else { content }
    ], fill: fontys_blue_2)]
  ]
}

// Render `textl` normally, or as a run of block characters when the
// document-wide censored state is active.
#let sensitive(textl) = locate(loc => {
  if (censored_state.at(loc) == 1) {
    text(
      textl.replace(regex("."), "█"),
      fill: black,
      font: "Arial"
    )
  } else {
    textl
  }
})

// House-style table. `content` is a tuple of rows; when `top_colored`
// (resp. `left_colored`) is set, the first row (resp. first column) is
// rendered as a colored heading.
#let fhict_table(
  columns: (),
  content: (),
  background_color_heading: fontys_purple_1,
  background_color: white,
  text_color_heading: white,
  text_color: black,
  top_colored: true,
  left_colored: false,
) = {
  table(
    columns: columns,
    inset: 7pt,
    align: horizon,
    fill: (
      if top_colored and left_colored {
        (column, row) => if column==0 or row==0 { background_color_heading } else { background_color }
      } else if top_colored {
        (_, row) => if row==0 { background_color_heading } else { background_color }
      } else if left_colored {
        (column, _) => if column==0 { background_color_heading } else { background_color }
      }
    ),
    // Flatten the rows, wrapping heading cells in bold colored text.
    ..for row in content {
      if (row == content.at(0)) and top_colored {
        for item in row {
          (text(fill: text_color_heading)[#strong(item)],)
        }
      } else {
        for item in row {
          if (item == row.at(0)) and left_colored {
            (text(fill: text_color_heading)[#strong(item)],)
          } else {
            (text(fill: text_color)[#item],)
          }
        }
      }
    }
  )
}

// Full-width callout box with a colored left border.
#let text_box(background-color: luma(240), stroke-color: black, text-color: black, content) = {
  rect(fill: background-color, width: 100%, stroke: (left: 0.25em + stroke-color))[
    #text(
      fill: text-color,
      content
    )
  ]
}

// Bordered box with a title placed on top of the border line.
#let lined_box(title, body, line-color: red) = block(
  above: 2em,
  stroke: 0.5pt + line-color,
  width: 100%,
  inset: 14pt,
  breakable: false
)[
  #set text(font: "Roboto", fill: line-color)
  #place(
    top + left,
    dy: -6pt - 14pt,
    dx: 6pt - 14pt,
    block(fill: white, inset: 2pt)[*#title*]
  )
  #body
]

// Document
// Main template. Apply with `#show: fhict_doc.with(...)`.
//
// Parameters:
// - title / subtitle: cover-page strings; `subtitle-lines` is the number of
//   lines the subtitle occupies (used for vertical layout offsets).
// - authors: array of dictionaries with `name` (string or content) and
//   optionally `email`; none hides the author box.
// - version-history: array of (version, date, author, changes) dictionaries.
// - glossary-terms / glossary-front: glossarium terms and their placement.
// - bibliography-file / citation-style: bibliography source and CSL style.
// - disable-toc / disable-chapter-numbering / pre-toc / table-of-figures /
//   table-of-listings: front-matter toggles.
// - watermark: text stamped diagonally over every page when set.
// - censored: non-zero censors `sensitive(...)` text and hides author metadata.
#let fhict_doc(
  title: "Document Title",
  subtitle: "Document Subtitle",
  subtitle-lines: 1,
  authors: none,
  version-history: none,
  glossary-terms: none,
  glossary-front: false,
  bibliography-file: none,
  citation-style: "ieee",
  disable-toc: false,
  disable-chapter-numbering: false,
  pre-toc: none,
  table-of-figures: none,
  table-of-listings: none,
  watermark: none,
  censored: 0,
  body
) = {
  show: make-glossary
  let meta_authors = ""
  // Set metadata (author names are omitted from PDF metadata when censored)
  if authors != none and censored == 0 {
    if type(authors.at(0).name) == dictionary {
      meta_authors = authors.map(author => author.name.string)
    } else {
      meta_authors = authors.map(author => author.name)
    }
  }
  set document(
    title: title,
    author: meta_authors,
  )

  // Set the document's style
  set text(font: "Roboto", fallback: false, size: 11pt, fill: black)
  set cite(style: citation-style)

  // Set the header style
  let numbering_set = none
  if disable-chapter-numbering == false {
    numbering_set = "1."
  } else {
    numbering_set = none
  }
  set heading(numbering: numbering_set)
  show heading.where(level: 1): h => {text(strong(upper(h)), size: 18pt, fill: fontys_purple_1)}
  show heading.where(level: 2): h => {text(strong(upper(h)), size: 14pt, fill: fontys_pink_1)}
  show heading.where(level: 3): h => {text(upper(h), size: 12pt, fill: fontys_blue_1)}
  show heading.where(level: 4): h => {text(upper(h), size: 11pt, fill: fontys_blue_2)}
  show heading.where(level: 5): h => {text(emph(upper(h)), size: 11pt, fill: fontys_blue_2, font: "Calibri")}

  // Set the listing style
  show figure.where(kind: raw): it => {
    set align(left)
    it.body
    it.caption
  }

  // Set Cover Page
  set page("a4", background: [
    // Main background triangle
    #place(top + left, path(
      fill: fontys_purple_2,
      closed: true,
      (0%, 0%),
      (5%, 0%),
      ((70%, 45%), (-20pt, -20pt)),
      ((75%, 50%), (0%, -15pt)),
      ((70%, 55%), (20pt, -20pt)),
      (5%, 100%),
      (0%, 100%)
    ))
    // For society image
    #place(top + left, dx: 70pt, dy: 70pt, image(
      "assets/Picture1.png",
      height: 9%,
    ))
    // Title
    #place(left + horizon, dy: -20pt, dx: 40pt, box(
      height: 40pt,
      inset: 10pt,
      fill: fontys_pink_1,
      text(30pt, fill: white, font: "Roboto")[
        *#upper(title)*
      ]
    )
    )
    // Sub title (offsets grow with the number of subtitle lines)
    #place(left + horizon, dy: 20pt + ((22pt * (subtitle-lines - 1)) / 2), dx: 40pt, box(
      height: 30pt + (22pt * (subtitle-lines - 1)),
      inset: 10pt,
      fill: white,
      text(20pt, fill: fontys_purple_1, font: "Roboto")[
        *#upper(subtitle)*
      ]
    )
    )
    // Authors (with mailto links when every author has an email)
    #censored_state.update(censored)
    #set text(fill: fontys_purple_1)
    #if authors != none {
      if authors.all(x => "email" in x) {
        place(left + horizon, dy: 60pt + ( (authors.len() - 1) * 15pt ) + (22pt * (subtitle-lines - 1)), dx: 40pt, box(
          height: 35pt + ((authors.len() - 1) * 30pt),
          inset: 10pt,
          fill: white,
          text(10pt)[
            #if type(authors.at(0).name) == dictionary {
              authors.map(author => strong(author.name.content) + linebreak() + " " + link("mailto:" + author.email)[#author.email]).join(",\n")
            } else {
              authors.map(author => strong(author.name) + linebreak() + " " + link("mailto:" + author.email)).join(",\n")
            }]))
      } else {
        place(left + horizon, dy: 48pt + ( if authors.len() == 1 { 5pt } else { (authors.len() - 1) * 10pt } ) + (22pt * (subtitle-lines - 1)), dx: 40pt, box(
          inset: 10pt,
          fill: white,
          height: 20pt + ((authors.len() - 1) * 15pt),
          text(10pt, fill: fontys_purple_1, font: "Roboto")[
            #if type(authors.at(0).name) == dictionary {
              [*#authors.map(author => author.name.content).join(",\n")*]
            } else {
              [*#authors.map(author => author.name).join(",\n")*]
            }
          ]))
      }
    }
    #set text(fill: black)
    // Date
    #place(right + horizon, dy: 330pt, box(
      width: 40%,
      height: 35pt,
      fill: fontys_pink_1,
      place(left + horizon, dx: 10pt,
        text(30pt, fill: white, font: "Roboto")[
          *#datetime.today().display()*
        ]
      )
    )
    )
  ],
  foreground: [
    #if watermark != none [
      #place(center + horizon, rotate(24deg,
        text(60pt, fill: rgb(0, 0, 0, 70), font: "Roboto")[
          *#upper(watermark)*
        ]
      ))
    ]
  ]
  )

  // Show the cover page
  censored_state.update(censored)
  box()
  pagebreak()

  // Front matter (if any) is numbered with roman numerals.
  let pre_toc_numbering = "1"
  if (version-history != none) or (pre-toc != none) or (disable-toc == false) or (glossary-terms != none and glossary-front == true) or ((table-of-figures != none) and (table-of-figures != false)) or ((table-of-listings != none) and (table-of-listings != false)) {
    pre_toc_numbering = "I"
  }

  // Set the page style for non body pages
  set page("a4", background: [], footer: [
    #place(left + horizon, dy: -25pt,
      image("assets/Picture2.png", height: 200%)
    )
    #place(right + horizon, dy: -25pt,
      text(15pt, fill: fontys_purple_1, font: "Roboto")[
        *#counter(page).display(pre_toc_numbering)*
      ]
    )
  ],
  numbering: pre_toc_numbering
  )
  counter(page).update(1)

  // Show the version history
  if version-history != none {
    heading("version history", outlined: false, numbering: none)
    fhict_table(
      columns: (auto, auto, auto, 1fr),
      content: (
        ("Version", "Date", "Author", "Changes"),
        ..version-history.map(version => (
          version.version,
          version.date,
          version.author,
          version.changes,
        )),
      ),
    )
    pagebreak()
  }

  // Code-listing styling via codly.
  show: codly-init.with()
  codly(languages: (
    rust: (name: "Rust", color: code_name_color),
    rs: (name: "Rust", color: code_name_color),
    cmake: (name: "CMake", color: code_name_color),
    cpp: (name: "C++", color: code_name_color),
    c: (name: "C", color: code_name_color),
    py: (name: "Python", color: code_name_color),
    java: (name: "Java", color: code_name_color),
    js: (name: "JavaScript", color: code_name_color),
    sh: (name: "Shell", color: code_name_color),
    bash: (name: "Bash", color: code_name_color),
    json: (name: "JSON", color: code_name_color),
    xml: (name: "XML", color: code_name_color),
    yaml: (name: "YAML", color: code_name_color),
    typst: (name: "Typst", color: code_name_color),
  ),
  enable-numbers: false,
  display-icon: false,
  )

  // Show the pre-toc
  if pre-toc != none {
    // Disable heading numbering and appearing in the TOC
    set heading(numbering: none, outlined: false)
    pre-toc
    set heading(numbering: numbering_set, outlined: true)
    pagebreak()
  }

  // Show the table of contents
  if disable-toc == false {
    outline(
      title: "Table of Contents",
      depth: 3,
      indent: n => [#h(1em)] * n,
    )
    pagebreak()
  }

  // Show the Glossary in the front
  if glossary-terms != none and glossary-front == true {
    heading("Glossary", numbering: none, outlined: false)
    print-glossary(
      (
        glossary-terms
      ),
    )
    pagebreak()
  }

  // Show the table of figures if requested
  if (table-of-figures != none) and (table-of-figures != false) {
    outline(
      title: "Table Of Figures",
      target: figure.where(kind: image),
    )
    pagebreak()
  }

  // Show the table of listings if requested
  if (table-of-listings != none) and (table-of-listings != false) {
    outline(
      title: "Table Of Listings",
      target: figure.where(kind: raw),
    )
    pagebreak()
  }

  // Set the page style for body pages (arabic numbering, restarted at 1)
  set page("a4", background: [], footer: [
    #place(left + horizon, dy: -25pt,
      image("assets/Picture2.png", height: 200%)
    )
    #place(right + horizon, dy: -25pt,
      text(15pt, fill: fontys_purple_1, font: "Roboto")[
        *#counter(page).display()*
      ]
    )
  ],
  numbering: "1"
  )
  counter(page).update(1)

  // Show the page's contents
  body

  // Show the Glossary in the back
  if glossary-terms != none and glossary-front == false {
    pagebreak()
    heading("Glossary", numbering: none)
    print-glossary(
      (
        glossary-terms
      ),
    )
  }

  // Show the bibliography
  if bibliography-file != none {
    pagebreak()
    // Honor the `citation-style` parameter (previously hardcoded to "ieee",
    // which contradicted the `set cite` rule above; default is unchanged).
    bibliography(bibliography-file, title: "References", style: citation-style)
  }
}
https://github.com/curvenote-templates/ncssm
https://raw.githubusercontent.com/curvenote-templates/ncssm/main/examples/david_nicholson_2023/nicholson.typ
typst
MIT License
// Created with jtex v.1.0.12 #import "../../ncssm.typ": * #show: template.with( frontmatter: ( title: "vak: a neural network framework for researchers studying animal acoustic communication", abstract: [ How is speech like birdsong? What do we mean when we say an animal learns their vocalizations? Questions like these are answered by studying how animals communicate with sound. As in many other fields, the study of acoustic communication is being revolutionized by deep neural network models. These models enable answering questions that were previously impossible to address, in part because the models automate analysis of very large datasets. Acoustic communication researchers have developed multiple models for similar tasks, often implemented as research code with one of several libraries, such as Keras and Pytorch. This situation has created a real need for a framework that allows researchers to easily benchmark multiple models, and test new models, with their own data. To address this need, we developed vak (#link("https://github.com/vocalpy/vak")[https://github.com/vocalpy/vak]), a neural network framework designed for acoustic communication researchers. ("vak" is pronounced like "talk" or "squawk" and was chosen for its similarity to the Latin root _voc_, as in "vocal".) Here we describe the design of the vak, and explain how the framework makes it easy for researchers to apply neural network models to their own data. We highlight enhancements made in version 1.0 that significantly improve user experience with the library. To provide researchers without expertise in deep learning access to these models, vak can be run via a command-line interface that uses configuration files. Vak can also be used directly in scripts by scientist-coders. 
To achieve this, vak adapts design patterns and an API from other domain-specific PyTorch libraries such as torchvision, with modules representing neural network operations, models, datasets, and transformations for pre- and post-processing. vak also leverages the Lightning library as a backend, so that vak developers and users can focus on the domain. We provide proof-of-concept results showing how vak can be used to test new models and compare existing models from multiple model families. In closing we discuss our roadmap for development and vision for the community of users. ], date: datetime( year: 2023, month: 7, day: 10, ), open-access: true, license: "CC-BY-4.0", keywords: ("animal acoustic communication","bioacoustics","neural networks",), doi: "10.25080/gerudo-f2bc6f59-008", authors: ( ( name: "<NAME>", orcid: "0000-0002-4261-4719", affiliations: "1", email: "<EMAIL>" ), ( name: "<NAME>", orcid: "0000-0002-8149-6954", affiliations: "2", ), ), github: "https://github.com/vocalpy/vak", affiliations: ( ( id: "1", name: "Independent researcher, Baltimore, Maryland, USA", ), ( id: "2", name: "Weizmann Institute of Science, Rehovot, Israel", ), ), ) ) /* Written by MyST v1.1.37 */ = Introduction <introduction> Are humans unique among animals? We seem to be the only species that speaks languages @hauserFacultyLanguageWhat2002, but is speech somehow like other forms of acoustic communication in other animals, such as birdsong @doupeBIRDSONGHUMANSPEECH1999? How should we even understand the ability of some animals to learn their vocalizations @wirthlinModularApproachVocal2019? Questions like these are answered by studying how animals communicate with sound @hopp2012animal. 
As others have argued, major advances in this research will require cutting edge computational methods and big team science across a wide range of disciplines, including ecology, ethology, bioacoustics, psychology, neuroscience, linguistics, and genomics @sainburgComputationalNeuroethologyVocal2021 @stowellComputationalBioacousticsDeep2022 @wirthlinModularApproachVocal2019 @hauserFacultyLanguageWhat2002. Research on animal acoustic communication is being revolutionized by deep learning algorithms @sainburgComputationalNeuroethologyVocal2021 @stowellComputationalBioacousticsDeep2022 @cohen2022recent. Deep neural network models enable answering questions that were previously impossible to address, in part because these models automate analysis of very large datasets. Within the study of animal acoustic communication, multiple models have been proposed for similar tasks--we review these briefly in the next section. These models have been implemented using a range of frameworks for neural networks, including PyTorch (as in @cohenAutomatedAnnotationBirdsong2022 and @goffinetLowdimensionalLearnedFeature2021), Keras and Tensorflow (as in @steinfathFastAccurateAnnotation2021 and @sainburgFindingVisualizingQuantifying2020), and even in programming environments outside Python such as Matlab (as in @coffeyDeepSqueakDeepLearningbased2019). Because of this, it is difficult for researchers to directly compare models, and to understand how each performs on their own data. #set page(columns: 2, margin: (x: 1.5cm, y: 2cm),) Additionally, many researchers will want to experiment with their own models to better understand the fit between tasks defined by machine learning researchers and their own question of interest. All of these factors have created a real need for a framework that allows researchers to easily benchmark models and apply trained models to their own data. 
To address this need, we developed vak @nicholsonVak2022 (#link("https://github.com/vocalpy/vak")[https:\/\/github.com/vocalpy/vak]), a neural network framework designed for researchers studying animal acoustic communication. vak is already in use in at least 10-20 research groups to our knowledge, and has already been used in several publications, including @cohenAutomatedAnnotationBirdsong2022 @goffinetLowdimensionalLearnedFeature2021 @mcgregorSharedMechanismsAuditory2022 @provostImpactsFinetuningPhylogenetic2022. Here we describe the design of the vak framework, and explain how vak makes it easy for acoustic communication researchers to work with neural network models. We have also recently published an alpha release of version 1.0 of the library, and throughout this article we highlight enhancements made in this version that we believe will significantly improve user experience. == Related work <related-work> First, we briefly review related literature, to further motivate the need for a framework. A very common workflow in studies of acoustic behavior is to take audio recordings of one individual animal and segment them into a sequence of units, after which further analyses can be done, as reviewed in @kershenbaumAcousticSequencesNonhuman2016. Some analyses require further annotation of the units to assign them to one of some set of classes, e.g. the unique syllables within an individual songbird's song. An example of segmenting audio of Bengalese finch song into syllables and annotating those syllables is shown in @fig:annotation. #figure( image("files/annotation-1b1adc5143bcb0c9c42624696eb7e262.png", width: 100%), caption: [Schematic of analyzing acoustic behavior as a sequence of units. Top panel shows a spectrogram of an individual Bengalese finch's song, consisting of units, often called syllables, separated by brief silent gaps. 
Bottom panel illustrates one method for segmenting audio into syllables that are annotated: a threshold is set on the audio amplitude to segment it into syllables (a continuous period above the threshold), and then a human annotator labels each syllable (e.g., with a GUI application). Adapted from @cohenAutomatedAnnotationBirdsong2022 under #link("https://creativecommons.org/licenses/by/4.0/")[CC BY 4.0 license].],
  kind: "figure",
  supplement: [Figure],
) <fig:annotation>

Several models have been developed to detect and classify a large dataset of vocalizations from an individual animal. These are all essentially supervised machine learning tasks. Some of these models seek to align a neural network task with the common workflow just described @kershenbaumAcousticSequencesNonhuman2016, where audio is segmented into a sequence of units with any of several methods @fukuzawaComputationalMethodsGeneralised2022, that are then labeled by a human annotator. The first family of neural network models reduces this workflow to a frame classification problem @graves_framewise_2005 @graves_supervised_2012. That is, these models classify a series of _frames_, like the columns in a spectrogram. Sequences of units (e.g., syllables of speech or birdsong) are recovered from this series of frame classifications with post-processing. Essentially, the post-processing finds the start and stop times of each continuous run of a single label. Multiple neural network models have been developed for this frame classification approach, including @cohenAutomatedAnnotationBirdsong2022 and @steinfathFastAccurateAnnotation2021. A separate approach from frame classification models has been to formulate recognition of individual vocalizations as an object detection problem. To our knowledge this has been mainly applied to mouse ultrasonic vocalizations as in @coffeyDeepSqueakDeepLearningbased2019.
Another line of research has investigated the use of unsupervised models to learn a latent space of vocalizations. This includes the work of @sainburgFindingVisualizingQuantifying2020 and @goffinetLowdimensionalLearnedFeature2021. These unsupervised neural network models allow for clustering vocalizations in the learned latent space, e.g., to efficiently provide a human annotator with an estimate of the number of classes of vocalizations in an animal's repertoire @sainburgFindingVisualizingQuantifying2020, and/or to measure similarity between vocalizations of two different animals @goffinetLowdimensionalLearnedFeature2021 @zandbergBirdSongComparison2022. It is apparent that unsupervised approaches are complementary to supervised models that automate labor-intensive human annotation. This is another reason that a single framework should provide access to both supervised and unsupervised models. = Methods <methods> In this section we describe the design of vak: its application programming interface (API) and its command-line interface (CLI). We begin by introducing the design of vak at the highest level. == Design <design> vak relies on PyTorch @paszkeAutomaticDifferentiationPyTorch2017 for neural networks, because PyTorch accommodates Pythonic idioms and low-level control flow within networks when needed. In version 1.0, we have additionally adopted the Lightning library @falconPyTorchLightning2023 as a backend, freeing us up as developers to focus on the research domain while benefiting from the Lightning team's engineering expertise. Of course, vak relies heavily on the core libraries of the scientific Python stack. Many functions make use of numpy @walt_numpy_2011 @harris2020array, scipy @virtanen_scipy_2019, and matplotlib @Hunter:2007 @thomas_a_caswell_2020_4030140. 
In particular, the built-in workflows for preparing datasets make frequent use of pandas @team_pandas-devpandas_2020 to work with tabular data formats, and dask @dask_development_team_dask_2016 to enable scalable, distributed processing of very large datasets with mixed file formats, which are common in acoustic communication research. Functionality for preparing datasets is specifically tailored to the needs of acoustic communication researchers in other ways as well. For example, to parse the wide range of annotation formats used by acoustic communication researchers across disciplines, we use the pyOpenSci package crowsetta @nicholson2023crowsetta. In terms of its API, the design of vak is most similar to other domain-specific libraries developed with torch, such as torchvision @torchvision2016, but here the domain is animal acoustic communication research. (Perhaps surprisingly, many of the models proposed to date in this area are essentially adopted from computer vision.) Thus, similar to the torchvision API, vak provides modules for neural network models, operations, transformations for loading data, and datasets. In addition to its torchvision-like API, vak provides a simple command-line interface (CLI) that allows researchers to work with neural network models without requiring significant expertise in Python programming or deep learning. We first describe the API, so that key concepts have been introduced when we explain the usage of the CLI. == Models <models> As its name implies, the `models` module is where implementations of neural network models are found. Our design is focused on a user who wants to benchmark different models within an established task and data processing pipeline as defined by our framework. In version 1.0 of vak, we have introduced abstractions that make it easier for researchers to work with the built-in models and with models they declare in code outside of the library, e.g., in a script or notebook. 
At a high level, we achieved this by adopting the Lightning library as a backend. By sub-classing the core `lightning.LightningModule` class, we provide users with per-model implementations of methods for training, validation, and even for forwarding a single batch or sample through the model. We briefly describe the abstractions we have developed to make it easier to work with models. == Abstractions for declaring a model in vak <abstractions-for-declaring-a-model-in-vak> Our goal is to make it so that a scientist-coder is able to use any of the built-in models, and experiment with their own models, without needing to contribute code to vak or to use a developer-focused mechanism like #link("https://packaging.python.org/en/latest/specifications/entry-points/")[entry points]. To achieve this, we provide a decorator, `vak.models.model`, that is applied to a _model definition_ to produce a sub-class of a _model family_. The `vak.models.model` decorator additionally adds any class it decorates to a _registry_. In the rest of the section we explain these abstractions and how they make it possible to easily test different models. A model definition takes the form of a class with four required class variables: `network`, `loss`, `optimizer`, and `metrics`. In other words, our abstraction asserts that the definition of a neural network model consists of the neural network function, the loss function used to optimize the network's parameters, the optimizer, and the metrics used to assess performance. To relate a model as declared with a definition to the machine learning tasks that we implement within the vak framework, we introduce the concept of model _families_. A model family is represented by a sub-class of the core `lightning.LightningModule` class. Each class representing a family implements family-specific methods: `training_step`, `validation_step`, `prediction_step`, and `forward`. 
In this way, model families are defined operationally: a model can belong to a family if it accepts the inputs provided by logic within the training, validation, and prediction steps, and the model also produces the appropriate outputs needed within those same steps. With these two abstractions in hand, we can add models to vak as follows: we start by applying the `model` decorator to create a new subclass of a model family. This new subclass has the same name as the class that it decorates, which is the class representing the model definition. The decorator then adds a single attribute to this sub-class, the `definition`, that is used when initializing a new instance of the specific model. After creating this sub-class and adding this attribute, the `model` decorator finally registers the model within the `vak.models.registry` module. This allows other functions within vak to find the model by its name in the registry. The registry is implemented with its own helper functions and module-level `dict` variables that are updated by those functions. We present a listing that demonstrates usage of the abstractions just described. ```python from vak.models import ( model, FrameClassificationModel ) from vak.metrics import ( Accuracy, Levenshtein, SegmentErrorRate, ) @model(family=FrameClassificationModel) class TweetyNoLSTMNet: """TweetyNet model without LSTM layer""" network = TweetyNetNoLSTM loss = torch.nn.CrossEntropyLoss optimizer = torch.optim.Adam metrics = { 'acc': Accuracy, 'levenshtein': Levenshtein, 'segment_error_rate': SegmentErrorRate, 'loss': torch.nn.CrossEntropyLoss} default_config = { 'optimizer': {'lr': 0.003} } ``` This example is used in an experiment accompanying this paper, as described below in Results. That experiment demonstrates how the decorator enables models to be declared and used in a script outside of vak. Here we can notice that we apply the `model` decorator to the class `TweetyNoLSTMNet`, which is the model definition. 
Notice also that we pass in as an argument to the decorator the name of the model family that we wish to sub-class, `FrameClassificationModel`. When Python's import machinery parses the script, the model class will be created and added to vak's registry, so that it can be found by other functions for training and evaluating models. The models that are built in to vak use the exact same decorator. == Model families <model-families> Having introduced the abstraction needed to declare models within the vak framework, we now describe the families we have implemented to date. *Frame classification.* As stated in the Related Work section, one way to formulate the problem of segmenting audio into sequences of units so that it can be solved by neural networks is to classify each frame of audio, or a spectrogram produced from that audio, and to then recover segments from this series of labeled frames @graves_framewise_2005 @graves_supervised_2012. This problem formulation works, but an issue arises from the fact that audio signals used by acoustic communication researchers very often vary in length. E.g., a bout of Bengalese finch birdsong can vary from 1-10 seconds, and bouts of canary song can vary roughly from 10 seconds to several minutes. In contrast, the vast majority of neural network models assume a "rectangular" tensor as input and output, in part because they were originally developed for computer vision applications applied to batches. One way to work around this issue is to convert inputs of varying lengths into rectangular batches with a combination of windowing and padding. E.g., pick a window size $w$, find the minimum number of consecutive non-overlapping strides $s$ of that window that will cover an entire input $x$ of length $T$, $s * w gt.eq T$, and then pad $x$ to a new length $T_(p a d d e d) = s * w$. This approach then requires a post-processing step where the outputs are stitched back together into a single continuous sequence $x_(p a d d e d)$. 
The padding is removed by tracking which time bins are padded, e.g., with a separate vector that acts as a "padded" flag for each time bin. Of course there are other ways to address the issue of varying lengths, such as using the `torch.nn.utils.rnn` API to pad and unpad tensors (or using a different family of neural network models). Because more than one model has been developed that uses this post-processing approach to solve the problem of frame classification, we define this as a family of models within vak, the `FrameClassification` model. Both the TweetyNet model from @cohenAutomatedAnnotationBirdsong2022 and the Deep Audio Segmenter (DAS) from @steinfathFastAccurateAnnotation2021 are examples of such models. We provide an implementation of TweetyNet now built directly into vak in version 1.0. We also provide a PyTorch implementation of the Encoder Decoder-Temporal Convolutional (ED-TCN) Network, that was previously applied to frames of video features for an action segmentation task @lea2017temporal. Below in Results we show how vak can be used to benchmark and compare both models on the same dataset. *Parametric UMAP.* To minimally demonstrate that our framework is capable of providing researchers with access to multiple families of models, we have added an initial implementation of a Parametric UMAP model family. The original algorithm for UMAP (Uniform Manifold Approximation and Projection) consists of two steps: computing a graph on a dataset, and then optimizing an embedding of that graph in a lower dimensional space that preserves local relationships between points @mcinnes2018umap. The parametrized version of UMAP replaces the second step with optimization of a neural network architecture @sainburg2021parametric. Because the parametrized version can be used with a wide variety of neural network functions, we declare this as a family. 
We provide an implementation of a single model, an encoder with a convolutional front-end that can map spectrograms of units extracted from audio to a latent space. Our implementation is adapted from #link("https://github.com/elyxlz/umap_pytorch")[https:\/\/github.com/elyxlz/umap_pytorch] and #link("https://github.com/lmcinnes/umap/issues/580\#issuecomment-1368649550")[https:\/\/github.com/lmcinnes/umap/issues/580\#issuecomment-1368649550]. == Neural network layers and operations <neural-network-layers-and-operations> Like PyTorch, vak provides a module for neural network operations and layers named `nn`. This module contains layers used by more than one network. For example, it includes a 2-D convolutional layer with the `'SAME'` padding provided by Tensorflow, that is used both by the TweetyNet model @cohenAutomatedAnnotationBirdsong2022 and by our implementation of the ED-TCN model @lea2017temporal. (PyTorch has added this padding from version 1.10 on, but we maintain our original implementation for purposes of replicability.) Another example of an operation in `vak.nn` is a PyTorch implementation of the normalized ReLu activation used by @lea2017temporal with their ED-TCN model. == Transformations <transformations> Like torchvision, vak provides a module for transformations of data that will become input to a neural network model or will be applied to the outputs of model, i.e., pre- and post-processing. *Standardization of spectrograms.* A key transform that we provide for use during training is the `StandardizeSpect` class, that standardizes spectrograms so they are all on the same scale, by subtracting off a mean and dividing by a standard deviation (often called "normalization"). This transform is distinct from the normalization done by computer vision frameworks like torchvision, because it normalizes separately for each frequency bin in the spectrogram, doing so across all time bins. 
Using a scikit-learn-like API, this `StandardizeSpect` is fit to a set of spectrograms, such as the training set. The fit transform is saved during training as part of the results and then loaded automatically by vak for evaluation or when generating predictions for new data. *Transforms for frame labels.* Many of the transforms we provide relate to what we call _frame labels_, that is, vectors where each element represents a label for a time bin from a spectrogram or a sample in an audio signal. These vectors of class labels are used as targets when training models in a supervised setting to perform frame classification. The `from_segments` transform is used when loading annotations to produce a vector of labeled timebins from the segmented units, which are specified in terms of their onset and offset times along with their label. Conversely, the `to_segments` takes a vector of labeled timebins and returns segments, by finding each continuous run of labels and then converting the onset and offsets from indices in the timebins vector to times in seconds. This post-processing transformation can be configured to perform additional clean-up steps: removing all segments shorter than a minimum duration, and taking a "majority vote" within each series of labels that are bordered by a "background" or "unlabeled" class. In version 1.0, we have added the ability to evaluate models with and without the clean-up steps of the `to_segments` transform applied, so that a user can easily understand how the model is performing before and after these steps. This enhancement allows users to replicate a finding from @cohenAutomatedAnnotationBirdsong2022, which showed, while the TweetyNet model achieved quite low segment error rates without post-processing, these simple clean-up steps allowed for significant further reduction of error. This finding was originally shown with an ad hoc analysis done with a script, but is now available directly through vak. 
This makes it easier for users to compare their model to a sort of empirical upper bound on performance, a strong baseline that indicates the "room for improvement" any given model has. One more transformation worth highlighting here is the `to_labels` transformation, that converts a vector of labeled timebins directly to labels without recovering the onset or offset times. Essentially this transform consists of a `numpy.diff` operation, that we use to find the start of each run of continuous labels, and we then take the label at the start of each run. This transformation can be efficient when evaluating models where we want to measure just the segment error rate. (Of course we preclude the use of other metrics related to onset and offset times when throwing away that information, but for some research questions the main goal is to simply have the correct labels for each segment.) == Metrics <metrics> Vak additionally declares a `metrics` module for evaluation metrics that are specific to acoustic communication models. The main metric we have found it necessary to implement at this time is the (Levenshtein) string edit distance, and its normalized form, known in speech recognition as the word error rate. Our results have shown that edit distances such as this are crucial for evaluating frame classification models. We provide a well-tested implementation tailored for use with neural network models. In version 1.0 of vak, we have additionally adopted as a dependency the `torchmetrics` library, that makes it easier to compute a wide array of metrics for models. == Datasets <datasets> Lastly, vak provides a `dataset` module, again similar in spirit to the module of the same name in torchvision. Each family of models has its own dataset class or classes. We introduce these below, but first we describe our standardized dataset format. 
*Dataset directory format.* In version 1.0 of vak we have adopted a standard for datasets that includes a directory structure and associated metadata. This addressed several limitations from version 0.x: datasets were not portable because of absolute paths, and certain expensive computations were done by other commands that should really have been done when preparing the dataset, such as validating the timebin size in spectrograms or generating multiple random subsets from a training set for learning curves. A listing that demonstrates the directory structure and some key contents is shown below. ```bash dataset/ train/ song1.wav.npz song1.csv song2.wav.npz song2.csv val/ song3.wav.npz song3.csv dataset.csv config.toml # config used to generate dataset prep.log # log from run of prep metadata.json # any metadata ``` We can observe from the listing that, after collating files and separating them into splits as just described, the files are either moved (if we generated them) or copied (if a user supplied them) to directories corresponding to each split. For annotation formats where there is a one-to-one mapping from annotation file to the file that it annotates, we copy the annotation files to the split subdirectories as well. For annotation formats that place all annotations in a single file, we place this file in the root of the dataset directory. After moving these files, we change the paths in the pandas dataframe representing the entire dataset so that they are written relative to the root of the directory. This makes the dataset portable. In addition to these split sub-directories containing the data itself, we note a few other files. These include a csv file containing the dataset files and the splits they belong to, whose format we describe next. They also include the `metadata.json` file that captures important parameters that do not fit well in the tabular data format of the csv file. 
For example, the metadata file for a frame classification dataset contains the duration of the timebin in every spectrogram. Finally, we note two other files in a dataset as shown above. The first is the configuration file used to generate it, copied into the dataset as another form of metadata. The second is a log file that captures any other data about choices made during dataset preparation, e.g., what files were omitted because they contained labels that were not specified in the labelset option of the configuration file. *Dataset csv file format.* Next we outline the format of the csv file that represents a dataset. This csv (and the dataframe loaded from it) has four essential columns: `'audio_path'`, `'spect_path'`, `'annot_path'`, and `'split'`. These columns serve as provenance for the prepared dataset. Each row represents one sample in the dataset, where the meaning of sample may vary depending on the model family. For example, a sample for a frame classification model is typically an entire bout of vocalizations, whereas a sample for a Parametric UMAP model is typically a single unit from the bout. The csv format allows for tracing the provenance of each sample back to the source files used to generate the dataset. Each row must minimally contain either an `'audio_path'` or a `'spect_path'`; if a user provides pre-computed spectrograms, the `'audio_path'` column is left empty. For models that use these files directly, the files will be copied into a sub-directory for each split, and the paths are written relative to the dataset root. The `'annot_path'` column points to annotation files. These again may be in the split sub-directories with the file that each annotates, or in the case of a single file will be in the root of the dataset directory, meaning that this single path will be repeated for every row in the csv. 
Logic in vak uses this fact to determine whether annotations can be loaded from a single file or must be loaded separately for each file when working with models. == Frame classification datasets <frame-classification-datasets> There are two generalized dataset classes for frame classification models in vak. Both these classes can operate on a single dataset prepared by the `vak prep` command; one class is used for training and the other for evaluation. We describe the workflow for preparing this dataset so that the difference between classes is clearer. The initial step is to pair data that will be the source of inputs $x$ to a neural network model with the annotations that will be the source of training targets $y$ for that model. This is done by collecting audio files or array files containing spectrograms from a "data directory", and then optionally pairing these files with annotation files. For models that take spectrograms as input, vak can use audio files to generate spectrograms that are then saved in array files and paired with any annotations. Alternatively a user can provide pre-computed spectrograms. This dataset can also be prepared without the targets $y$, for the case where a model is used to predict annotations for previously unseen data. *WindowDataset.* This dataset class represents all possible time windows of a fixed width from a set of audio recordings or spectrograms. It is used for training frame classification models. Each call to `WindowDataset.__getitem__` with an `index` returns one window $x$ from an audio signal or a spectrogram loaded into a tensor, along with the annotations that will be the target for the model $y$. Because this is a frame classification dataset, the annotations are converted during dataset preparation to vectors of frame labels, and $y$ will be the window from this vector that corresponds to the window $x$. 
This is achieved by using a set of vectors to represent indices of valid windows from the total dataset, as described in detail in the docstring for the class. This use of a set of vectors to represent valid windows also enables training on a dataset of a specified duration without modifying the underlying data. *FramesDataset.* As with the `WindowDataset`, every call to `FramesDataset.__getitem__` returns a single sample from the dataset. Here though, instead of a window, the sample will be the entire audio signal or spectrogram $x$ and a corresponding vector of frame labels $y$. The default transforms used with this dataset apply additional pre-processing to the sample that facilitate evaluation. Specifically, the frames $x$ and the frame labels $y$ in a single sample are transformed to a batch of consecutive, non-overlapping windows. This is done by padding both $x$ and $y$ so their length is an integer multiple $w$ of the window size used when training the model, and then returning a `view` of the sample as a stack of those $w$ windows. Post-processing the output batch allows us to compute metrics on a per-sample basis, to answer questions such as "what is the average segment error rate per bout of vocalizations?". == Parametric UMAP datasets <parametric-umap-datasets> For the parametric UMAP model, we provide a single dataset class, `ParametricUMAPDataset`. The underlying dataset consists of single units extracted from audio with a segmenting algorithm. The parameters of the dataset class configure the first step in the UMAP algorithm, that of building a graph on the dataset before embedding. == Command-line interface and configuration file <cli-config> Having described the API, we now walk through vak's CLI. An example screenshot of a training run started from the command line is shown in Figure @fig:cli. A key design choice is to avoid any sub-commands or even options for the CLI, and instead move all such logic to a configuration file. 
Thus, commands through the CLI all take the form of `vak command configuration-file.toml`, e.g., `vak train gy6or6_train.toml`. This avoids the need for users to understand options and sub-commands, and minimizes the likelihood that important metadata about experiments will be lost because they were specified as options. The configuration file follows the TOML format (#link("https://toml.io/en/")[Tom's Obvious Minimal Language]) that has been adopted by the Python and Rust communities among others. #figure( image("files/vak-cli-screenshot-85478f615f765dfaa6d83667f6897c9d.png", width: 100%), caption: [Screenshots of vak, demonstrating the command-line interface and logging. In top panel (a), an example is shown of using the command-line interface to train a model with a configuration file. In the bottom panel (b) an example is shown of how vak logs progress and reports metrics during training], kind: "figure", supplement: [Figure], ) <fig:cli> The few commands available through the CLI correspond to built-in, model-specific workflows. There are five commands: `prep`, `train`, `eval`, `predict`, and `learncurve`. These commands are shown in @fig:workflows as part of a chart illustrating the built-in workflows, using as an example a frame classification model as we define them below. As their names suggest, the commands `train`, `eval`, and `predict` are used to train a model, evaluate it, and generate predictions with it once trained. The `prep` and `learncurve` commands require more explanation. A user makes a separate configuration file for each of the other four commands, but `prep` can be used with any configuration file. As can be seen in the figure, the typical workflow starts with a call to `vak prep`, which prepares a canonicalized form of a dataset for the specific machine learning task associated with a model, and then adds the path to that dataset to the configuration file. 
Thus, there is a `prep_frame_classification_dataset` function that will be called for the example model in the figure. If a dataset has already been prepared and is being re-used for another experiment, this step would not be necessary. Once any needed dataset is prepared, the user can run the command related to the model, using the same configuration file. #figure( image("files/vak-workflow-chart-ebe8b99156eee4a7582c0a51ff19d822.png", width: 100%), caption: [A chart showing workflows in vak, using as an example a frame classification model as defined below. See text for description of workflows.], kind: "figure", supplement: [Figure], ) <fig:workflows> The `learncurve` command is used to generate results for a learning curve, that plots model performance as a function of training set size in seconds. Although technically a learning curve, its use is distinct from common uses in machine learning, e.g., looking for evidence of high bias or high variance models. Instead, the learning curve functionality allows vak users to answer important practical questions for their research. Most importantly, what is the optimal performance that can be achieved with the minimum amount of labor-intensive, hand-annotated training data? = Results <results> In this section we present proof-of-concept results demonstrating the utility of our framework. The project that produced these results can be found at: #link("https://github.com/vocalpy/scipy-proceedings-2023-vak")[https:\/\/github.com/vocalpy/scipy-proceedings-2023-vak] == Ablation experiment <ablation-experiment> We first show how vak allows researchers to experiment with a model not built into the library. For this purpose, we carry out an "ablation experiment" as the term is used in the artificial neural network literature, where an operation is removed from a neural network function to show that operation plays an important role in the model's performance. 
Using a script, we define a version of the TweetyNet model in @cohenAutomatedAnnotationBirdsong2022 without the recurrent Long Short Term Memory (LSTM) layer (thus "ablating" it). This model without the LSTM makes a prediction for each frame using the output of the convolutional layers, instead of using the hidden state of the recurrent layer at each time step. If the hidden state contains features that are useful for predicting across time steps, we would expect that "ablating" (removing) it would impair performance. To show that removing the LSTM layer impairs performance, we compare with the full TweetyNet model (now built into vak). For all experiments, we prepared a single dataset and then trained both models on that same dataset. We specifically ran learning curves as described above, but here we consider only the performance using 10 minutes of data for training, because as we previously reported @cohenAutomatedAnnotationBirdsong2022 this was the minimum amount of training data required to achieve the lowest error rates. As shown in the top row of Figure @fig:ablation-experiment, ablating the recurrent layer increased the frame error rate (left column, right group of bars), and this produced an inflated syllable error rate (right column, right group of bars). #figure( image("files/ablation-experiment-9560f0c10c7b12654df0f99807c4f9f9.png", width: 100%), caption: [Ablation experiment carried out by declaring a model in a script using the vak framework. Bar plots show frame error (left column) and syllable error rate (right column), without post-processing clean-up (blue bars) and with (orange bars). Within each axes, the grouped bars on the left indicate results from the TweetyNet model built into the vak library, and the grouped bars on the right indicate results from a model declared in a script where the recurrent LSTM layer has been removed ("ablated") from the TweetyNet architecture. 
In the top row, values are the average across models trained on data from four different Bengalese finches, with five training replicates per bird (see text for detail). In the bottom row, single models were trained to classify syllables from all four birds.], kind: "figure", supplement: [Figure], ) <fig:ablation-experiment> This first result is the average across models trained on datasets prepared from individual birds in the Bengalese finch song repository dataset @nicholson_bengalese_2017, as we did previously in @cohenAutomatedAnnotationBirdsong2022. (There are four birds, and five training replicates per bird, where each replicate is trained on different subsets from a larger pool of training data.) Other studies using the same benchmark data repository have trained models on datasets prepared from all four birds @steinfathFastAccurateAnnotation2021 (so that the model predicts 37 classes, the syllables from all four birds, instead of 5-10 per bird). We provide this result for the TweetyNet model with and without LSTM in the bottom row of Figure @fig:ablation-experiment. It can be seen that asking the models to predict a greater number of classes further magnified the difference between them (as would be expected). TweetyNet without the LSTM layer has a syllable error rate greater than 230%. (Because the syllable error rate is an edit distance, it can be greater than 1.0. It is typically written as a percentage for readability of smaller values.) == Comparison of TweetyNet and ED-TCN <comparison-of-tweetynet-and-ed-tcn> We next show how vak allows researchers to compare models. For this we compare the TweetyNet model in @cohenAutomatedAnnotationBirdsong2022 with the ED-TCN model of @lea2017temporal. As for the ablation experiment, we ran full learning curves, but here just focus on the performance of models trained on 10 minutes of data. 
Likewise, the grouped box plots are as in Figure @fig:ablation-experiment, with performance of TweetyNet again on the left and in this case the ED-TCN model on the right. Here we only show performance of models trained on data from all four birds (the same dataset we prepared for the ablation experiment above). We observed that on this dataset the ED-TCN had a higher frame error and syllable error rate, as shown in Figure @fig:tweetynet-v-edtcn. However, there was no clear difference when training models on individual birds (results not shown because of limited space). Our goal here is not to make any strong claim about either model, but simply to show that our framework makes it possible to more easily compare two models on the exact same dataset. #figure( image("files/TweetyNet-v-EDTCN-60631050bd2d5abf681ce49ba8d23b67.png", width: 100%), caption: [Comparison of TweetyNet model @cohenAutomatedAnnotationBirdsong2022 with ED-TCN model. Plots are as in @fig:ablation-experiment. Each axes shows results for one individual bird from the Bengalese finch song repository dataset @nicholson_bengalese_2017. Bar plots show frame error (left column) and syllable error rate (right column), without post-processing clean-up (blue bars) and with (orange bars).], kind: "figure", supplement: [Figure], ) <fig:tweetynet-v-edtcn> == Applying Parametric UMAP to Bengalese finch syllables with a convolutional encoder <applying-parametric-umap-to-bengalese-finch-syllables-with-a-convolutional-encoder> Finally we provide a result demonstrating that a researcher can apply multiple families of models to their data with our framework. As stated above, the vak framework includes an implementation of a Parametric UMAP family, and one model in this family, a simple encoder network with convolutional layers on the front end. To demonstrate this model, we train it on the song of an individual bird from the Bengalese finch song repository. 
We use a training set with a duration of 40 seconds total, containing clips of all syllable classes from the bird's song, taken from songs that were drawn at random from a larger data pool by the vak dataset preparation function. We then embed a separate test set. It can be seen in Figure @fig:parametric-umap that points that are close to each other are almost always the same color, indicating that syllables that were given the same label by a human annotator are also nearer to each other after mapping to 2-D space with the trained parametric UMAP model. #figure( image("files/parametric-umap-e87627c6cc9ccd35e1a5427cb34667a4.png", width: 100%), caption: [Scatter plot showing syllables from the song of one Bengalese finch, embedded in a 2-D space using a convolutional encoder trained using the Parametric UMAP algorithm. Each marker is a point produced from a spectrogram of a single syllable rendition, mapped down to the 2-D space, from 40 seconds of training data. Colors indicate the label applied to each syllable by an expert human when annotating the spectrograms with a GUI.], kind: "figure", supplement: [Figure], ) <fig:parametric-umap> = Discussion <discussion> Researchers studying acoustic behavior need to benchmark multiple neural network models on their data, evaluate training performance for different training set sizes, and use trained models to make predictions on newly acquired data. Here we presented vak, a neural network framework developed to meet these needs. In the Methods we described its design and development. Then in the Results we provide proof-of-concept results demonstrating how researchers can easily use our framework. Finally, we summarize the roadmap for further development of version 1.0 of vak. In the spirit of taking an open approach, we are tracking issues related to this roadmap on GitHub: #link("https://github.com/vocalpy/vak/issues/614")[https:\/\/github.com/vocalpy/vak/issues/614]. 
A key goal will be to add benchmark datasets, generated by running the vak prep command, that a user can download and use to benchmark models with publicly shared configuration files. Another key goal will be to add models that are pre-trained on these benchmark datasets. Additionally we plan to refactor the prep module to make use of the vocalpy package @nicholson_vocalpyvocalpy_2023, developed to make acoustic communication research code in Python more concise and readable. Another key step will be inclusion of additional models like those reviewed in the Related Work. Along with this expansion of existing functionality, the final release of version 1.0 will include several quality-of-life improvements, including a revised schema for the configuration file format that better leverages the strengths of TOML, and dataclasses that represent outputs of vak, such as dataset directories and results directories, to make it easier to work with outputs programmatically. It is our hope that these conveniences plus the expanded models and datasets will provide a framework that can be developed collaboratively by the entire research community studying acoustic communication in animals. #bibliography("main.bib")
https://github.com/OctarineSourcerer/NeonInTheDark
https://raw.githubusercontent.com/OctarineSourcerer/NeonInTheDark/main/templates/crewTemplate.typ
typst
// TODO: Ensure the headings order is correct

// Renders a single crew special ability as an unbreakable block:
// the ability name as a heading in `headingFont`, followed by its
// description, and an optional centered, italicised clarification.
//
// Parameters:
//   name          - ability title, shown in the heading font at 17pt
//   description   - body text of the ability (justified)
//   clarification - optional italic fine print, rendered centered in a
//                   92%-wide box below the description (default: none)
//   headingFont   - font used for the ability name (default: "Bebas Neue")
#let specialAbility(
  name,
  description,
  clarification: none,
  headingFont: "Bebas Neue"
) = block(breakable: false, width: 100%)[
  #set par(justify: true)
  #block(below: 0.8em, text(font: headingFont, size: 17pt, name))
  #description
  // Figure out last line justifying left without all this hyphenating?
  #align(center, box(
    width: 92%,
    align(left, text(style: "italic", clarification)),
  ))
]

// Lays out a full crew playbook sheet:
//   * large title, short italic descriptor, and description
//   * bolded XP-earning trigger and an italic question to the players
//   * a two-column grid with starting upgrades + operational expertise
//     on the left and contacts on the right (headers styled via the
//     <blockheader> show rule below)
//   * then sections for crew upgrades, claims (in two columns), and
//     special abilities (expected to be built with specialAbility above)
#let crew(
  name,
  shortDescriptor,
  description,
  xpEarn,
  questionToPlayers,
  startingUpgrades,
  expertise,
  contacts,
  upgrades,
  claims,
  specials
  ) = [
  // Any block labelled <blockheader> gets a light grey banner background.
  #show <blockheader>: set block(
    fill: luma(230),
    inset: 5pt,
    width: 100%)
  #set grid(gutter: 2em)

  #block(below:1em, text(size: 4em)[= #name])
  #smallcaps(text(style:"italic", shortDescriptor))

  #description

  *#xpEarn*

  _ #questionToPlayers _

  #grid(
    columns: (60%, 40%),
    rows: 1,
    grid(columns: 1, rows: 2, gutter:2em,
      [
        ==== Starting Upgrades <blockheader>
        #startingUpgrades
      ],
      [
        ==== Operational Expertise <blockheader>
        Choose a favoured operation type: #expertise
      ]),
    [
      ==== Contacts <blockheader>
      #contacts
    ],
  )
  == #name Upgrades
  #upgrades

  // See about ensuring this is on a separate page without manual pagebreaks
  == #name Claims
  // Oh man I wish I could equalise the columns a little easier than just giving a height here
  // https://github.com/typst/typst/issues/466
  #box(height: 50%, columns(2, claims))

  == #name Special Abilities
  // Special abilities are laid out a little differently
  #specials
]
https://github.com/giZoes/justsit-thesis-typst-template
https://raw.githubusercontent.com/giZoes/justsit-thesis-typst-template/main/resources/pages/list-of-figures.typ
typst
MIT License
#import "@preview/i-figured:0.2.4"
#import "@preview/outrageous:0.1.0"
#import "../utils/invisible-heading.typ": invisible-heading
#import "../utils/style.typ": 字号, 字体

// Generates the list of figures page (outline of image figures).
#let list-of-figures(
  // options passed through from the documentclass
  twoside: false,
  fonts: (:),
  // other options
  title: "插图目录",
  outlined: false,
  title-vspace: 32pt,
  title-text-args: auto,
  // separator inserted into each caption entry (replaces ": ")
  separator: " ",
  // font and size of the outline entries
  font: auto,
  size: 字号.小四,
  // vertical spacing between entries
  vspace: 14pt,
  // leader fill between entry text and page number (e.g. dots)
  fill: auto,
  ..args,
) = {
  // 1. Default arguments
  fonts = 字体 + fonts
  if (title-text-args == auto) {
    title-text-args = (font: fonts.宋体, size: 字号.三号, weight: "bold")
  }
  // font and size
  if (font == auto) {
    font = fonts.宋体
  }

  // 2. Actual rendering
  pagebreak(weak: true, to: if twoside { "odd" })

  // default font for the outline body
  set text(font: font, size: size)

  {
    set align(center)
    text(..title-text-args, title)
    // mark an invisible heading so this page appears in the table of contents
    invisible-heading(level: 1, outlined: outlined, title)
  }

  v(title-vspace)

  show outline.entry: outrageous.show-entry.with(
    // keep Typst's base outline styling
    ..outrageous.presets.typst,
    body-transform: (level, it) => {
      // outrageous seems to expose no separator option, so the ": " marker
      // is located and replaced manually here
      if (it.has("children") and it.children.at(3, default: none) == [#": "]) {
        it.children.slice(0, 3).sum() + separator + it.children.slice(4).sum()
      } else {
        it
      }
    },
    vspace: (vspace,),
    fill: (fill,),
  )

  // render the outline restricted to image figures
  i-figured.outline(target-kind: image, title: none)

  // manual page break for two-sided layouts
  if (twoside) {
    pagebreak() + " "
  }
}
https://github.com/crd2333/crd2333.github.io
https://raw.githubusercontent.com/crd2333/crd2333.github.io/main/src/docs/weekly/24-9-9.typ
typst
--- draft: true --- #import "/src/components/TypstTemplate/lib.typ": * #show: project.with( title: "AI 笔记之强化学习", lang: "zh", ) - 我决定开始写周报,记录一下自己干了啥。区别于 Obsidian 的日记,这里更多是归纳 == 李沐在上交的讲座:大语言模型的实践经验和未来预测 - *语言模型(model)的三要素*:本质上是用算法把数据“压进”模型里 - 算力(compute):硬件的发展,如带宽和内存 - 数据(data):数据的质量和多样性 - 算法(algorithm):算法的持续进步,且相对以前深度学习时代更注重泛化性 - *硬件发展* - 带宽和通讯:PCIe 每几年翻倍,但慢一些;趋势是堆到一起(单核->多核->多卡)加快通讯(已经到了考虑光速影响的级别) - 内存:如果没有工艺突破的话,限制模型上限的是内存大小 - 算力:摩尔定律仍然适用,算力成本没有 NVIDIA 垄断的话可能降低;需要考虑供电的问题;浮点数精度的影响 - *模型发展* - 10\~50T tokens pre-training - 开源模型规模在 100\~500B 之间,受限于内存和数据质量。 - 闭源模型规模在 2\~5T 之间 - 多模态(Multimodal)模型的发展(趋势:融合多模态的输入输出) + speech & 语音的趋势是端到端模型和更低的延迟; + Music 的问题更多在于版权而不是技术 + Images 已经发展得很好,可以做到 1M 像素的图像生成,目前问题仅在于 images with "soul" + Video 的问题在于一致性以及训练数据的收集相对昂贵 - Language gets good; audio good enough; video usable for special applications - *技术应用* - 语言模型在文科白领领域的应用,如写作、教育等。 - 工科白领领域的应用,如编程辅助。 - 蓝领领域的应用,仅有自动驾驶有一定应用,其它方面还需远未成熟(主要原因在于数据难以收集) #tbl( columns: 3, [领域],[Simple Task],[Complex Task], [文科白领],[easy],[正在改进], [工科白领],[正在改进],[hard], [蓝领],[hard],[hard] ) - *未来预测* - 人机交互方式可能因技术进步而改变,相比ChatGPT这种长文本交互,语音交互可能变得更自然。 - 拥有足够数据的领域将会迅速自动化。 - 语言模型可能在特定领域达到新的高度,但通用性仍然是挑战。 - *创业感悟* - 模型 - 预训练和后训练同等重要(pre-post:技术-工程$->$工程-技术,因为高质量的数据、数据的结构化假设、改进的算法越发重要) - 垂直模型很重要,但它也需要通用技能 - 评估很重要但也很困难,事实上很多所谓的刷榜没有考虑到实际应用场景 - 数据决定上限,模型决定下限,(内存决定参数上限) - 计算 - 从成本上来讲,自己搭建GPU from scratch可能比租要更便宜 - *个人经历分享* - 在不同机构和公司的经历,包括学术界和工业界。 - 对于不同职业路径的看法,包括大公司、读博、创业等。 - 强调了动机在职业选择中的重要性,以及如何根据动机做出决策。 - 结论 - 李沐认为当前是技术带来新机会的最好时代,也是竞争激烈的挑战时代。 - 他鼓励听众思考自己的动机,并根据这些动机做出职业和生活上的选择。 - 然后啥也没做,好摆啊。。。
https://github.com/davawen/Cours
https://raw.githubusercontent.com/davawen/Cours/main/typst/physique/0.1.0/lib.typ
typst
#import "utils.typ": *
#import "optique.typ"
#import "elec.typ"

// Shorthand for a defined term: bold + underlined.
#let def(x) = underline([*#x*])
// Overline (bar) notation, e.g. for averages or complex conjugates.
#let ov = math.overline
// Vector arrow accent over a math expression.
#let arw(x) = math.accent(x, math.arrow)
// Arrow-mark configuration placed at -63% along a path
// (presumably a cetz mark style for circuit/optics diagrams — TODO confirm).
#let half_mark = (end: "straight", pos: -63%)

// Document template: italic figure captions, numbered headings, a
// per-chapter equation counter, and chapter-local equation references.
#let template(doc) = {
  show figure.caption: emph
  set heading(numbering: "1.1.1)")
  show heading.where(level: 1): h => {
    h
    // reset equation counter for each chapter
    counter(math.equation).update(0)
  }

  show ref: it => {
    let eq = math.equation
    let el = it.element
    if el != none and el.func() == eq {
      // Override equation references.
      numbering(
        el.numbering,
        ..counter(eq).at(el.location())
      )
    } else {
      // Other references as usual.
      it
    }
  }

  doc
}

// Highlights a result: centered box with a red-pink border.
#let resultb(c) = align(center, (box(c, inset: 0.7em, stroke: rgb("#ee3050"))))
// Document title: sets the PDF metadata title and renders a large boxed heading.
#let titleb(c) = {
  set document(title: c)
  align(center, box(text(c, size: 2em), inset: 1em, stroke: black))
}

// Factory for admonition boxes. Returns a closure taking the body content,
// which is framed in `color` with a colored "#symbol #info_text" header line.
#let infobox(symbol: none, info_text: "", color: black) = c => [
  #pad(left: 2em, box(stroke: color, width: 80%, inset: 10pt, [
    #text(fill: color, [#symbol #info_text]) \
    #c
  ]))
  #parbreak()
]

// Preconfigured admonitions built from infobox.
#let note = infobox(symbol: $Phi$, info_text: "Note:", color: blue)
#let tip = infobox(symbol: $checkmark$, info_text: "Tip:", color: green)
#let caution = infobox(symbol: $excl.double$, info_text: "Caution:", color: red)
#let warn = infobox(symbol: $minus.triangle$, info_text: "Warn:", color: orange)
#let todo = infobox(symbol: $supset.double$, info_text: text(fill: teal)[TODO:], color: teal)
#let hp = infobox(symbol: $alpha$, color: yellow, info_text: text(fill: yellow)[
  Hors-programme:
])

// Sir Lanceléthanol le best <3 -U
https://github.com/typst/packages
https://raw.githubusercontent.com/typst/packages/main/packages/preview/unichar/0.1.0/ucd/block-2070.typ
typst
Apache License 2.0
// Character data for the Unicode block U+2070–U+209F,
// "Superscripts and Subscripts".
// Each entry is (character name, general category, canonical combining class);
// empty tuples `()` mark unassigned code points in the block.
#let data = (
  ("SUPERSCRIPT ZERO", "No", 0),
  ("SUPERSCRIPT LATIN SMALL LETTER I", "Lm", 0),
  (),
  (),
  ("SUPERSCRIPT FOUR", "No", 0),
  ("SUPERSCRIPT FIVE", "No", 0),
  ("SUPERSCRIPT SIX", "No", 0),
  ("SUPERSCRIPT SEVEN", "No", 0),
  ("SUPERSCRIPT EIGHT", "No", 0),
  ("SUPERSCRIPT NINE", "No", 0),
  ("SUPERSCRIPT PLUS SIGN", "Sm", 0),
  ("SUPERSCRIPT MINUS", "Sm", 0),
  ("SUPERSCRIPT EQUALS SIGN", "Sm", 0),
  ("SUPERSCRIPT LEFT PARENTHESIS", "Ps", 0),
  ("SUPERSCRIPT RIGHT PARENTHESIS", "Pe", 0),
  ("SUPERSCRIPT LATIN SMALL LETTER N", "Lm", 0),
  ("SUBSCRIPT ZERO", "No", 0),
  ("SUBSCRIPT ONE", "No", 0),
  ("SUBSCRIPT TWO", "No", 0),
  ("SUBSCRIPT THREE", "No", 0),
  ("SUBSCRIPT FOUR", "No", 0),
  ("SUBSCRIPT FIVE", "No", 0),
  ("SUBSCRIPT SIX", "No", 0),
  ("SUBSCRIPT SEVEN", "No", 0),
  ("SUBSCRIPT EIGHT", "No", 0),
  ("SUBSCRIPT NINE", "No", 0),
  ("SUBSCRIPT PLUS SIGN", "Sm", 0),
  ("SUBSCRIPT MINUS", "Sm", 0),
  ("SUBSCRIPT EQUALS SIGN", "Sm", 0),
  ("SUBSCRIPT LEFT PARENTHESIS", "Ps", 0),
  ("SUBSCRIPT RIGHT PARENTHESIS", "Pe", 0),
  (),
  ("LATIN SUBSCRIPT SMALL LETTER A", "Lm", 0),
  ("LATIN SUBSCRIPT SMALL LETTER E", "Lm", 0),
  ("LATIN SUBSCRIPT SMALL LETTER O", "Lm", 0),
  ("LATIN SUBSCRIPT SMALL LETTER X", "Lm", 0),
  ("LATIN SUBSCRIPT SMALL LETTER SCHWA", "Lm", 0),
  ("LATIN SUBSCRIPT SMALL LETTER H", "Lm", 0),
  ("LATIN SUBSCRIPT SMALL LETTER K", "Lm", 0),
  ("LATIN SUBSCRIPT SMALL LETTER L", "Lm", 0),
  ("LATIN SUBSCRIPT SMALL LETTER M", "Lm", 0),
  ("LATIN SUBSCRIPT SMALL LETTER N", "Lm", 0),
  ("LATIN SUBSCRIPT SMALL LETTER P", "Lm", 0),
  ("LATIN SUBSCRIPT SMALL LETTER S", "Lm", 0),
  ("LATIN SUBSCRIPT SMALL LETTER T", "Lm", 0),
)
https://github.com/katamyra/Notes
https://raw.githubusercontent.com/katamyra/Notes/main/Compiled%20School%20Notes/CS3001/Modules/StakeHolder.typ
typst
#import "../../../template.typ": * = Stakeholder Analysis #definition[ *Stakeholders* are interested parties: people or entities that will be affected in a given situation ] Make a list of all the stakeholders involved, and try to balance the positive and negative impact on people. This is not an ethical theory, but it is a _useful way of looking at things_. = Kohlberg's Stages of Moral Development == Stages 1 & 2 Egocentric understanding of fairness based on individual need (kids) + Obedience and punishment orientation + Self interest orientation == Stages 3 & 4 Shared concept of fairness based in societal agreement + Interpersonal accord and conformity (good boy/girl attitude) + Authority and social-order maintaining (law and order morality) == Stages 5 & 6 Free standing logic of equality and reciprocity + Social Contract Orientation + Universal Ethical Principles (Kant)
https://github.com/AU-Master-Thesis/thesis
https://raw.githubusercontent.com/AU-Master-Thesis/thesis/main/lib/dict.typ
typst
MIT License
// Dictionary utilities that operate on the *leaves* of (possibly nested)
// dictionaries: scalar values, and the elements of array values.

// Map every leaf of `dict` through `f(k, v)`, where `k` is the key the leaf
// sits under. Nested dictionaries are recursed into; array leaves are mapped
// element-wise. Returns a new dictionary with the same structure.
#let leafmap(dict, f) = {
  assert(type(dict) == dictionary, message: "expected `dict` to have type 'dictionary', got " + type(dict))
  assert(type(f) == function, message: "expected `f` to have type `function` with signature (k: string, v: any) => any, got " + type(f))
  dict
    .pairs()
    .fold((:), (acc, pair) => {
      let k = pair.at(0)
      let v = pair.at(1)
      let v_mapped = if type(v) == dictionary {
        leafmap(v, f)
      } else if type(v) == array {
        v.map(it => f(k, it))
      } else {
        f(k, v)
      }
      acc.insert(k, v_mapped)
      acc
    })
}

// Like `leafmap`, but each leaf `v` is replaced by the pair `(v, f(k, v))`,
// i.e. the original value is kept alongside the mapped one.
#let leafassoc(dict, f) = {
  assert(type(dict) == dictionary, message: "expected `dict` to have type 'dictionary', got " + type(dict))
  assert(type(f) == function, message: "expected `f` to have type `function` with signature (k: string, v: any) => any, got " + type(f))
  dict
    .pairs()
    .fold((:), (acc, pair) => {
      let k = pair.at(0)
      let v = pair.at(1)
      let v_mapped = if type(v) == dictionary {
        leafassoc(v, f)
      } else if type(v) == array {
        v.map(it => (it, f(k, it)))
      } else {
        (v, f(k, v))
      }
      acc.insert(k, v_mapped)
      acc
    })
}

// Zip two structurally identical dictionaries leaf-by-leaf: every leaf of the
// result is the pair `(a-leaf, b-leaf)`. Panics if `b_dict` is missing a key
// of `a_dict`, or if corresponding array leaves have different lengths.
#let leafzip(a_dict, b_dict) = {
  assert(type(a_dict) == dictionary, message: "expected `a_dict` to have type 'dictionary', got " + type(a_dict))
  assert(type(b_dict) == dictionary, message: "expected `b_dict` to have type 'dictionary', got " + type(b_dict))
  a_dict
    .pairs()
    .fold((:), (acc, pair) => {
      let k = pair.at(0)
      let va = pair.at(1)
      // Message used to reference "diffdict"; this function is `leafzip`.
      assert(k in b_dict, message: "leafzip only works if both dicts contain the same keys :(")
      let vb = b_dict.at(k)
      let v_zipped = if type(va) == array {
        assert(va.len() == vb.len())
        va.zip(vb)
      } else if type(va) == dictionary {
        leafzip(va, vb)
      } else {
        (va, vb)
      }
      acc.insert(k, v_zipped)
      acc
    })
}

// Flatten a nested dictionary into a single-level one by discarding the
// nesting structure; on key collision, keys merged in later win.
#let leafflatten(dict) = {
  assert(type(dict) == dictionary, message: "expected `dict` to have type 'dictionary', got " + type(dict))
  dict
    .pairs()
    .fold((:), (acc, pair) => {
      let k = pair.at(0)
      let v = pair.at(1)
      if type(v) == dictionary {
        acc + leafflatten(v)
      } else {
        acc.insert(k, v)
        acc
      }
    })
}
https://github.com/AxiomOfChoices/Typst
https://raw.githubusercontent.com/AxiomOfChoices/Typst/master/Research/Summer%202024/Geometry%20School/Mean%20Curvature%20Flows.typ
typst
#import "/Templates/generic.typ": latex #import "/Templates/notes.typ": chapter_heading #import "@preview/ctheorems:1.1.0": * #import "/Templates/math.typ": * #show: latex #show: chapter_heading #show: thmrules #show: symbol_replacing #show: equation_references #set pagebreak(weak: true) #set page(margin: (x: 2cm, top: 2cm, bottom: 2cm), numbering: "1") #set enum(numbering: "(1)", indent: 1em) #show heading: it => { if (it.numbering == none or it.level > 1) { return it } let numbers = counter(heading).at(it.location()) let body = it.body //pagebreak(weak: true) block([*#body*]) } #outline() = Lecture 1 The setting is as follows, we take a surface $M_0 seq RR^3$ evolving under the following evolution equation $ diff_t x = arrow(H)(x). $ Here $arrow(H) = H arrow(nu)$ is the mean curvature vector and $H = kappa_1 + kappa_2 = tr(A)$ is the mean curvature. // TODO: Maybe insert diagram here. #example[ The standard example of MCF is the evolution of the round sphere, if we start with a sphere $S_R$ then the equation reduces to the ode $dot(r) = -2/r$ which gives us the equation $ r(t) = sqrt(R^2 - 4t). $ ] #example[ Another example is the cylinder $C_R$, which evolves as $ r(t) = sqrt(R^2 - 2t). $ ] #exercise[ Show that if $M_t$ is an entire graph of a function $u_t$, then the function $u_t$ solves the PDE $ diff_t u = sqrt(1 + |D u|^2) div((D u)/(sqrt(1 + |D u|^2))) $ ] More precisely if $M_t = X(M,t)$ where $X : M times I -> RR^3$ then we get the following PDE $ diff_t X(p,t) = laplace_(M_t) X(p,t) $ #theorem("Huisken 1984")[ If $M_t$ is ever convex then $M_t$ converges to a _round point_ in finite time, meaning that under rescaling it approaches a sphere. ] #remark[ $M$ being convex is equivalent to $kappa_1 >= 0$ and $kappa_2 >= 0$. ] Without convexity the situation is a lot more difficult, and typically the flow encounters local singularities. #example[ There exists a surface resembling a dumbbell whose neck pinches under MCF resulting in a local singularity. 
If the flow is continued after the singularity the flow splits into two connected components. If we call $X_0$ the point of singularity, and if one rescales this example by $lambda$ around $X_0$ and then lets $lambda -> infinity$, then by passing to this limit the neck pinch looks like a round shrinking cylinder. Thus in some sense, the self-shrinking cylinder is a _model_ for this singularity. Around $X_0$, we have $ |A| <= C / (|t_0 - t|^(1 / 2)) $ which is the blow up rate we expect, since $A tilde 1/r tilde 1/(t^(1/2))$ so in some sense this singularity is well behaved. ] #example[ If one changes the dumbbell in just the right way, we can get much worse behaviour, which we call a type two singularity, characterized by $ lim_(t -> t_0) sqrt(t - t_0) |A| = infinity $ ] There are now several natural questions we can ask about MCF: + What do singularities look like? + How can we continue the flow past its singularities? + What is the size and structure of the singular set? + Is evolution through singularities unique or non-unique? We will now start developing the theory necessary to give full/partial answers to these questions. == Basic properties of MCF #proposition("Existence")[ Let $M_0$ be any closed embedded initial hypersurface, then there exists a unique smooth solution ${ M_t }_(t in [0,T))$ for some maximal $T > 0$, which is characterized by $ lim_(t -> T) max_(M_t) |A| = infinity. $ ] #proof[ Standard consequence of short-time existence for parabolic PDEs. ] #proposition("Parabolic rescaling")[ If $M_t$ is a solution to MCF, then $ M_t^lambda := lambda M_(lambda^(-2) t) $ is also a solution to MCF. ] #exercise[ Prove this. ] Due to this rescaling our estimates will always live in $ P(x_0, t_0, r) = B(x_0,r) times (t_0 - r^2, t_0] $ #proposition("Avoidance principle")[ #let dist = math.op("dist") If $M_t, N_t$ are both solutions to MCF then $t -> dist(M_t, N_t)$ is non-decreasing. In particular if $M_0,N_0$ are disjoint then $M_t, N_t$ are also disjoint. 
] #proposition("Evolution equations")[ If $M_t$ is a solution to MCF, then its geometric quantities evolve as: + $diff_t g_(i j) = - 2 H A_(i j)$ + $diff_t dif mu = - H^2 dif mu$ + $diff_t arrow(nu) = - nabla H$ + $diff_t H = laplace H + |A|^(2)H$ + $diff_t A^i_j = laplace A^i_j + |A|^(2)A^i_j$ ] #proof[ For the induced metric we have $ diff_t g_(i j) = 2 diff_i ( H arrow(nu) ) dot diff_j X = 2 H diff_i arrow(nu) dot diff_j X = - 2 H A_(i j) $ #exercise[ Prove the other evolution equations. ] ] #corollary[ Under MCF the area evolves as $ d / (d t) Area(M_t) = - integral_(M_t) H^2 dif mu. $ In fact MCF is the $L^2$ gradient flow of the area functional. ] #corollary[ If $H >= 0$ at $t = 0$ then $H >= 0$ for all $t > 0$, similarly if $A >= 0$ at $t = 0$ then $A >= 0$ for all $t > 0$. ] = Lecture 2 Now studying area of MCF is not particularly useful since the blow up will cause the area to explode to infinity. Instead, we will look at a weighted area that will be scale invariant. Specifically, we define $ GaussArea(M_t) = integral_(M_t) rho_(x_0,t_0) (x,t) dif mu $ where $ rho_(x_0,t_0) (x,t) = 1 / (4 pi (t_0 - t)) exp(- (|x-x_0|^2)/(4 (t_0-t))). $ #theorem("Huisken's monotonicity formule")[ Under MCF we have $ (d) / (d t) integral_(M_t) rho_(x_0, t_0) dif mu = - integral_(M_t) lr(|arrow(H) - (( x-x_0 )^perp) / (2 (t-t_0))|)^2 rho_(x_0, t_0) dif mu_t $ ]<thrm-Huisken> #exercise[ Let $x' = lambda(x - x_0)$, $t' = lambda^2 (t - t_0)$ and consider the rescaled flow $ M_t^lambda = lambda (M_(t_0 + lambda^(-2) t') - x_0). $ Show that $ integral_(M_t) rho_(x_0, t_0)(x,t) dif mu (x) = integral_(M_t) rho_(0, 0)(x', t') dif mu_t' (x') $ ] #exercise[ Let ${ M_t }_(t in (-infinity, 0))$ be an ancient MCF. Prove that $ arrow(H) - x^perp / (2t) = 0, quad forall t <0 <=> M_t = sqrt(-t) M_(-1), quad forall t < 0 $ ] #proof([of @thrm-Huisken])[ WLOG we assume that $(x_0, t_0) = (0,0)$ and write $rho = rho_(0, 0)$. 
#claim[ $(d / (d t) + laplace_(M_t) - H^2) rho = -|arrow(H) - x^perp / (2 t)|^2rho$ ] Note that this claim would prove the theorem since it would imply that $ d / (d t) integral rho dif mu_t = integral ( d / (d t) rho - H^2 rho ) dif mu_t = integral lr(|arrow(H) - x^perp/(2 t)|)^2 rho dif mu_t - integral laplace_(M_t) rho dif mu_t $ and the last term vanishes due to divergence theorem. Now let us prove the claim, first note that the tangential derivative of $rho$ is its projection onto the tangent plane of $M_t$, which can be written as: $ nabla^(M_t) rho = D rho - (D rho dot nu) nu. $ Taking the divergence gives us that $ laplace_M_t rho = div_(M_t) (nabla^(M_t) rho) = div_(M_t) ( D rho ) + arrow(H) dot D rho. $ We also have that $ (d) / (d t) rho = diff_t rho + arrow(H) dot D rho $ along MCF so we get $ (d / (d t) + laplace_(M_t)) rho &= diff_t rho + div_(M_t) ( D rho ) + 2 arrow(H) dot D rho \ &= diff_t rho + div_(M_t) ( D rho ) + (|nabla^perp rho|^2) / rho - lr(|arrow(H) - (nabla^perp rho)/rho|)^2 + H^2 rho $ One can then explicitly check using the formula for $rho$ that $ diff_t rho + div_(M_t) (D rho) + (|nabla^perp rho|^2) / rho = 0 $ and so we are done. ] Motivated by this monotonicity we define $ Theta(M, x_0, t_0, r) = integral_(M_(t_0 - r^2)) rho_(x_0) dif mu. $ Note that $ Theta(M, x_0, r) = 1, quad forall r > 0 <=> M "is a static multiplicity one plane". 
$ #theorem([$epsilon$-regularity])[ There exist constants $epsilon > 0, C < infinity$ universal such that if $M$ is a smooth MCF with $ sup_((x,t) in P(x_0, t_0, r)) Theta(M, x, t, r) < 1+ epsilon $ then $ sup_(P(x, r slash 2)) |A| <= C / r $ ] #proof[ We prove by contradiction, then by rescaling, there exists a sequence $M^j$ of mean curvature flows such that $ sup_((x,t) in P(0,1)) Theta(M^j, x, t, 1) < 1 + 1 / j $ but there is some $(x_j, t_j) in P(0, 1/2)$ such that $ |A|(x_j, t_j) > j $ By point selection, there are points $(x_j', t_j') in P(0, 3/4)$ such that $ Q_j = |A|(x_j', t_j') > j "and also" sup_(P(x_j', t_j', j / (10 Q_j))) <= 2 Q_j $ Now let $hat(M)^j$ be the sequence of flows we get by shifting $M^j$ to place $(x_j', t_j')$ at the origin and parabolically rescaling by $Q_j$. This guarantees that $ |A|(0) = 1 "and" sup_(P(0, j / 10))|A|<= 2 $ so by standard parabolic theory we also get similar estimates on the derivatives of $A$ so $hat(M)^j$ converges smoothly to a non-flat limit $M^infinity$. But now $ Theta(M^j, 0, Q_j) < 1 + 1 / j $ so $Theta(M^infinity, 0, r) = 1$ so $M^infinity$ is a flat plane, which is a contradiction. ] #exercise[ Prove Allard's $epsilon$-regularity theorem using the same method. ] = Lecture 3 We will call a closed embedded mean-convex surface $alpha$-noncollapsed if each $p in M$ admits exterior & interior balls of radius $alpha/H(p)$. Each embedded surface is clearly $alpha$-non-collapsed for _some_ $alpha$, but a key property of MCF is that it preserves this property. #theorem("Andrews")[ If $M_t$ is a MCF and $M_0$ is $alpha$-noncollapsed then so is $M_t$. ] #proof[ For $x in M$ we define $c(x) = x + r(x) nu$, $r(x) = alpha/H(x)$, then $ "interior ball at" x <=> ||y - c(x)||^2 >= r(x)^2, quad forall y in M $ Rewriting this we have $ ||y-c(x)||^2 = ||y-x||^2 - 2r(x) ip(y -x, nu) + r(x)^2 $ so $ ||y - c(x)||^2 >= r(x)^2 <=> (2 ip(y-x, nu)) / (||y-x||^2) <= H(x) / alpha. 
$ Now given $M_t$ a MCF we consider $ Z^* (x,t) := sup_(y != x) (2ip(X(y,t) - X(x,t), nu(x,t))) / (||X(y,t) - X( x,t )||^2) $ a computation shows that $Z^*$ evolves (weakly in the sense of viscosity solutions) under the equation $ diff_t Z^* (x,t) <= laplace Z^* + |A|^2Z^* $ and comparing this with the evolution equation of $H$: $ diff_t H = nabla H + |A|^2H $ we see that since $H$ and $Z^*$ are positive then we can consider the quotient $Z^* /H$ which gives us $ diff_t Z^* / H <= laplace Z^* / H + 2 ip(nabla ln(H), nabla Z^* /H) $ and so by standard maximum principle $max (Z^* / H)$ is non-increasing. A similar computation is done for the exterior ball. ] We can now use this preservation to prove curvature estimates. #theorem[ For all $alpha > 0$, we have constants $rho = rho(alpha) > 0$ and $C = C(alpha) < infinity$ such that if $M$ is an $alpha$-noncollapsed flow in $P(p,t,r)$ where $H(p,t) <= r^(-1)$ then $ H <= C r^(-1) "in" P(p,t,rho r) $ ]<thrm-local_curv_estimate> Note that this is very similar to Harnack inequalities for positive solutions of elliptic/parabolic PDEs. #proof[ Assume that this does not hold, so we have MCFs $M^j$ all $alpha$-noncollapsed in $P(0,0,j)$ with $ H(0,0) <= j^(-1) "but" sup_(P(0,0,1)) H >=j. $ For $j -> infinity$, we have that $M^j$ converges in the Hausdorff sense to a static plane. #exercise[ Show using Stokes theorem that this setup guarantees an estimate of the form $ Area(M_t^j sect B(x,r)) <= (1 + epsilon) pi r^2 $ ] So $epsilon$-regularity then guarantees that $limsup_(j -> infinity) sup_(P(0,0,1)) |A| = 0$ ] #theorem("Convexity Estimate")[ For all $epsilon > 0, alpha > 0$ there exists some constant $eta(epsilon, alpha) < infinity$ such that if $M$ is $alpha$-noncollapsed in $P(p,t,eta r)$ and $H(p,t) <= r^(-1)$ then $ kappa_1(p,t) >= - epsilon r^(-1) $ where $kappa_1$ is the smallest principal curvature. ]<thrm-convex_estimate> #corollary[ Any ancient $alpha$-noncollapsed flow $M_t$ is convex. 
] #proof([of @thrm-convex_estimate])[ Fix $alpha$ and let $0 < epsilon_0 <= 1/alpha$ be the infimum of $epsilon$'s for which the assertion holds. Then we know that there exist MCFs $M^j$ that are $alpha$-noncollapsed in $P(0,0,j)$ with $H(0,0) <= 1$ and $kappa_1 (0,0) -> - epsilon_0$. Then by @thrm-local_curv_estimate we get that $M^j -> M^infinity$ smoothly in $P(0,0, rho/2)$ modulo passing to a subsequence. Now $M^infinity$ satisfies $kappa_1 (0,0) = - epsilon_0$ and $H(0,0) = 1$ so by continuity $H > 1/2$ in some smaller ball $P(0,0,r)$. #exercise[ Show that $kappa_1/H > - epsilon_0$ everywhere in $P(0,0,r)$. ] Thus $kappa_1/H$ attains a negative minimum inside $P(0,0,r)$, which contradicts strong maximum principle (???). ] #remark[ As a consequence of this, a grim reaper times $RR$ cannot be a blowup limit of embedded mean-convex MCF. ] #remark[ Blowup limits of $alpha$-noncollapsed flows are always smooth and convex. ] #theorem("Brendle-Choi")[ Any singularity at the first singular time is modelled by $S^2$, $S^1 times R$ or Bowl soliton. ] = Lecture 4 A family of weak closed sets ${C_t}$ is called a subsolution of MCF if $forall {M_t}_(t in [t_0,t_1])$ a smooth compact MCF then $ C_(t_0) sect M_(t_0) = nothing => C_t sect M_t = nothing, quad forall t in [t_0, t_1] $ The level-set flow ${F_t(C)}_(t >= 0)$ of any closed set $C$ is the maximal subsolution ${C_t}_(t >= 0)$ with $C_0 = C$. #proposition[ + The level set flow exists and is unique. + $F_0 (C) = C, F_(t + t') (C) = F_t (F_(t') (C))$. + $F_t (C+x) = F_t (C) + x$. + $C seq C' => F_t (C) seq F_t (C')$. ] #proof[ We first show existence and uniqueness, this is easy by showing that $ F_t (C) = overline(union {C_(t') | {C_t}_(t >= 0) "is a subsolution with" C_0 = C}) $ ] Then
https://github.com/hanxuanliang/opentyp
https://raw.githubusercontent.com/hanxuanliang/opentyp/main/typ/README.md
markdown
MIT License
# type source > typ原稿。需要安装:[typst-lsp](https://marketplace.visualstudio.com/items?itemName=nvarner.typst-lsp) - [risinglight](https://github.com/risinglightdb/risinglight) - [opendal](https://github.com/apache/incubator-opendal) next?...
https://github.com/quarto-ext/typst-templates
https://raw.githubusercontent.com/quarto-ext/typst-templates/main/dept-news/README.md
markdown
Creative Commons Zero v1.0 Universal
# Dept News Format Based on the dept-news template published by the Typst team at <https://github.com/typst/templates/tree/main/dept-news>. **NOTE**: This format requires the pre-release version of Quarto v1.4, which you can download here: <https://quarto.org/docs/download/prerelease>. ## Installing ```bash quarto use template quarto-ext/typst-templates/dept-news ``` This will install the extension and create an example qmd file that you can use as a starting place for your document. ## Using The example qmd demonstrates the document options supported by the dept-news format (`title`, `edition`, `hero-image`, `publication-info`, etc.). For example, your document options might look something like this: ```yaml --- title: "Chemistry Department" edition: | March 18th, 2023 \ Purview College hero-image: path: "newsletter-cover.jpg" caption: "Award winning science" publication-info: | The Dean of the Department of Chemistry. \ Purview College, 17 Earlmeyer D, Exampleville, TN 59341. \ <mailto:<EMAIL>> format: dept-news-typst: default --- ``` Dept News documents are rendered as follows: ![](dept-news.png)
https://github.com/rickysixx/unimore-informatica
https://raw.githubusercontent.com/rickysixx/unimore-informatica/main/algoritmi-di-crittografia/riassunto_algoritmi_crittografia.typ
typst
#import "@preview/algo:0.3.3": algo, i, d, comment, code #import "@preview/physica:0.9.0": pdv #set par(leading: 0.55em, justify: true, linebreaks: "optimized") #set text(font: "New Computer Modern", lang: "it") #set heading(numbering: "1. ") #show raw: set text(font: "New Computer Modern Mono") #show par: set block(spacing: 1em) #outline( indent: auto ) #pagebreak(weak: true) = Nozioni preliminari #figure( table( columns: (auto, auto), align: (center + horizon, left), [confidenzialità], [il messaggio dev'essere comprensibile solo alle persone autorizzate], [autenticazione \ del mittente], [il destinatario dev'essere certo dell'*identità* di chi gli ha mandato il messaggio], [integrità], [il destinatario deve avere modo di accorgersi se il messaggio è stato alterato in un qualche modo rispetto a quello inviato dal mittente], [non ripudio], [il mittente non può negare di aver inviato il messaggio ed il destinatario non può negare di averlo ricevuto], ), caption: [Requisiti per una comunicazione sicura] ) #table( columns: 1fr, align: left, [*Principio di Kerckhoff*: in uno schema di cifratura, la sicurezza deve risiedere *solo* nella *segretezza della chiave*. Non deve risiedere nella segretezza dell'algoritmo.] 
) #figure( table( columns: (1fr, 1fr, 1fr), align: (center + horizon, left, left), [], [#align(center)[*Crittografia simmetrica*]], [#align(center)[*Crittografia asimmetrica*]], [chiavi per partecipante], [una sola chiave], [due chiavi distinte, una per cifrare e l'altra per decifrare], [problema principale], [mantenere segreta la chiave], [*autenticare* le parti coinvolte nella comunicazione], [scalabilità], [limitata perché $n$ partecipanti occorrono $O(n^2)$ chiavi totali (ogni partecipante deve avere $n - 1$ chiavi)], [buona perché il numero di chiavi è $O(2n)$ (ogni partecipante ha una sola coppia di chiavi)], [costo computazionale], [basso grazie ad algoritmi estremamente efficienti (anche con supporto hardware)], [alto a causa di algoritmi lenti basati su *problemi matematici*] ), caption: [Differenze tra crittografia simmetrica e asimmetrica] ) == Funzioni one-way e funzioni trapdoor Se $f$ è una *funzione one-way*, significa che è facile calcolare $f(x)$, ma è *computazionalemnte complesso* (o impossibile) calcolare $f^(-1)(x)$. Se $f$ è una *funzione trapdoor*, anche calcolare $f^(-1)(x)$ è facile, ma *solo a determinate condizioni*. Nel contesto della crittografia asimmetrica, la condizione è *conoscere la chiave privata*. La crittografia asimmetrica utilizza delle funzioni trapdoor e non delle funzioni one-way. Se la cifratura fosse fatta con una funzione one-way, *nemmeno il legittimo destinatario* riuscirebbe a rimettere in chiaro il messaggio cifrato, perché la funzione usata per la cifratura non è invertibile. 
#pagebreak(weak: true) = Crittografia simmetrica Componenti di un cifrario simmetrico: - *algoritmo*: applica una certa *permutazione*#footnote[dal punto di vista matematico, una permutazione è una funzione $f : I arrow.r I$ invertibile] ad una porzione del plaintext - *mode of operation*: definisce come rimettere insieme i blocchi del ciphertext == Cifrario di Cesare Semplice cifrario mono-alfabetico in cui la permutazione consiste in uno *shift* della lettera di $k$ posizioni in avanti: $ c_i = p_i plus.circle k $ dove $c_i$ e $p_i$ sono rispettivamente l'$i$-esimo carattere cifrato e in chiaro. La chiave segreta è proprio il valore $k$. Il numero di possibili chiavi dipende dalla cardinalità dell'alfabeto considerato. == Crittoanalisi per frequenze Tutti i cifrari mono-alfabetici sono vulnerabili a crittoanalisi per frequenze, perché (in quanto mono-alfabetici) non sono in grado di mascherare l'*identità* di una lettera all'interno di un testo. == Cifrario di Vigenère Si tratta di un cifrario poli-alfabetico che, in un'epoca in cui non esistevano i computer, era considerato il cifrario più robusto (ma era anche complesso utilizzarlo concretamente). Il cifrario esegue sempre un'operazione di *shifting*, ma il valore non è più uguale per tutti i caratteri, ma è determinato dal carattere della chiave: $ c_i = p_i plus.circle k_i $ dove $k_i$ è l'$i$-esimo carattere della chiave. Se $k$ è la lunghezza della chiave, le possibili permutazioni per ogni carattere del plaintext sono $26^k$. Si tratta quindi di un cifrario molto robusto rispetto agli attacchi di forza bruta. 
Se la chiave è relativamente corta rispetto al testo, si può però fare un attacco per individuarne la *lunghezza della chiave*: + si individuano due porzioni di testo uguali all'interno del ciphertext e si calcola la distanza (in numero di caratteri) tra di loro; + è molto probabile che la lunghezza della chiave sia pari a questa distanza oppure ad un suo *divisore*; + si può provare una crittoanalisi per frequenze (o un *dictionary attack*) con le diverse ipotesi di lunghezza della chiave == One-time pad Si tratta dell'unico cifrario simmetrico per cui esiste una *dimostrazione matematica* del fatto che sia inviolabile (a determinate condizioni). Nella pratica questo cifrario viene utilizzato raramente, perché richiede un'enorme quantità di *bit casuali*: la lunghezza della chiave infatti dev'essere pari alla lunghezza del messaggio. Messaggio e chiave vengono interpretati come *sequenze di bit*. La cifratura consiste in uno XOR dei bit del messaggio con i bit della chiave: $ c_i = p_i plus.circle k_i $ Per essere sicuro, la chiave dev'essere una sequenza *casuale* di bit, cioè ogni bit della chiave deve avere probabilità $= frac(1, 2)$ di essere 0 o 1. La chiave non deve mai essere riutilizzata per cifrare due messaggi distinti. Se ciò viene fatto, grazie alle proprietà dello XOR è possibile mettere in evidenza lo XOR dei due plaintext: $ C_1 plus.circle C_2 &= (P_1 plus.circle K) plus.circle (P_2 plus.circle K) \ &= P_1 plus.circle P_2 $ Conoscendo uno fra $P_1$ e $P_2$, o una parte di uno/entrambi, invertendo le formule è possibile sia mettere in chiaro l'altro che risalire alla chiave segreta. 
== Obiettivi di sicurezza di un cifrario - *indistinguibilità*: per un eventuale eavesdropper, un messaggio cifrato dev'essere indistinguibile da una sequenza di bit casuali; - non *malleabilità*: un cifrario si dice malleabile se dato un ciphertext $C_1$ è possibile crearne un altro il cui plaintext $P_2$ abbia una qualche *relazione forte* con $P_1$ == Cifrari a blocchi In un cifrario a blocchi il plaintext viene diviso in varie parti (*blocchi*), ognuna con una dimensione fissa $B$. La *mode of operation* stabilisce come questi blocchi vengono rimessi insieme dopo che ogni blocco è stato cifrato. La scelta sulla dimensione dei blocchi dev'essere "bilanciata": - non può essere troppo grande, altrimenti si avrebbe molto overhead in caso di messaggi molto corti; - non può essere troppo piccola, altrimenti si avrebbero delle falle di sicurezza. Se $B$ è sufficientemente piccolo, l'attaccante può costruirsi una *lookup table* che mappa tutti i possibili plaintext con tutti i possibili ciphertext, rendendo molto semplice il processo di decifratura I block cipher possono essere descritti con uno schema matematico astratto: - se $k$ è la lunghezza della chiave, ci sono $2^k$ possibili *tabelle di permutazione*. La chiave determina la tabella da utilizzare; - se $B$ è la dimensione del blocco, ogni tabella ha $2^B$ righe. Una volta stabilita la tabella da utilizzare, il processo di cifratura/decifratura si riduce ad un lookup a questa tabella In realtà, poiché ognuna delle $2^k$ tabelle è generabile permutandone qualcun'altra, il numero di tabelle a disposizione è $(2^B)!$, perché questo è il numero di possibili permutazioni di $B$ bit. Per poter utilizzare tutte queste permutazioni, chiave dev'essere di dimensioni molto grandi perché altrimenti non si potrebbero indicizzare tutte queste tabelle. 
La dimensione della chiave per poter utilizzare $(2^B)!$ tabelle cresce vertiginosamente al crescere di $B$ (già per $B = 7$ servirebbe una chiave da 621 bit per poter indicizzare tutte le $(2^B)!$ tabelle), rendendo di fatto *inutilizzabili* la stragrande maggioranza di queste $(2^B)!$ permutazioni. Di questa piccola parte delle $(2^B)!$ permutazioni utilizzabili, ce ne sono delle altre ancora che non possono essere utilizzate perché aprirebbero delle falle di sicurezza (ad esempio non possono essere utilizzate tutte quelle permutazioni che lasciano in chiaro un qualche bit del plaintext). In generale, tutte le permutazioni che possono essere espresse come *trasformazioni lineari affini invertibili* sul campo $bb(Z)_2$ non sono utilizzabili: $ x P + b = y(x) $ dove $P$ è una matrice invertibile di dimensioni $B times B$ su $bb(Z)_2$ mentre $x$ ed $y(x)$ sono due vettori riga di $B$ elementi con coefficienti in $bb(Z)_2$. Se la trasformazione è di questo tipo, nel modello *chosen plaintext attack* l'attaccante può ricostruire $b$ e $P$ e dunque riuscire ad *invertire* la trasformazione: - per determinare $b$ è sufficiente chiedere all'oracolo di cifrare un blocco contenente solo degli zeri (1 query necessaria); - per determinare $P$ si effettuano $B$ query all'oracolo di cifratura utilizzando ogni volta un *vettore della base canonica* diverso Quindi con $B + 1$ query l'attaccante riesce a rompere la cifratura. La trasformazione utilizzata per la cifratura deve quindi essere *non lineare*. 
Oltre a questa proprietà, le permutazioni che si possono utilizzare devono averne altre 2: - *diffusione*: una piccola modifica nel plaintext deve avere un fortissimo *effetto valanga* su tutto il ciphertext; - *confusione*: il ciphertext deve distruggere qualsiasi possibile *pattern* presente nel plaintext (ad esempio lettere ripetute) === Modelli d'attacco In ordine decrescente di "potenza" da parte dell'attaccante: + *ciphertext-only attacker*: l'unica informazione nota all'attaccante è il ciphertext; + *known-plaintext attacker*: l'attaccante conosce una o più mappature plaintext $arrow.r$ ciphertext, ma in generale non sa come decifrare un generico ciphertext; + *chosen plaintext attacker*: l'attaccante ha modo di fare delle *query di cifratura* ad un oracolo. Nella crittografia asimmetrica si ha *sempre* a che fare con questo tipo di attaccanti, perché la chiave è pubblica; + *chosen ciphertext attacker*: l'attaccante ha modo di fare delle *query di decifratura*, cioè è in grado di decifrare un qualunque ciphertext == Feistel network Non è un cifrario a blocchi di per sè, ma è uno *schema* per costruirli. Tra gli altri, è stata utilizzata per costruire il cifrario DES. Una Feistel network è composta da un certo numero di *stadi* (o round), ognuno fatto sempre allo stesso modo. Il numero di stadi è strettamente legato al concetto di *diffusione*. 
Ognuno di questi stadi lavora con 3 parametri: - $L_i$ ed $R_i$, che sono rispettivamente la parte sinistra e destra del ciphertext intermedio su lo stadio sta lavorando (nel caso del 1° stadio, $L_0$ è la parte sinistra del plaintext ed $R_0$ è la parte destra); - $k_i$, che è una *round key* che viene *derivata* a partire dalla chiave generale $k$ usata dall'algoritmo tramite un processo denominato *key schedule* #figure( image("assets/feistel.jpg", width: 60%), caption: [Feistel network] ) Come si vede dalla figura: $ L_(i + 1) = R_i quad quad R_(i + 1) = F(k_i, R_i) plus.circle L_i $ dove $F$ è una trasformazione che dipende dalla chiave $k_i$. Invertire il processo di cifratura è molto semplice grazie alle proprietà dello XOR: basta invertire le frecce nello schema (cioè si parte dal fondo anziché dall'inizio). In realtà però si può fare ancora meglio: si parte sempre dall'inizio ma si invertono i ruoli di $R_0$ ed $L_0$, cioè $R_0$ diventa la parte *sinistra* del ciphertext ed $L_0$ diventa la parte *destra*. === Funzione $F$ La funzione $F$ è composta da due blocchi: - S-box: effettua una *sostituzione* dei bit; - P-box: effettua una *permutazione* dei bit #figure( image("assets/desround.jpg", width: 40%), caption: [S-box e P-box] ) Lo scopo di $F$ è duplice: + sfruttare la chiave nel processo, per mescolarla con i bit del messaggio; + forzare la *non linearità* della trasformazione da plaintext a ciphertext Il modo in cui è implementata $F$ dipende dal cifrario. 
Nel caso di DES: + si espandono i 32 bit di $R_i$ in 48 bit, tramite *duplicazione* di alcuni bit; + si XORano questi 48 bit con quelli della round key $k_i$; + si riporta questo risultato ad un valore a 32 bit (per poterlo successivamente XORare con $L_i$) tramite l'S-box, che esegue un lookup ad una tabella: + si divide il blocco da 48 bit in 8 blocchi da 6 bit ciascuno; + i primi 2 bit di ogni blocco definiscono la riga da utilizzare per il lookup, mentre gli altri 4 definiscono la colonna; + il risultato del lookup è un numero a 4 bit; + si concatenano gli 8 risultati da 4 bit ottenuti (uno per ogni blocco da 6 bit), ottenendo un valore a 32 bit + si permuta il risultato ottenuto e poi lo si XORa con $L_i$ == Mode of operation === ECB mode Si tratta dell'algoritmo più semplice (ma anche il più vulnerabile) con cui vengono rimessi insieme i blocchi dopo che sono stati cifrati. + si cifra ogni blocco, in maniera *indipendente* dagli altri; + il ciphertext finale è dato dalla concatenazione dei ciphertext di ciascun blocco #figure( image("assets/ecb.png", width: 90%), caption: [ECB mode] ) La vulnerabilità di questo approccio sta proprio nel fatto che i blocchi sono cifrati in modo *indipendente*: lo stesso blocco di plaintext viene cifrato sempre allo stesso modo, rendendo poco robusta la cifratura. === CBC mode *Prima* di procedere con la cifratura dell'$i$-esimo blocco di plaintext: - per $i = 1$ si XORa il blocco con l'*initialization vector* - per $i > 1$ lo XOR viene fatto con il blocco di ciphertext precedente #figure( image("assets/cbc.png", width: 80%), caption: [CBC mode] ) L'IV deve essere *sempre diverso* ed è fondamentale che sia scelto in modo *random*. L'IV non è un'informazione segreta: viene inviato assieme al ciphertext su un canale pubblicamente accessibile (potrebbe quindi essere intercettato). 
#pagebreak(weak: true) = Aritmetica modulare == Insiemi $bb(Z)_n$ Insiemi di numeri *interi* che vanno da 0 ad $n - 1$: $ bb(Z)_n = {0, 1, ..., n - 1} $ L'insieme $bb(Z)_n$ può essere visto anche come l'*insieme dei possibili resti* che si ottengono dividendo gli $n - 1$ numeri per $n$: $ bb(Z)_n = {i mod n} quad quad forall i = 0, 1, ..., n - 1 $ Negli insiemi $bb(Z)_n$ sono sempre definite le operazioni di *somma*, *sottrazione* e *moltiplicazione*: $ x +_n y &= (x + y) mod n \ x -_n y &= (x - y) mod n \ x dot_n y &= (x y) mod n $ La sottrazione è definita anche quando $x - y < 0$, perché il resto della divisione intera per $n$ è comunque un valore in $bb(Z)_n$ (es. $(5 - 8) mod 7 = 4$). L'*opposto* di ogni elemento è definito: si tratta del valore $y$ tale che $(x + y) mod n = 0$, da cui risulta $y = n - x$. L'*inverso moltiplicativo* invece esiste solo per gli elementi $x in bb(Z)_n$ che sono *coprimi* con $n$. Segue quindi che se $n$ è primo, ogni elemento di $bb(Z)_n \\ {0}$ ha inverso moltiplicativo. #table( columns: (1fr), [*Proprietà del modulo rispetto ai suoi divisori*: se $m$ è un divisore di $n$, allora per ogni $x in bb(Z)$ vale: $ (x mod n) mod m = x mod m $] ) == Vantaggio computazionale dell'aritmetica modulare Lavorare con l'aritmetica modulare è un grosso vantaggio perché permette di *mantenere piccoli i numeri*. Quando si esegue un'operazione in modulo, si può scegliere se: - applicare il modulo *solo alla fine* (es. $(x + y + z) mod n$); - applicare il modulo *ad ogni passo* (es. $[(x mod n) + (y mod n) + (z mod n)] mod n$) == Algoritmo di Euclide Algoritmo estremamente efficiente per il calcolo del *massimo comune divisore* tra due numeri. La sua efficienza è dovuta all'uso dell'aritmetica modulare. 
$ gcd(x, y) = cases( x "se" y = 0, gcd(y, x mod y) "altrimenti", ) $ Esiste anche una versione estesa dell'algoritmo: $ upright("ExtEuclid")(x, y) = cases( (x, 1, 0) "if" y = 0, (m, b, a - b floor(frac(x, y))) "if" (m, a, b) = upright("ExtEuclid")(y, x mod y) ) $ dove $m = gcd(x, y)$, mentre $a$ e $b$ sono due numeri che soddisfano la *Bezout's identity*: $ gcd(x, y) = m = a x + b y $ #pagebreak(weak: true) Se $gcd(x, n) = 1$, la versione estesa dell'algoritmo può essere utilizzata per *calcolare l'inverso modulare* di $x in bb(Z)_n$: + si parte dalla Bezout's identity: $m = a x + b y$; + se $m = 1$ (unico caso in cui può esistere l'inverso), si riscrive l'espressione come $a x = 1 - b n$; + si applica il modulo $n$ ad entrambi i membri, ottenendo $(a x) mod n = 1$ + l'inverso di $x$ è dunque $a$ === Efficienza dell'algoritmo L'algoritmo di Euclide (in entrambe le sue versioni) ha un *costo lineare* nella dimensione in bit dei parametri. Grazie all'utilizzo del modulo, ogni *2 iterazioni* la dimensione dei parametri si *dimezza*. == Teorema cinese dei resti Il teorema cinese dei resti è importante sia dal punto di vista teorico che dal punto di vista pratico, in quanto permette di velocizzare alcune operazioni (ad esempio il processo di decifratura in RSA). Sia $n$ un intero esprimibile come *prodotto* di $r > 1$ interi tutti *relativamente primi* tra loro: $ n = n_1 dot n_2 dot ... dot n_r quad quad gcd(n_i, n_(j eq.not i)) = 1 $ Il resto della divisione di un qualsiasi intero $a$ per $n$ è completamente determinato dai resti delle divisioni per $n_1, n_2, ..., n_r$. In altre parole, esiste una *corrispondenza biunivoca* tra l'insieme $bb(Z)_n$ e l'insieme dato dal *prodotto cartesiano* $bb(Z)_(n_1) times bb(Z)_(n_2) times ... times bb(Z)_(n_r)$. === Dimostrazione ==== Corrispondenza $bb(Z)_n arrow.r.long.double bb(Z)_(n_1) times bb(Z)_(n_2) times ... 
times bb(Z)_(n_r)$ Dato un valore $a in bb(Z)_n$, definire la tupla degli $r$-possibili resti è immediato: $ a arrow.r.long (a mod n_1, a mod n_2, ..., a mod n_r) $ Ogni elemento $a mod n_i$ della tupla è un valore in $bb(Z)_(n_i)$. ==== Corrispondenza $bb(Z)_(n_1) times bb(Z)_(n_2) times ... times bb(Z)_(n_r) arrow.r.long.double bb(Z)_n$ Per dimostrare questa corrispondenza, bisogna trovare i coefficienti interi $c_i$ che soddisfano la combinazione lineare $ C = c_1a_1 + c_2a_2 + ... + c_r a_r $ tali che $C mod n_i = a_i$ per $i = 1, ..., r$, con $a_i in bb(Z)_(n_i)$. Se si riescono a trovare questi coefficienti, allora, data la *cardinalità* dei due insiemi considerati ($bb(Z)_n$ e l'insieme prodotto cartesiano), significa che $C = a$ (con $a in bb(Z)_n$). Per trovare i $c_i$: 1. per ogni $i = 1, ..., r$, si calcola $m_i$ come il prodotto di tutti i moduli $n_1, ..., n_r$ ad eccezione dell'$i$-esimo: $ m_i = product_(j eq.not i) n_j $ 2. dato che $m_i$ non contiene $n_i$ tra i suoi fattori, e siccome tutti gli $n_i$ sono primi tra loro, $gcd(m_i, n_i) = 1$ e dunque esiste l'*inverso* di $m_i$ modulo $n_i$; 3. si definisce $c_i = m_i dot (m_i^(-1) mod n_i)$ A questo punto possono succedere 2 cose: - per $j eq.not i$, $c_i mod n_j = 0$ perché $c_i = m_i dot (m_i^(-1) mod n_i)$ è un multiplo di $n_j$, dato che $m_i$ contiene $n_j$ al suo interno come fattore; - per $i = j$, $c_i mod n_i = 1$ grazie alle proprietà del modulo: $ c_i mod n_i & = [m_i dot (m_i^(-1) mod n_i)] mod n_i \ & = (m_i dot m_i^(-1)) mod n_i \ & = 1 $ A questo punto, se $C = sum_(i = 1)^r c_i a_i$ allora $C mod n_i = a_i$ per $i = 1, ..., r$, dunque $C$ ha gli stessi resti di $a$ per ciascuno degli $n_i$. 
Dato che la cardinalità dei due insiemi è la stessa, deve necessariamente risultare che $C = a$, altrimenti significherebbe che esiste un elemento di $bb(Z)_n$ a cui non corrisponde nessun elemento dell'insieme prodotto cartesiano (cosa impossibile, perché i due insiemi hanno la stessa cardinalità). == Gruppi e gruppi ciclici Un gruppo è un insieme numerico dov'è definita un'operazione che soddisfa 4 proprietà: + *chiusura* rispetto al gruppo; + *associatività* + esistenza dell'*elemento neutro* + esistenza dell'*inverso* Gli insiemi $bb(Z)_n$ sono dunque dei gruppi rispetto alle operazioni di somma (gruppo *additivo* $bb(Z)_n^+$). Solo nel caso in cui $n$ è primo, l'insieme $bb(Z)_n$ è un gruppo anche rispetto all'operazione di moltiplicazione (gruppo *moltiplicativo* $bb(Z)_n^*$). Se $n$ non è primo non si può parlare di gruppo rispetto alla moltiplicazione, perché non tutti gli elementi di $bb(Z)_n$ hanno inverso modulare. I gruppi moltiplicativi sono dei *campi finiti*, in quanto vale anche la proprietà distributiva della moltiplicazione rispetto all'addizione. L'*ordine* di un elemento $x$ di un generico gruppo $G$ è il numero di volte con cui si deve sommare (o moltiplicare, per i gruppi moltiplicativi) $x$ a se stesso prima di ottenere l'*elemento neutro* come risultato. L'ordine di un generico gruppo $G$ invece è il *numero di elementi* contenuti nel gruppo stesso. Un gruppo si dice *ciclico* se esiste *almeno un elemento* il cui ordine è pari a quello del gruppo. Tale elemento si dice *generatore* (o *radice primitiva*). Se $n$ è primo, $bb(Z)_n$ è sempre un gruppo ciclico (sia nel caso additivo che nel caso moltiplicativo). Se $g$ è un generatore e $p$ è un numero primo, tutte le potenze intere di $g$ fino a $p - 1$ formano un gruppo moltiplicativo: $ bb(Z)_p^* = {g^i mod p | i = 1, ..., p - 1} $ Questa cosa vale anche per ogni elemento $h in bb(Z)_p^*$ di ordine $s < p - 1$. 
$h$ non è un generatore del gruppo, ma le sue potenze fino ad $s$ generano un *sottogruppo ciclico*: $ H = {h^i mod p | i = 1, ..., s} $ #table( [*Teorema di Lagrange*: tutti i sottogruppi di un gruppo di ordine $k$ hanno ordine pari ad un *divisore* di $k$.] ) #table( [*Teorema fondamentale dei gruppi ciclici*: per ogni divisore $k$ dell'ordine del gruppo esiste *uno ed un solo* sottogruppo di ordine $k$] ) == Safe prime Un *safe prime* è un numero primo $p$ del tipo $p = 2q + 1$, con $q$ primo a sua volta. L'ordine di $bb(Z)_p$ è $p - 1 = 2q$, dunque $bb(Z)_p$ ha due soli sottogruppi: - quello banale, di ordine 2, ${1, -1}$; - il sottogruppo di ordine $q$ Escludendo il sottogruppo banale ${1, -1}$, la metà degli elementi di $bb(Z)_p$ genera $bb(Z)_p$ stesso, mentre l'altra metà genera il sottogruppo di ordine $q$. Dunque un qualunque elemento $x$ può avere o ordine $q$ o ordine $p - 1$. Il teorema di Lagrange, preso un valore $x in bb(Z)_p$, permette di calcolare immediatamente l'ordine di $x$: se $x^q mod p = 1$ allora $x$ ha ordine $q$, altrimenti ha ordine $p - 1$. Il sottogruppo di ordine $q$ è formato dai *residui quadratici* modulo $p$, ovvero da quei numeri $y$ tali che $x^2 mod p = y$. Come nel caso reale, poiché $p$ è primo, $y$ in realtà ha *due* radici, che sono l'una l'opposto dell'altra. Entrambe queste radici fanno parte del sottogruppo di ordine $q$. == Esponenziale modulare L'aritmetica modulare permette di calcolare efficientemente il valore di un'esponenziale anche quando si ha a che fare con numeri molto grandi. L'idea dell'algoritmo per l'esponenziale modulare parte dal calcolo del prodotto modulare. Un'espressione del tipo $z = (a b) mod n$ può essere calcolata efficientemente in questo modo: 1. si inizializza un accumulatore $z = 0$; 2. si scrive $b$ in forma binaria e si itera su ogni suo bit; 3. 
se l'$i$-esimo bit di $b$ ha valore 1, si aggiunge all'accumulatore il valore $a_k = a dot 2^i$; #figure( ```python def mod_prod(a: int, b: int, n: int) -> int: a %= n b %= n z = 0 while b != 0: if b & 1: z = (z + a) % n a = (a << 1) % n b >>= 1 return z ```, caption: [Algoritmo per il calcolo del prodotto modulare] ) L'algoritmo per l'esponenziale modulare sfrutta lo stesso trucco, ma l'accumulazione viene fatta per moltiplicazione (anziché per addizione): #figure( ```python def mod_exp(a: int, b: int, n: int) -> int: a %= n b %= n z = 1 while b != 0: if b & 1: z = (z * a) % n a = (a * a) % n b >>= 1 return z ```, caption: [Algoritmo per il calcolo dell'esponenziale modulare] ) === Logaritmo discreto Se $b^e = x mod n$, l'esponente $e$ è detto *logaritmo discreto* in base $b$ di $x$ e si indica come $e = log_b(x) mod n$. Il logaritmo discreto è definito anche sui gruppi additivi: il logaritmo discreto in base $g$ di $x$ è il valore $k$ tale per cui $k dot g = x mod n$. Il calcolo del logaritmo discreto, anche conoscendo la base $b$ ed il modulo $n$, è un *problema difficile*. Il logaritmo discreto $e = log_b(x) mod n$ è definito solo se $b$ è un generatore del gruppo $bb(Z)_n$. Trovare i generatori di un gruppo, quindi i valori per cui il logaritmo discreto è definito, è a sua volta un problema difficile. Il numero di radici primitive di $bb(Z)_n^*$ è dato dal *toziente di Eulero*, che equivale al numero di numeri che sono coprimi con $n$: $ phi.alt(n) = |{i = 1, ..., n | gcd(i, n) = 1}| $ ==== Algoritmo baby-steps giant-steps Si tratta di un algoritmo per il calcolo del logaritmo discreto. Ha un costo esponenziale dell'ordine di $O(sqrt(2^n))$, ma il modo migliore per valutarne il costo è il prodotto spazio $times$ tempo impiegato. L'algoritmo infatti permette di stabilire quale di queste 2 componenti privilegiare. 
L'algoritmo parte dalla base $g$ del logaritmo, il suo argomento $x$ e dal modulo $p$, dopodiché calcola 2 successioni di numeri indipendenti: - una di queste successioni avrà incrementi piccoli tra un numero e l'altro (baby) - l'altra li avrà molto più grandi (giant) L'algoritmo termina quando la successione giant genera un numero già incontrato nella successione baby. L'algoritmo è il seguente: 1. si scelgono due interi $r, s$ tali che $r s >= p$; 2. si calcolano le due successioni: - baby steps: $g^0, g^1, g^2, ..., g^(r - 1)$ - giant steps: $x, x g^(-r), x g^(-2r), ..., x g^(-(s - 1)r)$ 3. se per un qualche valore $i, j$ risulta che $g^i = x g^(-j r)$, ovvero che $g^(i + j r) = x$, allora significa che $i + j r = log_g(x)$ La correttezza dell'algoritmo è dovuta al fatto che ogni elemento $t in bb(Z)_p^*$ può essere espresso come $t = j + i r$ per $i = 0, 1, ..., s - 1$ e $j = 0, 1, ..., r - 1$. L'implementazione dell'algoritmo consiste nel memorizzare in una lookup table i valori della successione baby steps, per poi controllare ad ogni passo se l'elemento corrente della successione giant step è presente nella tabella. Supponendo che il tempo per la ricerca nella lookup table sia $O(1)$, il tempo d'esecuzione dell'algoritmo è $O(r + s)$, con un tempo minimo di $O(sqrt(p))$ dato che $r s >= p$. Dato che la lookup table memorizza elementi della successione baby, il consumo di spazio è $O(r)$. Non ci sono valori particolari di $r, s$ da cui partire, dunque, in base a come si scelgono, è possibile decidere quanto spazio usare per l'algoritmo: più spazio si usa e più velocemente si troverà la collisione. #pagebreak(weak: true) = Diffie-Hellman key-exchange protocol Il protocollo di Diffie-Hellman permette a due parti di ottenere un *segreto condiviso*, cioè un elemento del gruppo $bb(Z)_p^*$. 
+ Alice e Bob si mettono d'accordo su un numero primo $p$ e su una *radice primitiva* $g$ di $bb(Z)_p^*$; + Alice sceglie un numero $a in bb(Z)_p^*$, calcola $x_a = g^a mod p$ ed invia $x_a$ a Bob; + Bob sceglie un numero $b in bb(Z)_p^*$, calcola $x_b = g^b mod p$ ed invia $x_b$ ad Alice; + Alice e Bob, sfruttando il valore ricevuto dalla controparte, calcolano il valore $g^(a b) = (x_a)^b = (x_b)^a$; + il valore $g^(a b)$ è il segreto condiviso == Efficienza Il calcolo di $g^a, g^b, g^(a b)$ è molto efficiente, perché l'algoritmo per l'esponenziale modulare ha un costo *logaritmico* nella dimensione dell'esponente. Non esistono invece algoritmi efficienti per trovare un generatore $g$ di un gruppo $bb(Z)_p^*$. Nella pratica però questo problema viene aggirato: + si genera un *safe prime* $p$; + si controlla se un certo *valore fisso* (es. 2, 3 o 5, come fa OpenSSL) è un generatore di $bb(Z)_p$ == Sicurezza Per poter risalire al segreto condiviso $g^(a b)$, ad Eve basta riuscire a calcolare $a = log_g(g^a) mod p$ oppure $b = log_g(g^b) mod p$, ovvero deve calcolare un *logaritmo discreto*. #table( columns: (1fr), [*Computational Diffie-Hellman assumption*: Se $g, a, b$ sono scelti a caso in $bb(Z)_p^*$, allora il calcolo di $g^(a b)$ conoscendo soltanto $g^a$ e $g^b$ è computazionalmente intrattabile.] ) L'assunzione di Diffie-Hellman non è ancora stata dimostrata matematicamente, ma la si ritiene vera. == Nota sul segreto condiviso $g^(a b)$ Il valore $g^(a b) mod p$ non può essere utilizzato *direttamente* come chiave per un algoritmo di cifratura simmetrica, perché i suoi bit non rispettano le proprietà di *equiprobabilità* ed *indipendenza* richieste per una chiave simmetrica. $g^(a b) mod p$ infatti non è una sequenza di bit casuali, ma è un valore di $bb(Z)_p^*$, di conseguenza non tutti i bit sono equiprobabili. #figure( image("assets/z11_bin_values.png", width: 21%), caption: [ Valori binari dei numeri nell'insieme $bb(Z)_11^*$. 
] ) Nella pratica, al valore $g^(a b) mod p$ viene applicata una *funzione hash crittografica* il cui valore è utilizzato come chiave. == Problema dell'autenticazione Nella sua versione originale, il protocollo DH *non prevede autenticazione*. È quindi vulnerabile ad attacchi di tipo MITM. Per mitigare questo rischio, Alice e Bob *firmano* i valori $x_a, x_b$ quando li inviano all'altra parte. == Chiavi DH effimere Grazie alla cifratura asimmetrica, piuttosto che iniziare uno scambio di chiavi con il protocollo Diffie-Hellman sarebbe in teoria possibile cifrare una chiave simmetrica direttamente utilizzando la propria coppia di chiavi asimmetriche (come veniva fatto nelle versioni più vecchie del protocollo TLS). Questo approccio però non garantisce *forward secrecy*: se un attaccante, che si è salvato precedentemente tutti i messaggi cifrati, riesce in un qualche modo ad ottenere la chiave privata di una delle due parti, questi può mettere in chiaro *tutti* i messaggi che le due parti si sono scambiate fino a quel momento. Per mitigare questo rischio si utilizza il protocollo Diffie-Hellman con delle *chiavi effimere*: dopo che sono stati utilizzati una volta, i valori $a$ e $b$ vengono buttati via. In questo modo l'attaccante se anche riuscisse a compromettere le chiavi RSA di una delle due parti, non riuscirebbe comunque a mettere in chiaro i messaggi. #pagebreak(weak: true) = Casualità e algoritmi probabilistici Le sequenze di bit *casuali* rilevanti in ambito crittografico devono avere due proprietà: + *ogni* bit deve avere probabilità pari ad $frac(1, 2)$ di essere 0 o 1; + i bit devono essere *indipendenti* tra di loro L'unico modo per generare sequenze di bit *realmente* casuali è tramite l'uso di *generatori hardware* che sfruttano fenomeni fisici stocastici. Questi generatori però sono molto costosi e poco flessibili, perché è difficile generare in breve tempo sequenze di bit di *lunghezza arbitraria*. 
Per questi motivi sono stati sviluppati diversi *algoritmi software* per generare sequenze di bit *pseudo*-casuali. Tutti questi algoritmi prevedono in input un parametro, detto *seed*, che è la sorgente di casualità. I CSPRNG sono algoritmi *deterministici*: eseguendo l'algoritmo con lo stesso seed si ottiene lo stesso output. Esempi di sorgenti di casualità sono la velocità di battitura sulla tastiera oppure i movimenti del mouse. I vari sistemi operativi raccolgono le varie sequenze di casualità all'interno di un *entropy pool*. Quando un'applicazione ha bisogno di bit casuali, il kernel li preleva da questo file. == Algoritmi probabilistici Un algoritmo si dice *probabilistico* se al suo interno utilizza una *sorgente di casualità* che fornisce sequenze di bit indipendenti e con probabilità uniforme. Per valutare la complessità di questi algoritmi si usa il modello *bit cost*: il costo di un'operazione logico/aritmetica dipende dal *numero di bit* dei suoi operandi. Nella valutazione di un algoritmo probabilistico bisogna tenere in considerazione anche la *quantità di bit casuali richiesti*, dato che non sono una risorsa infinita. 
=== Algoritmi di tipo Monte Carlo Sono algoritmi di tipo *decisionale* che vengono chiamati anche *probability-bounded one-sided error*: - probability bounded perché la probabilità che la risposta sia sbagliata è $> 0$, ma è *limitata* e *non dipende dall'input*; - one-sided error perché l'algoritmo può sbagliare *solo su una* delle due possibili risposte ==== Esempio di algoritmo NON è probability-bounded #figure( algo( title: "IsPrime", parameters: ("n",), line-numbers: false, )[ if $n mod 2 = 0$:#i\ return False#d\ choose an odd $p in [2, frac(n, 2)]$ randomly\ if $n mod p = 0$:#i\ return False#d\ return True ], caption: [Algoritmo probabilistico per determinare se $n$ è primo] ) #pagebreak(weak: true) #figure( table( columns: (1fr, 1fr), align: (left, left), [*input*], [*output*], [ - se $n$ è primo, l'algoritmo restituisce sempre la risposta corretta; - se invece $n$ è composto, la risposta dell'algoritmo potrebbe essere sbagliata ], [ - se la risposta è `False`, allora è sempre quella corretta; - se invece la risposta è `True`, potrebbe essere sbagliata perché potrebbe essere dovuta alla sfortuna nella scelta di $p$ ] ), caption: [Dimostrazione del fatto che l'algoritmo è one-sided error, sia dal lato degli input che dal lato degli output] ) L'algoritmo però non è probability bounded. Per dimostrarlo, si consideri che per ogni divisore $x$ di $n$ anche $frac(n, x)$ è un divisore di $n$. Uno fra questi due divisori è $< sqrt(n)$, dunque $n$ può avere al massimo $2sqrt(n)$ divisori. Scegliendo un $p$ dispari nell'intervallo $[2, frac(n, 2)]$, la probabilità che $p$ sia un divisore di $n$ è data da: $ frac(2sqrt(n), frac(n, 4)) = frac(8, sqrt(n)) limits(arrow.r.long)_(n -> oo) 0 $ Dunque la probabilità che la risposta sia giusta *diminuisce sempre di più* al crescere di $n$. 
==== Importanza del limite sull'errore probabilistico Se la probabilità che l'algoritmo dia la risposta sbagliata è *limitata superiormente*, *non dipende dall'input* e le run dell'algoritmo sono *indipendenti* tra di loro, allora è possibile aumentare arbitrariamente la probabilità che la risposta sia corretta semplicemente eseguendo lo stesso algoritmo con lo stesso input per più volte. Esempio: se la probabilità di risposta giusta di un algoritmo è dell'1%, la probabilità che dopo $k$ run con lo stesso input la risposta continui ad essere sbagliata è $0.99^k$. Se l'algoritmo viene eseguito circa 70 volte, la probabilità che la risposta sia ancora sbagliata è "solo" del 50%: $ 0.99^k lt.eq frac(1, 2) arrow.r.long.double & k lt.eq frac(ln(frac(1, 2)), ln(0.99)) approx 69 $ === Algoritmi di tipo Las Vegas Gli algoritmi di tipo Las Vegas restituiscono *sempre la risposta corretta*, ma ad essere probabilistico è il *tempo* con cui la trovano. Un algoritmo decisionale di tipo Las Vegas risponde quasi sempre "non so", ma quando restituisce una delle due possibili risposte allora è sempre quella giusta. ==== Esempio: algoritmo per aggiustare l'uniformità di una sequenza casuale Si supponga di avere una sequenza ${z_i}_(i gt.eq 1)$ di bit casuali indipendenti ma non uniformi: $ bb(P)[z_i = 0] = p quad bb(P)[z_i = 1] = 1 - p quad 0 < p < 1 $ #figure( algo( title: "FixUniformity", parameters: ("z", "n",), line-numbers: false )[ for $i = 1$ to $n$, $i = i + 2$:#i\ if $z_i != z_(i + 1)$:#i\ return $z_i$ ], caption: [Algoritmo di tipo Las Vegas per aggiustare l'uniformità dei bit di $z$] ) La probabilità che l'algoritmo restituisca 0 è pari a $bb(P)[z_i = 0] dot bb(P)[z_(i + 1) = 1] = p(1 - p)$, che equivale alla probabilità che l'algoritmo restituisca 1. Dunque quando l'algoritmo restituisce una risposta, questa è sempre corretta, perché entrambe le risposte sono equiprobabili. 
Invece la probabilità che l'algoritmo non restituisca niente è pari alla probabilità che $z_i = z_(i + 1)$: $ bb(P)[z_i = 0] dot bb(P)[z_(i + 1) = 0] + bb(P)[z_i = 1] dot bb(P)[z_(i + 1) = 1] = 2p(p - 1) + 1 $ dunque se $s = 2p(p - 1) + 1$, la probabilità che il risultato sia restituito al $k$-esimo tentativo è data da $s^(k - 1)(1 - s)$, che coincide con la *distribuzione geometrica*. Conoscendo $p$, il numero medio di volte in cui dev'essere eseguito l'algoritmo è: $ sum_(k = 1)^(oo) k s^(k - 1)(1 - s) = frac(1, 1 - s) = -frac(1, 2p(p - 1)) $ Questo risultato si può sfruttare anche per fare il ragionamento inverso: qual è la probabilità che $2k$ bit siano sufficienti per ottenere il risultato? $ sum_(i = 1)^(k)s^(i - 1)(1 - s) = 1 - s^k $ == Numeri primi e test di primalità #table( columns: (1fr), [ *Prime number theorem*: se $pi(n)$ è il numero di numeri primi $<= n$, allora: $ pi(n) tilde frac(n, ln(n)) $ ] ) Questo implica che nell'intervallo $[2, n]$ circa una frazione $frac(1, ln(n))$ dei numeri è prima, ovvero i numeri primi non sono poi tanto rari. Il prime number theorem ci dice che per trovare un numero primo di $n$ bit scegliendo a caso occorrono in media circa $n ln(2) approx 0.69 dot n$ tentativi. === Test di primalità basato sul piccolo teorema di Fermat Una prima idea per un test di primalità potrebbe essere quella di sfruttare il *piccolo teorema di Fermat*: #table( columns: (1fr), [*Piccolo teorema di Fermat*: se $p$ è primo, allora vale la seguente relazione: $ x^(p - 1) mod p = 1 $ per ogni $x$ tale che $gcd(x, p) = 1$ (cioè per ogni $0 < x < p$, dato che $p$ è primo) ] ) Questo teorema però non vale al contrario: se $x^(n - 1) mod n = 1$, non è detto che $n$ sia primo. Un numero $n$ composto per cui risulta che $x^(n - 1) mod n = 1$ è detto *pseudo-primo base $x$*. L'esistenza di questa categoria di numeri impedisce di utilizzare l'FLT come criterio di primalità. Tuttavia si può dimostrare che gli pseudo-primi base 2, ad esempio, sono *estremamente rari*. 
Se quindi $n$ è scelto a caso e si pone $x = 2$, è molto improbabile che $2^(n - 1) mod n = 1$. #figure( algo( title: "Fermat-Primality-Test", parameters: ("n",), line-numbers: false, )[ return $(2^(n - 1) mod n) == 1$ ], caption: [Algoritmo per stabilire se $n$ è primo basato sul piccolo teorema di Fermat] ) Questo è un algoritmo one-sided error: - se $n$ è primo, il risultato è sempre `True`; - se invece $n$ è composto, il risultato potrebbe essere o `True` o `False` La probabilità d'errore dell'algoritmo si può ulteriormente abbassare provando *più di una base*. L'idea è che se $n$ è pseudo-primo base 2, è improbabile che lo sia anche base 3 e così via. #figure( algo( title: "Improved-Fermat-Primality-Test", parameters: ("n",), line-numbers: false, )[ pick $a$ randomly in ${3, ..., n - 1}$ \ if $gcd(a, n) > 1$: #i \ return False #d \ return $(2^(n - 1) mod n) == 1$ and $(a^(n - 1) mod n) == 1$ ], caption: [Versione migliorata del test di primalità basato sul piccolo teorema di Fermat] ) In generale, questa versione dell'algoritmo è più efficace perché: + aggiunge un test sul GCD, che con probabilità $> 0$ potrebbe individuare un divisore di $n$; + esegue il test basato sull'FLT con 2 basi anziché con una sola Esistono però dei numeri composti, detti *numeri di Carmichael*, per cui per *qualunque* $a$ tale che $gcd(a, n) = 1$ risulta che $a^(n - 1) mod n = 1$. Sebbene i numeri di Carmichael siano estremamente rari (ancora più degli pseudo-primi base 2), questo algoritmo non è comunque considerabile un algoritmo Monte Carlo perché l'errore non è *probability-bounded*: se $n$ è un numero di Carmichael la risposta è `True` per qualunque base scelta, dunque è inutile rieseguirlo più volte per vedere se la risposta cambia, perché non lo farà mai. === Test di primalità di Solovay-Strassen #table( columns: (1fr), [*Residuo quadratico*: un elemento $a in bb(Z)_n$ è detto *residuo quadratico* modulo $n$ se esiste un $x in bb(Z)_n$ tale che $a = x^2 mod n$.] 
) L'insieme dei residui quadratici forma un *sottogruppo* di $bb(Z)_n^*$ il cui ordine, se $n$ è primo, è pari alla metà dell'ordine di $bb(Z)_n^*$. Il sottogruppo dei residui quadratici è formato da tutte le *potenze pari* di un generatore $g$. #table( columns: (1fr), [*Simbolo di Legendre*: se $p$ è un numero primo ed $a in bb(Z)_p$, il *simbolo di Legendre* è una funzione definita come: $ (frac(a, p)) = cases( 0 "se" gcd(a, p) > 1, 1 "se" a "è un residuo quadratico" mod p, -1 "se" a "non è un residuo quadratico" mod p ) $ ] ) Il simbolo di Legendre può essere calcolato con il *criterio di Eulero*: $ (frac(a, p)) = a^(frac(p - 1, 2)) mod p $ Dunque per capire se $a$ è un residuo quadratico modulo $p$ è sufficiente calcolare un'esponenziale modulare. Il simbolo di Legendre consente di verificare agevolmente se 2 o 5 sono dei residui quadratici: - 2 è un residuo quadratico modulo $p$ se e solo se $p equiv 1, 7 (mod 8)$, perché vale la seguente proprietà: $ (frac(2, p)) = (-1)^(frac(p^2 - 1, 8)) $ - 5 è un residuo quadratico modulo $p$ se e solo se $p equiv 1, 4 (mod 5)$ Queste due proprietà vengono utilizzate da OpenSSL durante la fase di generazione dei parametri per Diffie-Hellman. #table( columns: (1fr), [*Simbolo di Jacobi*: sia $n$ un numero composto e sia $p_1 dot p_2 dot ... dot p_k$ la sua *scomposizione in fattori primi*. Per ogni intero $a$ il simbolo di Jacobi è definito come: $ (frac(a, n)) = product_(i = 1)^(k)(frac(a, p_i)) $ ] ) Il simbolo di Jacobi è una generalizzazione del simbolo di Legendre che vale anche per numeri composti. A differenza del simbolo di Legendre, se $(frac(a, n)) = 1$ non si può dire che $a$ è un residuo quadratico modulo $n$, perché potrebbe capitare una cosa di questo tipo: $ (frac(2, 15)) = (frac(2, 3))(frac(2, 5)) = (-1)^2 = 1 $ Se invece $(frac(a, n)) = -1$ allora si può dire che $a$ *non* è un residuo quadratico modulo $n$. 
Se invece $a$ è un residuo quadratico modulo $n$, allora: - se $gcd(a, n) > 1$ può succedere che $(frac(a, n)) = 0$ - altrimenti $(frac(a, n)) = 1$ Il simbolo di Jacobi gode di diverse proprietà che permettono di calcolarlo *senza passare per la fattorizzazione di $n$*. #figure( algo( title: "IsPrime", parameters: ("n",), line-numbers: false )[ pick $a in {2, ..., n - 1}$ randomly \ if $gcd(a, n) > 1$: #i\ return False#d\ $J = (frac(a, n))$ #comment[Jacobi's symbol] \ $P = a^(frac(n - 1, 2)) mod n$ #comment[Euler's criterion] \ return $J == P$ ], caption: [Test di primalità di Solovay-Strassen] ) L'idea di base del test di primalità di Solovay-Strassen è che: - se $J != P$, allora sicuramente $n$ è composto, perché significa che il simbolo di Jacobi è diverso da quello di Legendre (calcolato con il criterio di Eulero), mentre sarebbero uguali se $n$ fosse primo; - se $J = P$ allora $n$ *potrebbe* essere primo Un numero composto $n$ che soddisfa l'uguaglianza $J = P$ per un qualche valore $a in {2, ..., n - 1}$ viene detto *pseudo-primo di Eulero* rispetto alla base $a$. Un numero non può essere pseudo-primo di Eulero rispetto a *tutte* le possibili basi $a$, dunque ripetendo più volte l'algoritmo è possibile abbassare arbitrariamente la probabilità che la risposta sia sbagliata. Il test di Solovay-Strassen è un algoritmo di tipo Monte Carlo, perché: - è *one-sided error*: l'algoritmo può restituire la risposta sbagliata solo se $n$ è composto; - è *probability-bounded*: Solovay e Strassen hanno dimostrato che, se $n$ è composto, almeno $frac(n, 2)$ dei valori $a in {2, ..., n - 1}$ sono testimoni della non primalità di $n$. Su input $n$ composto, quindi, l'algoritmo sbaglia con probabilità $<= frac(1, 2)$. == Fattorizzazione di interi La fattorizzazione di numeri interi è un altro *problema difficile* che sta alla base di alcuni cifrari asimmetrici (ad esempio RSA e Rabin). 
Un algoritmo di tipo brute-force che prova tutti i numeri nell'intervallo $[2, sqrt(n)]$ per cercare un fattore di $n$ ha costo *esponenziale* rispetto alla dimensione di $n$. Il miglior algoritmo noto per la fattorizzazione è il *General Number Field Sieve*, che ha un costo *sub-esponenziale* (più di polinomiale ma meno di esponenziale). === Algoritmo di fattorizzazione $rho$ di Pollard Si tratta di un *algoritmo iterativo*, di *costo esponenziale*, che permette di trovare un fattore di $n$. L'idea dell'algoritmo è di costruire una sequenza di valori $x_j$ che sembra apparentemente casuale, ma che ad un certo punto *inizia a ripetersi*. Ogni $2^t$ iterazioni, con $t = 0, 1, ...$ che cresce di continuo, il valore dell'iterata corrente $x_j$ viene memorizzato dall'algoritmo. Se per una qualche iterazione il valore dell'iterata corrente $x_j$ è pari a questo valore salvato $y$, allora è stato trovato un fattore. #figure( algo( title: "Pollard-Rho", parameters: ("n", "f"), line-numbers: false )[ $i = 2$ \ $j = 1$ \ pick $x in {0, ..., n - 1}$ randomly \ $y = x$ #comment[$y$ is the stored value] \ while True: #i \ $x = f(x)$ \ $m = gcd(y - x, n)$ \ if $m > 1$ and $m != n$: #i \ return $m$ #d \ $j = j + 1$ \ if $j == i$: #i \ $y = x$ \ $i = 2i$ #d \ ], caption: [Algoritmo $rho$ di Pollard per la fattorizzazione] ) $f$ è una funzione che ha lo scopo di generare valori (apparentemente) casuali all'interno di $ZZ_p$. Le due funzioni che si utilizzano di più nella pratica sono $f(x) = (x^2 + 1) mod n$ ed $f(x) = (x^2 - 1) mod n$. Di solito si preferisce scegliere $x = 2$ come valore iniziale, anziché scegliere un valore a caso in $bb(Z)_n$. Nella pratica però questo algoritmo è utilizzato in *contesti multi-thread*: thread diversi eseguono l'algoritmo partendo da valori iniziali diversi con la speranza che un qualche thread riesca a trovare un fattore di $n$. 
L'algoritmo funziona perché per ogni termine della successione $x_(i + 1) = f(x_i)$, esiste un termine di un'altra successione "ombra" $x'_i = x_i mod p$, dove $p$ è un fattore non banale di $n$. La successione "ombra" ha le stesse proprietà della successione degli $x_i$: $ x'_(i + 1) & = x_(i + 1) mod p \ & = (x_i^2 - 1 mod n) mod p \ & = (x_i^2 - 1) mod p \ & = ((x_i mod p)^2 - 1) mod p \ & = ((x'_i)^2 - 1) mod p $ Supponendo che la funzione $f$ sia una funzione casuale (cosa che in realtà non è vera, ma è coerente con il comportamento dell'algoritmo), si può dire che ogni valore della successione $x_i$ è uniformemente distribuito nell'insieme $bb(Z)_n$. Essendo questo un insieme finito, per il *birthday paradox* si può dire che servono $Theta(sqrt(n))$ iterazioni dell'algoritmo per ri-ottenere uno stesso valore di $x_i$. Seguendo lo stesso ragionamento per la successione $x'_i$, che è una successione di valori in $bb(Z)_p$, servono $Theta(sqrt(p))$ iterazioni per trovare una collisione tra due valori della successione. Poiché l'algoritmo termina quando i due valori $x$ ed $y$ sono congruenti modulo $p$, il costo computazionale dell'algoritmo è proprio $O(sqrt(p)) = O(sqrt(sqrt(n))) = O(root(4, n))$, dato che $p lt.eq sqrt(n)$. ==== Casi sfavorevoli Ci sono alcuni casi sfavorevoli in cui l'algoritmo non si accorge di avere un fattore di $n$ già "in mano", per cui continua ad eseguire delle altre iterazioni. Un caso lo si ha quando $n = p dot q$ con $p$ e $q$ primi. L'algoritmo arriverà ad un punto in cui $x$ ed $y$ sono *entrambi* congruenti sia modulo $p$ che modulo $q$ e dunque lo sono anche modulo $n$ per il CRT, perciò risulterà che $gcd(y - x, n) = n$. Un altro caso simile lo si ha quando $n = p^k$, dove risulterà che $y = x$ e dunque $gcd(y - x, n) = gcd(0, n) = n$. Nella pratica questi casi sfavorevoli sono trascurabili. L'algoritmo è comunque in grado di trovare un fattore in questi casi, semplicemente ci vorrà più tempo a causa di queste iterazioni "sprecate". 
#pagebreak(weak: true) = RSA Storicamente, RSA è stato il primo cifrario asimmetrico realizzato. Generazione della chiave: + Alice sceglie la *dimensione in bit* della chiave $N$; + Alice sceglie a caso due *numeri primi* $p, q$, entrambi di dimensione $frac(N, 2)$; + Alice calcola $n = p q$ e $phi.alt(n) = (p - 1)(q - 1)$; + Alice sceglie un numero intero $e$ tale che $gcd(e, phi.alt(n)) = 1$; + Alice calcola $d = e^(-1) mod phi.alt(n)$; La coppia $(n, e)$ è la chiave pubblica di Alice, mentre $d$ è la chiave privata. Per cifrare un messaggio $M$, con $M in [0, n - 1]$, Bob calcola $C = M^e mod n$, utilizzando l'esponente $e$ di Alice. Per decifrare il messaggio, Alice calcola $M = C^d mod n$. == Dimostrazione della correttezza Dimostrare la correttezza di RSA significa dimostrare che $(M^e)^d mod n = M$. Innanzi tutto si sfrutta il fatto che $(e d) mod phi.alt(n) = 1$, ovvero $e d = k dot phi.alt(n) + 1$ per un qualche intero $k$. Dopodiché si riscrive l'espressione in questo modo: $ (M^e)^d mod n & = [M^(k dot phi.alt(n) + 1)] mod n \ & = [M dot M^(k(p - 1)(q - 1))] mod n \ $ Il problema poi diventa dimostrare queste uguaglianze: #table( stroke: none, align: (center + horizon, center + horizon), columns: (1fr, 1fr), [ $ (M dot M^(k(p - 1)(q - 1))) mod p = M mod p $ ], [ $ (M dot M^(k(p - 1)(q - 1))) mod q = M mod q $ ] ) Se queste uguaglianze sono vere, allora per il *teorema cinese dei resti* sarà vero anche che $(M dot M^(k(p - 1)(q - 1))) mod n = M mod n$. 
Per dimostrare l'uguaglianza con $p$ si distinguono 2 casi: + $M mod p = 0$ + $M mod p eq.not 0$ Nel 1° caso $p$ è un divisore di $M$, dunque l'uguaglianza è sicuramente dimostrata perché entrambi i membri valgono 0: $ M mod p = 0 arrow.double.r.l.long (M dot M^(k(p - 1)(q - 1))) mod p = 0 $ Anche il 2° caso è immediato grazie al piccolo teorema di Fermat: $ (M dot M^(k(p - 1)(q - 1))) mod p & = [M dot (M^(p - 1)^(k(q - 1)))] mod p \ & = [M dot (M^(p - 1) mod p)^(k(q - 1))] mod p \ & = M mod p $ Dunque anche in questo caso l'uguaglianza è verificata. Per dimostrare l'uguaglianza con $q$ è sufficiente ripetere gli stessi passaggi. #pagebreak(weak: true) == Efficienza di RSA Considerato che: - non sono richiesti requisiti particolari per i primi $p$ e $q$ (in particolare non è richiesto che siano dei *safe prime*); - il calcolo dell'esponenziale modulare è molto efficiente si può dire che RSA sia piuttosto efficiente. Il problema di trovare $e$ tale che $gcd(e, phi.alt(n)) = 1$ viene girato al contrario: si tiene un valore fisso di $e$ e si generano due numeri primi $p$ e $q$ finché $gcd(e, (p - 1)(q - 1))$ non risulta uguale ad 1. == Sicurezza di RSA La sicurezza di RSA sta nella *difficoltà di fattorizzare numeri molto grandi*. Se questo problema fosse facile, allora lo sarebbe anche il violare RSA. Non è (ancora) stato dimostrato che invertire la funzione di cifratura di RSA sia equivalente alla fattorizzazione, ma lo si ritiene vero. === Malleabilità La versione textbook di RSA è *malleabile*. Sia $C_1 = (M_1)^e$ il messaggio da decifrare. 
L'attacco che si può fare è il seguente: + si costruisce un *ciphertext intermedio* $C_I = 2^e mod n$; + si calcola $C_2 = C_1 dot C_I$; + si richiede una decifratura di $C_2$, ottenendo $M_2$; Per risalire ad $M_1$ è sufficiente calcolare $frac(M_2, 2)$, perché: $ M_2 = (C_2)^d mod n & = (C_1 dot C_I)^d mod n \ & = [(M_1^e mod n) dot (2^e mod n)]^d mod n \ & = (M_1 dot 2)^(e d) mod n \ & = 2 M_1 mod n $ Per mitigare questa vulnerabilità si utilizzano delle tecniche di *hashing*. === $e$ troppo piccolo Se $e$ è troppo piccolo, c'è il rischio che $M^e < n$ e che quindi $C = M^e mod n = M^e$. In questo caso per ottenere $M$ basterebbe calcolare la *radice $e$-esima di $C$*, senza doversi impegnare nella fattorizzazione di $n$. Per questa ragione l'attuale standard implementativo prevede di scegliere sempre $e = 2^16 + 1 = 65.537$, in quanto è sufficientemente grande da far lavorare sempre il modulo. === Algoritmo di fattorizzazione di Fermat Questo algoritmo permette di semplificare la fattorizzazione di $n$ quando $p$ e $q$ sono *sufficientemente vicini* tra loro. In binario, il "sufficientemente vicini" significa che le *metà più significative* di $p$ e $q$ sono uguali. 
#pagebreak(weak: true) L'algoritmo parte considerando il fatto che il prodotto di due numeri interi dispari può essere espresso come *differenza di quadrati*: $ n = a^2 - b^2 = (underbrace(frac(p + q, 2), a))^2 - (underbrace(frac(p - q, 2), b))^2 $ $a^2$ e $b^2$ sono dei *quadrati perfetti*, infatti: $ n = a^2 - b^2 arrow.l.r.double.long a = sqrt(b^2 + n) arrow.l.r.double.long b = sqrt(a^2 - n) $ Dunque si può tentare un approccio *bruteforce* per trovare un quadrato perfetto: #figure( algo( title: "Fermat-Factor", parameters: ("n", $x_0$), line-numbers: false )[ $z = x_0^2 - n$ \ if $z$ is a perfect square: #i \ return $(p = x_0 + sqrt(z), q = x_0 - sqrt(z))$ #d \ return $upright("Fermat-Factor")(n, x_0 + 1)$ ], caption: [Algoritmo di fattorizzazione di Fermat] ) ==== Scelta del punto di partenza Da quale valore $x_0$ partire nell'algoritmo? Partendo dalla seguente uguaglianza: $ (frac(p + q, 2))^2 - n = (frac(p - q, 2))^2 $ dato che l'algoritmo incrementa sempre il valore di $x_0$, sicuramente per partire bisogna scegliere un valore $<= frac(p + q, 2)$. Non conoscendo $p$ e $q$ però in realtà quest'informazione è poco utile all'algoritmo (non saprebbe comunque quando fermarsi). Tuttavia si può dimostrare che se $n = p dot q$ con $p != q$, allora $frac(p + q, 2) > sqrt(n)$. Per farlo è sufficiente una *dimostrazione per assurdo*: $ frac(p + q, 2) <= sqrt(n) arrow.long.r frac(1, 4)(p^2 + q^2) + frac(p q, 2) <= n $ il che è falso perché, essendo $n = p q$, equivale a $(p - q)^2 <= 0$ (impossibile per $p != q$), dunque $frac(p + q, 2) > sqrt(n)$. Un buon punto di partenza quindi è $x_0 = ceil(sqrt(n))$. ==== Efficienza dell'algoritmo Sia $p > q$ e dunque $q < sqrt(n) < p$ (il ragionamento è analogo nel caso in cui $q > p$). Si supponga che la *metà più significativa* dei bit di $p$ coincida con quella di $q$, ovvero vale la seguente relazione: $ frac(p - q, 2) < c sqrt(2q) $ per un qualche valore di $c$. 
#pagebreak(weak: true) L'uguaglianza da cui parte l'algoritmo di Fermat può essere riscritta in questo modo: $ (frac(p + q, 2))^2 - n = (frac(p - q, 2))^2 \ arrow.b.double \ (frac(p + q, 2) + sqrt(n))(frac(p + q, 2) - sqrt(n)) = (frac(p - q, 2))^2 $ Dato che $q < sqrt(n) < p$ per ipotesi, vale che $2q < frac(p + q, 2) + sqrt(n)$ e dunque si può passare a questa disuguaglianza: $ 2q(frac(p + q, 2) - sqrt(n)) < 2q c^2 arrow.r.long frac(p + q, 2) - sqrt(n) < c^2 $ Questo dimostra che il *numero di passi* che l'algoritmo deve eseguire, che è proprio $frac(p + q, 2) - sqrt(n)$, è *limitato superiormente* da $c^2$. Dato che $q = O(sqrt(n))$ e $q < sqrt(n) < p$ per ipotesi, l'assunzione iniziale $frac(p - q, 2) < c sqrt(2q)$ implica che $p - q = O(root(4, n))$, ovvero che $p$ e $q$ coincidono nella loro *metà più significativa*. === Riutilizzo di $p$ e $q$ Riutilizzare lo stesso valore di $p$ (o di $q$) per due moduli distinti $n_1, n_2$ rende il processo di cifratura totalmente inutile, perché l'attaccante riesce a recuperare in un colpo solo *entrambe* le chiavi private calcolando il GCD delle due chiavi pubbliche: #figure( algo( title: "RSA-CommonFactor", parameters: ($(e_1, n_1)$, $(e_2, n_2)$,), line-numbers: false, )[ $p = gcd(n_1, n_2)$ \ $q_1 = frac(n_1, p)$ \ $q_2 = frac(n_2, p)$ \ $d_1 = e_1^(-1) mod (p - 1)(q_1 - 1)$ \ $d_2 = e_2^(-1) mod (p - 1)(q_2 - 1)$ \ return $(d_1, d_2)$ ], caption: [Algoritmo per rompere RSA in caso di riuso di $p$ o $q$] ) == Aspetti implementativi di RSA === Velocizzare la decifratura Sfruttando il CRT si può rendere più efficiente il processo di decifratura arrivando a fare a meno della chiave privata $d$. 
Si considerino le seguenti quantità: $ M_p = C^d mod p quad M_q = C^d mod q $ Dato che $d = e^(-1) mod (p - 1)(q - 1)$, si può "scorporare" in due valori $s, t$ tali che: $ s = d mod (p - 1) quad quad t = d mod (q - 1) $ Dalla *definizione di resto* segue che: $ d = s + a(p - 1) quad quad d = t + b(q - 1) $ per una qualche coppia di interi $a$ e $b$. Le due quantità $C^d mod p$ e $C^d mod q$ possono quindi essere riscritte in questo modo: #table( columns: (50%, auto), stroke: none, [ $ C^d mod p &= C^(s + a(p - 1)) mod p \ &= [C^s dot (C^(p - 1))^a] mod p \ &= C^s mod p $ ], [ $ C^d mod q &= C^(t + b(q - 1)) mod q \ &= [C^t dot (C^(q - 1))^b] mod q \ &= C^t mod q $ ] ) da cui segue che $ M_p = C^s mod p quad quad M_q = C^t mod q $ Per ottenere $M$ è sufficiente rimettere insieme i pezzi con il CRT: $ M = [q(q^(-1) mod p)M_p + p(p^(-1) mod q)M_q] mod n $ Questo metodo richiede il calcolo di 4 valori aggiuntivi rispetto alla versione "classica": - $C^s mod p$ e $C^t mod q$, da calcolare per ogni messaggio; - $q(q^(-1) mod p)$ e $p(p^(-1) mod q)$, da calcolare una sola volta ma nonostante questi conti aggiuntivi, questo metodo è *fino a quattro volte più efficiente* rispetto alla decifratura classica perché permette di lavorare con esponenti $s$ e $t$ di dimensione *dimezzata* rispetto a quella di $d$. == Optimal Asymmetric Encryption Padding La *malleabilità* in RSA è dovuta al fatto che il processo di cifratura è *deterministico*. Lo schema di padding OAEP ha lo scopo di inserire degli *elementi casuali* nel processo al fine di irrobustirlo. 
Parametri dello schema: - la dimensione in bit del modulo ($N$); - una sequenza $r$ di *bit casuali*, di lunghezza $k$; - il numero di *bit di padding* $h$ che s'intende utilizzare; - due *funzioni hash crittografiche* $G$ ed $H$; - una sequenza di bit $P$ composta da soli zeri, da appendere in fondo al messaggio, di lunghezza $h$; #figure( table( align: (center + horizon, center + horizon), columns: (1fr, 1fr), stroke: none, [#algo( title: "RSA-OAEP-Pad", parameters: ("m", "P", "r", "G", "H",), line-numbers: false, )[ $m_1 = G(r) plus.circle (m || P)$ \ $r_1 = H(m_1) plus.circle r$ \ return $x = 00 || m_1 || r_1$ ]], [#algo( title: "RSA-OAEP-Unpad", parameters: ("x",), line-numbers: false, )[ split $x$ \to get $m_1$ \and $r_1$ \ $r = H(m_1) plus.circle r_1$ \ $m = G(r) plus.circle m_1$ \ if last $h$ bits of $m$ are \not 0: #i \ throw error #d \ remove last $h$ bits of $m$ \ return $m$ ]] ), caption: [Padding e unpadding con OAEP] ) #figure( image("assets/OAEP.jpg", width: 40%), caption: [ Schema OAEP ] ) #pagebreak(weak: true) = Protocollo di Rabin Si tratta di un algoritmo che non è mai stato utilizzato nella pratica, ma che è comunque interessante dal punto di vista teorico perché è dimostrato che invertire la funzione di cifratura è *equivalente* alla fattorizzazione di interi (dimostrazione che invece non esiste per RSA). Il problema che ha impedito l'uso pratico del protocollo è che per 1 ciphertext l'algoritmo di decifratura restituisce *4* possibili plaintext, quindi è necessario aggiungere della complessità all'algoritmo per capire quale delle 4 alternative è quella corretta. Risalire all'alternativa corretta può essere fatto solo per messaggi di *testo*; non si può usare Rabin per cifrare *sequenze di bit* arbitrari perché poi il destinatario non sarebbe in grado di decifrarle. == Calcolo delle radici quadrate modulari Sia $n = p dot q$ (con $p$ e $q$ primi) e sia $y in ZZ_n$ un valore di cui si vuole calcolare la *radice quadrata*. 
Il problema può essere approcciato in senso opposto: siano $x in ZZ_n$ ed $y = x^2 mod n$. Avendolo definito così, una delle radici di $y$ è nota, ed è esattamente $x$. Come nel caso reale, anche l'*opposto modulare* di $x$ è una radice di $y$, dunque si ha: $ sqrt(y) = plus.minus x mod n $ Dato però che $n = p dot q$, le radici di $y$ non sono 2, ma 4. $y mod p$ ed $y mod q$ sono a loro volta dei *residui quadratici* modulo $p$ e modulo $q$ rispettivamente, ed $x mod p$ ed $x mod q$ sono due delle loro radici: $ [(x mod p) dot (x mod p)] mod p &= (x dot x) mod p \ &= [(x dot x) mod n] mod p \ &= y mod p $ Le 4 radici di $y mod n$ possono essere calcolate quindi in questo modo: #table( columns: (1fr, 1fr), align: (right + horizon, center + horizon), stroke: none, [ $r_1 = (c_1 z_1 + c_2 w_1) mod n$ \ $r_2 = (c_1 z_1 - c_2 w_1) mod n$ \ $r_3 = (-c_1 z_1 + c_2 w_1) mod n$ \ $r_4 = (-c_1 z_1 - c_2 w_1) mod n$ ], [ $c_1 = q(q^(-1) mod p)$ \ $c_2 = p(p^(-1) mod q)$ \ ] ) dove $z_1 in ZZ_p$ e $w_1 in ZZ_q$ sono radici quadrate di $y mod p$ e di $y mod q$ rispettivamente. Questi risultati valgono anche se si prendono gli *opposti modulari* di $z_1$ e $w_1$. Due di queste 4 radici sono congrue modulo $p$, mentre le altre lo sono modulo $q$. == Cifratura e decifratura Si scelgono due numeri primi $p, q$ tali che $(p, q) equiv 3 (mod 4)$, poi si calcola $n = p dot q$. Dato il messaggio in chiaro $M$, il messaggio cifrato si calcola come $C = M^2 mod n$. La decifratura consiste nel calcolare le 4 radici quadrate modulo $n$ di $C$. Uno di questi 4 valori è il messaggio in chiaro. Per trovare le radici, si parte considerando le due quantità $ M_p = C^(frac(p + 1, 4)) mod p quad quad M_q = C^(frac(q + 1, 4)) mod q $ #pagebreak(weak: true) $M_p$ ed $M_q$ sono delle radici quadrate di $C$ modulo $p$ e modulo $q$ rispettivamente, perché: $ M_p^2 mod p & = (C^(frac(p + 1, 4)))^2 mod p \ & = (C dot C^(frac(p - 1, 2))) mod p \ & = C mod p $ e analogo per $M_q^2$. Siano ora $C_p = q(q^(-1) mod p)$ e $C_q = p(p^(-1) mod q)$. 
Vale quanto segue: $ (C_p M_p + C_q M_q) mod p = M_p quad quad (C_p M_p + C_q M_q) mod q = M_q $ dunque per il CRT significa che $C_p M_p + C_q M_q$ è una delle 4 radici di $C mod n$. Le altre radici si ottengono combinando i segni: $ (C_p M_p + C_q M_q) & mod n \ (C_p M_p - C_q M_q) & mod n \ (-C_p M_p + C_q M_q) & mod n \ (-C_p M_p - C_q M_q) & mod n $ == Dimostrazione dell'equivalenza alla fattorizzazione #figure( algo( title: "Rabin-Factor", parameters: ("n",), line-numbers: false, )[ pick a random $M$ such that $0 <= M < n$ \ $M_i = upright("Rabin-Decrypt")(M^2 mod n)$ \ $m = gcd(n, M - M_i)$ \ if $m > 1$ and $m != n$: #i \ return m #d \ else: #i \ pick another random $M$ \and try again #d \ ], caption: [Algoritmo di tipo Las Vegas per trovare un fattore di $n$ con l'algoritmo di Rabin] ) Supponendo che il messaggio reale sia $M = (C_p M_p + C_q M_q) mod n$: - se Rabin restituisce $M_i = (C_p M_p + C_q M_q) mod n$, allora $M - M_i = 0$ e dunque $gcd(n, M - M_i) = gcd(n, 0) = n$, quindi non si trova un fattore di $n$; - se Rabin restituisce $M_i = (-C_p M_p - C_q M_q) mod n$, allora $M - M_i = 2M$ e quindi $gcd(n, M - M_i) = gcd(n, 2M) = 1$ (nel caso tipico in cui $n$ è dispari ed $M$ è coprimo con $n$), quindi nemmeno in questo caso si trova un fattore di $n$; - se invece Rabin restituisce una delle altre 2 radici, allora si trova un fattore di $n$ L'algoritmo ha quindi probabilità di errore pari ad $frac(1, 2)$, dato che restituisce la risposta corretta in 2 casi su 4. #pagebreak(weak: true) = Protocollo di ElGamal Il protocollo di ElGamal è molto simile a quello di Diffie-Hellman per lo scambio di chiavi. Anch'esso si basa sulla difficoltà nel risolvere il *logaritmo discreto*. Il protocollo di ElGamal era utilizzato soprattutto in ambito open source, in quanto RSA ai tempi era brevettato e c'era da pagare una licenza. 
#table( columns: (auto, auto, auto), [#align(center)[*Generazione della chiave* \ (Alice)]], [#align(center)[*Cifratura* \ (Bob $arrow.r$ Alice)]], [#align(center)[*Decifratura* \ (Alice)]], [ - numero primo $p$ - generatore $g$ del gruppo $ZZ_p^*$ - elemento $a in bb(Z)_p^*$ (chiave privata) - calcola $A = g^a mod p$ La chiave pubblica di Alice è la tupla $(p, g, A)$ ], [ + sceglie un $b in ZZ_p^*$ + calcola $B = g^b mod p$ La coppia $(A^b dot M, B)$ è il messaggio cifrato. ], [ + calcola $B^a = g^(a b) = A^b$ + calcola $(B^a)^(-1)$ modulo $p$ $ M = cancel(A^b) dot cancel((B^a)^(-1)) dot M $ ] ) L'unica differenza rispetto al protocollo di Diffie-Hellman è che il segreto condiviso $g^(a b)$ viene utilizzato per manipolare il messaggio in chiaro. == Riuso di $b$ Il parametro $b$, con cui si cifra il messaggio da mandare all'altra parte, dev'essere *sempre diverso*. In caso di riuso di $b$: $ C_1 = (B, A^b M_1) \ C_2 = (B, A^b M_2) $ Se l'attaccante in un qualche modo è in grado di mettere in chiaro $C_1 = A^b M_1$ (e dunque è in grado di risalire ad $A^b$ invertendo l'equazione) allora è in grado di mettere in chiaro *tutti* i messaggi cifrati con la stessa chiave, perché: $ (A^b)^(-1) C_i = M_i $ (dove $C_i$ è l'$i$-esimo ciphertext ed $M_i$ è l'$i$-esimo messaggio in chiaro, tutti cifrati con la stessa chiave $b$). == Sicurezza - ElGamal vs RSA Il protocollo di ElGamal è considerato *più sicuro* rispetto ad RSA: per ottenere lo stesso livello di sicurezza sono sufficienti chiavi di dimensione *minore*, perché il logaritmo discreto è un problema più difficile della fattorizzazione. #pagebreak(weak: true) = Firma digitale La firma digitale è il risultato più importante raggiunto dalla crittografia asimmetrica, perché aggiunge *autenticazione* alla comunicazione, elemento fondamentale per tutte le comunicazioni sicure. 
La firma digitale prevede l'uso delle due chiavi in *ordine inverso* rispetto alla cifratura: #table( columns: (auto, auto, auto), [], [*cifratura*], [*firma*], [cifratura (firma)], [Alice usa la chiave *pubblica* di Bob per cifrare il messaggio], [Alice usa la sua chiave *privata* per firmare il messaggio], [decifratura (verifica della firma)], [Bob usa la sua chiave *privata* per decifrare il messaggio di Alice], [Bob usa la chiave *pubblica* di Alice per validare la firma sul messaggio che ha ricevuto] ) == Uso delle funzioni hash durante la firma digitale Con la firma digitale si vuole poter essere liberi di firmare messaggi di *dimensione arbitraria*, anche file da diversi MB/GB. Dato il costo computazionale della crittografia asimmetrica, questo sarebbe molto difficile da ottenere se si firmasse il messaggio effettivo. Per questo in realtà la firma non viene fatta sul messaggio, ma sul suo *hash*. Una volta scelta la funzione di hashing (che dev'essere *crittograficamente sicura*), la dimensione dell'hash rimane costante indipendentemente dalla dimensione del messaggio. == Firma digitale con RSA Per utilizzare RSA come protocollo di firma digitale non c'è bisogno di fare alcuna modifica a quanto già visto per il processo di cifratura, se non usare le due chiavi in ordine inverso. Dal punto di vista matematico, poiché $M = M^(e d) mod n$, le due chiavi $e$ e $d$ sono *simmetriche* (usare $e$ per cifrare e $d$ per decifrare o viceversa porta allo stesso risultato). === Blinding attack Eve vuole trovare un sistema per far firmare ad Alice un messaggio che non firmerebbe mai. L'idea è quella di nascondere il messaggio "malevolo" all'interno di un messaggio "innocuo". Eve cerca un numero $R$ tale che $overline(M) = (R^e dot M) mod n$, dove $M$ è il messaggio "malevolo" ed $overline(M)$ è il messaggio "innocuo". 
Se Alice viene convinta in un qualche modo a firmare $overline(M)$, si ha: $ F &= overline(M)^d mod n \ &= [(R^e dot M) mod n]^d mod n \ &= R^(e d) dot M^d mod n \ &= R dot M^d mod n $ Per ottenere il messaggio "malevolo" $M$ firmato da Alice, Eve deve semplicemente calcolare $F dot R^(-1) mod n = cancel(R) dot cancel(R^(-1)) dot M^d mod n$. Eve non ha bisogno di rubare la chiave privata di Alice, deve soltanto convincerla a firmare $overline(M)$. Questo attacco è praticabile solo quando il protocollo di firma digitale non prevede l'uso di una funzione di hashing. #pagebreak(weak: true) == Firma digitale con ElGamal A differenza di RSA, il protocollo per la firma non è identico a quello per la cifratura. Per firmare un messaggio, Alice: + sceglie un numero $k$ tale che $gcd(k, p - 1) = 1$; + calcola $r = g^k mod p$ ed $s = k^(-1)(M - a r) mod (p - 1)$; + invia a Bob $(M, (r, s))$ === Verifica della firma Sapendo che $s = k^(-1)(M - a r) mod (p - 1)$, l'idea potrebbe essere quella di risolvere quest'equazione per $M$: $ M = (k s + a r) mod (p - 1) $ In realtà però quest'equazione Bob non la può risolvere, perché non conosce né $a$ (chiave *privata* di Alice) né $k$ (chiave effimera). Il fatto che Bob (e, in generale, qualunque eavesdropper) non conosca $k$ è essenziale, perché chiunque conosca $k$ può risalire alla chiave privata di Alice risolvendo l'equazione per $a$. Il fatto che un protocollo di questo tipo sia sbagliato lo si evince anche perché, risolvendo semplicemente l'equazione, non verrebbe mai usata la chiave *pubblica* di Alice. Dato che $r = g^k mod p$ ed $s$ contiene l'inverso di $k$ nella sua definizione, sebbene modulo $(p - 1)$ e non modulo $p$, si calcola $r^s mod p$: $ r^s mod p & = r^(k^(-1)(M - a r) mod (p - 1)) mod p \ &= g^(k[k^(-1)(M - a r) mod (p - 1)]) mod p $ L'obiettivo ora è togliere il $mod (p - 1)$ ad esponente. 
Per farlo bisogna sfruttare un paio di definizioni al fine di riscrivere l'espressione: + definizione di inverso modulare: $k^(-1) mod (p - 1) = t$, con $0 <= t < p - 1$, ovvero $k t = v(p - 1) + 1$ per un qualche intero $v$; + definizione di resto: per qualsiasi coppia di interi $a$ e $b$, $a mod b = a - q b$, dove $q$ è il quoziente della divisione di $a$ per $b$ Mettendo insieme queste due espressioni si può riscrivere l'esponente in questo modo: $ k^(-1)(M - a r) mod (p - 1) = t (M - a r) - q(p - 1) $ Inserendo quest'espressione nello sviluppo di $r^s mod p$, si ottiene questo: $ r^s mod p &= g^(k[k^(-1)(M - a r) mod (p - 1)]) mod p \ &= g^(k[t(M - a r) - q(p - 1)]) mod p \ &= (g^(k t(M - a r)) dot g^(-k q(p - 1))) mod p \ &= (g^([1 + v(p - 1)](M - a r)) dot g^(-k q(p - 1))) mod p \ &= (g^(M - a r) mod p) dot cancel([(g^(#text(fill: red)[p - 1]))^(v(M - a r)) mod #text(fill: red)[p]]) dot cancel([(g^(#text(fill: red)[p - 1]))^(- k q) mod #text(fill: red)[p]]) \ &= g^(M - a r) mod p \ &= g^M dot (g^a mod p)^(- r) mod p $ Nell'ultimo passo è stata messa in evidenza la chiave pubblica di Alice $g^a mod p$. Notare che se l'ultima equazione viene moltiplicata per la chiave pubblica di Alice, ciò che rimane è $g^M mod p$. Per verificare la firma di Alice quindi, Bob: + calcola $x_1 = (r^s dot g^(a r) mod p)$ ed $x_2 = g^M mod p$ + accetta il messaggio solo se $x_1 = x_2$ Per calcolare $x_1$ Bob ha bisogno della coppia $(r, s)$ ricevuta da Alice, mentre $x_2$ Bob lo può calcolare autonomamente (visto che il messaggio in chiaro è noto). In sostanza quello che viene fatto è calcolare $g^M mod p$ in due modi diversi e accettare il messaggio solo se entrambi questi calcoli danno lo stesso risultato. === Riuso di $k$ Nella firma digitale con ElGamal, $k$ funge da chiave di sessione, dunque è importante non riutilizzarla mai. 
Se Alice usa più volte lo stesso $k$ (che implica il riuso dello stesso $r$), Eve può risalire alla sua chiave *privata* utilizzando soltanto 2 messaggi, perché: $ cases( a r + k s_1 = M_1 mod (p - 1), a r + k s_2 = M_2 mod (p - 1) ) $ === Message forgery attack Il protocollo di ElGamal, se viene firmato il messaggio in sè e non il suo *hash*, ha una vulnerabilità che consente ad Eve di generare *messaggi arbitrari* firmati da Alice, senza bisogno di conoscere la sua chiave privata. Eve sceglie due numeri $x, y$ tali che $gcd(y, p - 1) = 1$, dopodiché calcola $r$ ed $s$ utilizzando la #text(fill: orange)[chiave pubblica di Alice]: $ r &= g^x dot #text(fill: orange)[g]^(#text(fill: orange)[a]y) mod p = #text(fill: orange)[g]^(x + #text(fill: orange)[a] y) mod p \ s &= -r dot y^(-1) mod (p - 1) $ Sapendo che Bob, per verificare il messaggio, controllerà che $r^s dot g^(a r) = g^M mod p$, Eve pone $M = (x s) mod (p - 1)$. In questo caso infatti la firma verrà considerata valida da Bob, perché: $ r^s dot g^(a r) mod p &= g^(a r) dot g^((x + a y)s) mod p \ &= g^(a r + x s + a y s) mod p \ &= g^(x s) mod p \ &= g^((x s) mod (p - 1)) mod p \ &= g^M mod p $ Per proteggersi da questo attacco, anziché calcolare l'hash di $M$ si calcola l'hash di $M || r$. Per fare un message forgery attack, Eve a questo punto deve trovare un messaggio $M' eq.not M$ tale per cui $H(M' || r) = (x s) mod (p - 1)$, cosa computazionalmente impossibile se $H$ è first pre-image resistant. == Firma digitale con DSA Una particolarità che contraddistingue DSA (Digital Signature Algorithm) dagli altri protocolli di firma digitale è che DSA *fin da subito* prevede l'uso di una funzione di hashing (inizialmente era SHA-1). 
=== Generazione delle chiavi Alice sceglie: + un numero primo $q$ di dimensioni pari a quelle dell'output della funzione di hashing (160 bit per SHA-1); + un numero primo $p$ da 1024 bit (dimensione gestibile in base al grado di sicurezza che si vuole avere) tale che $p - 1 mod q = 0$ (cioè $q$ è un divisore di $p - 1$) e $gcd(p - 1, q) = q$. + un generatore $g$ del sottogruppo di ordine $q$ di $bb(Z)_p^*$ - dal *teorema fondamentale dei gruppi ciclici*, poiché $p$ è primo, questo sottogruppo di ordine $q$ di $bb(Z)_p^*$ è *unico* + un numero $a in bb(Z)_q^*$, che sarà la sua chiave *privata* Dopodiché calcola $A = g^a mod p$ ed espone la tupla $(p, q, g, A)$ come chiave *pubblica*. === Firma Dato il messaggio $M$, Alice: + calcola $m = upright("SHA-1")(M)$; + sceglie a caso un valore $k in bb(Z)_q^*$; + calcola $r = (g^k mod p) mod q$ e $s = k^(-1)(m + a r) mod q$ - se $r$ o $s$ è pari a 0, sceglie un altro $k$ e riprova a generare $r$ ed $s$ + invia a Bob la tupla $(M, (r, s))$ === Verifica della firma Data la coppia messaggio + firma $(M, (r, s))$ ricevuta da Alice, Bob: + recupera la tupla $(p, q, g, A)$ (chiave pubblica di Alice); + controlla che $0 < r < q$ e $0 < s < q$ - in caso contrario, scarta immediatamente il messaggio in quanto la firma non è valida + calcola $m = upright("SHA-1")(M)$ ed $x = m (s^(-1) mod q)$; + calcola $y = r(s^(-1) mod q)$ + la firma è valida solo se $(g^x A^y mod p) mod q = r$ ==== Dimostrazione della correttezza Come nella dimostrazione di ElGamal, ci sarà bisogno di utilizzare le definizioni di inverso modulare e di modulo. 
Siano: - $#text(fill: blue)[R] = (m + a r)^(-1) mod q$; - $Q$ il quoziente della divisione di $k R$ per $q$, ovvero il valore tale che $k R mod q = k R - Q q$; - $n$ l'intero tale che $(m + a r) R = 1 + n q$ La catena di uguaglianze è la seguente: $ g^x A^y mod p &= (g^(m(s^(-1) mod q)) dot g^(a y)) mod p \ &= (g^(m(s^(-1) mod q)) dot g^(a r(s^(-1) mod q))) mod p \ &= g^((m + a r)(s^(-1) mod q)) mod p \ &= g^((m + a r)[k(#text(fill: blue)[m + a r])^(#text(fill: blue)[-1])] mod q) mod p \ &= g^((m + a r)(k R mod q)) mod p \ &= g^((m + a r)(k R - Q q)) mod p \ &= g^(k R(m + a r) - Q q(m + a r)) mod p \ &= g^(k(1 + n q) - Q q(m + a r)) mod p \ &= g^(k + k n q - (m + a r) Q q) mod p \ &= [g^k dot cancel((#text(fill: red)[g]^#text(fill: red)[q])^(n k)) dot cancel((#text(fill: red)[g]^#text(fill: red)[q])^(-(m + a r)Q))] mod p \ &= g^k mod p $ Le cancellazioni all'ultimo passaggio sono possibili perché $g$ è un generatore del sottogruppo di ordine $q$ di $bb(Z)_p^*$. Dalla definizione di generatore, moltiplicandolo per sé stesso $q$ volte (pari all'ordine del sottogruppo che genera) si ottiene l'elemento neutro della moltiplicazione, cioè 1. Dunque, dato che $g^x A^y mod p = g^k mod p$, per assicurarsi che la firma sia valida Bob deve verificare la seguente uguaglianza: $ (g^x A^y mod p) mod q = underbrace((g^k mod p) mod q, r) $ === Generazione di $p$ Per generare un numero primo $p$ tale che $gcd(p - 1, q) = q$, un metodo abbastanza spartano ma efficace è il seguente: + si genera un primo $q$ da 160 bit; + si genera un primo $r$ di $1024 - 160 - 1$ bit (dove 1024 è la dimensione in bit che si vuole abbia $p$); + si calcola $p = 2 r q + 1$; + si controlla se $p$ è primo, e in caso contrario si ripete la procedura === Ottenere $g$ Se $g$ è un generatore del sottogruppo di ordine $q$ di $bb(Z)_p^*$, deve risultare che $g^q mod p = 1$. 
Sapendo questo, un modo per ricavare $g$ è il seguente: + si pone $e = frac(p - 1, q)$; + si sceglie un $h$ a caso tale che $1 < h < p - 1$ - 1 e $p - 1$ sono esclusi perché genererebbero soltanto i sottogruppi banali di ordine 1 e 2 di $bb(Z)_p^*$, dunque sono irrilevanti + si calcola $g = h^e mod p$ - se risulta che $g = 1$, si sceglie un nuovo valore $h$ e si ripete la procedura; - altrimenti $g$ è il generatore che cerchiamo, infatti: $ g^q mod p &= (h^e mod p)^q mod p \ &= h^(e q) mod p \ &= h^(#text(fill: red)[p - 1]) mod #text(fill: red)[p] arrow.l.double upright("FLT")\ &= 1 $ === Sicurezza DSA, come ElGamal e Diffie-Hellman, si basa sulla difficoltà nel calcolare il *logaritmo discreto*. Per $q$ di dimensione 160 bit (quindi usando SHA-1 come funzione di hashing), il livello di sicurezza è $log_2 (sqrt(2^160)) = 80$ bit. == Autenticità delle chiavi pubbliche Affinché tutto il sistema di firma digitale (e crittografia asimmetrica in generale) sia robusto, occorre stabilire dei criteri per determinare se una chiave pubblica è *autentica*, ovvero proviene #text(style: "italic")[realmente] da colui che l'ha pubblicata e non da qualcuno che si spaccia per qualcun altro. Nel corso del tempo si sono sviluppati due approcci: uno *centralizzato* ed uno *decentralizzato*. === Approccio TLS Il protocollo TLS, per garantire l'autenticità delle chiavi pubbliche, si basa sulle *certification authority*. Quando un client si collega ad un server tramite protocollo TLS, il server è tenuto a presentare al client il proprio *certificato*, che è sostanzialmente una chiave pubblica con l'aggiunta di vari metadati (es. periodo di validità, dominio, ecc.). Il certificato che il server fornisce al client include al suo interno un altro certificato, ovvero quello della *certification authority* che ha *firmato* il certificato del server. 
Il problema del client non è più quello di verificare l'autenticità della chiave pubblica del server, ma l'autenticità della firma della CA che ha firmato la chiave pubblica del server. La firma della certification authority può essere a sua volta firmata da un'altra CA che sta più in alto nella gerarchia. Alla fine, il server non manda al client solo un certificato, ma tutta la *catena* di certificati che va fino alla *root certification authority*. I certificati delle root certification authority sono installati direttamente nel sistema operativo (e nei browser). === Approccio OpenPGP/GPG L'approccio di OpenPGP non è gerarchico (verticale), ma *peer-to-peer* (orizzontale). Chiunque può creare una chiave pubblica ed esporla su un *keyserver*. Ogni chiave pubblica può essere firmata da altre persone. In pratica, più persone firmano una chiave pubblica e più questa è ritenuta affidabile. OpenPGP mette a disposizione vari strumenti a seconda del grado di fiducia che si vuole dare ad una chiave quando la si firma. #pagebreak(weak: true) = Crittografia su curve ellittiche == Campi finiti Un *campo* è un insieme di numeri su cui sono definite le due operazioni di somma e prodotto (ciascuna con il proprio elemento neutro ed inverso). Queste operazioni sono *chiuse* rispetto al campo. La particolarità che distingue un campo da un *gruppo* è che nei campi vale la proprietà distributiva dell'addizione rispetto alla moltiplicazione. Un campo è *finito* se è composto da un numero finito di elementi. Per $p$ primo, tutti i gruppi $bb(Z)_p$ sono campi finiti. In generale, campi finiti di $n$ elementi esistono se e solo se $n = p^k$, con $p$ primo e $k >= 1$. Nel caso in cui $k > 1$, il campo è formato da tutti i polinomi di grado $< k$ con coefficienti in $bb(Z)_p$. 
Esempio: il campo $G F(3^2)$ è formato da: - $0, 1, 2$ - $x, x + 1, x + 2$ - $2x, 2x + 1, 2x + 2$ Questo campo è comunque chiuso rispetto a somma e moltiplicazione, perché tutte le operazioni vengono ridotte tramite un *polinomio irriducibile*, cioè un polinomio che non può essere riscritto come prodotto di due polinomi non costanti di grado inferiore. Esempio: $(2x + 1)(x + 1) = 2x^2 + 3x + 1$, che non sarebbe nel campo, ma: - si applica il modulo 3 a tutti i coefficienti: $(2 mod 3)x^2 + (3 mod 3)x + (1 mod 3) = 2x^2 +1$; - si calcola il resto della divisione del polinomio ottenuto per il polinomio irriducibile: $(2x^2 + 1) mod (x^2 + 1) = 2$ Quindi $(2x + 1)(x + 1) = 2$ nel campo finito $G F(3^2)$. L'*ordine* di un campo finito è il numero dei suoi elementi. La *caratteristica* di un campo finito è il numero di volte con cui si può incrementare di 1 il risultato (partendo da $1 + 0$) prima di ottenere 0. Nei campi infiniti (ad esempio $bb(R)$) la caratteristica è 0, perché incrementando di 1 non si ottiene mai 0. Nei campi finiti di ordine $p^k$, invece, la caratteristica è $p$. Un campo si dice *algebricamente chiuso* se ogni polinomio non costante ha uno *zero* all'interno del campo. Nessun campo finito è algebricamente chiuso. Un esempio di campo algebricamente chiuso è $bb(C)$, il campo dei numeri complessi. == Definizione di curva ellittica Una *curva ellittica* è un insieme di punti del piano che soddisfa l'equazione $ y^2 = x^3 + a x + b $ con $a, b$ coefficienti all'interno del campo $F$. Una curva ellittica è *simmetrica rispetto all'asse $x$*, perché la $y$ compare soltanto al quadrato all'interno dell'equazione. In altre parole, per ogni coppia di punti $(overline(x), overline(y))$ che soddisfa l'equazione, anche la coppia $(overline(x), -overline(y))$ la soddisfa. Se $P = (x_p, y_p)$, per questioni di notazione si indica con $-P$ la coppia $(x_p, -y_p)$. 
#pagebreak(weak: true) == Curve ellittiche smooth #table( columns: (1fr), [ *Curva ellittica smooth*: una curva ellittica si dice *smooth* se non esistono punti in cui le *derivate parziali* si annullano simultaneamente. ] ) Le curve smooth sono importanti in ambito crittografico perché è possibile dimostrare che esiste un'*unica tangente* per ogni loro punto. #figure( table( columns: (1fr, 1fr), [$ pdv(E, y) & = 0 arrow.r.long 2y = 0 $], [$ pdv(E, x) & = 0 arrow.r.long -3x^2 - a = 0 $] ), caption: [Derivate parziali di $E(x, y) = y^2 - x^3 - a x - b$] ) Riscrivendo la 1° equazione in termini di $x$: $ 2y = 0 arrow.long.l.r.double x^3 + a x + b = 0 $ e mettendo insieme le due equazioni, i valori di $x$ che rendono la curva non-smooth sono le soluzioni di questo sistema: $ cases( x^3 + a x + b = 0, 3x^2 + a = 0 ) $ Prima di poter risolvere il sistema è necessario gestire alcuni casi particolari: - se $x = 0$, la curva è non-smooth se e solo se $a = b = 0$. La curva $y^2 = x^3$ ad esempio è non-smooth, perché in $(0, 0)$ entrambe le derivate parziali si annullano. Se invece $a = 0$ e $b != 0$ (o viceversa) la curva è smooth, perché le due equazioni non si annullano mai simultaneamente; - se $a < 0$ ed $x != 0$, la curva *potrebbe* essere non smooth se $a$ è sufficientemente negativo da annullare entrambe le equazioni Imponendo quindi $x != 0$ e risolvendo il sistema, si ottiene: $ cases( x = -frac(3b, 2a), frac(27b^2 + 4a^3, 4a^2) = 0 ) $ La curva quindi è non-smooth se e solo se $27b^2 + 4a^3 = 0$, ed il punto di singolarità ha coordinate $(-frac(3b, 2a), 0)$. La quantità $27b^2 + 4a^3$ viene detta anche *discriminante*. 
== Intersezioni con una retta #figure( table( columns: (1fr, auto, auto), align: (center, center, center), [*retta standard* \ $y = m x + q$], [*retta orizzontale* \ $y = q$], [*retta verticale* \ $x = c$], [$x^3 - m x^2 + (a - 2m q)x + b - q^2 = 0$], [$x^3 + a x + b - q^2 = 0$], [$y^2 - c^3 - a c - b = 0$] ), caption: [Coordinate $x$ dei punti d'intersezione di una retta $R$ con $E$] ) Nel primo caso e nel 2° caso si ha un polinomio di grado 3, che ha *uno o tre* zeri reali (se ne ha uno solo, gli altri due sono valori *complessi coniugati*). Nel caso di retta verticale, invece, si ha un polinomio di grado 2 che può avere zeri entrambi reali o entrambi complessi coniugati. In realtà la situazione è un po' più complessa, perché la curva $E$ è definita su un *campo finito* $F$, e non è detto che un polinomio di grado *dispari* abbia almeno una soluzione in $F$. Se però il polinomio ha almeno 2 zeri in $F$, allora anche il 3° sta in $F$. == Punto all'infinito L'unico caso rilevante in ambito crittografico è quello in cui esistono *tre* punti d'intersezione tra la curva e la retta, perché è l'unico caso in cui i punti della curva formano un *gruppo* rispetto all'operazione di *addizione*. Per gestire le *rette verticali*, che hanno solo due punti d'intersezione con la retta senza che ci sia il terzo, è necessario considerare il *punto all'infinito*, indicato con $cal(O)$. Questo punto si ritiene facente parte di *qualunque* curva ellittica $E$ su $F$. Il punto $cal(O)$ viene definito in modo da soddisfare alcune proprietà: - qualsiasi *retta verticale* interseca $E$ in $cal(O)$ con molteplicità 1. 
Dato che la retta verticale ha già 2 punti d'intersezione con la curva, ne consegue che $cal(O)$ è il terzo e dunque la logica "se ci sono due punti, c'è anche il terzo" funziona anche nel caso di rette verticali;
- nessuna retta non-verticale interseca $cal(O)$;
- nel punto $cal(O)$ la curva ha una tangente $t$ e si suppone che $t$ abbia un'*unica* intersezione con $E$ proprio nel punto $cal(O)$, con molteplicità 3;
- $cal(O) = -cal(O)$

== Addizione
Se $A$ e $B$ sono due punti di $E$, la retta che passa per questi due punti interseca $E$ nel punto $-C$. Il punto somma è $C$, che è il *simmetrico* del punto d'intersezione $-C$ rispetto all'asse $x$.

L'operazione di addizione configura i punti della curva come *gruppo abeliano*, infatti tutte le proprietà sono rispettate:
- *chiusura*. Questa proprietà è rispettata anche grazie all'introduzione di $cal(O)$, infatti:
  - se $A eq.not cal(O)$ e $B eq.not cal(O)$, il punto somma è nuovamente un punto sulla curva (eventualmente si tratta di $cal(O)$ stesso);
  - $cal(O) + cal(O) = cal(O)$, la retta tangente ad $E$ nel punto $cal(O)$ ha intersezione con $E$ proprio nel punto $cal(O)$, con molteplicità 3;
  - $A + cal(O) = A$, perché la retta che passa per $A$ e $cal(O)$ interseca la curva nel punto $-A$, il cui simmetrico è proprio $A$
- esistenza dell'*elemento neutro*: $A + cal(O) = cal(O) + A = A$;
- esistenza dell'*opposto*: $A + (-A) = cal(O)$;
- *associatività*
- *commutatività*. Questa proprietà è "ereditata" dal fatto che la retta passante per $A$ e $B$ è la stessa indipendentemente dall'ordine con cui si considerano i punti.
=== Calcolo delle coordinate di $C$
==== Caso $A != B$
Se $A != B$, i punti di intersezione tra $E$ e la retta passante per $A = (x_a, y_a)$ e $B = (x_b, y_b)$ sono le soluzioni del sistema:
$ cases(
  y^2 = x^3 + a x + b,
  y = m x + q
) $
con $m$ e $q$ definiti in questo modo:
$ m = frac(y_b - y_a, x_b - x_a) quad quad q = y_a - m x_a $
Questo sistema può essere risolto facilmente per sostituzione, sfruttando la 2° equazione $y = m x + q$:
$ (m x + q)^2 = x^3 + a x + b $
Due delle soluzioni di quest'equazione sono note: sono $x_a$ ed $x_b$. Si può quindi riscrivere l'equazione in questo modo per mettere in evidenza l'unica incognita $x_tilde(c)$:
$ (x - x_a)(x - x_b)(x - x_tilde(c)) arrow.long.r x_tilde(c) = m^2 - x_a - x_b $
Una volta ricavato $x_tilde(c)$, per ricavare $y_tilde(c)$ si sfrutta l'equazione della retta:
$ y_tilde(c) = m x_tilde(c) + q $
e dunque $C = (x_tilde(c), -y_tilde(c))$.

==== Caso $A = B$ (retta tangente)
In questo caso occorre calcolare il coefficiente angolare della retta tangente ad $E$ nel punto $A = B$. Anche se la curva non è definita in modo esplicito, è possibile rappresentarla come *unione* dei grafici di due funzioni distinte:
$ y_1(x) = sqrt(x^3 + a x + b) quad quad y_2(x) = -sqrt(x^3 + a x + b) $
queste due funzioni si ottengono risolvendo per $y$ l'equazione della curva $y^2 = x^3 + a x + b$. Dato che si parla di tangente, si deve considerare la *derivata* di queste funzioni:
$ y'_1(x) = frac(3x^2 + a, 2sqrt(x^3 + a x + b)) = frac(3x^2 + a, 2y_1(x)) quad quad y'_2(x) = -frac(3x^2 + a, 2sqrt(x^3 + a x + b)) = -frac(3x^2 + a, 2(-y_2(x))) $
Se $A = B = (x', y')$ è un punto sulla curva, si distinguono i due casi:
$ cases(
  y' = y_1(x') arrow.r.long m = frac(3x'^2 + a, 2y') "if" y' >= 0,
  y' = y_2(x') arrow.r.long m = frac(3x'^2 + a, 2y') "if" y' < 0
) $
dunque il coefficiente angolare $m$ è uguale in entrambi i casi.
#figure(
  algo(
    line-numbers: false,
    title: "EC-SUM",
    parameters: ("A", "B"),
  )[
    if $A = cal(O)$:#i\
    return $B$#d\
    if $B = cal(O)$:#i\
    return $A$#d\
    if $B = -A$:#i\
    return $cal(O)$#d\
    if $x_a eq.not x_b$:#i\
    $m <- frac(y_b - y_a, x_b - x_a)$#d\
    else:#i\
    $m <- frac(3x_a^2 + a, 2y_a)$#d\
    $q <- y_a - m x_a$\
    return $(x_c = m^2 - x_a - x_b, y_c = -(m x_c + q))$
  ],
  caption: [Algoritmo per il calcolo delle coordinate del punto $C = A + B$]
)

== Curve ellittiche su $bb(Z)_p$
Anche per le curve definite su $bb(Z)_p$ vale che se un'equazione cubica a coefficienti in $bb(Z)_p$ ha *due* radici in $bb(Z)_p$, allora anche la terza radice è in $bb(Z)_p$. Questa proprietà consente quindi di definire l'operazione di *addizione* anche per le curve definite su $ZZ_p$ e dunque di considerare i punti di questa curva come appartenenti ad un *gruppo*.

Il *sottogruppo* generato da un punto $g$ di una curva $E_(a, b)(bb(Z)_p)$ è definito come:
$ S_(E_(a, b)(bb(Z)_p))(g) = {a in E_(a, b)(bb(Z)_p) | exists k >= 0, a = k dot g} $

L'operazione $k dot g$, con $k$ intero e $g$ punto della curva, è detta *moltiplicazione scalare*. In generale, il gruppo definito dai punti di una curva ellittica $E_(a, b)(bb(Z)_p)$ non è ciclico. Tuttavia esiste *almeno un sottogruppo ciclico*.

=== Logaritmo discreto su curva ellittica
Se $g$ è un punto sulla curva $E_(a, b)(bb(Z)_p)$ che genera l'intero gruppo, allora per ogni elemento $z in E_(a, b)(bb(Z)_p)$ esiste un $k >= 0$ tale che $z = (k g) mod p$. Il minimo valore di $k$ che soddisfa l'uguaglianza è il *logaritmo a base $g$ di $z$* e si indica con $k = log_g z mod p$.

Dati $k$ e $g$ è facile calcolare $z = k dot g mod p$, ma dati $z$ e $g$ risalire a $k = log_g z mod p$ è un problema difficile. L'algoritmo per il calcolo di $z$ è sulla falsariga di quello per l'esponenziale modulare (*raddoppiamento ricorsivo*): $2g = g + g$, $4g = 2g + 2g$ e così via.
La difficoltà nel calcolo del logaritmo discreto è il motivo dell'interesse per le curve ellittiche in ambito crittografico. Tuttavia una curva ellittica è un oggetto più complesso di un gruppo numerico $bb(Z)_p$ e che, se non aggiustato opportunamente, rischia di rendere *debole* la crittografia.

Uno dei parametri più importanti da considerare è il *numero di punti* della curva, che permette di definire la dimensione del gruppo additivo. Questo valore può essere considerato l'analogo dell'ordine del gruppo di $bb(Z)_p$ (ed è dunque facile capire se il gruppo è "buono" oppure no), ma il grosso problema è che se calcolare l'ordine di $bb(Z)_p$ è facile, calcolare il numero di punti della curva è decisamente più complesso (gli algoritmi noti, pur di costo polinomiale, sono molto onerosi).

=== Numero di punti su una curva
Dato il modulo $p$, il numero di punti di $E_(a, b)(bb(Z)_p)$ è sicuramente $<= 2p + 1$ (considerando anche il punto $cal(O)$).

Data l'equazione $x^3 + a x + b mod p eq.not 0$, per ogni valore $x in bb(Z)_p$, il risultato dell'equazione o è un *residuo quadratico* modulo $p$ oppure non lo è. È ragionevole supporre che il valore sia un residuo quadratico per circa la metà delle $x in {0, 1, ..., p - 1}$. Se il valore è un residuo quadratico, allora ha *due* radici in $bb(Z)_p$ ed ogni radice identifica un punto sulla curva. Quindi:
- se il valore è un residuo quadratico, vengono identificati 2 punti sulla curva;
- se il valore non è un residuo quadratico, non viene identificato nessun punto
da cui segue che il numero di punti debba essere all'incirca dell'ordine di $p$.

#table(
  columns: (1fr),
  [*Teorema di Hasse*: il numero di punti $N$ di una curva $E_(a, b)(bb(Z)_p)$ soddisfa $abs(N - (p + 1)) <= 2sqrt(p).$]
)

L'algoritmo più efficiente per il calcolo del numero di punti di una curva è quello di *Schoof*, che ha costo $O(log(p)^8)$. Il costo è quindi polinomiale, ma l'esponente è elevato, dunque non è esattamente un algoritmo efficiente.
C'è di buono però che una volta stabiliti i coefficienti $a, b$ da cui dipende la curva, il calcolo del numero di punti dev'essere fatto *una sola volta*. Nella pratica, le curve ellittiche sicure per contesti crittografici sono ben note (la lista è pubblicamente disponibile sul web), dunque non c'è mai bisogno di calcolare questo valore perché è già stato calcolato da qualcun altro.

=== Attacco al logaritmo discreto su curve ellittiche
Le informazioni pubbliche in un protocollo basato su curva ellittica sono:
- l'equazione della curva $E$, cioè il modulo $p$ ed i due parametri $a, b$ da cui la curva dipende;
- un punto $P$ che è il generatore di un *sottogruppo ciclico* formato da tutti i punti della curva;
- un secondo punto $Q = k dot P$, per un qualche valore *segreto* $k$

L'attaccante vuole risalire al valore di $k$. Per farlo deve trovare due coppie di valori $(alpha_P, alpha_Q), (beta_P, beta_Q)$ tali da verificare la combinazione lineare $alpha_P P + alpha_Q Q = beta_P P + beta_Q Q$, che sostituendo $Q = k dot P$ diventa:
$ (alpha_P + k alpha_Q)P = (beta_P + k beta_Q)P $

I due coefficienti $(alpha_P + k alpha_Q)$ e $(beta_P + k beta_Q)$ devono essere *congruenti modulo $N$*, dove $N$ è il numero di punti di $E$, perché, per il teorema di Lagrange, l'ordine del sottogruppo generato da $P$ è un divisore dell'ordine del gruppo dei punti sulla curva $\#E$, quindi se $alpha_P + k alpha_Q$ e $beta_P + k beta_Q$ sono congrui modulo $\#E$, sono anche congrui modulo l'ordine del sottogruppo.
In altre parole, se esistono coefficienti che rendono vera $alpha_P - beta_P equiv k(beta_Q - alpha_Q) mod \#E$, allora se $gcd(beta_Q - alpha_Q, \#E) = 1$ esiste l'inverso di $beta_Q - alpha_Q mod \#E$ e dunque si può risalire al valore di $k$: $ k = (alpha_P - beta_P)(beta_Q - alpha_Q)^(-1) mod \#E $ ==== Complessità dell'algoritmo Con una dimostrazione simile a quella del *paradosso del compleanno*, si può dire che per trovare questa coppia di coefficienti occorrono circa $sqrt(2^n)$ operazioni per numeri da $n$ bit. == Diffie-Hellman su curve ellittiche (ECDH) + Alice e Bob concordano una curva ellittica $E$ da utilizzare e su un punto base $P in E$; + Alice sceglie a caso un valore $k_a$ (chiave privata) ed invia $A = k_a dot P$ a Bob (chiave pubblica); + Bob sceglie a caso un valore $k_b$ (chiave privata) ed invia $B = k_b dot P$ ad Alice (chiave pubblica); + Alice calcola $Z_A = k_a dot B = k_a dot k_b dot P$; + Bob calcola $Z_B = k_b dot A = k_b dot k_a dot P$; + il valore $Z = Z_A = Z_B$ è il segreto condiviso + entrambi applicano una *funzione di hashing* per calcolare la *chiave simmetrica* $h = H(Z)$ che verrà usata nelle successive comunicazioni tra Alice e Bob Il costo complessivo per il calcolo dei prodotti scalari $k_a dot P$, $k_b dot P$, $k_a dot B$ e $k_b dot A$ è di $O(n^3)$ (con $n$ numero di bit delle chiavi segrete). 
#pagebreak(weak: true) === Attacco basato sull'uso di una curva debole L'attacco parte dalla seguente considerazione: dalle formule per il calcolo di $C = A + B$, che passano per il calcolo di $m$, non compare mai il parametro $b$ della curva ellittica, mentre il parametro $a$ compare soltanto nel caso in cui $A = B$: $ cases( m = frac(y_b - y_a, x_b - x_a) mod p "if" A != B, m = frac(3x_a^2 + a, 2y_a) mod p "if" A = B ) $ Sebbene possa sembrare strano, in entrambi i casi la curva ellittica è sempre ben definita: - se $A != B$ esiste una sola curva che passa per questi 2 punti; - se $A = B$ ma il parametro $a$ è definito, esiste una sola curva con quel valore $a$ e che passa per il punto $A = B$ Se Bob non controlla che il punto somma $C$ ricevuto da Alice sia interno alla curva, Alice può utilizzare per suo conto una *curva più debole* rispetto a quella concordata con Bob al fine di risalire alla sua chiave privata $k_b$. ==== Esempio 1. Alice e Bob concordano sull'uso di $E_(-4, 0)(bb(Z)_10531)$ e $P = (339, 115)$. I punti di questa curva non formano un gruppo ciclico, perché: - il numero di punti della curva in questo caso è esattamente $p + 1 = 10532$, che ha fattorizzazione $2^2 dot 2633$; - se si prendono tutti i punti della curva e si controlla l'ordine del sottogruppo generato da ogni punto, si hanno soltanto 3 sottogruppi di ordine rispettivamente ${2633, 2, 5266}$. Mancano il sottogruppo di ordine 4 e quello di ordine pari alla dimensione dell'intero gruppo. Il punto $P = (339, 115)$ è però una buona scelta, perché, tra questi 3 sottogruppi, genera quello col più alto numero di elementi. 2. Bob sceglie la sua chiave segreta $k_b$ ed invia ad Alice $B = k_b dot P$. 3. Alice, per calcolare il suo $A$, altera il parametro $b$ della curva $E$, poiché, come già visto, il calcolo del punto somma non dipende da questo parametro. Sceglie quindi di usare la curva $E_(-4, 1)(bb(Z)_10531)$ e un punto $A = (9123, 1166)$. 
Il numero di elementi della curva di Alice è 10481, che ha fattorizzazione $47 dot 223$, ed il punto $A$ scelto da Alice è generatore del sottogruppo di ordine 47. Questo sottogruppo è di ordine (estremamente) più basso rispetto a quello del sottogruppo generato dal punto $P$ usato da Bob (che ha ordine 5266).
4. Bob *non controlla* che l'$A$ ricevuto da Alice stia sulla curva $E_(-4, 0)(bb(Z)_10531)$, ma calcola il segreto condiviso $Z = k_b dot A$

Poiché l'ordine di $A$ è 47 e $Z$ è un multiplo di $A$, ne segue che l'ordine del sottogruppo generato da $Z$ è a sua volta 47. Se $Q$ ed $R$ sono rispettivamente quoziente e resto della divisione intera di $k_b$ per 47, allora $k_b = Q dot 47 + R$, ovvero $Z = k_b dot A = R dot A$.

In altre parole, per trovare $k_b$ è sufficiente trovare il *resto* della sua divisione per 47, e questo può essere fatto anche tramite *bruteforce* perché $R$ deve necessariamente essere un valore $0 <= R < 47$.

Il valore $Z$ calcolato da Bob quindi non è un punto sulla curva $E_(-4, 0)(bb(Z)_10531)$, ma è un punto sulla curva debole di Alice $E_(-4, 1)(bb(Z)_10531)$.
5. Bob, ignaro di tutto questo, calcola $h = H(Z)$ ed invia un messaggio ad Alice cifrato con la chiave $h$;
6. per decifrare il messaggio, Alice deve trovare tramite bruteforce un valore $k_b_1$ nel range intero $[0, 47)$ tale che il messaggio decifrato con la chiave $h' = H(Z' = k_b_1 dot A)$ non sia pattume.
7. a questo punto Alice è riuscita a decifrare il messaggio, ma non ha (ancora) modo di conoscere la chiave privata $k_b$ di Bob. Per ora ha soltanto trovato il valore $k_b_1$ tale che $k_b mod 47 = k_b_1$.

Tuttavia Alice può *ripetere il giro* convincendo Bob ad utilizzare un altro punto $A'$ ed a mandargli un nuovo messaggio cifrato. Tramite bruteforce Alice troverà un altro valore $k_b_2$ tale che $k_b mod N = k_b_2$, dove $N$ è l'ordine del sottogruppo generato dal punto $A'$ (valore che è noto ad Alice).
Per risalire a $k_b$ ora Alice deve semplicemente usare il CRT per risolvere il sistema: $ cases( k_b mod 47 = k_b_1, k_b mod N = k_b_2 ) $ dove l'unica incognita è $k_b$. ==== Considerazioni Questo attacco è percorribile solo se Bob sta mandando dei *messaggi di testo*, altrimenti Alice non saprebbe quando fermarsi nel bruteforce. Tuttavia questo è un problema poco rilevante per l'attaccante, perché a questo punto della comunicazione (con il segreto condiviso già scambiato tra Alice e Bob) è *molto probabile* che i messaggi siano testuali. Per difendersi da questo attacco Bob, dopo aver calcolato $Z$, deve controllare che $Z$ stia sulla curva che sta pensando di utilizzare. Sebbene possa sembrare scontato, nelle prime implementazioni della crittografia su curve ellittiche questo controllo non veniva fatto, rendendo possibile l'attacco descritto sul protocollo TLS. == DSA su curve ellittiche (ECDSA) Per generare la chiave, Alice: + sceglie una curva ellittica $E_(a, b)(bb(Z)_p)$ e un punto base $P in E_(a, b)(bb(Z)_p)$; + determina il *numero di punti* della curva $N = \#E_(a, b)(bb(Z)_p)$; + sceglie a caso un valore $a in bb(Z)_p$ (chiave privata); + calcola $A = a dot P$ (chiave pubblica) Per firmare un messaggio, Alice: + calcola $h = H(M) mod N$ (dove $H$ è una qualche *funzione di hashing*) - per costruzione, $0 <= h < N$ + sceglie un valore $k in [0, N)$ tale che $gcd(k, N) = 1$ e calcola $Q = k dot P = (Q_x, Q_y)$; + calcola $r = Q_x mod N$ ed $s = k^(-1)(h + r a) mod N$; + invia a Bob la coppia $(M, (r, s))$ Per validare la firma, Bob: + calcola $w = s^(-1) mod N = k(h + r a)^(-1) mod N$; + calcola $u = h w$ e $v = r w$, dove $h$ è l'hash del messaggio; + determina il punto $R = (u P + v A) mod N = (R_x, R_y)$; + accetta il messaggio se e solo se $R_x = r = Q_x mod N$ #pagebreak(weak: true) === Dimostrazione della correttezza $ R &= u P + v A \ &= h w P + r w A \ &= h w P + r w a P \ &= w(h + r a)P \ &= k cancel((h + r a)^(-1)) cancel((h + r a)) P \ &= k P 
\ &= Q $

=== Attacco in caso di riuso di $k$
Anche la versione su curva ellittica è vulnerabile se $k$ viene riutilizzato. Riutilizzare $k$ per messaggi diversi porta ad avere anche lo stesso valore di $r$ per entrambi i messaggi. Dati due hash $h_1, h_2$ di due messaggi e le due firme $(r, s_1), (r, s_2)$, l'attaccante può calcolare:
$ s_1 - s_2 &= (h_1 + r a)k^(-1) - (h_2 + r a)k^(-1) \
&= (h_1 + r a - h_2 - r a)k^(-1) \
&= (h_1 - h_2)k^(-1) $
da cui può calcolare facilmente il valore di $k$:
$ k = (h_1 - h_2)(s_1 - s_2)^(-1) mod N $

Ovviamente l'attacco funziona solo se $gcd(s_1 - s_2, N) = 1$. In generale, se la curva ellittica è scelta bene, è raro che $s_1 - s_2$ abbia fattori in comune con $N$, ma in ogni caso se anche dovesse capitare l'attaccante può sempre ritentare il calcolo di $k$ al messaggio successivo (che avrà due valori $s_1, s_2$ differenti).

Una volta trovato $k$ si può risalire alla chiave privata:
$ (k dot s_1 - h_1)r^(-1) &= [(h_1 + r a) - h_1]r^(-1) \
&= (r a)r^(-1) \
&= a $

== Vantaggi e svantaggi delle curve ellittiche
Il problema principale delle curve ellittiche è che è molto difficile trovarne di *buone* per scopi crittografici.

Il grosso vantaggio delle curve ellittiche è che permettono di usare delle chiavi decisamente più *piccole* rispetto alla crittografia senza curve ellittiche: 256 bit danno già una curva molto sicura, contro i 2.048 bit richiesti per RSA oggi.
https://github.com/8LWXpg/typst-ansi-render
https://raw.githubusercontent.com/8LWXpg/typst-ansi-render/master/test/themes.typ
typst
MIT License
// Gallery of every built-in terminal theme shipped with ansi-render.
// The same ANSI sample (color.txt) is rendered once per theme so the
// palettes can be compared side by side.
#import "../ansi-render.typ": *

// Strip the date so repeated renders of this document are byte-reproducible.
#set document(date: none)
// Size each page to its content; each preview stands alone.
#set page(width: auto, height: auto)

// Partially-applied renderer: fixes the sample text and box styling,
// leaving only `theme` to vary per section below.
#let preview = ansi-render.with(read("color.txt"), inset: 5pt, radius: 3pt)

= List of built-in themes

== VSCode
#preview(theme: terminal-themes.vscode)

== VSCode Light
#preview(theme: terminal-themes.vscode-light)

== Putty
#preview(theme: terminal-themes.putty)

== Campbell
#preview(theme: terminal-themes.campbell)

== Campbell Powershell
#preview(theme: terminal-themes.campbell-powershell)

== Vintage
#preview(theme: terminal-themes.vintage)

== One Half Dark
#preview(theme: terminal-themes.one-half-dark)

== One Half Light
#preview(theme: terminal-themes.one-half-light)

== Solarized Dark
#preview(theme: terminal-themes.solarized-dark)

== Solarized Light
#preview(theme: terminal-themes.solarized-light)

== Tango Dark
#preview(theme: terminal-themes.tango-dark)

== Tango Light
#preview(theme: terminal-themes.tango-light)

== Gruvbox Dark
#preview(theme: terminal-themes.gruvbox-dark)

== Gruvbox Light
#preview(theme: terminal-themes.gruvbox-light)
https://github.com/MultisampledNight/flow
https://raw.githubusercontent.com/MultisampledNight/flow/main/src/checkbox.typ
typst
MIT License
#import "gfx.typ"

// Returns `default` in place of missing values when slicing out-of-bounds towards the positive end.
// `it` is an array; `end == -1` is normalized to `it.len()` first, then the
// array is padded with `default` before slicing so `it.slice` cannot fail.
// NOTE(review): the padding amount is `calc.max(start, end - it.len(), 0)`;
// the `start` term appears to over-pad when `start > end - it.len()` —
// confirm against callers before changing.
#let _graceful-slice(it, start, end, default: []) = {
  if end == -1 {
    end = it.len()
  }
  let missing = calc.max(start, end - it.len(), 0)
  it += (default,) * missing
  it.slice(start, end)
}

// Rewrites one list/enum item whose body starts with checkbox syntax
// (`[`, a single fill character, `]`) into an icon followed by the rest
// of the body. Items that do not match the syntax are returned unchanged.
#let _handle-item(it, kind: "list") = {
  // body can be a sequence (=> have children) or be directly text
  // either way we just want the children as array
  let body = it.body.fields().at(
    "children",
    default: (it.body,),
  )

  // The first three children should be `[`, the fill character and `]`.
  let checkbox = _graceful-slice(body, 0, 3)
  let fill = checkbox.at(1).fields().at("text", default: " ")
  let is-checkbox = (
    checkbox.at(0) == [\[]
    and fill.len() == 1
    and checkbox.at(-1) == [\]]
  )
  if not is-checkbox {
    return it
  }

  // convert the fill character to a showable icon
  // (unknown fill characters fall back to the "?" marker)
  let checkbox = gfx.markers.at(fill, default: gfx.markers.at("?")).icon
  // just a few minor positioning tweaks
  let checkbox = box(move(dx: -0.1em, checkbox()))
  // and remove the description from the checkbox itself
  let desc = _graceful-slice(body, 3, -1).join()

  // then throw them together, re-emitting the same kind of entry
  let full-entry = [#checkbox #desc]
  if kind == "list" {
    [- #full-entry]
  } else if kind == "enum" {
    [+ #full-entry]
  }
}

// Goes through the whole given document,
// converts all item and enum entries that follow the checkbox syntax
// into visually appealing checkboxes,
// and returns the modified document.
// You should use this via a show rule.
#let process(body) = {
  show list.item: _handle-item.with(kind: "list")
  show enum.item: _handle-item.with(kind: "enum")
  body
}
https://github.com/Student-Smart-Printing-Service-HCMUT/ssps-docs
https://raw.githubusercontent.com/Student-Smart-Printing-Service-HCMUT/ssps-docs/main/contents/categories/task1/1.2.typ
typst
Apache License 2.0
== Mô tả tất cả yêu cầu chức năng và phi chức năng có thể suy ra từ tổng thể bài toán === _Yêu cầu chức năng (functional requirements) (Ít nhất 5 yêu cầu chức năng cho mỗi bên liên quan)_ Sinh viên (user): #block(inset: (left: 1cm))[ - Đăng ký/đăng nhập tài khoản. - Thay đổi mật khẩu. - Đặt yêu cầu in ấn từ xa. - Chọn máy in và theo dõi trạng thái in ấn. - Kiểm tra thông báo về tình trạng của tài liệu đã gửi. - Theo dõi lịch sử in ấn của mình. ] Người quản lý hệ thống (SPSO): #block(inset: (left: 1cm))[ - Theo dõi lịch sử in ấn của sinh viên. - Quản lý hoạt động của máy in trong hệ thống như thêm/xóa/điều chỉnh trạng thái hoạt động, thông tin định danh của máy in. - Quản lý các thông số khác của hệ thống như số trang mặc định, ngày gửi tài liệu, các kiểu tài liệu có thể phục vụ. - Hủy tài liệu trong trường hợp học sinh không đến lấy đúng thời hạn. - Theo dõi báo cáo về việc sử dụng hệ thống in ấn sau mỗi tháng và mỗi năm. ] Người quản lý máy in (trường hợp flow truyền thống): #block(inset: (left: 1cm))[ - Tiếp nhận đơn đặt in - In ấn tài liệu theo yêu cầu - Cất xếp tài liệu và gửi sinh viên ] Đơn vị cung cấp giấy, mực in và kỹ thuật viên bảo trì: #block(inset: (left: 1cm))[ - Cung cấp giấy in, mực in cho hệ thống trong trường hợp hết giấy, hết mực. - Điều động kỹ thuật viên đến bảo trì trang thiết bị định kỳ và kịp thời khi xuất hiện lỗi kỹ thuật. ] === _Yêu cầu phi chức năng (non-functional requirements)_ #h(1cm)Yêu cầu phi chức năng liên quan đến việc xác định những ràng buộc trong dịch vụ hoặc chức năng có ảnh hưởng đến hệ thống như ràng buộc về thời gian, quá trình phát triển, hiệu suất và thường áp dụng tới toàn bộ hệ thống hơn là những đặc điểm hay dịch vụ riêng lẻ. Đối với SSPS, chúng ta có những yêu cầu phi chức năng như sau: #block(inset: (left: 1cm))[ - Sự hiệu quả (Efficiency): Trang web/app đảm bảo đáp ứng yêu cầu in ấn của sinh viên với nhiều loại tài liệu khác nhau như giáo trình, đề thi, nghiên cứu khoa học,... (hỗ trợ in màu). 
Cung cấp thông tin về tình trạng sử dụng trang thiết bị, chi phí sử dụng và tài khoản cá nhân để người dùng chủ động sắp xếp, đưa ra quyết định phù hợp nhất. - Tính khả dụng (Availability): Trang web/app hoạt động tốt và an toàn 24/7 để phục vụ yêu cầu đặt in và kiểm soát quá trình in ấn của các máy in. - Độ tin cậy (Reliability): Hệ thống phải có cơ chế sao lưu dữ liệu định kỳ hoặc khi xuất hiện lỗi kỹ thuật, có khả năng thống kê, ghi nhận lại lỗi phát sinh trong quá trình hoạt động. Tất cả tác vụ phải được hoàn thành theo yêu cầu đặt in của người dùng, hoặc có cơ chế thông báo và hướng dẫn xử lý rõ ràng nếu không thành công. - Tính dễ sử dụng (Ease Of Use): Cung cấp hướng dẫn sử dụng bằng một bản hướng dẫn, xử lý các tình huống phát sinh và có thể thuận tiện thao tác chỉ với sự hướng dẫn/điều hướng và tài liệu của ứng dụng. Người dùng không cần thiết lập thông số kỹ thuật để kết nối với máy in, chỉ cần khoảng từ 5-10 phút đọc hướng dẫn để có thể sử dụng dịch vụ một cách mượt mà nhất - Thời gian phản hồi nhanh chóng (Fast response): Đảm bào thời gian từ lúc người dùng tải tài liệu lên cho đến khi hệ thống xác nhận và đưa vào hàng đợi từ 1-2(s) cho trường hợp chậm nhất. - Giới hạn kiểu tài liệu (Limited files types): Hệ thống chỉ chấp nhận những file có độ lớn tối đa là 100MB và chỉ chấp nhận tài liệu dưới dạng PDF, JPEG, PNG, DOCX. ]
https://github.com/teamdailypractice/pdf-tools
https://raw.githubusercontent.com/teamdailypractice/pdf-tools/main/typst-pdf/examples/example-08.typ
typst
// Minimal figure example: an image scaled to 70% of the line width
// with an emphasized caption.
#figure(
  image("images/Glacier-640px.jpg", width: 70%),
  caption: [
    _Glaciers_ form an important part of the earth's climate system.
  ],
)
https://github.com/jgm/typst-hs
https://raw.githubusercontent.com/jgm/typst-hs/main/test/typ/compiler/call-02.typ
typst
Other
// Error: 26-30 duplicate argument: font #set text(font: "Arial", font: "Helvetica")
https://github.com/1taroh/typst_font_list
https://raw.githubusercontent.com/1taroh/typst_font_list/main/fonts.typ
typst
MIT License
== フォント

// Sample string mixing Latin and Japanese text so both glyph sets of
// every listed font are exercised.
#let text_example = "Hello World! 木曾路はすべて山の中である。"

// Emit one list entry per font found in fonts.json, typeset in the
// font it names, so the rendered page doubles as a font specimen.
#for entry in json("fonts.json").at("entries") {
  text(
    font: entry.name,
    [- #entry.name : #text_example]
  )
}
https://github.com/rangerjo/tutor
https://raw.githubusercontent.com/rangerjo/tutor/main/docs/manual.typ
typst
MIT License
// Manual entry point for the `tutor` package, rendered with the tidy
// documentation engine from the doc comments in /lib.typ.
#import "@preview/tidy:0.2.0"
#import "/lib.typ"

// Parse the package's doc comments; `scope` exposes the package itself
// to examples embedded in the docstrings.
#let docs = tidy.parse-module(
  read("/lib.typ"),
  name: "tutor",
  scope: (tutor: lib)
)

// Keep the displayed version in sync with the package manifest.
#let VERSION = toml("/typst.toml").package.version

// Title block: package name, build date, author, version.
#align(center)[
  #text(size: 24pt)[tutor] \
  #datetime.today().display() - <NAME> - Version #VERSION
]

// Overview of the four rendering modes the package supports.
- question mode: No solutions
- solution mode: Show solutions
- exercise mode: No points, no comments on how points are distributed when correcting an exam
- test mode: Show points and comments on how points are distributed when correcting an exam (in solution mode).

#tidy.show-module(docs, style: tidy.styles.default)
https://github.com/jgm/typst-hs
https://raw.githubusercontent.com/jgm/typst-hs/main/test/typ/text/linebreak-03.typ
typst
Other
Hard #linebreak() break.
https://github.com/kdog3682/typkit
https://raw.githubusercontent.com/kdog3682/typkit/main/0.1.0/src/str-utils.typ
typst
// String utilities.
//
// NOTE(review): the regex patterns below are written with single
// backslashes (e.g. "\d"), exactly as in the original source; do not
// "fix" them to doubled backslashes without checking the Typst
// version's string-escape behavior.
#import "is.typ": is-string, is-number, test, is-content
#import "misc.typ": get-sink

// Replace every match of `pattern` (a regex source string) in `s`
// with `replacement` (a string or a match callback).
#let str-sub(s, pattern, replacement) = {
  return s.replace(regex(pattern), replacement)
}

// Short alias of `str-sub`, kept for existing callers.
#let sub(s, pattern, replacement) = {
  return s.replace(regex(pattern), replacement)
}

// Join an array of strings in English prose style with an Oxford
// comma: () -> "", (a,) -> "a", (a, b) -> "a and b",
// (a, b, c) -> "a, b, and c".
#let oxford(items) = {
  let length = items.len()
  if length == 0 {
    ""
  } else if length == 1 {
    // FIX: was `items[0]`, which is not array indexing in Typst — it
    // juxtaposed the array with the content block `[0]`.
    items.at(0)
  } else if length == 2 {
    items.join(" and ")
  } else {
    items.slice(0, -1).join(", ") + ", and " + items.at(-1)
  }
}

// Coerce `s` to a plain string; unwraps simple text content elements.
#let resolve-str(s) = {
  if is-content(s) { return s.body.fields().at("text") }
  return str(s)
}

// Split `s` on a regex taken from the argument sink (default: empty
// pattern), dropping a single leading/trailing empty piece.
#let split(s, ..sink) = {
  let pattern = get-sink(sink, "")
  let a = resolve-str(s).split(regex(pattern))
  // FIX: guard the length so splitting an empty string cannot panic
  // once the leading empty piece has been removed.
  if a.len() > 0 and a.at(0) == "" { a.remove(0) }
  if a.len() > 0 and a.at(-1) == "" { a.remove(-1) }
  return a
}

// Fill `$`-placeholders in `s` from `ref`: `$1`, `$2`, ... index into
// an array (1-based); `$name` looks up a dictionary key; a scalar
// `ref` replaces every placeholder with its string form.
#let templater(s, ref) = {
  let callback(s) = {
    let key = s.text.slice(1)
    if is-string(ref) or is-number(ref) { return str(ref) }
    return if test(key, "^\d") { ref.at(int(key) - 1) } else { ref.at(key) }
  }
  return sub(s, "\$\w+", callback)
}

// THIS DOESNT WORK DUE TO MUTATION
// (the replacement closure cannot mutate `count` across calls;
// kept as-is, unchanged, so callers keep seeing the same failure mode).
#let strfmt(s, ..sink) = {
  let args = sink.pos()
  let count = 0
  let callback(key) = {
    panic(key)
    let value = str(args.at(count))
    count += 1
    return value
  }
  return s.replace(regex("%s"), callback)
}

// Whether `s` ends in a dot-extension like ".typ".
#let has-extension(s) = {
  return test(s, "\.\w+$")
}

// Naive English pluralization with the count prefixed:
// pluralize(1, "item") -> "1 item", pluralize(2, "item") -> "2 items".
#let pluralize(n, suffix) = {
  if n == 1 { str(n) + " " + suffix } else { str(n) + " " + suffix + "s" }
}

// Whether `s` contains a newline.
#let has-newline(s) = {
  return test(s, "\n")
}

// First regex match of `r` in `s`: returns the captures array when
// there are several groups, the single capture when there is one,
// the whole match text when there are none, or `none` on no match.
#let match(s, r) = {
  let m = s.match(regex(r))
  if m != none {
    let len = m.captures.len()
    if len > 1 {
      m.captures
    } else if len == 1 {
      m.captures.at(0)
    } else {
      m.text
    }
  }
}

// Format a function-call string: str-call("f", 1, 2) -> "f(1, 2)".
#let str-call(fn, ..sink) = {
  let args = sink.pos().map(str).join(", ")
  return fn + "(" + args + ")"
}

// Surround the stringified `s` with the delimiter `d` on both sides.
#let str-wrap(s, d) = {
  // FIX: was `return d + str(s) = d` (typo), which never appended the
  // closing delimiter.
  return d + str(s) + d
}

// Append the string `b` to the stringified `a`.
#let str-add(a, b) = {
  return str(a) + b
}

// Repeat the stringified `a` exactly `b` times.
#let str-repeat(a, b) = {
  let el = str(a)
  let s = ""
  for i in range(0, b) {
    s += el
  }
  return s
}

// All runs of digits in `s`, converted to integers.
#let get-integers(s) = {
  let m = s.matches(regex("\d+"))
  return m.map((x) => int(x.text))
}

// Whether the text ends with a caret (exponent marker).
#let is-exponent-content(c) = {
  // FIX: the body referenced an undefined `s`; use the parameter `c`.
  test(c, "\^$")
}

// Whether the text ends with "!" (factorial).
#let is-factorial(s) = {
  test(s, "!$")
}

// FIXME(review): identical to `is-factorial`; the pattern was likely
// meant to detect a fraction (e.g. "/"). Behavior kept unchanged.
#let is-fraction(s) = {
  test(s, "!$")
}

// Whether the text contains a "times" multiplication marker.
#let is-multiplication(s) = {
  test(s, "times")
}

// Compact JSON representation of any JSON-encodable value.
#let stringify(s) = {
  return json.encode(s, pretty: false)
}

// #panic(test("abc", "a"))
https://github.com/tingerrr/chiral-thesis-fhe
https://raw.githubusercontent.com/tingerrr/chiral-thesis-fhe/main/template/main.typ
typst
// Thesis entry point for the chiral-thesis-fhe template: configures the
// document kind and front matter, the generated outlines, and the
// bibliography, then includes one file per chapter.
#import "@preview/chiral-thesis-fhe:0.1.0" as ctf
#import ctf.prelude: *

#show: doc(
  // document kind and front-matter metadata
  kind: bachelors-thesis(
    id: [AI-1970-MA-999],
    title: [Mustertitel],
    // subtitle: [],
    author: "<NAME>",
    date: datetime(year: 1970, month: 01, day: 01),
    field: [Angewandte Informatik],
  ),
  outlines: (
    // you can remove any of these if you don't need them
    (target: image, title: [Abbildungsverzeichnis]),
    (target: table, title: [Tabellenverzeichnis]),
    (target: raw, title: [Listingverzeichnis]),
  ),
  outlines-position: start,
  bibliography: bibliography("/bibliography.yaml"),
)

// these are your main document chapters, you can reference them using @chap:...
#chapter[Einleitung] <chap:intro>
#include "chapters/1 - intro.typ"

#chapter[Grundlagen] <chap:basics>
#include "chapters/2 - basics.typ"

#chapter[Konzept] <chap:concept>
#include "chapters/3 - concept.typ"

#chapter[Implementierung] <chap:impl>
#include "chapters/4 - impl.typ"

#chapter[Fazit] <chap:conclusion>
#include "chapters/5 - conclusion.typ"
https://github.com/kitashimauni/report-tools
https://raw.githubusercontent.com/kitashimauni/report-tools/main/example/example.typ
typst
// Example document for report-tools: demonstrates the `showCode` and
// `itembox` helpers from ../lib.typ.
#import "../lib.typ": mysetting, showCode, itembox
// load the personal settings
#show: mysetting
// override a setting
#set par(first-line-indent: 0em)

// tip: how to specify a Japanese font inside code blocks
// specify as font: (non-Japanese font, Japanese font)
// #show raw: set text(lang: "ja", font: ("Consolas", "MS Mincho"), size: 10.5pt)

// usage examples below
// defining a small wrapper like this up front is convenient
#let Code(path, ..args) = {
  showCode(read(path), path, ..args)
}

= showCode
コードを表示する関数

== 基本的な使い方
普通に表示
#Code("./main.py")
関数指定で表示(`caption`属性で説明を付けられる)
#Code("./main.py", func: "add", caption: "add関数")
"差分を指定(初期値は`(-1, 0)`となっており端の空行は表示されない)
#Code("./main.py", func: "mul", diff: (-2, 1))
`showlines`を`true`にすると端の空行も表示される
#Code("./main.py", func: "mul", diff: (-2, 1), showlines: true)
クラス指定で表示
#Code("./main.py", class: "counter")
範囲指定で表示`showrange`の代わりにこちらも使用可
#Code("./main.py", range: (1, 2))

== 特殊な指定
#raw("funcに__main__を指定すると if __name__ == \"__main__\"部分が出力される")
#Code("./main.py", func: "__main__")

== 注意点
以下のようにインデントの合わないコメントがあるとそのコメント以下が表示されなくなるので注意
#showCode( " def func(): a = 0 b = 1 # このコメントはダメ c = 2 # この位置はok ", ".py" )

= itembox
Latexのアイテムボックスのようなものを表示する関数

== 使い方
普通に使用
#itembox(
  caption: "課題1"
)[
  Typstの利便性について説明せよ
]
`width`等の変更も可(`outsetやinsetの方向別指定をするとずれる`)
#itembox(
  caption: "課題2",
  width: 100%,
  radius: 10pt,
  inset: 12pt
)[
  Typstについて調べ、Latexと比較せよ
]
`caption`の横の空白サイズを変更
#itembox(
  caption: "課題3",
  width: 100%,
  caption_padding: 15pt
)[
  Typst最高!
]
// rendering may break when used near a page boundary
#pagebreak()
`caption`と本文の間隔を変更
#text(size: 20pt)[
  #itembox(
    caption: "Tips",
    space: 10pt,
  )[
    かなり力技で実装している\
    もう少しいい感じにしたいカモ
  ]
]

== 注意点
力技で実装したため、文字サイズは6ptから30ptを想定しているのでこれを超えると`caption`がずれる\
`caption`の調整をうまく関数化してどんな文字サイズにも対応させたい
https://github.com/Ngan-Ngoc-Dang-Nguyen/thesis
https://raw.githubusercontent.com/Ngan-Ngoc-Dang-Nguyen/thesis/main/hoi-nghi-Nha-Trang/slide.typ
typst
#import "@preview/polylux:0.3.1": * // #import themes.clean: * #import themes.university: * // #import themes.simple: * #import "@preview/cetz:0.1.2": canvas, plot #import "@preview/cetz:0.2.2" #show: university-theme.with( short-author: "<NAME>", short-title: "Machine Learning and Safe Screening", short-date: "23-25/08/2024", ) #import "macros.typ": * // #title-slide( // authors: [#underline[<NAME>], #h(1em) <NAME>, #h(1em) <NAME>, #h(1em) <NAME>], // // title: [Upgrading Stability Radius Model for Enhancing Robustness of Median Location on Tree Networks ], // title: [Upgrading Stability Radius Model for Enhancing Robustness of Median Location on Tree Networks ], // subtitle: "", // date: "Nha Trang 23-25/08-2024", // institution-name: "<NAME>, Đ<NAME>", // // logo: image("dummy-logo.png", width: 60mm), // // footer: none // ) #pagebreak() #title-slide( title: [Upgrading Stability Radius Model for Enhancing Robustness of Median Location on Tree Networks ], //subtitle: "An overview over all the features", authors:[#underline[<NAME>], <NAME>, <NAME>, <NAME>], date: "August 2024", ) #slide(title: [#text(eastern. darken(10%))[The main content]])[ *1. Stability Radius.* *2. Upgrading Stability Radidus.*] // #new-section-slide("1. Stability Radidus" ) #slide(title: "About this presentation")[ Let $T = (V, E)$ be a tree network with vertex set $V = {v_1, ..., v_n}$ and edge set $E$. Each vertex $v_i ∈ V$ has a nonnegative weight $w_i$. #import "@preview/bob-draw:0.1.0": * #show raw.where(lang: "bob"): it => render(it) #align(center)[#render( ``` 10 70 *-----------------* *------------* / \ / \ / \5 7 / *----------* / \ *--------* / \ / / \ / *------------* * / \ / \ / *--------------* * ```, width: 52%, )] // This presentation is supposed to briefly showcase what you can do with this // package. // For a full documentation, read the // #link("https://polylux.dev/book/")[online book]. 
] #slide[ #import "@preview/cetz:0.1.2" #canvas(length: 10%, { import cetz.draw: * let (y1, y2, y3, y4) = (3,2,1, 4) let (x1, x2, x3, x4) = (1, 3, 5, 7) let x0 = 0 let r = 0.5 let h =-2 rect((0,0), (2, 0.5), name: "p1") rect((5,0), (5+2, 0.5), name: "p2") rect((0,h), (2, h+0.5), name: "p4") rect((5,h), (5+2, h+0.5), name: "p3") line("p1.right", "p2.left", mark: (end: ">"), name: "l1") line("p2.bottom", "p3.top", mark: (end: ">"), name: "l2") line("p3.left", "p4.right", mark: (end: ">"), name: "l3") content("p1.center", [box 1], anchor: none, padding: 0.2) content("p2.center", [box 2], anchor: none, padding: 0.2) content("p3.center", [box 3], anchor: none, padding: 0.2) content("p4.center", [box 4], anchor: none, padding: 0.2) content("l1.bottom", [because], anchor: "bottom", padding: 0.2) content("l2.left", [because], anchor: "left", padding: 0.2) content("l3.bottom", [because], anchor: "bottom", padding: 0.2) }) ] #slide[ #import "@preview/cetz:0.1.2" #canvas(length: 10%, { import cetz.draw: * let (y1, y2, y3, y4) = (3,2,1, 4) let (x1, x2, x3, x4) = (1, 3, 5, 7) let x0 = 0 let r = 0.5 rect((x1, y1), (x1+5, y1+0.5), name: "a1") rect((x1, y2), (x1+5, y2+0.5), name: "a2") rect((x1, y3), (x1+5, y3+0.5), name: "a3") rect((x1+6, y1), (x1+6.5, y1+0.5), name: "b1") rect((x1+6, y2), (x1+6.5, y2+0.5), name: "b2") rect((x1+6, y3), (x1+6.5, y3+0.5), name: "b3") content("a1.left", $alpha_1$, anchor: "right", padding: 0.1) content("a2.left", $alpha_j$, anchor: "right", padding: 0.1) content("a3.left", $alpha_m$, anchor: "right", padding: 0.1) content("b1.right", $beta_1$, anchor: "left", padding: 0.1) content("b2.right", $beta_j$, anchor: "left", padding: 0.1) content("b3.right", $beta_m$, anchor: "left", padding: 0.1) content((x1+0.3, y1+0.25), $alpha_(11)$) content((x1+2.6, y1+0.25), $alpha_(1i)$) content((x1+4.6, y1+0.25), $alpha_(1n)$) line((x1, y4), (x1+5, y4), name: "x-axis", mark: (start: ">", end: ">"), stroke: gray+2pt) content("x-axis.top", [$n$ features], anchor: 
"bottom") line((x0, y1+0.5), (x0, y3), name: "y-axis", mark: (start: ">", end: ">"), stroke: gray+2pt) content("y-axis.left", [$n$ samples], anchor: "right") }) #v(1em) Machine learning model: $beta_j approx phi(alpha_j, x)$ for all $j=1,..., m$ Machine learning problem: $min_(x) L(x, alpha, beta)$ ] #matrix-slide(columns: (3fr, 3fr))[ #set align(left) Consider machine learning problems: $ min_(x in RR^n) quad f(A x+b )+ g(x) $ <eq-primal-problem> // #pause #v(3em) where - $x in RR^n$ (feature space) - $b in RR^m$ (sample space) - $A in RR^(m times n)$ - $f: RR^m -> RR$ is convex (data fitting) - $g: RR^n -> RR$ is convex (penalization) // - $f, g$ are separable gauge functions ][ #pause #set align(left) *Examples:* - Regression: - linear (L2) - Ridge (L2+L2) - LAD (L1+0) - LASSO (L2+L1) - Classification - Logistic (Logistic+0) - SVM (Hinge loss+constr.) - Soft SVM (Hinge loss+L2) - Sparse SVM (Hinge loss+L1) ] #matrix-slide[ Regression with feature sparsity (LASSO, Basis Pursuit) $ 1/2 sum_(i=1)^n (b_i- angle.l x, a_i angle.r)^2 + norm(x)_1 $ #v(2em) // #pause Classification with sample sparsity (Soft-SVM) $ sum_(i=1)^n max(0, 1 - b_i angle.l x, a_i angle.r) + 1/2 norm(x)_2^2 $ ][ #image("./img-LR.png",width: 100%) #image("./img-SVM.jpg", width: 100%) ] // #slide[ // Safe screening uses the sparsity to reduce the problem size // ] #slide[ #align(center)[#box(stroke: blue, inset: 0.5em)[ *Safe screening* exploits the sparsity to reduce the problem size. 
]] #import "@preview/cetz:0.1.2" #canvas(length: 10%, { import cetz.draw: * line((-1,0), (8.5, 0), mark: (end: ">"), name: "axis") content("axis.end", [_time_], anchor: "top", padding: 0.2) // anchor("a", (0, 0)) circle((0,0), radius: 0.1, fill: gray, name: "A") line("A.top", (0, -0.5), mark: (start: ">"), name: "lineA") content("lineA.end", box(stroke: gray, inset: 0.3em)[feature screening\ @ghaoui_safe_2011], anchor: "top") circle((3,0), radius: 0.1, fill: gray, name: "B") line("B.bottom", (3, 0.5), mark: (start: ">"), name: "lineB") content("lineB.end", box(stroke: gray, inset: 0.3em)[sample screening\ @ogawa_safe_nodate], anchor: "bottom") circle((6,0), radius: 0.1, fill: gray, name: "C") line("C.top", (6, -0.5), mark: (start: ">"), name: "lineC") content("lineC.end", box(stroke: gray, inset: 0.3em)[feature and sample screening\ @shibagaki_simultaneous_2016], anchor: "top") // circle((3,0), name: "B", radius: 0.1, fill: red) // circle((6,0), name: "C", radius: 0.1, fill: red) // content((3,1), box(stroke: gray, inset: 0.3em)[2013\ sample screening], ) // content((6,-1), box(stroke: gray, inset: 0.3em)[2016\ feature and sample screening], ) // content("plot.R", $g_i^+$) }) ] // #slide[What is t] #slide[ #import "@preview/cetz:0.1.2" #only(1)[ #canvas(length: 10%, { import cetz.draw: * let y = 2 let x = 4 let y-space = 1 circle((0, 0), radius: 0.2, name: "v1") content("v1.bottom", $v_1$, anchor: "left", padding: 0.2) circle((0, -1), radius: 0.2, name: "v2") content("v2.bottom", $v_2$, anchor: "left", padding: 0.2) circle((1, -1), radius: 0.2, name: "v3") content("v3.bottom", $v_3$, anchor: "left", padding: 0.2) line("v1.top", "v2.bottom") line("v1.top", "v3.bottom") // content((0, y), box(stroke: gray, inset: 0.3em)[Feature Sparsity], anchor: "right", name: "AA") // circle((x, y), radius: 0.2, name: "BB") // content("BB.bottom", $v_1$, anchor: "left", padding: 0.2) // line("AA.right", "BB.left", stroke: 3pt+gray) // content((0, y - 2*y-space), box(stroke: gray, 
inset: 0.3em)[Sample Sparsity], anchor: "right", name: "CC") // content((x, y - 2*y-space), box(stroke: gray, inset: 0.3em)[Smaller dual problem], anchor: "left", name: "DD") // line("CC.right", "DD.left", mark: (end: ">"), stroke: 3pt+gray) // circle((x/3, y), radius: 0.1, fill: gray, name: "point1") // circle((2*x/3, y), radius: 0.1, fill: gray, name: "point2") // circle((x/3, y - 2*y-space), radius: 0.1, fill: gray, name: "point3") // circle((2*x/3, y - 2*y-space), radius: 0.1, fill: gray, name: "point4") // content((x/3, y - y-space), box(stroke: gray, inset: 0.3em)[Why?], name: "why") // content((2*x/3, y - y-space), box(stroke: gray, inset: 0.3em)[How?], name: "how") // line("point1.top", "why.top", mark: (start: ">"), stroke: 3pt+gray) // line("point2.top", "how.top", mark: (start: ">"), stroke: 3pt+gray) // line("point3.bottom", "why.bottom", mark: (start: ">"), stroke: 3pt+gray) // line("point4.bottom", "how.bottom", mark: (start: ">"), stroke: 3pt+gray) // content((0, y - y-space), box(stroke: gray, inset: 0.3em)[separable gauge\ #text(blue)[(this talk)]], anchor: "right", name: "gauge") // content((x, y - y-space), box(stroke: gray, inset: 0.3em)[safe screening\ #text(blue)[(this talk)]], anchor: "left", name: "safe-region") // line("gauge.right", "why.left", mark: (end: ">"), stroke: 3pt+gray) // line("safe-region.left", "how.right", mark: (end: ">"), stroke: 3pt+gray) })] #only(2)[ #canvas(length: 10%, { import cetz.draw: * let y = 2 let x = 4 let y-space = 1 content((0, y), box(stroke: gray, inset: 0.3em)[Feature Sparsity], anchor: "right", name: "AA") content((x, y), box(stroke: gray, inset: 0.3em)[Smaller primal problem], anchor: "left", name: "BB") line("AA.right", "BB.left", mark: (end: ">"), stroke: 3pt+gray) content((0, y - 2*y-space), box(stroke: gray, inset: 0.3em)[Sample Sparsity], anchor: "right", name: "CC") content((x, y - 2*y-space), box(stroke: gray, inset: 0.3em)[Smaller dual problem], anchor: "left", name: "DD") line("CC.right", 
"DD.left", mark: (end: ">"), stroke: 3pt+gray) circle((x/3, y), radius: 0.1, fill: gray, name: "point1") circle((2*x/3, y), radius: 0.1, fill: gray, name: "point2") circle((x/3, y - 2*y-space), radius: 0.1, fill: gray, name: "point3") circle((2*x/3, y - 2*y-space), radius: 0.1, fill: gray, name: "point4") content((x/3, y - y-space), box(stroke: gray, inset: 0.3em)[Why?], name: "why") content((2*x/3, y - y-space), box(stroke: gray, inset: 0.3em)[How?], name: "how") line("point1.top", "why.top", mark: (start: ">"), stroke: 3pt+gray) line("point2.top", "how.top", mark: (start: ">"), stroke: 3pt+gray) line("point3.bottom", "why.bottom", mark: (start: ">"), stroke: 3pt+gray) line("point4.bottom", "how.bottom", mark: (start: ">"), stroke: 3pt+gray) // content((0, y - y-space), box(stroke: gray, inset: 0.3em)[separable gauge\ #text(blue)[(this talk)]], anchor: "right", name: "gauge") // content((x, y - y-space), box(stroke: gray, inset: 0.3em)[safe screening\ #text(blue)[(this talk)]], anchor: "left", name: "safe-region") // line("gauge.right", "why.left", mark: (end: ">"), stroke: 3pt+gray) // line("safe-region.left", "how.right", mark: (end: ">"), stroke: 3pt+gray) })] #only(3)[ #canvas(length: 10%, { import cetz.draw: * let y = 2 let x = 4 let y-space = 1 content((0, y), box(stroke: gray, inset: 0.3em)[Feature Sparsity], anchor: "right", name: "AA") content((x, y), box(stroke: gray, inset: 0.3em)[Smaller primal problem], anchor: "left", name: "BB") line("AA.right", "BB.left", mark: (end: ">"), stroke: 3pt+gray) content((0, y - 2*y-space), box(stroke: gray, inset: 0.3em)[Sample Sparsity], anchor: "right", name: "CC") content((x, y - 2*y-space), box(stroke: gray, inset: 0.3em)[Smaller dual problem], anchor: "left", name: "DD") line("CC.right", "DD.left", mark: (end: ">"), stroke: 3pt+gray) circle((x/3, y), radius: 0.1, fill: gray, name: "point1") circle((2*x/3, y), radius: 0.1, fill: gray, name: "point2") circle((x/3, y - 2*y-space), radius: 0.1, fill: gray, name: 
"point3") circle((2*x/3, y - 2*y-space), radius: 0.1, fill: gray, name: "point4") content((x/3, y - y-space), box(stroke: gray, inset: 0.3em)[Why?], name: "why") content((2*x/3, y - y-space), box(stroke: gray, inset: 0.3em)[How?], name: "how") line("point1.top", "why.top", mark: (start: ">"), stroke: 3pt+gray) line("point2.top", "how.top", mark: (start: ">"), stroke: 3pt+gray) line("point3.bottom", "why.bottom", mark: (start: ">"), stroke: 3pt+gray) line("point4.bottom", "how.bottom", mark: (start: ">"), stroke: 3pt+gray) content((0, y - y-space), box(stroke: gray, inset: 0.3em)[separable gauge\ #text(blue)[(this talk)]], anchor: "right", name: "gauge") content((x, y - y-space), box(stroke: gray, inset: 0.3em)[safe screening\ #text(blue)[(this talk)]], anchor: "left", name: "safe-region") line("gauge.right", "why.left", mark: (end: ">"), stroke: 3pt+gray) line("safe-region.left", "how.right", mark: (end: ">"), stroke: 3pt+gray) })] ] #matrix-slide(columns: (3fr, 2fr))[ #set align(left) $ min_(x in RR^n) quad f(A x+b )+ g(x) $ <eq-primal-problem> // #pause where $f, g$ are separable gauge functions #uncover(3)[ *Examples:* L1-norm $g(x) = norm(x)_1$ Hinge loss/RELU $g(x) = max(0, x)$ Quantile/Pinball loss $ g(x) = -(1-tau) x 1_(x< 0 ) + tau x 1_(x>=0 ) $ ] ][ #pause $g$ is a separable gauge func. 
$ g(x)= sum_(i=1,..., n) g_i (x_i) $ #import "@preview/cetz:0.1.2" #canvas(length: 10%, { import cetz.draw: * plot.plot( name: "plot", size: (5, 2), axis-style: "school-book", // x-ticks: (-1, 1,), // y-ticks: (0, -3,), // x-tick-step: 1, y-tick-step: 1, // x-min: -20, x-max: 20, // y-min: -2, y-max: 2, x-tick-step: none, y-tick-step: none, // x-grid: true, y-grid: true, { plot.add( domain: (0,2), samples: 100, x => (x, 3*x) ) plot.add( domain: (-2, 0), samples: 100, x => (x, -x) ) plot.add-anchor("L", (-1,3.5)) plot.add-anchor("R", (1,5)) }, ) content("plot.L", $-g_i^-$) content("plot.R", $g_i^+$) }) ] // #matrix-slide(columns: (3fr, 2fr))[ // #set align(left) // *Examples:* // L1-norm $g(x) = norm(x)_1$ // Hinge loss/RELU $g(x) = max(0, x)$ // Quantile/Pinball loss $ g(x) = -(1-tau) x 1_(x< 0 ) + tau x 1_(x>=0 ) $ // ][ // $g$ is a separable gauge func. // $ g(x)= sum_(i=1,..., n) g_i (x_i) $ // #import "@preview/cetz:0.1.2" // #canvas(length: 10%, { // import cetz.draw: * // plot.plot( // name: "plot", // size: (5, 2), // axis-style: "school-book", // // x-ticks: (-1, 1,), // // y-ticks: (0, -3,), // // x-tick-step: 1, y-tick-step: 1, // // x-min: -20, x-max: 20, // // y-min: -2, y-max: 2, // x-tick-step: none, y-tick-step: none, // // x-grid: true, y-grid: true, // { // plot.add( // domain: (0,2), // samples: 100, // x => (x, 3*x) // ) // plot.add( // domain: (-2, 0), // samples: 100, // x => (x, -x) // ) // plot.add-anchor("L", (-1,3.5)) // plot.add-anchor("R", (1,5)) // }, // ) // content("plot.L", $-g_i^-$) // content("plot.R", $g_i^+$) // }) // ] // #slide[ // #question[Why do we assume that $f, g$ are separable gauge function?] 
// #pause // #conclusion[ // $g$ is separable gauge $==> $ sparsity in primal solution // $f$ is separable gauge $==> $ "sparsity" in dual solution // Pros of sparsity: smaller problem, faster solver, efficient memory // ] // ] // #matrix-slide[ // *Machine Learning Lang.* // Feature screening // Sample screening // ][ // *Optimization Lang.* // Reducing primal problem // Reducing dual problem // ] #slide[ Assume that $g$ is a separable gauge function. $ min_(x in RR^n) quad f(A x+b )+ g(x) $ Let $IndexNonZero subset {i: xopt_i != 0}$ $ min_(x_IndexNonZero in RR^(abs(IndexNonZero))) quad f(A_IndexNonZero x_IndexNonZero + b) + g_IndexNonZero (x_IndexNonZero). $ If $xopt$ is sparse, then $IndexNonZero$ is small, #ie new problem has smaller dimension ] #slide[ The Fenchel-Rockafellar dual problem is $ max_(u in RR^m) quad quad -f^*(-u) - innerprod(b\, u) - g^*(A^T u ) $ Let $(f^+, f^-) in RR^m_+ times RR^m_+$ be the #emph[slopes] of separable gauge functions $f$. Since $f$ is a gauge, $f^*$ is an indicator function: $ max quad & innerprod(-b\, u) + g^*(A^T u)\ "s.t." quad & -u in [-f^-, f^+]\ $ ] #slide[ $ max quad & innerprod(-b\, u) + g^*(A^T u)\ "s.t." quad & -u in [-f^-, f^+]\ $ Now, let $IndexNonSaturated={i: uopt_i in (-f^-, f^+)}$. $ max quad & innerprod(-b_IndexNonSaturated\, u_IndexNonSaturated) + g^*(A^T_IndexNonSaturated u_IndexNonSaturated + k)\ "s.t." quad & -u_IndexNonSaturated in [-f^-_IndexNonSaturated, f^+_IndexNonSaturated] $ If $uopt$ is "almost saturated", then $IndexNonSaturated$ is small, #ie the new dual problem has smaller dimension. 
] #slide[ Optimality condition (KKT) $ -uopt & in partial f(A xopt + b)\ A^T uopt & in partial g(xopt) $ ] #matrix-slide(columns: (3fr, 2fr))[ // The optimality condition for $xopt$ and $uopt$ reads as follows $ -uopt & in partial f(A xopt + b) $ // From #eqref(<eq-opt-f>), we derive the following screening rule referred to as #emph[sample elimination rule]: $ [A xopt + b]_i>0 & ==> -uopt_i = f_i^+\ [A xopt + b]_i<0 & ==> -uopt_i = -f_i^- $ For dual screening, #goal[Estimate $innerprod(alpha\, xopt)$] ][ #import "@preview/cetz:0.1.2" #canvas(length: 10%, { import cetz.draw: * plot.plot( name: "plot", size: (5, 2), axis-style: "school-book", // x-ticks: (-1, 1,), // y-ticks: (0, -3,), // x-tick-step: 1, y-tick-step: 1, // x-min: -20, x-max: 20, // y-min: -2, y-max: 2, x-tick-step: none, y-tick-step: none, // x-grid: true, y-grid: true, { plot.add( domain: (0,2), samples: 100, x => (x, 3*x) ) plot.add( domain: (-2, 0), samples: 100, x => (x, -x) ) plot.add-anchor("L", (-1,3)) plot.add-anchor("R", (1,5)) }, ) content("plot.L", $-f_i^-$) content("plot.R", $f_i^+$) }) ] #matrix-slide(columns: (3fr, 2fr))[ $ A^T uopt & in partial g(xopt) $ $ [A^T uopt]_i in (-g^-_i, g^+_i) ==> xopt_i=0. $ For primal screening, #goal[Estimate $innerprod(beta\, uopt)$] ][ #import "@preview/cetz:0.1.2" #canvas(length: 10%, { import cetz.draw: * plot.plot( name: "plot", size: (5, 2), axis-style: "school-book", x-tick-step: none, y-tick-step: none, { plot.add( domain: (0,2), samples: 100, x => (x, 3*x) ) plot.add( domain: (-2, 0), samples: 100, x => (x, -x) ) plot.add-anchor("L", (-1,3)) plot.add-anchor("R", (1,5)) }, ) content("plot.L", $-g_i^-$) content("plot.R", $g_i^+$) }) ] #slide[ *Primal safe region.* We prove that $ xopt in X:={ x: g(x) + innerprod(partial f(A x_0 + b) \, x) <= g(x_0) + innerprod(partial f(A x_0 + b) \, x_0)} $ Screening test $ min quad & innerprod(alpha\, x)\ "s.t." quad & x in X $ FACT: Its dual problem is 1D and can be solved by a combinatorial approach. 
] #slide[ *Dual safe region.* We proved that $ uopt in U:={u in RR^m: innerprod(b \, u_0) <= innerprod(b\, u) <= P(x_0), u in [ -f^- , f^+ ] } $ Screening test $ min quad & innerprod(beta\, u)\ "s.t." quad & u in U $ FACT: This is a continuous knapsack problems with two bouding values, that can be solved in linear time using combinatorial approach. ] #slide[ #table( columns: (1fr, 1.5fr), inset: 0.5em, [*Existing safe regions*], [*Type of safe screening*], [Ghaoui], [feature], [XXX], [sample], [Shibagaki], [feature & sample], [Cedric], [feature], [#text(blue)[Our safe regions]], [feature & sample (simpler, tighter)] ) ] #slide[ #bibliography("references.bib", style: "annual-reviews-author-date") ] // #focus-slide(background-img: image("background.svg"))[ // *Another variant with an image in background...* // ] // #matrix-slide[ // left // ][ // middle // ][ // right // ] // #matrix-slide(columns: 1)[ // top // ][ // bottom // ] // #matrix-slide(columns: (1fr, 2fr, 1fr), ..(lorem(8),) * 9)