Dataset columns: repo (string), file (string), language (2 classes), license (16 classes), content (string).
https://github.com/smorad/um_cisc_7026
https://raw.githubusercontent.com/smorad/um_cisc_7026/main/common.typ
typst
#import "@preview/polylux:0.3.1": * #import themes.university: * #import "@preview/cetz:0.2.2": canvas, draw, plot #import "@preview/cetz:0.2.2" #set text(size: 25pt) #set math.vec(delim: "[") #set math.mat(delim: "[") #let argmin = $op("arg min", limits: #true)$ #let scan = $op("scan")$ #let cimage(..args) = { align(center + horizon, image(..args)) } #let side-by-side(columns: none, gutter: 1em, align: center + horizon, ..bodies) = { let bodies = bodies.pos() let columns = if columns == none { (1fr,) * bodies.len() } else { columns } if columns.len() != bodies.len() { panic("number of columns must match number of content arguments") } grid(columns: columns, gutter: gutter, align: align, ..bodies) } #let slide_template(doc) = { set text(size: 25pt) set math.vec(delim: "[") set math.mat(delim: "[") doc } #let redm(x) = { text(fill: color.red, $#x$) } #let bluem(x) = { text(fill: color.blue, $#x$) } #let greenm(x) = { text(fill: color.green, $#x$) } // Agenda #let slide_state = state("current_section", none) #let agenda(ag, index) = { /* ag should be list of sections ([Review], [Intro]) index can be none or int */ for i in range(ag.len()){ if index == i { enum.item(i + 1)[#text(weight: "bold", ag.at(i))] {context slide_state.update(ag.at(i))} } else { enum.item(i + 1)[#ag.at(i)] } } } // Slide with preset section #let sslide(content) = { let title = {context slide_state.get()} slide(title: [#title])[ #content ] } // Agenda slide #let aslide(ag, index) = { slide(title: [Agenda])[ #{ context agenda(ag, index) } ] } // Plots of activation functions #let sigmoid = { set text(size: 25pt) canvas(length: 1cm, { plot.plot(size: (8, 6), x-tick-step: 2, y-tick-step: none, y-ticks: (0, 1), y-min: 0, y-max: 1, { plot.add( domain: (-5, 5), style: (stroke: (thickness: 5pt, paint: red)), label: $ sigma(x) $, x => 1 / (1 + calc.pow(2.718, -x)), ) plot.add( domain: (-5, 5), style: (stroke: (thickness: 3pt, paint: blue)), label: $ gradient sigma(x)$, x => (1 / (1 + calc.pow(2.718, -x))) * (1 - 1 / (1 + calc.pow(2.718, -x))), ) }) })} #let relu = { set text(size: 25pt) canvas(length: 1cm, { plot.plot(size: (8, 6), x-tick-step: 2.5, //y-tick-step: 1, y-tick-step: none, y-ticks: (1, 3, 5), y-min: 0, y-max: 5, { plot.add( domain: (-5, 5), style: (stroke: (thickness: 5pt, paint: red)), label: $ sigma(x) $, line: (type: "linear"), x => calc.max(0, x) ) plot.add( domain: (-5, 0), style: (stroke: (thickness: 3pt, paint: blue)), x => 0, ) plot.add( domain: (0, 5), style: (stroke: (thickness: 3pt, paint: blue)), label: $ gradient sigma(x)$, x => 1, ) }) })} #let lrelu = { set text(size: 25pt) canvas(length: 1cm, { plot.plot(size: (8, 6), x-tick-step: 2.5, //y-tick-step: 1, y-tick-step: none, y-ticks: (-0.1, 3, 5), y-min: -1, y-max: 5, { plot.add( domain: (-5, 5), style: (stroke: (thickness: 5pt, paint: red)), label: $ sigma(x) $, line: (type: "linear"), x => calc.max(0.1 * x, x) ) plot.add( domain: (-5, 0), style: (stroke: (thickness: 3pt, paint: blue)), x => -0.1, ) plot.add( domain: (0, 5), style: (stroke: (thickness: 3pt, paint: blue)), label: $ gradient sigma(x)$, x => 1, ) }) })} #let draw_filter(x, y, cells, colors: none) = { import cetz.draw: * grid((x, y), (x + cells.len(), y + cells.at(0).len())) for i in range(cells.len()) { for j in range(cells.at(i).len()) { if (colors != none) { let cell_color = colors.at(cells.at(i).len() - j - 1).at(i) if (cell_color != none){ rect((i, j), (i + 1, j + 1), fill: cell_color) } content((x + i + 0.4, y + j + 0.6), (i, j), cells.at(cells.at(i).len() - j - 1).at(i)) } else { 
content((x + i + 0.4, y + j + 0.6), (i, j), str(cells.at(cells.at(i).len() - j - 1).at(i))) } } } }
https://github.com/xkevio/parcio-typst
https://raw.githubusercontent.com/xkevio/parcio-typst/main/parcio-thesis/chapters/abstract.typ
typst
MIT License
#lorem(130) This thesis template is available at https://github.com/parcio/templates and consists of Chapters @intro[] to @conc[]. It also contains @appendix.
https://github.com/Nerixyz/icu-typ
https://raw.githubusercontent.com/Nerixyz/icu-typ/main/docs/docs/index.md
markdown
MIT License
# icu-datetime

This library is a wrapper around [ICU4X](https://github.com/unicode-org/icu4x)'s `datetime` formatting for Typst, which provides internationalized formatting for dates, times, and timezones.

## Usage

```typst +preview(vertical mode="basic")
#import "@preview/icu-datetime:0.1.2": fmt-date, fmt-time, fmt-datetime

// These functions may change at any time
#import "@preview/icu-datetime:0.1.2": experimental
#import experimental: fmt-timezone, fmt-zoned-datetime

This documentation was built on #fmt-date(datetime.today()).
```

## Date

```typst +preview
#let day = datetime(
  year: 2024,
  month: 5,
  day: 31,
)

#fmt-date(day, locale: "km", length: "full") \
#fmt-date(day, locale: "af", length: "full") \
#fmt-date(day, locale: "za", length: "full") \
```

## Time

```typst +preview
#let time = datetime(
  hour: 18,
  minute: 2,
  second: 23,
)

#fmt-time(time, locale: "id", length: "medium") \
#fmt-time(time, locale: "en", length: "medium") \
#fmt-time(time, locale: "ga", length: "medium") \
```

## Date and Time

```typst +preview(vertical)
#let dt = datetime(
  year: 2024,
  month: 5,
  day: 31,
  hour: 18,
  minute: 2,
  second: 23,
)

#fmt-datetime(dt, locale: "ru", date-length: "full") \
#fmt-datetime(dt, locale: "en-US", date-length: "full") \
#fmt-datetime(dt, locale: "zh-Hans-CN", date-length: "full") \
#fmt-datetime(dt, locale: "ar", date-length: "full") \
#fmt-datetime(dt, locale: "fi", date-length: "full")
```

## Timezones (🚧 experimental)

```typst +preview
#let tz = (
  offset: "-07",
  iana: "America/Los_Angeles",
  zone-variant: "st", // standard
)

#fmt-timezone(
  ..tz,
  local-date: datetime.today(),
  format: "specific-non-location-long"
) \
#fmt-timezone(
  ..tz,
  format: (
    iso8601: (
      format: "utc-extended",
      minutes: "required",
      seconds: "optional",
    )
  )
)
```

## Zoned Datetimes (🚧 experimental)

```typst +preview(vertical)
#let dt = datetime(
  year: 2024,
  month: 5,
  day: 31,
  hour: 18,
  minute: 2,
  second: 23,
)
#let tz = (
  offset: "-07",
  iana: "America/Los_Angeles",
  zone-variant: "st", // standard
)

#fmt-zoned-datetime(dt, tz) \
#fmt-zoned-datetime(dt, tz, locale: "lv") \
#fmt-zoned-datetime(
  dt,
  tz,
  locale: "en-CA-u-hc-h24-ca-buddhist"
)
```
https://github.com/Mc-Zen/tidy
https://raw.githubusercontent.com/Mc-Zen/tidy/main/docs/template.typ
typst
MIT License
#import "@preview/codly:0.1.0": * // The project function defines how your document looks. // It takes your content and some metadata and formats it. // Go ahead and customize it to your liking! #let project( title: "", subtitle: "", abstract: [], authors: (), url: none, date: none, version: none, body, ) = { // Set the document's basic properties. set document(author: authors, title: title) set page(numbering: "1", number-align: center) set text(font: "Linux Libertine", lang: "en") show heading.where(level: 1): it => block(smallcaps(it), below: 1em) // set heading(numbering: (..args) => if args.pos().len() == 1 { numbering("I", ..args) }) set heading(numbering: "I.a") show list: pad.with(x: 5%) // show link: set text(fill: purple.darken(30%)) show link: set text(fill: rgb("#1e8f6f")) show link: underline v(4em) // Title row. align(center)[ #block(text(weight: 700, 1.75em, title)) #block(text(1.0em, subtitle)) #v(4em, weak: true) v#version #h(1.2cm) #date #block(link(url)) #v(1.5em, weak: true) ] // Author information. pad( top: 0.5em, x: 2em, grid( columns: (1fr,) * calc.min(3, authors.len()), gutter: 1em, ..authors.map(author => align(center, strong(author))), ), ) v(3cm, weak: true) // Abstract. pad( x: 3.8em, top: 1em, bottom: 1.1em, align(center)[ #heading( outlined: false, numbering: none, text(0.85em, smallcaps[Abstract]), ) #abstract ], ) // Main body. set par(justify: true) v(10em) show: codly-init codly( languages: ( // typ: (name: "typ", icon: none, color: rgb("#239DAE")), ), ) show raw.where(block: true): set text(size: .95em) show raw.where(block: true): it => pad(x: 4%, it) show raw.where(block: false, lang: "typ").or(raw.where(lang: "notnone")): it => box(inset: (x: 3pt), outset: (y: 3pt), radius: 40%, fill: luma(235), it) set raw(lang: "notnone") body } #let ref-fn(name) = link(label("tidy" + name), raw(name)) #let file-code(filename, code) = pad(x: 4%, block( width: 100%, fill: rgb("#239DAE").lighten(80%), inset: 1pt, stroke: rgb("#239DAE") + 1pt, radius: 3pt, { block(align(right, text(raw(filename))), width: 100%, inset: 5pt) v(1pt, weak: true) move(dx: -1pt, line(length: 100% + 2pt, stroke: 1pt + rgb("#239DAE"))) v(1pt, weak: true) pad(x: -4.3%, code) } )) #let tidy-output-figure(output) = { set heading(numbering: none) set text(size: .8em) disable-codly() figure(align(left, box( width: 80%, stroke: 0.5pt + luma(200), inset: 20pt, radius: 10pt, block( breakable: false, output ) ))) codly() }
https://github.com/typst/packages
https://raw.githubusercontent.com/typst/packages/main/packages/preview/unichar/0.1.0/ucd/block-1FA00.typ
typst
Apache License 2.0
#let data = ( ("NEUTRAL CHESS KING", "So", 0), ("NEUTRAL CHESS QUEEN", "So", 0), ("NEUTRAL CHESS ROOK", "So", 0), ("NEUTRAL CHESS BISHOP", "So", 0), ("NEUTRAL CHESS KNIGHT", "So", 0), ("NEUTRAL CHESS PAWN", "So", 0), ("WHITE CHESS KNIGHT ROTATED FORTY-FIVE DEGREES", "So", 0), ("BLACK CHESS KNIGHT ROTATED FORTY-FIVE DEGREES", "So", 0), ("NEUTRAL CHESS KNIGHT ROTATED FORTY-FIVE DEGREES", "So", 0), ("WHITE CHESS KING ROTATED NINETY DEGREES", "So", 0), ("WHITE CHESS QUEEN ROTATED NINETY DEGREES", "So", 0), ("WHITE CHESS ROOK ROTATED NINETY DEGREES", "So", 0), ("WHITE CHESS BISHOP ROTATED NINETY DEGREES", "So", 0), ("WHITE CHESS KNIGHT ROTATED NINETY DEGREES", "So", 0), ("WHITE CHESS PAWN ROTATED NINETY DEGREES", "So", 0), ("BLACK CHESS KING ROTATED NINETY DEGREES", "So", 0), ("BLACK CHESS QUEEN ROTATED NINETY DEGREES", "So", 0), ("BLACK CHESS ROOK ROTATED NINETY DEGREES", "So", 0), ("BLACK CHESS BISHOP ROTATED NINETY DEGREES", "So", 0), ("BLACK CHESS KNIGHT ROTATED NINETY DEGREES", "So", 0), ("BLACK CHESS PAWN ROTATED NINETY DEGREES", "So", 0), ("NEUTRAL CHESS KING ROTATED NINETY DEGREES", "So", 0), ("NEUTRAL CHESS QUEEN ROTATED NINETY DEGREES", "So", 0), ("NEUTRAL CHESS ROOK ROTATED NINETY DEGREES", "So", 0), ("NEUTRAL CHESS BISHOP ROTATED NINETY DEGREES", "So", 0), ("NEUTRAL CHESS KNIGHT ROTATED NINETY DEGREES", "So", 0), ("NEUTRAL CHESS PAWN ROTATED NINETY DEGREES", "So", 0), ("WHITE CHESS KNIGHT ROTATED ONE HUNDRED THIRTY-FIVE DEGREES", "So", 0), ("BLACK CHESS KNIGHT ROTATED ONE HUNDRED THIRTY-FIVE DEGREES", "So", 0), ("NEUTRAL CHESS KNIGHT ROTATED ONE HUNDRED THIRTY-FIVE DEGREES", "So", 0), ("WHITE CHESS TURNED KING", "So", 0), ("WHITE CHESS TURNED QUEEN", "So", 0), ("WHITE CHESS TURNED ROOK", "So", 0), ("WHITE CHESS TURNED BISHOP", "So", 0), ("WHITE CHESS TURNED KNIGHT", "So", 0), ("WHITE CHESS TURNED PAWN", "So", 0), ("BLACK CHESS TURNED KING", "So", 0), ("BLACK CHESS TURNED QUEEN", "So", 0), ("BLACK CHESS TURNED ROOK", "So", 0), ("BLACK CHESS TURNED BISHOP", "So", 0), ("BLACK CHESS TURNED KNIGHT", "So", 0), ("BLACK CHESS TURNED PAWN", "So", 0), ("NEUTRAL CHESS TURNED KING", "So", 0), ("NEUTRAL CHESS TURNED QUEEN", "So", 0), ("NEUTRAL CHESS TURNED ROOK", "So", 0), ("NEUTRAL CHESS TURNED BISHOP", "So", 0), ("NEUTRAL CHESS TURNED KNIGHT", "So", 0), ("NEUTRAL CHESS TURNED PAWN", "So", 0), ("WHITE CHESS KNIGHT ROTATED TWO HUNDRED TWENTY-FIVE DEGREES", "So", 0), ("BLACK CHESS KNIGHT ROTATED TWO HUNDRED TWENTY-FIVE DEGREES", "So", 0), ("NEUTRAL CHESS KNIGHT ROTATED TWO HUNDRED TWENTY-FIVE DEGREES", "So", 0), ("WHITE CHESS KING ROTATED TWO HUNDRED SEVENTY DEGREES", "So", 0), ("WHITE CHESS QUEEN ROTATED TWO HUNDRED SEVENTY DEGREES", "So", 0), ("WHITE CHESS ROOK ROTATED TWO HUNDRED SEVENTY DEGREES", "So", 0), ("WHITE CHESS BISHOP ROTATED TWO HUNDRED SEVENTY DEGREES", "So", 0), ("WHITE CHESS KNIGHT ROTATED TWO HUNDRED SEVENTY DEGREES", "So", 0), ("WHITE CHESS PAWN ROTATED TWO HUNDRED SEVENTY DEGREES", "So", 0), ("BLACK CHESS KING ROTATED TWO HUNDRED SEVENTY DEGREES", "So", 0), ("BLACK CHESS QUEEN ROTATED TWO HUNDRED SEVENTY DEGREES", "So", 0), ("BLACK CHESS ROOK ROTATED TWO HUNDRED SEVENTY DEGREES", "So", 0), ("BLACK CHESS BISHOP ROTATED TWO HUNDRED SEVENTY DEGREES", "So", 0), ("BLACK CHESS KNIGHT ROTATED TWO HUNDRED SEVENTY DEGREES", "So", 0), ("BLACK CHESS PAWN ROTATED TWO HUNDRED SEVENTY DEGREES", "So", 0), ("NEUTRAL CHESS KING ROTATED TWO HUNDRED SEVENTY DEGREES", "So", 0), ("NEUTRAL CHESS QUEEN ROTATED TWO HUNDRED SEVENTY DEGREES", "So", 0), ("NEUTRAL CHESS ROOK ROTATED TWO HUNDRED SEVENTY 
DEGREES", "So", 0), ("NEUTRAL CHESS BISHOP ROTATED TWO HUNDRED SEVENTY DEGREES", "So", 0), ("NEUTRAL CHESS KNIGHT ROTATED TWO HUNDRED SEVENTY DEGREES", "So", 0), ("NEUTRAL CHESS PAWN ROTATED TWO HUNDRED SEVENTY DEGREES", "So", 0), ("WHITE CHESS KNIGHT ROTATED THREE HUNDRED FIFTEEN DEGREES", "So", 0), ("BLACK CHESS KNIGHT ROTATED THREE HUNDRED FIFTEEN DEGREES", "So", 0), ("NEUTRAL CHESS KNIGHT ROTATED THREE HUNDRED FIFTEEN DEGREES", "So", 0), ("WHITE CHESS EQUIHOPPER", "So", 0), ("BLACK CHESS EQUIHOPPER", "So", 0), ("NEUTRAL CHESS EQUIHOPPER", "So", 0), ("WHITE CHESS EQUIHOPPER ROTATED NINETY DEGREES", "So", 0), ("BLACK CHESS EQUIHOPPER ROTATED NINETY DEGREES", "So", 0), ("NEUTRAL CHESS EQUIHOPPER ROTATED NINETY DEGREES", "So", 0), ("WHITE CHESS KNIGHT-QUEEN", "So", 0), ("WHITE CHESS KNIGHT-ROOK", "So", 0), ("WHITE CHESS KNIGHT-BISHOP", "So", 0), ("BLACK CHESS KNIGHT-QUEEN", "So", 0), ("BLACK CHESS KNIGHT-ROOK", "So", 0), ("BLACK CHESS KNIGHT-BISHOP", "So", 0), (), (), (), (), (), (), (), (), (), (), (), (), ("XIANGQI RED GENERAL", "So", 0), ("XIANGQI RED MANDARIN", "So", 0), ("XIANGQI RED ELEPHANT", "So", 0), ("XIANGQI RED HORSE", "So", 0), ("XIANGQI RED CHARIOT", "So", 0), ("XIANGQI RED CANNON", "So", 0), ("XIANGQI RED SOLDIER", "So", 0), ("XIANGQI BLACK GENERAL", "So", 0), ("XIANGQI BLACK MANDARIN", "So", 0), ("XIANGQI BLACK ELEPHANT", "So", 0), ("XIANGQI BLACK HORSE", "So", 0), ("XIANGQI BLACK CHARIOT", "So", 0), ("XIANGQI BLACK CANNON", "So", 0), ("XIANGQI BLACK SOLDIER", "So", 0), )
https://github.com/Myriad-Dreamin/tinymist
https://raw.githubusercontent.com/Myriad-Dreamin/tinymist/main/crates/tinymist-query/src/fixtures/match_def/param_in_init.typ
typst
Apache License 2.0
#let f(a) = { show it: /* ident after */ it => it; };
https://github.com/Myriad-Dreamin/typst.ts
https://raw.githubusercontent.com/Myriad-Dreamin/typst.ts/main/fuzzers/corpora/math/delimited_02.typ
typst
Apache License 2.0
#import "/contrib/templates/std-tests/preset.typ": * #show: test-page // Test manual matching. $ [|a/b|] != lr(|]a/b|]) != [a/b) $ $ lr(| ]1,2\[ + 1/2|) $
https://github.com/N3M0-dev/Notes
https://raw.githubusercontent.com/N3M0-dev/Notes/main/CS/Algorithm/Intro_to_Algor/Review_Final/review.typ
typst
#import "@local/note_template:0.0.1": * = Review For the Final Exam == Exam Content + Time complexity calc + Sort and divide-and-conquer + Dynamic programming + Greedy algorithms + Search algorithms + Amortized analysis + Graph theory + String algorithms #note()[ According to reliable information, pseudocode is not required except for divide-and-conquer, dynamic programming and greedy algorithms, and this final review is extreamly meant to get high points regardless of wether you turely understand these knowledges or not or if you will be capabel of using these knowledges IRL. This review is ONLY targeted at PONTS ON PAPER. ] _Being able to wirte good code in real life has nothing to do with getting high points in exams._ #align(right)[_said by *me*_] == Time Complexity Calculation === Notations First of all, we need strict mathematical definitions of the $O, Omega, Theta, o, omega, theta$ notations to further discuss the time complexity of the algorithms. #def(( [The O-notation: $ O(g(n))={f(n): &"there exist positive constants" c "and" n_0 "such that"\ &0<=f(n)<=c g(n) "for all" n>=n_0} $ ], [The o-notation: $ o(g(n))={f(n): &"for any positive constant" c>0, "there exists a constant" n_0>0\ &"such that" 0<=f(n)<c g(n) "for all" n>=n_0 } $ ], [The $Theta$-notation: $ Theta(g(n))={f(n): &"there exist positive constants" c_1,c_2 "and" n_0 "such that"\ &0<=c_1 g(n) <= f(n) <=c_2 g(n) "for all" n>=n_0} $ ], [The $Omega$-notation: $ Omega(g(n))={f(n): &"there exist positive constants" c "and" n_0 "such that"\ &0 <= c g(n) <= f(n) "for all" n>=n_0} $ ], [The $omega$-nonation: $ omega(g(n))={f(n): &"for any positive constant" c>0, "there exists a constant" n_0>0\ &"such that" 0<=c g(n)<=f(n) "for all" n>=n_0 } $ ] )) === Solving the Recurrences ==== Subsitution Method + Guess the solution + Subsitutite and prove through induction ==== Recurtion-tree Method + Draw the recurtion tree + Sum up the nodes at the same depth + Calculate the tree height + Sum up all costs ==== Master Method #theorem()[ The Master Theorem: For recurrences like $T(n)=a(T/b)+f(n)$: + If $f(n) = O(n^(log_b (a-epsilon)))$ for some constant $epsilon>0$, then $T(n) = Theta(n log_b a)$. + If $f(n) = Theta(n^(log_b a))$, then $T(n) = Theta(n^(log_b a))$. + If $f(n) = O(n^(log_b (a+epsilon)))$ for some constant $epsilon>0$, and if $a f(n/b) <= c f(n)$ for some constant $c<1$ and all sufficiently largh n, then $T(n) = Theta(f(n))$. ] == Sort and divide-and-conquer === Divide-and-Conquer Steps of divide-and-conquer: - Divide: Divide the problem into subproblems - Conquer: Solve the subproblems recursively. If the subproblems are small enough, solve them in a straightforword manner. - Combine: Combine the solutions to the subproblems to construct the solution to the original problem. ==== Merge Sort ==== Chess Board Cover ==== Multiplication of Big numbers ==== Linear Time Select (PPT) == Dynamic Programming Proof is not required in the final, so we only need to know the problem it can solve and the procedure of applying dp. The key to the solution to the problems is the clearly defined optimal structure and the recursive solution, or simply, the equation. === Knapsack Problem === Matrix Chain Multiplication === Longest Common Subsequence === Optimal Binary Tree == Greedy algorithms Greedy algorithms can only solve problems which the optimal solution can be obtained by adding the optiaml solution *at the moment* together. 
=== Activity Selection Problem

=== Huffman Codes

=== Minimum Spanning Tree

== Search Algorithms

=== DFS and BFS

DFS uses a stack; BFS uses a queue (see the sketch at the end of this review).

=== Hill Climbing

Similar to gradient descent.

=== Best-First

Create a heap ordered by an estimate function and use it to determine the search order: a priority queue.

=== Branch and Bound

Prune, in advance, branches that cannot lead to the optimal solution, to reduce cost.

=== A\* Algorithm

=== Staffing Problem

=== Travelling Salesman Problem
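As referenced in the DFS/BFS item above, a minimal Python sketch of queue-based BFS on a hypothetical graph; swapping the FIFO queue for a LIFO stack turns the same loop into iterative DFS.

```py
from collections import deque

def bfs(graph, start):
    # The FIFO queue yields breadth-first order; using a LIFO stack
    # (list.append / list.pop) instead would yield depth-first order.
    visited, queue, order = {start}, deque([start]), []
    while queue:
        node = queue.popleft()
        order.append(node)
        for neighbour in graph[node]:
            if neighbour not in visited:
                visited.add(neighbour)
                queue.append(neighbour)
    return order

# Hypothetical adjacency-list graph:
graph = {"a": ["b", "c"], "b": ["d"], "c": ["d"], "d": []}
print(bfs(graph, "a"))  # ['a', 'b', 'c', 'd']
```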
https://github.com/jgm/typst-hs
https://raw.githubusercontent.com/jgm/typst-hs/main/test/typ/text/raw-code-01.typ
typst
Other
#set page(width: 180pt)
#set text(6pt)

```rust
/// A carefully designed state machine.
#[derive(Debug)]
enum State<'a> { A(u8), B(&'a str) }

fn advance(state: State<'_>) -> State<'_> { unimplemented!("state machine") }
```
https://github.com/TechnoElf/mqt-qcec-diff-thesis
https://raw.githubusercontent.com/TechnoElf/mqt-qcec-diff-thesis/main/template/contents.typ
typst
#import "colour.typ": * #let contents_page() = { set text(font: "TUM Neue Helvetica") show outline.entry.where( level: 1 ): it => { v(0.5em) strong(it) } outline(indent: auto) }
https://github.com/jgm/typst-hs
https://raw.githubusercontent.com/jgm/typst-hs/main/test/typ/compiler/set-02.typ
typst
Other
// Test that block spacing and text style are respected from
// the outside, but the more specific fill is respected.
#set block(spacing: 4pt)
#set text(style: "italic", fill: eastern)
#let x = [And the red #parbreak() lay silent!]
#text(fill: red, x)
https://github.com/EGmux/PCOM-2023.2
https://raw.githubusercontent.com/EGmux/PCOM-2023.2/main/lista2/lista2q8.typ
typst
=== An analog signal is sampled at the Nyquist rate *$f_s$* and quantized into *$L$* levels. Determine the duration *$tau$* of 1 bit of the encoded binary signal.
\
How do we relate $f_s$ and $L$? We know that #math.equation(block: true, $ l = log_2 L $) and #math.equation(block: true, $ R_b = f_s dot l $) and finally #math.equation(block: true, $ tau = R_b^(-1) "s/bit"$) Making the appropriate substitutions... #math.equation(block: true, $ tau &= 1/(f_s dot l) && \ &= 1/(f_s dot log_2 L)$) and that is it.
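As an illustrative numeric check (the values below are hypothetical, not given in the problem), a short Python computation of the final formula:

```py
import math

# Hypothetical values, only to illustrate the formula above:
f_s = 8_000        # Nyquist-rate sampling frequency in Hz
L = 256            # number of quantization levels

l = math.log2(L)   # bits per sample: log2(256) = 8
R_b = f_s * l      # bit rate: 64_000 bit/s
tau = 1 / R_b      # bit duration
print(tau)         # 1.5625e-05 s/bit
```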
https://github.com/mangkoran/utm-thesis-typst
https://raw.githubusercontent.com/mangkoran/utm-thesis-typst/main/sample.typ
typst
MIT License
#import "@preview/tablex:0.0.7": tablex, rowspanx, colspanx // template #import "01_cover.typ" as cover #import "02_declaration_thesis.typ" as declaration_thesis #import "03_declaration_supervisor.typ" as declaration_supervisor #import "04_declaration_cooperation.typ" as declaration_cooperation #import "05_certification.typ" as certification #import "06_title.typ" as title #import "07_declaration_originality.typ" as declaration_originality #import "08_acknowledgement.typ" as acknowledgement #import "09a_abstract_en.typ" as abstract_en #import "09b_abstract_my.typ" as abstract_my #let data = ( title: "UTM Thesis Typst", author: "<NAME>", degree: "Bachelor of Computer Science", supervisor: ( "Dr. Foad", ) ) #cover.content( // title: data.title, // author: data.author, ) #declaration_thesis.content( // title: data.title, // author: data.author, // supervisor: data.supervisor, ) #declaration_supervisor.content( // degree: data.degree, // supervisor: data.supervisor, ) #declaration_cooperation.content() #certification.content() #title.content() #declaration_originality.content() #acknowledgement.content() #abstract_en.content() #abstract_my.content() #set heading(numbering: "1.1.") = General Requirements == Thesis, Dissertation, and Master Project Report A thesis, dissertation or Master project report is a document that presents students' research and findings in support of their candidature for an academic degree. In UTM, a project report refers to the documentation of the research component prepared and submitted by the candidate for the award of a master's programme by coursework. On the other hand, a dissertation refers to a document submitted by a student in partial fulfilment for the award of a degree through a mixed-mode program. Finally, a thesis refers to the documentation of original research prepared and submitted by a candidate for the award of a degree for a doctoral or master's programme based on research. == UTM Thesis Format A thesis can be written in one of the following formats: - Conventional thesis: A conventional thesis typically consists of five chapters: the introduction, the literature review, the methodology, the results, and the conclusion. - Thesis by publications: A thesis by publications consists of a compilation of scholarly papers published by the student as the first author during their candidature. Students who intend to submit their theses in this format must adhere to the publication requirements outlined in the graduate school regulations. Following are guidelines for composing and preparing a dissertation or thesis in accordance with the two formats. === Conventional Thesis/Dissertation In general, a conventional thesis/dissertation consists of three main parts: preliminary, main body and supplementary. Details on typesetting are provided in @layout. The general structure of a conventional thesis/dissertation is shown in @thesis_structure[Table] below. 
#figure(
  tablex(
    columns: (auto, auto),
    align: center + horizon,
    [*Component*], [*Note*],
    colspanx(2)[*Preliminary*], (),
    [Front Cover], rowspanx(6)[No page number],
    [Blank Page], (),
    [Declaration of Thesis/Dissertation/Project Form], (),
    [Declaration by Supervisor(s)], (),
    [Declaration of Cooperation], (),
    [Certification of Examination], (),
    [Title Page], [Roman numeral (i), page number hidden],
    [Author's Declaration of Originality], [Roman numeral (iii)],
    [Acknowledgement], [Roman numeral (v)],
    [Abstract (English)], [Roman numeral (vi)],
    [Abstract (Malay)], [Roman numeral (vii)],
    [Table of Contents], rowspanx(6)[Roman numeral (cont.)],
    [List of Tables], (),
    [List of Figures], (),
    [List of Abbreviations], (),
    [List of Symbols], (),
    [List of Appendices], (),
    colspanx(2)[*Main Body*], (),
    [Ch 1: Introduction], [Arabic numeral (1)],
    [Ch 2: Literature Review], rowspanx(6)[Arabic numeral (cont.)],
    [Ch 3: Methodology], (),
    [Ch 4: Results], (),
    [Ch 5: Discussion], (),
    [Ch 6: Conclusion], (),
    [References], (),
    colspanx(2)[*Supplementary*], (),
    [Appendices], rowspanx(2)[Arabic numeral (cont.)],
    [List of Publications and Papers Presented], (),
  ),
  supplement: [Table],
  caption: [General Structure of a Conventional Thesis/Dissertation]
) <thesis_structure>

= Layout and Content <layout>

== Page Layout

The text should be presented in portrait layout. Landscape layout may be used for figures and tables.

== Type of Paper

For viva-voce copies, high-quality A4 size (210 x 297 mm) paper with a weight of 80 grams per square metre (gsm) in white simili should be used. For hardbound copies, it is necessary to use B5-size paper. Photocopies of the thesis must be on similar quality paper.

== Typeface and Font Size

A thesis should be typed using a word or text processor. The 12-point font size commonly used in printed academic materials should be used. Examples of suggested font types are Serif (Times New Roman or Book Antiqua only) and Sans Serif (Arial, Tahoma or Calibri only). The same font type should be used throughout the thesis.

== Margins

All pages should have the same margin. The left and right margins should be 3.25 cm (for binding purposes), and the top and bottom margins should be 2.5 cm.

== Spacing

The following guidelines should be observed:

- The spacing between the chapter title and the first line of a text should be four (4) line spaces.
- The spacing between the last line of a text and the title of a sub-section should be four (4) line spaces.
- The spacing between the title of a sub-section and the first line of a text should be two (2) line spaces.
- The spacing between paragraphs should be two (2) line spaces.
- The number and the title of a sub-section should be aligned with the left margin.
- The first line of a paragraph should be indented by 1.27 cm (0.5 in) from the left margin.
- A new paragraph should not begin on the last line of a page.
- The spacing between the last line of a text and a table or a figure or an illustration should be two (2) line spaces.
- The spacing after a comma (,) should be one (1) character space.

== Pagination

Pages should be numbered consecutively, including the blank page when transitioning to a new chapter. The page numbers should be positioned at the bottom centre, 1.25 cm from the bottom edge. Numbering should adhere to the following guidelines:

- Preliminary pages of a thesis, starting from the title page, should be numbered using lowercase Roman numerals (i, ii, iii, etc.). Pages in the main text should be numbered using Arabic numerals (1, 2, 3, etc.).
- The first page should be the title page. This page should be counted as 'i' but should not be printed.
- Each chapter's first page should commence on an odd page with the page number printed.
- Theses that require partition pages should not have their partition pages numbered or counted.
- If a chapter terminates on an odd page number, a blank page should be added and counted, but the page number should not be printed. (In MS Word, insert 'Section Breaks Odd Page' under the Layout Tab).
https://github.com/polarkac/MTG-Stories
https://raw.githubusercontent.com/polarkac/MTG-Stories/master/stories/037%20-%20Ravnica%20Allegiance/010_The%20Gathering%20Storm%3A%20Chapter%2016.typ
typst
#import "@local/mtgstory:0.2.0": conf #show: doc => conf( "The Gathering Storm: Chapter 16", set_name: "Ravnica Allegiance", story_date: datetime(day: 25, month: 09, year: 2019), author: "<NAME>", doc ) Ral and Tomik walked side by side, heading nowhere in particular. Rain poured down, drumming on the awnings of shops along the street, lines of splashes marching across the puddles that formed between the cobblestones. The drops bent away above the pair, leaving a dry space around them and a curtain of extra-thick rain beyond. When it was coming down this hard, it left them in their own tiny world, isolated from everything beyond by a curtain of rushing, foaming water. Mist rose from ricochets and coiled around their boots. "I wasn’t sure you’d come," Tomik said eventually. After the euphoria had worn off, he’d pulled away from Ral, huddled in on himself in a way that made Ral’s throat go thick. Ral wanted to take his hand, but didn’t. #emph[Not yet.] Tomik’s glasses were beaded with raindrops. "I wasn’t going to," Ral said. "Not at first." "What changed your mind?" "I . . ." Ral glanced at Tomik. "You want the truth?" Tomik, arms crossed, gave a jerky nod. "I want to say I was worried about you," Ral said. "But I know you can take care of yourself. Most of the time, anyway." Tomik smiled, very slightly, and Ral felt himself relax a little. "I know you," Ral said. "I know how much you care about your work with Teysa, what it means to you. I thought, if you were willing to go against her on this, to risk everything . . . it’s probably pretty damn important." "More important than working on your machine?" Tomik said. "We’re nearly done," Ral said. "All that’s left is actually building the thing, and I can only do so much. I just get . . . involved." "I know you, too," Tomik said. "You don’t," Ral said. "Not really. There are parts of my life that I . . . don’t talk about." "Because they didn’t happen on Ravnica?" Tomik said. "Has anyone ever told you that you’re too clever for your own good?" Tomik smiled. "You. Repeatedly." "Yes," Ral said. "Because I wasn’t living on Ravnica. And . . ." He took a deep breath. "Some of the things that happened to me made it hard to trust people. To see them as anything but tools." "Teysa’s like that," Tomik said quietly. "She’s not a bad person, Ral. But she was raised in this nightmare, and she can’t escape." "You and I . . ." Ral shook his head. "We don’t have to be that way. Not to each other. I . . ." He stroked his beard, irritably. "I want something different." "Like, actually caring about someone?" Tomik said. "Like that," Ral allowed. "Well." Tomik slipped his hand into Ral’s and bumped against his shoulder. "I don’t know if you’re there yet. But you’re learning." #v(0.35em) #line(length: 100%, stroke: rgb(90%, 90%, 90%)) #v(0.35em) In Ral’s dream, he leaned over the desk, adding the finishing touches. Building the thing he wanted hadn’t been easy, and he already had ideas for improving it. The power-storage cells were heavy and awkward, and didn’t hold nearly enough energy in their complicated network of metal and ceramic as he would have liked. At least he’d managed to get away from liquid storage—#emph[carrying around a couple of gallons of acid on my back, now there’s a recipe for disaster . . .] In some other place, in some other Plane, there might be better materials to be found. He had a vision of a crystal lattice, and spinning coils, but finding something with the right properties had thus far proved impossible. Even so. 
He looked down at his creation and smiled, closing the last compartment on one side. Gingerly, he picked it up and slid his arms through the straps, letting the weight of the thing settle on his back. A pair of gloves hung off it from long, insulated cords, and he slipped them on, flexing his fingers and feeling the faintest crackle of energy. It needed to be charged, of course. But even empty, the accumulator gave him a feeling of power. The energy of Ral’s magic came from the storms that raged overhead, and so his strength had always waxed and waned as unpredictably as the weather. #emph[Not anymore.] Now he would carry his own storm, in leather, ceramic, and steel. "Ingenious," said a voice from the doorway. "You have learned a great deal since we last spoke, my friend." Ral looked up, alarmed. The front door was locked, he knew for certain, and so was the door to his office. Nonetheless, it now stood open, and an older man looked around the threshold. He was tall, gray-haired, impeccably well-dressed in clothes of a cut that somehow suggested he was from . . . elsewhere. Though it had now been a decade since they’d last spoken, Ral could hardly forget him. "Hello, Bolas," he said, forcing calm into his voice. "Zarek," <NAME> said politely. "May I come in?" Ral nodded. "Doors and locks don’t appear to mean much to you." "Ah, but politeness has a power greater than any padlock," Bolas said, stepping into the office. He looked around, approvingly, at the blueprints pinned to the walls, the desk cluttered with tools and parts. "You’ve been busy." Ral shrugged. "I do my best." "And your best is quite extraordinary," Bolas said. "Stranded here, without a penny to your name, bleeding out in an alley. And within ten years, here you are. Master of a tidy little empire, with a dozen inventors bowing and scraping for the privilege of assisting you. You didn’t even have to kill very many people to do it." Bolas grinned, his teeth white and very slightly sharp. "Not that that would necessarily be a drawback, of course." "Did you know?" Ral said. "Back in Tovrna. Did you know what I was?" "That you were a Planeswalker?" Bolas said. He shrugged. "Let us say I . . . suspected. Planeswalkers are exceedingly rare, and they cannot be taught to use their Spark. It must ignite on its own, or not at all, which often entails a certain amount of trauma." "So you set me up," Ral said. "I did nothing of the kind. I gave you what you wanted, did I not?" Bolas’s grin widened. "It’s hardly my fault that it went wrong. Young passions, you know." "Why?" Ral said. "You’re a Planeswalker, too, or you wouldn’t be standing here. So why bother having me shake down those poor bastards for coppers?" "It was never about them," Bolas said. "It was always about you. As I said, Planeswalkers are rare. When I think someone has potential, I do my best to . . . encourage them. And to place them in my debt, to facilitate our later collaboration." "I think my debts to you are paid," Ral said, stepping around the desk. "On the contrary," Bolas said. "Do you think you would have accomplished this—any of this—without my help?" "Your #emph[help] nearly got me killed." "I pushed you to find out what you were truly capable of," Bolas said. "And you have. Isn’t that worth something? Haven’t I done you a favor?" Ral stared at the man, with his sharp-toothed smile. Very slowly, he nodded. "You might put it that way," he said. "Then we agree that you owe me," Bolas said. "And I’ve come to collect, <NAME>. 
Join me, and we will accomplish wonders." "Let me tell you what you taught me," Ral said. "Loyalty is for fools. Trust is for suckers. And allies are there to be used, until they’re no longer useful." He shrugged, settling the weight of the pack. "So, thank you for the lesson. But I won’t be repaying whatever debt you think I owe." "Regrettable," Bolas said. His smile had disappeared. "Your position here—" "You’re going to threaten to take everything I’ve built," Ral said. "Go ahead. I’m done with it. I have this"—he patted his backpack, then touched the side of his head—"and what’s in here. That’s all I need, in the end." "Don’t imagine you can escape me, Zarek," Bolas said. "Anywhere you can go, I can follow." "I don’t need to escape," Ral said. "Just stay one step ahead." He focused his mind. Planeswalking was just like falling, once you learned the trick of it. Among the myriad worlds, he directed his mind’s eye to a familiar one. #emph[Time to go home.] To Ravnica. But not to Tovrna. #emph[No more wasting my time in the boondocks. ] The Tenth District was the heart of the city-Plane, and that was where he had to be. How, precisely, he would fit in, he didn’t yet know, but he was no longer worried. With his talents, and his power, there would always be a place for him. #emph[And if someone’s already in that place, well, that’s just too bad for them . . .] #v(0.35em) #line(length: 100%, stroke: rgb(90%, 90%, 90%)) #v(0.35em) Vraska stared at her throne. It had felt good, in the moment. Righteous, even. The elves whose contorted, petrified bodies comprised the gruesome chair had been her enemies, and they’d spent decades pushing down anyone in the Golgari who wasn’t one of them. Gorgons and kraul alike had suffered under the devkarin boot, and each screaming prisoner dragged to the throne and frozen in place with a wave of Vraska’s power was a tiny measure of revenge. When it was finished, she’d promised herself, she’d do better. #emph[And what have I accomplished?] An ancient and beautiful kraul city reduced to rubble. Thousands of Golgari dead. All for nothing. All for #emph[Bolas] . #emph[Xeddick.] The albino kraul should never have been on the battlefield, but he’d insisted, and she’d been too soft-hearted to refuse. As a result she’d had to watch, helpless, as Aurelia carved him apart, then flee lest she be the next to be spitted on the angel’s blade. #emph[I’ll have her in my garden someday. I swear it.] She clenched her fists, knowing how pitiful that sounded. #emph[Xeddick was right all along. I never should have let him unlock my memories.] Her time on Ixalan had dulled her purpose. Made her soft. #emph[I would have been better off if I’d never remembered meeting Jace, or . . . or any of it.] Bolas still had his claws around her throat, so what did it matter? #emph[At least if I’d forgotten all of it, I might have had a chance at being happy in his service.] She stood alone, breathing hard, her tendrils waving in agitation. She wanted to hit something, #emph[hurt] someone. To feel the heat behind her eyes and the softness of flesh hardening into stone. She wanted . . . #emph[<NAME>. ] Vraska leaned against one of the pillars, turning away from the hateful throne. #emph[Someone who understands.] But there was no one left. A scrape of claws on stone announced a visitor. Vraska looked up, lips stretching to bare her needle-sharp teeth. Mazirek entered, forelegs waving in brief obeisance. "You wished to see me, my queen," the kraul said, in his clicking, buzzing tone. 
"When I wished to see you was by my side, at the battle," Vraska said, pushing herself away from the pillar. "Strangely, that was when I found you absent." "I regret that I was forced to leave you," the death priest said. "I was set on by a pack of Orzhov thrulls, and it took a few moments to destroy them. The tides of battle are difficult to navigate, even for me." "Indeed." Vraska felt power building in her eyes, unbidden. #emph[It would be so much easier to just mount him in my garden.] She blinked it away, and shook her head. #emph[He’s still too useful.] Mazirek, apparently unaware how close he had come to destruction, did his little half-bow again. "Was there anything else you needed of me, my queen?" "No," Vraska said. "Get out of my sight." The dark green kraul withdrew. Vraska stalked across the empty throne room, one hand on her saber, and threw herself restlessly into twisted throne. When someone rapped at one of the doors, she nearly screamed in frustration. "What?" "A guest." A man’s voice, and not one she recognized. "Hoping for a moment of your time." Only a few people would dare to disturb her in her sanctum. Mazirek, Xeddick, Storrev. #emph[And] — "Come in, then," Vraska said. "I can’t stop you." The puppet Bolas had sent this time was a young woman in the tattered remains of a Boros Legion uniform. She was caked with mud and slime, and a long cut across her cheek had already gone foul in the ever-damp heat of the Undercity, swollen red and dripping pus. Two Erstwhile escorted her, moving with their stiffly formal gait in their ancient finery. #emph[Showing off.] The Erstwhile were the knife Bolas held to her throat, a knife she’d placed there with her own hands. "Are you here to chastise me?" Vraska said, lounging on the throne with an affected casualness. "Lecture me like a disappointed schoolmaster?" "What would be the point?" Bolas’s puppet said, stepping forward. At an unseen command, the zombie escorts turned and walked out. "It’s obvious you did your best. Your best was simply not good enough." Somehow, that stung more than she’d expected. "The Golgari can’t stand alone against an alliance of half the other guilds. I would have thought someone of your intelligence could have figured that out." "I am only a shadow of my master," the puppet said. "Delivering his instructions." "If you want me to attack that machine they’re building, you can tell your #emph[master] that it can’t be done." Her spies had been observing the work, and the defenses going up around it. "Ral’s people have been putting in minefields and flame turrets and who knows what else, and the Azorius lawmages have surrounded the site with so many wards that a herd of trolls couldn’t dent them. Whatever it is they’re making, it’s here to stay. I’m not sending more of my people to their deaths." "An attack on the resonator is not required," Bolas’s puppet said, smiling slightly. "I have assigned that task to a more . . . competent agent. For you, my master has reserved the job of disrupting Zarek’s backup plan." "What backup plan?" Vraska said. "There is a tower, on the surface, that contains a very clever machine. When Zarek’s scheme fails—and it #emph[will] fail—he will realize he has lost, and reach for his final throw of the dice. You will be there to stop him. My master requires all possible contingencies be accounted for, even remote ones. You will send your forces to block him." "No." Vraska got up, abruptly, and stalked across the throne room. "No?" 
The puppet quirked an eyebrow, the expression out of place on her filthy, bloodstained face. "Need I remind you of the consequences of betrayal again, Vraska?" "I won’t send my forces. I’m done spending Golgari lives for you." Vraska stood opposite the puppet and bared her teeth. "I’ll go myself, and kill Zarek. I trust that will be sufficient?" "It will." The puppet leaned closer. It smelled of rot. "But failure is not an option. Not for you. <NAME> may have mercy on you, but Bolas will have none. When he is victorious—and he #emph[will] be victorious—he will deal with you as your service to him deserves." "I understand," Vraska said. "Are you done making threats?" "For now." The puppet smiled. "And I am finished with this shell. Dispose of it, will you?" The Boros woman blinked, and her eyes focused on Vraska and went very wide. She screamed until Vraska grabbed her by the throat, focused her power, and let it pulse through her eyes. When she let the stone statue of the terrified soldier slip through her fingers, it shattered into a hundred pieces on the floor. #v(0.35em) #line(length: 100%, stroke: rgb(90%, 90%, 90%)) #v(0.35em) Vraska kept her armory in a small room adjoining her personal chambers. Over the years, she’d accumulated quite a bit of armor and weaponry, and once she’d taken control of the Golgari she’d moved her various stashes from their hidey-holes to the palace. It was as much a repository of her memories as anything else. There were suits of armor, each from a different time in her life—the tight blacks of a rooftop assassin, more elaborate suits of scale-mail she’d worn to impress, the costume she’d returned to Ravnica in from her life in Ixalan. There were sabers in a long rack, from the plain weapon she’d carried in her early days through jeweled showpieces given to her as gifts on her ascension to queen. She ran her fingers along the steel blades, lost in thought. Finally she stopped in front of one sword, its edge serrated like a shark’s tooth, a brutal punching spike built into the hilt. It was an ugly weapon, viciously functional, designed to inflict maximum pain on an opponent. #emph[Perfect.] "Queen." Storrev glided into the room, her voice a thready whisper. "You summoned me." "I have been thinking," Vraska said. "About you and the other Erstwhile. You are bound to obey Mazirek, are you not?" Storrev inclined her head. "He raised us from our tombs, my queen. But he has given us instructions to obey you as well." Mazirek, who’d vanished at the critical moment. Who spoke to her with such overweening arrogance. Vraska felt her suspicion harden into certainty. "Are you required to tell him everything you do?" Vraska said. "Only if he specifically asks, my queen," Storrev said. "Do you have a task for me?" "I do." Vraska slipped the shark’s-tooth sword into her sheath. "I may be . . . absent for some time. In the meanwhile, I would like you to deliver this note." She handed a sheet of spongy fungus paper to the lich, who read it carefully. "I trust you can figure out the rest." Storrev was always expressionless, but Vraska could have sworn that the ghost of a smile crossed her face. She gave a formally correct bow. "Of course, my queen. Your will be done." The lich glided out. Vraska looked across the suits of armor, shed her formal robe, and began donning the simplest, leather and scale from her days as an assassin. #emph[I’m done being blackmailed, one way or the other.] 
As she tightened the straps, she found a certain peace coming to her. #emph[Kill Zarek, and let the rest take care of itself. That’s all I have left.] #emph[Sorry, Jace.]
https://github.com/donRumata03/aim-report
https://raw.githubusercontent.com/donRumata03/aim-report/master/lib/todos.typ
typst
#import "@preview/big-todo:0.2.0": * // Small todo #let stodo = todo.with(inline: true) #let todo = it => pad(y: 2em, (todo.with(gap: 0.2cm))[#it])
https://github.com/jgm/typst-hs
https://raw.githubusercontent.com/jgm/typst-hs/main/test/typ/layout/clip-02.typ
typst
Other
// Test clipping svg glyphs
Emoji: #box(height: 0.5em, stroke: 1pt + black)[🐪, 🌋, 🏞]

Emoji: #box(height: 0.5em, clip: true, stroke: 1pt + black)[🐪, 🌋, 🏞]
https://github.com/mrknorman/evolving_attention_thesis
https://raw.githubusercontent.com/mrknorman/evolving_attention_thesis/main/03_machine_learning/03_machine_learning.typ
typst
#set page(numbering: "1", number-align: center) #set math.equation(numbering: it => {[3.#it]}) #import "../notation.typ": vectorn, uvectorn, dvectorn, udvectorn, matrixn #counter(math.equation).update(0) = Machine Learning <machine-learning-sec> Machine learning techniques can be applied to almost any area of gravitational-wave data science; therefore, an exhaustive list would be difficult to compile and quickly outdated. However, here are some current areas of investigation: transient detection @george_huerta_cnn @gabbard_messenger_cnn @gebhard_conv_only_cnn and parameterisation @george_huerta_cnn @bbh_pe_1 @vitamin, including compact binary coalesces @george_huerta_cnn @gabbard_messenger_cnn @gebhard_conv_only_cnn, bursts @supernovae_cnn_1 @supernovae_cnn_2 @MLy @semi-supervised, and detector glitches @glitch_detection_1 @gravity_spy; continuous waveform detection @continious_1 @continious_2 @continious_3 and parameterisaction @continious_clustering; stochastic background detection and parameterisation @stocastic_1; detector noise characterisation @noise_characterisation and cleaning @deepclean; detector control and calibration @detector_control_1 @detector_control_2; and approximant generation @aproximant_generation_1. This thesis will focus on the application of machine learning to transients, including compact binary coalesces and burst events. To contextualise this research, this chapter will serve as a brief introduction to machine learning. Many ambiguous, sometimes contradictory definitions exist within machine learning and artificial intelligence. The definitions used throughout this thesis will be discussed here, attempting to use the most technically correct, or failing that, most commonly used definitions available. *Artificial Intelligence* is perhaps the broadest of the terms associated with machine learning and perhaps also the vaguest. It has various, sometimes conflicting, definitions but is often defined as a property of human-designed intelligent agents --- systems that take, as input, information about the world and process that data, along with any internal state, to produce an output that maximises the chance of achieving a specific goal @ai_modern. This broad definition can be applied to an extensive range of artificial devices, from a toaster, which takes as an input the twist of a dial and tries to maximise its goal of applying heat for an amount of time relating to the position of the dial, to a chess engine with the all-consuming goal of checkmating its opponent. Most people would probably not consider a toaster artificially intelligent, and indeed, in the years since DeepBlue first defeated <NAME> @deep_blue, many have come to consider chess engines in much the same light. This phenomenon is known as the 'A.I. effect', wherein a task is only considered something requiring intelligence until it has been successfully demonstrated by a machine @ai_effect. At that point, it is pushed out of the realm of intellectual endeavour and into the mundane, therefore preserving human supremacy over their cognitive dominion. I fear that with the rise of large language models, a few years is all that separates the act of writing a thesis such as this from the same relegation @scientific_writing. This transience can make artificial intelligence a tricky definition to use in a technical sense, so the term will, where possible, be avoided. *Machine Learning* is somewhat easier to define. 
Depending on your definition of artificial intelligence, it could be considered either a subset of that field or merely at an intersection with it @machine_learning_intersection. It is loosely defined as the study of agents who can gain competency at a task without explicit human instruction @machine_learning. This is achieved through the use of specialised algorithms and statistical methods @machine_learning. Since, for the context of this thesis, it is probably more helpful to think of these agents as statistical techniques rather than actors that react to the world, the rest of this thesis will use the term *model* to refer to these agents, as they often model the relationship between a specific distribution of input data and a specific distribution of output data.

Machine learning can be subdivided in multiple ways, but one of the most common distinctions separates it into three basic paradigms: supervised learning, unsupervised learning, and reinforcement learning @pattern_matching_and_machine_learning.

*Supervised Learning* refers to any machine learning task wherein the model attempts to match its outputs with preexisting values labelled by humans or another technique @pattern_matching_and_machine_learning. Training a model through supervised learning requires datasets of labelled training data from which the model learns, after which, if successful, the model should be able to approximate the desired output given new, unseen input data.

*Unsupervised learning*, on the other hand, does not provide the model with any preexisting values to attempt to match its outputs with @pattern_matching_and_machine_learning. This can include techniques that use the input data as the desired output data, such as in autoencoders @autoencoder_unsupervised, or techniques that attempt to divine patterns within the dataset previously unknown to the model and, often, the model user. For example, clustering tasks look for similar latent features between groups of training examples @unsupervised_clustering.

*Semi-supervised learning* lies, perhaps unsurprisingly, in the lacuna between supervised and unsupervised learning @semi_supervised. Whilst training under this paradigm, some of the training data is labelled and some unlabelled. This can be used when the labels are too computationally expensive to compute for the entirety of the training dataset or when some of the labels are intractable by other techniques or simply unknown.

*Reinforcement Learning* is a paradigm based on slightly different principles. Instead of using extensive datasets to train an agent, reinforcement learning utilises algorithms that try to maximise an externally defined reward function @pattern_matching_and_machine_learning. Whilst training, the model can take actions that affect the state of the environment in which it is allowed to act. The state of its environment will then be mapped to a score; this score is used to update the model. Through an iterative process, the model is updated to improve its ability to maximise the score of its environment. Reinforcement learning is commonly used in scenarios where huge training datasets are not available, and the model is primarily designed to interact with an environment (virtual or real), such as training a robot to walk @robot_walk or training a virtual car to drive around a virtual track @robot_drive.
Though this has proved a powerful technique for many machine learning applications, it has not been investigated in this thesis and thus will not be discussed in detail.

== The Artificial Neural Network

The artificial neural network is a machine-learning technique that has seen rapid innovation, development, and adoption over the last decade @ann_history. Such networks have shown the ability to solve many long-standing problems in artificial intelligence, including image, audio, and text classification, captioning, and generation @image_classification @audio_classification @text_classification @image_captioning @audio_captioning @text_summarisation @image_generation @audio_generation @text_generation, as well as producing game-playing algorithms that have attained superhuman performance in previously human-superior games like Go @alpha_go. They can teach themselves the rules from scratch in a matter of hours @alpha_zero --- compared to the many years of development required for previous game-playing engines. They can compete in complex, high-dimensional computer games like Starcraft 2 @starcraft and League of Legends @league, and they have achieved large-scale adoption across many industrial sectors, managing power grids @power_grid_management, performing quality control @quality_control, and paving the way, albeit slowly, toward fully autonomous self-driving cars @self_driving_cars. Artificial neural networks have also been applied to many scientific problems, such as AlphaFold @alpha_fold, a method that, to some extent, solved the protein folding problem. With their rampant and rapid success across many domains previously thought intractable, or at least many decades away from a solution, it is easy to ascribe to artificial neural networks more than what they are, but it is also easy to underestimate their potential to solve previously unthinkable problems.

Artificial neural networks are little more than simple statistical structures compiled into complex architectures, which allow them to perform intricate tasks @artifical_neurons @deep_learning_review @perceptron_and_neural_network_chapter. They are loosely inspired by the structures of biological neurons inside animal brains @biological_inspiration @perceptron_and_neural_network_chapter. Although they indeed show a greater likeness to the workings of biological systems than most computers, this analogy should not be taken too literally. Biological brains are far more complex than current artificial neural networks, and there is much about them we do not yet understand. There may still be something missing from state-of-the-art models that prevents them from achieving the full range of computation available to a biological brain @biological_differences. Having said that, there are still ample further developments that can be made with artificial neural networks, even considering their possible limits. We do not yet seem close to unlocking their full potential @future_improvements.

There is no universally agreed-upon definition of *deep learning*, but one of the most widely accepted definitions is that it must have a Credit Assignment Path (CAP) depth greater than two. This means that there must be more than two data transformations from input to output @deep_learning_3. This equates to a dense artificial neural network with more than two layers, or in other words, one or more hidden layers. This enables *representation learning*, where a network can learn to identify hierarchical features in the data @deep_learning_2.
It is proven that models with a CAP depth of two can act as universal function approximators @universal_aproximators, so adding more layers beyond this only improves convergence on a parameter solution by reducing training difficulty. In practice, almost all contemporary applications of artificial neural networks are more than two layers deep. The hierarchical relationship between artificial intelligence and machine learning is illustrated by @ai_relationships.

#figure(
    image("ai_relationships.png", width: 80%),
    caption: [The loose hierarchical relationship between different umbrella terms used in artificial intelligence @deep_learning_review.],
) <ai_relationships>

There is a plethora of different types and arrangements of artificial neural networks, often known as architectures @deep_learning_review. The following sections will introduce the main concepts surrounding artificial neural networks.

=== The Artificial Neuron <artificial_neuron_sec>

As mentioned previously, artificial neural networks are loosely inspired by biological neural networks @biological_inspiration @perceptron_and_neural_network_chapter, and, as one might expect, their base unit is analogous to the biological base unit, the neuron @artifical_neurons. Artificial neurons form the basic building block of all artificial neural networks, though their form and design can vary between architectures @artifical_neurons. The artificial neuron takes a number, $N$, of continuous numerical inputs, $vectorn(x) = [x_1, ..., x_i, ..., x_N]$, and outputs a single numerical output, $A(vectorn(x))$ @artifical_neurons. Each neuron has a number of tunable parameters associated with it, $vectorn(theta)$: a single neuron has many weight values, $vectorn(w) = [w_1, ..., w_i, ..., w_N]$, and a single bias value, $b$. If these parameters, $vectorn(theta)$, are selected correctly, the artificial neuron can, in some simple cases, act as a binary classifier that correctly sorts input vectors, $vectorn(x)$, drawn from a limited distribution into two classes @perceptron_and_neural_network_chapter @artifical_neurons. This kind of single-neuron classifier is often known as a perceptron, the original name given to this kind of classifier @perceptron.

#figure(
    image("artificial_neuron.png", width: 60%),
    caption: [_Upper_: The artificial neuron. This figure illustrates the operations that compose the archetypical artificial neuron, where $vectorn(x)$ is the input vector, $f$ is the activation function, $vectorn(w)$ is the weights vector, and $b$ is the neuron bias. An artificial neuron takes an input vector, $vectorn(x)$, and performs some (hopefully useful) calculations. Both the weights vector, $vectorn(w)$, and bias value, $b$, comprise the neuron's adjustable parameters, $vectorn(theta)$, which must be tuned for the neuron to perform any useful operations @artifical_neurons. _Note_: During computation, the bias, $b$, is not normally added in a separate operation; instead, it is treated as an extra weight applied to a constant input term, $x_0 = 1$, and included in the same calculation as the summation of the product of the weights, $vectorn(w)$, and input values, $vectorn(x)$. _Lower_: An abstraction of the more complicated interior structure of the artificial neuron. Abstraction is common and necessary when describing artificial neural networks, as networks are often composed of thousands, if not millions, of artificial neurons.],
) <artificial_neuron_diagram>

As can be seen in @artificial_neuron_diagram, the standard artificial neuron is composed of several consecutive mathematical operations.
First, the input vector, $vectorn(x)$, is multiplied element-wise by the weights vector, $vectorn(w)$, and the results are summed along with the bias value, $b$ @artifical_neurons @perceptron_and_neural_network_chapter. Finally, the result is fed into an activation function, $f$; see @activation_functions_sec. This sequence of operations is given by

$ op("A")(vectorn(x)) = f(sum_(i=1)^N w_i x_i + b) = f(vectorn(x) dot vectorn(w) + b), $ <artificial_neuron>

where $N$ is the number of elements in the input vector. In the case of the single-layer perceptron, the output of the neuron, $op("A")(vectorn(x))$, is equivalent to the output of the perceptron, $accent(y, hat)$, where our desired ground-truth output value is $y$. Since each element of the weights vector, $vectorn(w)$, is multiplied by the corresponding component of the input vector, $vectorn(x)$, the weights can be thought of as representing the significance of their corresponding input value, $x_i$, to the desired output value, $y$. The bias, $b$, acts as a linear shift to the activation function, and tuning this value can make it more or less difficult for the neuron to activate. Having well-tuned parameters, $vectorn(theta)$, is crucial for the performance of the artificial neuron. The purpose of the activation function, $f$, is to coerce the distribution of the output value, $op("A")(vectorn(x))$, into a particular shape @activation_functions_ref. The reasons for doing this will not become fully apparent until the training process is understood; therefore, a more detailed discussion of activation functions follows in @activation_functions_sec.

=== Training Artificial Neurons <training_arificial_neurons>

Now that the structure of the artificial neuron has been described, the question becomes: how does one go about ascertaining useful values for the neuron's tunable parameters, $vectorn(theta)$, namely the weights vector, $vectorn(w)$, and the bias, $b$? It would, in theory, be possible to approach this problem by manually discovering values for each parameter, $theta_i$, using human-guided trial and error. Whilst this would be unwise, we can use this thought experiment to arrive at the automated solution to the problem. This section will describe the step-by-step process of training an artificial neuron, or in this case, multiple neurons, and, for each step, illustrate how the manual approach can be automated with a Python @python function. @an_training_import shows the required library imports to run all subsequent code listings in this section. An IPython notebook containing the described code can be found here: https://tinyurl.com/4m4n3m7n.

#show figure: set block(breakable: true)

#figure(
```py
# Importing necessary libraries
import numpy as np
import tensorflow as tf
from tensorflow.keras.datasets import mnist
from bokeh.plotting import figure, show
from bokeh.io import output_notebook
```,
caption : [_Python @python ._ Required imports to run subsequent code listings in this section. NumPy @numpy is used for its fast numerical CPU operations. TensorFlow @tensorflow is used for fast numerical GPU operations, machine learning functionality, and loading the Modified National Institute of Standards and Technology (MNIST) dataset @mnist. Bokeh @bokeh is used to plot figures.]
) <an_training_import>

We will attempt to train an ensemble of ten artificial neurons to correctly classify the Modified National Institute of Standards and Technology (MNIST) example dataset @mnist.
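Before examining the dataset, it may be helpful to see @artificial_neuron transcribed directly into code. The listing below is a minimal, illustrative NumPy @numpy sketch of a single neuron, separate from the training code that follows; the sigmoid activation and all parameter values are arbitrary choices for demonstration.

#figure(
```py
import numpy as np

def sigmoid(z):
    # Squashes the weighted sum into the range (0, 1); see the
    # discussion of activation functions later in this chapter.
    return 1.0 / (1.0 + np.exp(-z))

def neuron(x, w, b, f=sigmoid):
    # A(x) = f(x . w + b), exactly as written above.
    return f(np.dot(x, w) + b)

# A toy neuron with N = 3 inputs and arbitrary example parameters.
x = np.array([0.5, -1.2, 3.0])  # input vector
w = np.array([0.1, 0.4, -0.2])  # weights vector
b = 0.05                        # bias value
print(neuron(x, w, b))          # a single numerical output, A(x)
```,
caption : [_Python @python ._ An illustrative sketch of a single artificial neuron implementing @artificial_neuron in NumPy @numpy; the parameter values shown are arbitrary.]
)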
The MNIST dataset consists of 70,000 black-and-white images of handwritten numbers with a resolution of 28 by 28 pixels. Pixel values range from 0 for black pixels to 255 for white pixels, with the intermediate integer values representing 254 shades of grey. 10,000 images are reserved for testing, with the remaining 60,000 used for training. See @mnist_examples for examples of the images contained within the dataset.

Though slightly confusing, this ensemble of multiple neurons is often known as a single-layer perceptron @perceptron_and_neural_network_chapter, as it consists of many neurons acting (almost) independently in a single layer; see @single_layer_perceptron. The only collaboration between neurons is the normalisation that is applied to each neuron by the softmax activation function @softmax, which ensures the produced output vector sums to one and can act as a probability distribution; see @softmax-sec. Because we are moving from a single neuron with one bias value, $b$, and a vector of weight values, $vectorn(w)$, to multiple neurons, the bias value becomes a vector, $vectorn(b)$, and the weights vector becomes a matrix, $matrixn(W)$:

$ matrixn(W) = mat(
    w_(1,1), ..., w_(1,j), ..., w_(1, P); 
    dots.v, dots.down, dots.v, dots.up, dots.v; 
    w_(i, 1), ..., w_(i, j), ..., w_(i, P); 
    dots.v, dots.up, dots.v, dots.down, dots.v;
    w_(N, 1), ..., w_(N, j), ..., w_(N, P);
), $

where $N$ is the number of neurons in the layer, and $P$ is the number of weights per neuron, typically determined by the number of neurons in the previous layer, or by the number of elements in the input vector if the layer is the input layer.

#figure(
    image("mnist_examples.png", width: 80%),
    caption: [Example MNIST data @mnist. A single example of each of the ten classes within the MNIST example dataset. As can be seen, the classes range from zero to nine inclusive. Each example consists of a grid of 28 by 28 pixels, each holding a single value which, after normalisation, lies between 0.0 and 1.0. In the above image, values near 1.0 are represented as nearly white, and values near 0.0 as black. When ingested by our single-layer perceptron, the images will be flattened into a 1D vector; see @flatten-sec.],
) <mnist_examples>

#figure(
    grid(
        columns: 1,
        rows: 2,
        gutter: 1em,
        [ #image("single_layer_perceptron.png", width: 100%) ],
        [ #align(center)[#image("single_layer_perceptron_abstract.png", width: 100%)] ],
    ),
    caption: [Various representations of a single-layer perceptron, or single-layer artificial neural network. _Upper:_ Diagram illustrating the structure and operation of a single-layer perceptron. In the example shown, a handwritten zero is fed into the single-layer perceptron. The 2D image is first flattened into a 1D vector, see @flatten-sec; then, the entire vector is fed into each neuron. If the training process has worked correctly, each neuron will have learned to identify one of the possible classes, in this case, digits. As can be seen from the output values, $uvectorn(y) = [accent(y, hat)_0, ..., accent(y, hat)_9]$, which are taken from a real trained model, this model can correctly identify this input as a zero with high confidence. _Middle:_ An abridged version of the upper diagram demonstrating the operation of feeding a handwritten one into the perceptron. This shows how future network diagrams will be abstracted for simplicity, and that the perceptron outputs a different, correct value when it ingests a one rather than a zero. _Lower:_ A further abstraction of the network.
This type of abstraction will be used commonly throughout this thesis when dealing with networks consisting of multiple layers. A dense layer, wherein all neurons are attached to all neurons in the previous layer, will be shown as a filled black rectangle, and the icon next to it represents that the activation function applied is a softmax activation function @softmax_ref; see @softmax-sec.]
) <single_layer_perceptron>

*Step 1: Dataset Acquisition:* When we train a machine learning model, we are attempting to model the relationship between an input and an output distribution. In some ways, the model can be considered a compressed version of the matched input and output distributions. After training, when you feed in a single data point from the input distribution, the model will, hopefully, be able to map that input value to the correct value in the output distribution. This makes the training data a fundamental part of the training process @training_dataset_importance. Whether naively attempting a manual solution or optimising through more efficient means, we must acquire a suitable training dataset. In many cases, the input distribution will be very large or even continuous, so an exhaustive training dataset covering every possible value in the distribution will be either technically or literally impossible. For this reason, we have to ascertain or generate a training dataset that will appropriately sample the entire input distribution. There are many preexisting example training sets; as stated, we will use the MNIST dataset @mnist for illustrative purposes.

Automating the process of acquiring a dataset is simple. TensorFlow @tensorflow has built-in functions to allow us to acquire the MNIST dataset @mnist easily. @an_data_aquisition below shows how this process can be performed. The listed function also prepares the data for ingestion by the ensemble of artificial neurons. *One-hot encoding* changes a single numerical class label, i.e. $0, 1, ..., 9$, into a Boolean vector where each index of the vector represents a different class; for example, $0$ becomes $[1, 0, 0, 0, 0, 0, 0, 0, 0, 0]$, whereas $1$ becomes $[0, 1, 0, 0, 0, 0, 0, 0, 0, 0]$ @one_hot_encoding. This is because each neuron will learn to distinguish a single class by returning a float value closer to $0.0$ if the input falls outside its learned distribution, or closer to $1.0$ if the input falls within it. Therefore, to perform the vector operations necessary for training, one-hot encoding must be performed @one_hot_encoding.

#figure(
```py
# Step 1: Load and prepare the MNIST dataset.
def load_and_prepare_data():
    # This data is already split into train and test datasets.
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    # Reshape and normalise the images to feed into the neural network.
    x_train, x_test = x_train.reshape(-1, 784)/255.0, x_test.reshape(-1, 784)/255.0

    # Convert labels to one-hot vectors. This is necessary as our output layer will have 10 neurons,
    # one for each digit from 0 to 9.
    y_train, y_test = tf.one_hot(y_train, depth=10), tf.one_hot(y_test, depth=10)

    return x_train, y_train, x_test, y_test
```,
caption : [_Python @python ._ Function to load and prepare the MNIST dataset @mnist. The MNIST dataset @mnist consists of many examples of handwritten Arabic numerals from zero to nine. The images, `x`, are reshaped and normalised, and the labels, `y`, are one-hot encoded @one_hot_encoding.]
) <an_data_aquisition>

*Step 2: Parameter Initialisation:* For an artificial neuron to produce a result when it consumes an input vector, all parameters, $vectorn(theta)$, must be initialised to some value. One could imagine choosing these initial parameters, $vectorn(theta)_bold(0)$, in a few distinct ways. Perhaps most intuitively, you could decide on the parameters based on some prior knowledge about the dataset, aiming to get as close as possible to the optimal tunings in order to minimise the number of steps required during training. However, this option is impossible if the human tuner lacks such intuition or if the input size is too large for any human to form such an intuition. That leaves choosing a uniform value for all parameters or randomly initialising all parameters.

In any automated process, a uniform initialisation is a bad choice. If one sets all initial parameters, $vectorn(theta)_bold(0)$, to the same value, this creates symmetry @weight_initlisation: if we subsequently try to use a mathematical method to adjust these parameters, the method will have no way to choose one parameter over another, meaning all parameters will be tuned identically. We need the parameters to be adjusted independently in order to model complex relationships. For this reason, we will initialise the weights matrix, $matrixn(W)$, randomly by sampling values from a normal distribution. The choice of random distribution will not be discussed here, but note that there is an open area of research aiming to speed up and/or improve the training process by selecting more optimal distributions for parameter initialisation @weight_initlisation. The bias values, $vectorn(b)_bold(0)$, will be initialised to zero. Since there is only one bias value per neuron, we do not have to worry about creating asymmetry, as that is provided automatically by values passed through the neuron's weights.

@an_parameter_definition demonstrates the initialisation of two variable tensors to hold the weights and biases of our artificial neurons. Because there are ten classes of digit in the training dataset, we will initialise ten artificial neurons --- one to recognise each class. There will be a single bias value for each neuron; hence, there are $C$ bias elements in the bias tensor, `biases`, where $C = op("num_classes") = 10$. The input size is $N = 28 times 28 = 784$, so there are $N times C = 784 times 10 = 7840$ elements in our weights tensor, now a matrix, $matrixn(W) =$ `weights`, arranged in the shape `[784, 10]`. This means the total number of tunable parameters in our set of ten neurons is $7840 + 10 = 7850$.

#figure(
```py
# Step 2: Define the model
# We are using a simple single-layer perceptron model
# This is essentially a single fully-connected layer
def define_model():
    # Define weights and biases. We initialise the weights with a random normal
    # distribution.
    # There are 784 input neurons (one for each pixel in the 28x28 image) and ten output
    # neurons. We initialise biases to zero.
    weights = tf.Variable(tf.random.normal([784, 10]), name="weights")
    biases = tf.Variable(tf.zeros([10]), name="biases")

    return weights, biases
```,
caption : [_Python @python ._ Function to initialise TensorFlow @tensorflow tensors to store the artificial neurons' parameters, $vectorn(theta)$. In the case of MNIST @mnist digit recognition, ten neurons are being trained, so we have ten bias values, $vectorn(b)$, and the input images are of dimension $28 times 28 = 784$.
Therefore, our weights matrix, $matrixn(W)$, is shaped `[784, 10]`.]
) <an_parameter_definition>

*Step 3: Define the model's action:* To perform any optimisation method, there must be a way to test the model. Thus, we must define the action of the model, $M(vectorn(x))$. We have already shown what form this must take in @artificial_neuron_sec and @artificial_neuron. This is very easily defined by a Python @python function, as seen in @an_computation_definition.

#figure(
```py
# Step 3: Define the model's computations:
def model(x, W, b):
    return tf.nn.softmax(tf.matmul(x, W) + b)
```,
caption : [_Python @python ._ Function to perform the computation of the artificial neurons in our single-layer perceptron. Since TensorFlow @tensorflow is natively vectorised, this function calculates the output of all ten neurons simultaneously, across an entire batch of input vectors. It performs the same operation described in @artificial_neuron, with a softmax function as the activation function, $f$. Softmax activation functions are described in @softmax-sec.]
) <an_computation_definition>

*Step 4: Define the loss function*: Now that we have set up a procedure to run the model with a set of randomised parameters, $vectorn(theta)$, we must define a measure of success so that we can see how well the model is performing whilst we tune its parameters. If we have no performance metric, then we have no indication of how to tune the model to improve its performance. To do this, we define a loss function, $L$: a function which takes in some information about the state of the model after it has ingested data, usually including the model's output, and returns a numerical output value, the loss of the model with a given set of parameters, $L(M_vectorn(theta) (vectorn(x)), vectorn(y))$, where $vectorn(x)$ is a particular instance, or batch, of input vectors, and $vectorn(y)$ is, in the case of classification, the data label @deep_learning_review. Note that in unsupervised learning, the loss function does not ingest a label, $vectorn(y)$, as the data is not labelled @deep_learning_review. By convention, a high loss value indicates worse model performance than a lower loss value @deep_learning_review. Our optimisation process, therefore, should attempt to minimise the average of this loss value across all potential input vectors.

There are many possible metrics for measuring the performance of a model, a large number of which can be used as the loss function @deep_learning_review. The loss function is an important aspect of the training process and can alter the efficiency of training significantly. Loss functions can be highly specialised to particular scenarios, to the point where using an inappropriate one can completely remove any possibility of training @deep_learning_review. A more detailed description of loss functions is available in @loss_functions_sec. For this model, we elect to use the categorical cross-entropy loss function @deep_learning_review, as described in @loss_functions_sec. An implementation of that loss function is shown by @loss_function_definition.

#figure(
```py
# Step 4: Define the loss function
def compute_loss(y_true, y_pred):
    return tf.reduce_mean(-tf.reduce_sum(y_true * tf.math.log(y_pred), axis=[1]))
```,
caption : [_Python @python ._ Function to compute the loss of the model. The loss function utilised in this case is categorical cross-entropy loss, a loss function commonly used for multi-class, single-label datasets.
A more detailed description of this loss function can be found in @cross_entropy_sec.]
) <loss_function_definition>

*Step 5: Train the model*: Finally, after we have assembled all the pieces, we can start to tune the parameters, $vectorn(theta)$, so that our perceptron can output useful values when fed input. As previously stated, we will initialise our weight parameters, $matrixn(W)$, randomly; this means that, no matter what images we feed into the untrained model, we will get nonsensical classification values with no correlation to the ground truth labels, except by an extremely unlikely fluke. Using some process, we want to move the model toward successful categorisation. Returning to our analogy of performing this operation manually, we might start by feeding the model an image from our training dataset. We could then examine the model's output and see which parameters we would need to tune in order to move our network, for that particular image, toward the correct answer. We could achieve this by determining how much each parameter moves the current model's output, $uvectorn(y)$, toward or away from the ground truth value, $vectorn(y)$, and then adjusting each parameter accordingly. If we tuned the parameters by a large amount, the model could easily become overtuned to a particular image, so we might instead choose to move them a little way toward the correct answer and then repeat this process over hundreds, if not thousands, of examples, moving the network slowly toward a useful configuration.

Gradient descent is an algorithmic implementation of this thought experiment @deep_learning_review. In its most simple case, the loss that is given by the loss function, $L(M_vectorn(theta) (vectorn(x)), vectorn(y))$, measures the distance between the model output, $uvectorn(y)$, and the ground truth, $vectorn(y)$. Since the model is largely defined by its parameters, $vectorn(theta)$, the loss function can be thought of as a function that takes in an input vector, $vectorn(x)$, the model parameters, $vectorn(theta)$, and the ground truth label, $vectorn(y)$. So the output of the loss function for a particular input vector and set of parameters becomes

$ L(M_vectorn(theta) (vectorn(x)), vectorn(y)) = L(M(vectorn(theta), vectorn(x)), vectorn(y)) = L_M (vectorn(theta), vectorn(x), vectorn(y)), $ <loss_func_eqatuion>

where $L$ is the model-architecture-agnostic loss function, $L_M$ is the loss function for a particular model architecture, $M_vectorn(theta)$ is a model with a fixed set of parameters, $vectorn(theta)$, $M$ is a model with its parameters as a functional input, $vectorn(x)$ is a particular input vector to the model, and $vectorn(y)$ is the label vector corresponding to that input vector.

The gradient of the model is defined as the vector of partial derivatives of the model's loss function with respect to its parameters @gradient_descent_matrix. If $L_(M vectorn(x) vectorn(y)) (vectorn(theta))$ is the loss function with a fixed model architecture, input vector, and ground-truth label, then the gradient of the model is

$ vectorn(gradient) bold(L) eq.triple vectorn(gradient) bold(L_(M vectorn(x) vectorn(y)) (vectorn(theta))) = [frac(partial L_(M vectorn(x) vectorn(y)), partial theta_1), ..., frac(partial L_(M vectorn(x) vectorn(y)), partial theta_i), ..., frac(partial L_(M vectorn(x) vectorn(y)), partial theta_N)], $ <gradient_equation>

where $N$ is the total number of tunable parameters.
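To make @gradient_equation concrete, each partial derivative can be approximated numerically by nudging one parameter at a time and measuring the change in the loss. The sketch below is purely illustrative (the toy loss function is an arbitrary choice), and it is far too slow for real networks, which instead use backpropagation, as discussed in @backpropagate-sec.

#figure(
```py
import numpy as np

def numerical_gradient(loss_fn, theta, epsilon=1e-6):
    # Approximate each element of the gradient vector with a finite
    # difference: dL/dtheta_i ~ (L(theta + eps) - L(theta)) / eps.
    gradient = np.zeros_like(theta)
    for i in range(len(theta)):
        theta_nudged = theta.copy()
        theta_nudged[i] += epsilon
        gradient[i] = (loss_fn(theta_nudged) - loss_fn(theta)) / epsilon
    return gradient

# A toy loss with two parameters: L(theta) = theta_1^2 + 3 * theta_2^2.
loss = lambda theta: theta[0]**2 + 3.0 * theta[1]**2
print(numerical_gradient(loss, np.array([1.0, -2.0])))
# ~[2.0, -12.0], matching the analytic gradient [2 theta_1, 6 theta_2].
```,
caption : [_Python @python ._ An illustrative finite-difference approximation of @gradient_equation for a toy two-parameter loss function; real networks compute the same quantity far more efficiently with backpropagation.]
)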
@gradient_equation describes a vector, $vectorn(gradient) bold(L)$. Each element of the vector, $frac(partial L_(M vectorn(x) vectorn(y)), partial theta_i)$, is a gradient that describes the effect of changing the value of the corresponding parameter, $theta_i$, on the model loss. If the gradient is positive, then increasing the value of the parameter will increase the value of the loss, whereas if it is negative, increasing the value of that parameter will decrease the model loss. The magnitude of the gradient is proportional to the magnitude of that parameter's effect on the loss @gradient_descent_matrix. Since we want to reduce the model loss, we want to move down the gradient. Therefore, for each parameter, we subtract an amount proportional to the calculated gradient @gradient_descent_matrix @deep_learning_review.

#figure(
    image("gradient.png", width: 100%),
    caption: [An illustration of gradient descent, where $vectorn(gradient) bold(L_(M vectorn(x) vectorn(y)) (vectorn(theta)))$ is the loss at a fixed model architecture, $M$, input vector, $vectorn(x)$, and data label, $vectorn(y)$. This simplified example of the shape of a 1D parameter space shows how the gradient of the loss function with respect to the model parameters can be used to move toward the minimum of the loss function. The shape of the loss function in this example is given by $L_(M vectorn(x) vectorn(y)) (vectorn(theta)) = theta^2$. In almost all cases, the parameter space will be much more complex than the one depicted, in both dimensionality and shape. Usually, the shape of the loss function will be an N-dimensional surface, where N is the number of parameters, $vectorn(theta)$, in the model, but the principle remains the same. For a 2D example of a gradient space, see @gradient_descent_examples. This plot can be recreated with the code found here: https://tinyurl.com/3ufb5my3.]
) <gradient_example>

We need to be able to control the magnitude of the parameter adjustment because the gradient is only measured at the current parameter values, $vectorn(theta)$, and tells us nothing about the shape of the loss function further away; it is therefore possible for the tuning process to overshoot the loss function minimum. In order to apply this control, we introduce a constant coefficient to scale the gradient, known as the learning rate, $eta$ @gradient_descent_matrix @deep_learning_review. Therefore, if we want to find the new adjusted parameters after one optimisation step, we can use

$ vectorn(theta)_(t+1) = vectorn(theta)_t - eta vectorn(gradient) bold(L_( M vectorn(x)_bold(t) vectorn(y)_bold(t) ) (vectorn(theta)_t)), $ <gradient_decent_step>

where $t$ is the step index. We can see this process in Python @python form in @train_step_definition. In this function, the gradients are captured using the `tf.GradientTape` scope, which automatically captures the gradients of all "watched" tensors within its scope. This automatic differentiation utilises a process called backpropagation @gradient_descent_matrix @deep_learning_review, which will be discussed in more detail in @backpropagate-sec.

#figure(
```py
# Step 5: Define the training step
@tf.function
def train_step(x, y, W, b, η):
    with tf.GradientTape() as tape:
        y_pred = model(x, W, b)
        current_loss = compute_loss(y, y_pred)

    gradients = tape.gradient(current_loss, [W, b])
    W.assign_sub(η * gradients[0]) # update weights
    b.assign_sub(η * gradients[1]) # update biases

    return current_loss
```,
caption : [_Python @python ._ Function to execute a single training step.
This function runs an example, ``` x ``` $= vectorn(x)_bold(t)$, through the model (usually multiple examples at once, as explained in @gradient-descent-sec) and computes the loss, ``` current_loss ``` $= L_( M vectorn(x)_bold(t) vectorn(y)_bold(t)) (vectorn(theta)_bold(t))$, of the output of that model, ``` y_pred ``` $= uvectorn(y)_bold(t)$, compared with the ground truth label of that example, ``` y ``` $= vectorn(y)_bold(t)$. The gradients, ``` gradients ``` $= vectorn(gradient) L_( M vectorn(x)_bold(t) vectorn(y)_bold(t) ) (vectorn(theta)_bold(t))$, are automatically computed for each parameter by ``` tf.GradientTape()```, which produces a list of gradients for the weights, ``` W ``` $= matrixn(W)$, and biases, ``` b ``` $= vectorn(b)$. These are then multiplied by the learning rate, ``` η ``` $= eta$, and used to update the parameters, $vectorn(theta)$, for the next training step; see @gradient_decent_step.]
) <train_step_definition>

If we repeat this process over $T$ steps, where $T$ is the number of training examples in our dataset, then the model will hopefully begin to gain aptitude at the classification task. The process of tuning the model parameters once with all examples in the training dataset is called a training epoch @perceptron_and_neural_network_chapter. Often, if our training dataset is not large enough, we can improve the model performance by running for multiple epochs, hence training the model with the same examples multiple times. Between epochs, the training dataset is usually shuffled in order to explore new areas of parameter space and avoid repeating exactly the same pathway @perceptron_and_neural_network_chapter. Pulling together all the functions we have defined, we can now implement our main training loop; see @train_loop_definition.

#figure(
```py
def compute_accuracy(y_true, y_pred):
    # Fraction of examples whose highest-probability class matches the label.
    matches = tf.equal(tf.argmax(y_true, axis=1), tf.argmax(y_pred, axis=1))
    return tf.reduce_mean(tf.cast(matches, tf.float32))

def train_model(epochs, batch_size, η, x_train, y_train, x_test, y_test):
    # Define model
    W, b = define_model()

    # Store loss and accuracy for each epoch
    loss_per_epoch = []
    accuracy_per_epoch = []

    # Training loop
    for epoch in range(epochs):
        i = 0
        while i < len(x_train):
            x_batch = x_train[i:i + batch_size]
            y_batch = y_train[i:i + batch_size]
            current_loss = train_step(x_batch, y_batch, W, b, η)
            i += batch_size

        # Compute loss and accuracy on the test data after each epoch
        y_pred = model(x_test, W, b)
        loss_per_epoch.append(current_loss)
        accuracy_per_epoch.append(compute_accuracy(y_test, y_pred))

        print(f'Epoch {epoch+1} completed')

    return loss_per_epoch, accuracy_per_epoch, W, b
```,
caption : [_Python @python ._ Function to execute multiple training steps across multiple epochs. This function runs the function defined in @train_step_definition for each batch of examples in the training dataset, ``` x_train```, and repeats this process for each requested epoch, ``` epochs```, updating the model parameters each time. A small helper, ``` compute_accuracy```, is also defined here; it measures the fraction of test examples whose highest-probability class matches the ground-truth label. The function returns the model parameters, ``` W, b```, and some metrics measuring the model's performance; see @gradient_decent_step.]
) <train_loop_definition>

=== Testing the Model <mnist-test-sec>

Once we have trained our model using the aforementioned procedure, we can evaluate its performance. Often, the first step is to look at the model's performance at each step during training; see @perceptron_history. The model training progresses quickly at first but soon reaches a point of diminishing returns at about 85 per cent accuracy.
Although we may be able to squeeze out a little more performance by running the training for more epochs, this can lead to overfitting, where a model becomes tailored too specifically to its training dataset and cannot generalise well, or at all, to unseen points drawn from the same distribution @perceptron_and_neural_network_chapter. In most cases, we will want our model to classify new, unseen data drawn from a similar distribution to the training dataset but not overlapping with any existing points, so we try to avoid this.

#figure(
    grid(
        columns: 1,
        rows: 2,
        gutter: 1em,
        [ #image("perceptron_history.png", width: 100%) ],
        [ #image("unsucessfull_clasifications.png", width: 100%) ],
    ),
    caption: [_Upper:_ The performance of the single-layer perceptron model described in @training_arificial_neurons over 15 epochs, where one epoch consists of training the model on all training examples in the MNIST dataset of handwritten Arabic numerals @mnist. The model loss is defined as the categorical cross-entropy of the model's output vector, $uvectorn(y)$, and the ground-truth label, $vectorn(y)$, whereas the accuracy metric is defined as the percentage of examples in the test dataset that are correctly classified, where a correct classification is any output with 50 per cent or more probability in the correct class. _Lower:_ Two examples of less successful classifications. The left example would still be measured as a successful classification by our accuracy metric, whereas the right example would be marked as an unsuccessful classification.]
) <perceptron_history>

We can also investigate what parameters the neurons have actually learned over this process; see @perceptron_parameters. It is often very difficult to come to much of a conclusion about the true inner workings of artificial neural networks, especially dense layers, which are the most general but also the least intuitive @interpetability @deep_learning_review. Network interpretability is a large and ongoing area of machine learning research, for obvious reasons: being able to see why a model has given you the answer that it has can massively boost confidence in that answer @interpetability @deep_learning_review. However, this thesis will not focus heavily on interpretability, as that could fill a whole other thesis on its own.

#figure(
    image("perceptron_parameters.png", width: 100%),
    caption: [Learned model parameters. Each artificial neuron in our single-layer perceptron is represented by a labelled parameter map shaped into the same dimensions as the input images. These maps show the learned weight values that correspond to each pixel of the input images. Very little structure can be made out by the human eye. Perhaps in the weight map for the zero-classifier neuron, we can see an area toward the centre of the map that is negatively weighted. This might be expected, as there are rarely high-value pixels at the centre of the circular zero. A similar but opposite effect might also be attributed to the one-classifier, where the centre of the image often contains high-value pixels. In general, unless you squint very hard, it is difficult to make out patterns in the parameters. This "black-box" effect means that after even one more layer is added to the network, it becomes very difficult to determine the action of dense-layer neurons intuitively.]
) <perceptron_parameters>

Whilst it is difficult to make specific claims about how artificial neural networks are doing what they are doing, we can often speculate on general methods of operation.
In the case of a single-layer perceptron, like the one we have built here, the only kinds of operations that can be learned are linear ones. The only path available to each neuron is to learn which pixels are often highly valued in its class of digit, and which pixels are rarely highly valued in its class but are more likely to be highly valued in another class. It can then adjust its bias value so that the neuron only activates when a certain criterion is met. If we were distinguishing between ones and zeros, for example, which have, in general, very different highlighted pixels, then this might be enough for a high degree of classification efficiency. However, there are multiple digits which share many common pixel values, which makes this problem more difficult. In order to solve the problem with more accuracy, we must add a non-linear element to the computation, and the ability for the model neurons to work collaboratively on the problem @deep_learning_review @activation_functions_ref. This allows the model to extract more complex "features" from the input vector. We, therefore, introduce the concept of multi-layered neural networks and deep learning.

=== Neurons Together Strong <together_strong>

As we have seen, a single layer of artificial neurons can be trained to perform a small amount of computation, which is often enough for many simple problems. There are, however, a great many problems which require more complex solutions. In order to achieve this, we can add what are known as "hidden layers" to our network @deep_learning_review @perceptron_and_neural_network_chapter. They are called hidden layers because the exact computation performed by these additional layers is much more difficult to divine than in output layers, i.e. layers that directly output solution vectors, as we have seen in our single-layer perceptron @perceptron_and_neural_network_chapter.

For simplicity of design, artificial neural networks are usually organised into ordered layers of neurons, with interactions usually limited to adjacent layers in the network @perceptron_and_neural_network_chapter. Layer one will usually only pass information to layer two, and layer two will receive information from layer one and pass information to layer three, if there are three layers; see @multi-layer-perceptron. This is not always the case, and there are exceptions to all the rules mentioned in this paragraph, including skip connections @skip_connections @res_net_intro, recurrent neural networks @rnn_review @deep_learning_review, and Boltzmann machines @boltzman_machine.

Artificial neural network layers come in many varieties, the simplest of which are feed-forward dense (or sometimes linear) layers @deep_learning_review. Dense layers consist of $N$ neurons, where every neuron takes as its input vector the output of every neuron in the previous layer, unless the dense layer is acting as the input layer, in which case every neuron takes as input every element of the input vector @perceptron_and_neural_network_chapter. If the dense layer is acting as the output layer, as was the case for our single-layer perceptron, where one layer was both the input and output layer, then $N$ must equal the required size of our output vector, $vectorn(y)$. In the case of a classification problem, this is equal to the number of classes, $C$.
In hidden layers, the number of neurons, $N$, can be any number and is, therefore, a customisable non-trainable parameter, known as a hyperparameter, that must be chosen before network training by some other method; see @hyperparameters-section. As can be imagined, finding the gradient for networks with one or more hidden layers is a more complex problem than for a single layer. Backpropagation allows us to do this @perceptron_and_neural_network_chapter @deep_learning_review @gradient_descent_matrix and, in fact, is the tool that unlocked the true power of artificial neural networks; see @backpropagate-sec.

#figure(
    grid(
        columns: 1,
        rows: 2,
        gutter: 1em,
        [ #image("multi_layer_network.png", width: 100%) ],
        [ #align(center)[#image("multi_layer_abstracted.png", width: 100%)] ],
    ),
    caption: [_Upper:_ Diagram of a multi-layer network with one output layer and one hidden layer. The non-linear computation introduced by the ReLU activation function applied to the hidden layer allows this network to solve considerably more complex problems than the previously described single-layer perceptron model; see @relu-sec. As can be seen from the displayed output, which again is taken from a real instance of a trained model, this network has no problem classifying the previously difficult image of a five. _Lower:_ An abstraction of the same model.]
) <multi-layer-perceptron>

This model performs considerably better than the previous model (see @multi-layer-perceptron) and seems to be complex enough to more or less solve the problem; see @multi-layer-perceptron_history. We might, in fact, even try reducing the number of neurons in the hidden layer. It is often beneficial to find the simplest possible network, by parameter count, that is able to achieve the desired computation, as more complex networks are more computationally expensive and time-consuming to train, require more training data, have an increased inference time (inference meaning running the model on new, unseen data), and, crucially, are more prone to overfitting to the training dataset @deep_learning_review. This model reaches a high accuracy within just a few epochs, and unless we are very concerned about false alarm rates, there is no need to add extra complexity to our model. The notebook used to train this model can be found here: https://tinyurl.com/yrbwuw4e.

#figure(
    image("multi_layer_network_history.png", width: 100%),
    caption: [The performance of the multi-layer perceptron model described in @together_strong over 15 epochs. As can be seen in comparison to @perceptron_history, the training is both faster and reaches a better final result.]
) <multi-layer-perceptron_history>

The addition of hidden layers to our network architectures introduces the possibility of all kinds of structural variations. For more complex problems than the recognition of simple handwritten digits, we have many tools in our arsenal to increase performance. The first thing you might do is try the addition of more than one hidden layer; in fact, there is no inherent theoretical limit to the number of hidden layers that can be added to a network. Of course, at some point, you would run into computational limits, and although the gradient can still be calculated, there are problems when attempting to run gradient descent algorithms on very deep networks that are designed without careful consideration. Gradients that vanish over many layers can make network training an almost impossible task @vanishing_gradients @deep_learning_review.
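To illustrate how little code the extra layer requires, the single-layer model from @an_parameter_definition and @an_computation_definition can be extended with a single ReLU hidden layer, as sketched below. This is an illustrative sketch only and does not reproduce the exact model used for @multi-layer-perceptron_history; the hidden-layer width of 128 is an arbitrary choice rather than a tuned value.

#figure(
```py
import tensorflow as tf

def define_two_layer_model(hidden=128):
    # Hidden layer parameters: 784 inputs -> `hidden` neurons.
    W1 = tf.Variable(tf.random.normal([784, hidden]), name="weights_1")
    b1 = tf.Variable(tf.zeros([hidden]), name="biases_1")
    # Output layer parameters: `hidden` neurons -> 10 classes.
    W2 = tf.Variable(tf.random.normal([hidden, 10]), name="weights_2")
    b2 = tf.Variable(tf.zeros([10]), name="biases_2")
    return W1, b1, W2, b2

def two_layer_model(x, W1, b1, W2, b2):
    # The ReLU activation supplies the non-linearity between the layers.
    h = tf.nn.relu(tf.matmul(x, W1) + b1)
    return tf.nn.softmax(tf.matmul(h, W2) + b2)
```,
caption : [_Python @python ._ An illustrative sketch extending the earlier single-layer model with one ReLU hidden layer; the hidden-layer width of 128 is an arbitrary choice. Training proceeds exactly as in @train_loop_definition, with the gradient tape now tracking all four parameter tensors.]
)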
These difficulties with increasing depth and complexity led researchers to explore types of layers beyond the dense layer. Although dense layers alone can be thought of as universal function approximators, there exists no perfect training algorithm to find the ideal set of parameters to achieve every possible function, and the universality statement is technically only true for arbitrarily complex functions as the number of neurons approaches infinity. For this reason, different layer designs and network architectures can create easier environments for training @deep_learning_review, saving computational resources and allowing feasible routes to massively increase network ability. Most often, these non-dense layers are designed with some insight into the structure of the input vectors. An example of this would be the convolutional neural network, see @cnn-sec, which uses the spatial information of the input, as well as the notion that there will be translation-invariant features within the image, to create layers that can perform a similar or better job than dense layers with far fewer parameters @vanishing_gradients.

One could also experiment by moving away from the paradigm of feed-forward networks, although this can increase solution complexity significantly. Within feed-forward neural networks, neuron connections only ever move toward neurons that have not yet causally influenced the emitting neurons. Within recurrent networks, however, signal paths can loop, taking either previous inferences as inputs or looping within the calculation itself @reccurant_neural_networks @deep_learning_review. This can give the network a memory of previous inferences, something feed-forward networks do not possess.

#figure(
    image("ann_rnn.png", width: 100%),
    caption: [_Left:_ The generalised dense feed-forward artificial neural network, where $T$ is the number of hidden layers in the network, $H$ is the number of neurons at a given layer, $N$ is the number of elements in the input vector, $vectorn(x)$, and $O$ is the number of elements in the output vector, $uvectorn(y)$. As can be seen in the diagram, the number of hidden layers in the network is unconstrained, as is the number of neurons in each of those layers, which need not be the same from layer to layer. This is opposed to the output layer, which must have the same number of neurons as is expected by your loss function. _Right:_ A very simple illustration of a recurrent neural network. This network illustrates the retroactive data flow that is possible in a recurrent neural network. In this example, the output of the network from one inference operation is added to the input of the next inference operation. It should be noted that this is a very naive implementation of a recurrent neural network; in actuality, such networks usually have a much more complex structure, such as Long Short-Term Memory (LSTM) networks.]
) <rnn>

There will be a more detailed discussion of many of these different network layers and architectures further on in this thesis; the following few sections will explore the concepts outlined in the last two sections in more detail.

=== Activation Functions <activation_functions_sec>

There are many different activation functions, and, as with most things in machine learning, they are an active area of research @deep_learning_review @activation_functions_ref. As such, this small section will only give a brief introduction plus a few examples.
Since the activation function normally acts on the weighted sum of the inputs plus the bias, in this section we will define $z = vectorn(x) dot vectorn(w) + b$ to avoid confusion with the raw input values previously denoted $x$. Depending on the network structure, $z$ could also be generated by an arbitrary function. We will define the vector of all $z$ values in a single network layer of $N$ neurons as $vectorn(z) = [z_1, ..., z_i, ..., z_N]$.

As noted in @artificial_neuron_sec, the activation function aims to coerce an artificial neuron's output into a particular shape @deep_learning_review @activation_functions_ref @perceptron_and_neural_network_chapter. This has several purposes. Firstly, it can act as a thresholding function, which, along with a specific value of the bias, $b$, can activate or deactivate the neuron depending on the weighted sum of the input vector, $z$. The activation function also limits the output values to a specific range, ensuring that values within the network do not grow without bound along favoured pathways and destabilise the network; these limits can be considered in some way analogous to the maximum firing rate of a biological neuron. Without activation functions, instability can cause values to explode to infinity or vanish to zero. Finally, activation functions provide a non-linear component to the neuron. Without non-linear activation functions, neuron outputs, and hence network outputs, could only be linear combinations of the input values, and networks would, in general, need to be much more complex to solve non-trivial problems.

There are some limits on the type of function we can use within a neural network, primarily because we must be able to flow gradients through the function during backpropagation: the function must be differentiable at all points @activation_functions_ref. For example, if we tried to use a step function as an activation function, the derivative would be 0 at all points, except at the step itself, where it would be undefined. This would make backpropagating through this function very difficult, as it would fail to update the weights and bias of its corresponding neuron. In other non-continuously-differentiable functions, like the ReLU function, we can use a trick to avoid the undefined derivative by defining the value of the derivative at that point ($z = 0$ in this case) to be 0 or 1.

As well as the distinction between linear and non-linear activation functions, a few further distinctions can be made. Outside of the linear function, we can split activation functions into three types: ridge @ridge_functions, radial basis @pattern_matching_and_machine_learning, and fold @softmax_ref. *Ridge* functions are standard activation functions that change an input's shape based on directionality around a specific point, or ridge @ridge_functions. The most common example is the ReLU function @relu_intro @activation_functions_ref and its variants, described below. Ridge functions are computationally efficient and introduce non-linearity without requiring exponentiation or other computationally expensive operations. *Radial basis* functions @pattern_matching_and_machine_learning @radial_basis_function, on the other hand, are less commonly used. They are symmetric around a specific point rather than just directional: their value depends entirely on the magnitude of the distance to this point, unlike ridge functions, where the sign is also vital.
Radial basis functions can create complex surfaces that localise to a specific region, which can be helpful if you believe your data to be localised in such a manner. A typical example of a radial basis function would be the Gaussian function, which can localise a neuron's activation to a particular region. However, radial basis functions can be computationally expensive and can lead to overfitting due to their ability to form complex surfaces. *Fold* functions are more complex activation functions that aggregate over multiple neuron $z$ values, such as the mean or max functions in pooling layers, or even over the entirety of $vectorn(z)$, as in the softmax layers described below @softmax_ref. Calculating these can be computationally expensive, so they are used in moderation.

==== Linear <linear-sec>

The most straightforward activation function is the linear activation, represented simply by @linear below. The linear activation function does not change the shape of the data, which is crucial for many applications where this is a desired feature:

$ op("linear")(z) = k z. $ <linear>

Evidently, in the case where $k = 1$, this is equivalent to not applying any activation function, and thus all the previously stated problems resulting from the absence of an activation function will apply. The derivative of the linear activation function is always a constant, irrespective of the input values, so it is straightforward to compute. This simplicity brings a significant drawback when dealing with complex data: if it is the only activation function used, the entire network, regardless of complexity or number of layers, will behave as a single-layer model because of the lack of non-linearity between layers. As we have seen, single-layer perceptrons are insufficient for many tasks we wish to tackle with artificial neural networks.

One of the primary uses of linear activation functions is as the output layer of regression problems, where the output is expected to be a continuous float value not constrained within a particular distribution @deep_learning_review. The drawbacks are alleviated if the rest of the network before the output layer involves non-linear activations, leaving the output layer to linearly combine its inputs into a final output value. Linear activations can also sometimes be used in straightforward networks where non-linearity is not required and computational efficiency is highly prized. Therefore, while the linear activation function has its uses, it is not commonly applied in the hidden layers of deep learning models, wherein non-linear functions, such as ReLU and its variants, are more valuable; see @activation_functions for a graphical depiction.

==== Logistic <sigmoid-sec>

The logistic activation function @sigmoid_ref is a ridge function defined as

$ f(z) = frac(L, 1 + e^(-k (z - z_0))), $ <logistic>

where $z_0$ represents the z-coordinate of the function's midpoint, $L$ signifies the maximum value that the function can reach, often referred to as the function's supremum, and $k$ is a parameter that controls the steepness of the function's curve, determining the logistic growth rate. The particular case where $L = 1$, $k = 1$, and $z_0 = 0$ is known as the sigmoid function:

$ sigma(z) = frac(1, 1 + e^(-z)). $ <sigmoid>

See @activation_functions for a graphical depiction.
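A direct NumPy @numpy transcription of @logistic and @sigmoid might look as follows; this is an illustrative sketch, and the derivative uses the standard identity $sigma'(z) = sigma(z)(1 - sigma(z))$:

#figure(
```py
import numpy as np

def logistic(z, L=1.0, k=1.0, z0=0.0):
    # The general logistic function: L is the supremum, k the growth
    # rate, and z0 the midpoint.
    return L / (1.0 + np.exp(-k * (z - z0)))

def sigmoid(z):
    # The special case L = 1, k = 1, z0 = 0.
    return 1.0 / (1.0 + np.exp(-z))

def sigmoid_derivative(z):
    # Uses the identity: sigma'(z) = sigma(z) * (1 - sigma(z)).
    s = sigmoid(z)
    return s * (1.0 - s)
```,
caption : [_Python @python ._ An illustrative NumPy @numpy sketch of @logistic and @sigmoid, along with the sigmoid derivative used during backpropagation.]
)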
Since the sigmoid smoothly limits the output of the neuron to between 0 and 1, it is often used on the output neurons of a network designed for classification: in this case, the ground truth vector consists entirely of Boolean values, so an activation function that tends to 0 and 1 in the extremes is very useful @sigmoid_ref. The sigmoid activation function is used in multi-class, multi-label classification problems, where each class variable is independent and an example can belong to multiple classes, as well as in single-class, single-label problems, where there is only one output neuron. Since each output is calculated independently, it is unsuitable for cases where an example can be in only one class and there are multiple classes; in that case, a softmax layer, as described in @softmax-sec, is more appropriate.

The sigmoid function's derivative is at its maximum at the midpoint, $z = 0$, and falls off as $z$ moves in either direction ($z arrow infinity$ or $z arrow -infinity$); this is a suitable environment for backpropagation, as the derivative is always defined and never 0. There are, however, some limitations, since the gradient, although never quite 0, can become very small, leading to the "vanishing gradients" problem, wherein the model's parameter updates become negligible and learning is very slow. Secondly, the sigmoid function is not centred at 0, which can lead to zig-zagging during gradient descent, also slowing down convergence. Finally, the sigmoid function involves the computation of exponentials, which can be computationally expensive, especially for large-scale networks. Despite these limitations, the sigmoid function is widely used, particularly in the output layer, for multi-class, multi-label classification problems. For hidden layers, however, modern practice prefers other functions, like ReLU or its variants, to mitigate some of the issues related to the sigmoid function.

==== ReLU (Rectified Linear Unit) <relu-sec>

One of the most common activation functions, used very widely in neural network hidden layers, is the ReLU (Rectified Linear Unit) function @sigmoid_ref @activation_functions_ref, defined by

$ op("ReLU")(z) = cases(
    z "if" z > 0,
    0 "if" z <= 0
). $ <relu>

ReLU is another example of a ridge function. This function is 0 for $z <= 0$ and $z$ for $z > 0$, meaning it is equivalent to the linear function above 0. It is a very simple function, but it still provides the neuron with the ability to threshold values and adds a non-linear component to the neuron. Because of its simplicity, ReLU is very computationally efficient compared to other activation functions that require expensive operations such as exponentiation or division, an essential factor when deciding on activation functions, especially in very large networks @deep_learning_2. The derivative is also very simple: either 1 for $z > 0$ or 0 for $z < 0$; hence it lacks the possibility of becoming very small. This means that the use of ReLU functions can make for efficient training. Having a large section of the domain with a derivative of 0 does, however, also lead to problems. During the training process, some neurons can "die", becoming able to emit only 0s, and since the gradient is also 0, they can become stuck in this state, unable to reactivate @dying_neurons. Evidently, this can reduce the capacity of the network for practical computation, since these dead neurons can no longer contribute valuable operations.
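In code, ReLU and its derivative are as simple as the mathematics suggests. The illustrative NumPy @numpy sketch below adopts the common convention of defining the derivative at the kink, $z = 0$, to be 0; the all-zero derivative for $z <= 0$ is exactly the region in which neurons can die:

#figure(
```py
import numpy as np

def relu(z):
    # Zero below the threshold, identity above it.
    return np.maximum(0.0, z)

def relu_derivative(z):
    # 1 for z > 0 and 0 elsewhere, including the kink at z = 0,
    # where we adopt the convention of defining the derivative as 0.
    return (z > 0).astype(float)
```,
caption : [_Python @python ._ An illustrative NumPy @numpy sketch of @relu and its derivative; the derivative at $z = 0$ is set to 0 by convention.]
)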
To ameliorate some of these downsides, there is a plethora of possible ReLU variants, most of which have a non-zero gradient below $z = 0$. These include, but are not limited to, Leaky ReLU (LReLU) @leaky_relu, Randomised Leaky ReLU (RReLU) @randomised_leaky_relu, Parametric ReLU (PReLU) @parameteric_relu, the Exponential Linear Unit (ELU) @parameteric_relu, and the Scaled Exponential Linear Unit (SELU) @parameteric_relu. The first three variants, LReLU, RReLU, and PReLU, are defined by

$ op("LeakyReLU")(z) = cases(
    z "if" z > 0,
    alpha z "if" z <= 0
), $ <lrelu>

where $alpha$ depends on the variant in question. In standard Leaky ReLU, $alpha$ is a small, predefined value, such as 0.05, meaning the slope is much shallower before 0 than after it; this prevents dying neurons whilst still allowing the function to threshold the input value. In the case of Randomised Leaky ReLU, $alpha$ is randomly sampled from a specified distribution during training but fixed during model inference @randomised_leaky_relu; this solves the dying neuron problem and adds robustness to the training process. Finally, in Parametric ReLU, $alpha$ is treated as a trainable parameter that the model can adjust during backpropagation, allowing it to hopefully self-optimise to a good value @parameteric_relu.

ELU and SELU are both based on a similar definition:

$ op("SELU")(z) = cases(
    s z "if" z > 0,
    s alpha (exp(z) - 1) "if" z <= 0
). $ <selu>

For any $alpha$ value, if $s = 1$, the equation defines ELU. ELU has all the death-defying properties of the previously mentioned ReLU variants whilst also introducing differentiability at $z = 0$, meaning that the redefinition trick is not required. Unlike other ReLU variants, it saturates as $z arrow -infinity$, increasing robustness to errors. These extra benefits come at the cost of the computational simplicity of the previous ReLU variants, as the calculation of exponentials is a significant computational expense. If $alpha = 1.673263...$ and $s = 1.05070...$, the equation defines SELU, a self-normalising activation function. These very specific values of $alpha$ and $s$ are designed to work in conjunction with LeCun initialisation, a method that initialises neuron parameters with values drawn from distributions with mean zero and variance $1/N$, where $N$ is the number of neurons in that layer. These values of $alpha$ and $s$ massage the neurons toward outputs with a distribution centred on zero and with a variance of one, which can help smooth the training process by avoiding vanishing or exploding gradients.

In practice, ReLU and its variants are the most commonly used activation functions in the hidden layers of deep neural networks, due to their efficiency and performance. See @activation_functions for a graphical depiction.

==== Softmax <softmax-sec>

Softmax is a more complicated fold function and is of interest due to its use in multi-class, single-label classification problems @pattern_matching_and_machine_learning @softmax_ref. It is an extension of the sigmoid function described above in @sigmoid-sec, which aims to convert a vector of continuous, unconstrained output values, in our case $vectorn(z)$, into a vector representing probabilities, with outputs limited between 0 and 1 and a vector sum equal to exactly 1.
It does this by finding the exponential of each $z$ value, then normalising by the sum of the exponentials of all elements in $vectorn(z)$: $ op("softmax")(vectorn(z))_i = frac(e^(z_i), sum_(j=1)^N e^(z_j)), $ <softmax> where $N$ is the number of elements in $vectorn(z)$, equivalent to the number of neurons in the layer and the number of classes in the dataset, and $i$ is the index of the neuron/class whose output value is calculated. See @activation_functions for a graphical depiction. The softmax function represents a way of mapping the non-normalised output of the network to a probability distribution over the predicted output classes, making it invaluable for multi-class, single-label classification problems. It is also differentiable, so it can be used in gradient-descent methods. Softmax can be computationally expensive, particularly when there is a large number of classes, as each output classification requires multiple expensive operations such as exponentiation and division. It can also suffer from numerical instability when the scores in the input vector are very large or very small, which may result in numerical overflow or underflow; in practice, this is usually mitigated by subtracting the maximum element of $vectorn(z)$ from every element before exponentiation, which leaves the output unchanged. Neither issue is typically too much of a problem, as softmax is usually only used in the output layer of a network. The Softmax function remains the standard choice for multi-class classification problems due to its ability to provide a probabilistic interpretation of the outputs, its handling of multiple classes, and its differentiability. #figure( image("activation_functions.png", width: 100%), caption: [Four of the most common activation functions. _Upper Left:_ A linear activation function. In this case, the slope, $k$, is 1, meaning that the shape of the output is unchanged vs the input. _Upper Right:_ The Sigmoid activation function, a special case of the logistic activation function, which limits the output value between 0 and 1. _Lower Left:_ The ReLU (Rectified Linear Unit) activation function and its variants, an easy way to provide non-linearity to multi-layer networks. _Lower Right:_ The Softmax activation function. In the case of multi-neuron outputs, when using softmax, the output of each neuron depends on the value of every other neuron. For this reason, the simplest non-trivial case, where the length of the output vector, $N$, is 2, has been chosen, and the outputs are represented on a 3D plot. This figure can be recreated with the notebook found at: https://tinyurl.com/muppechr.], ) <activation_functions> === Loss Functions <loss_functions_sec> The loss function (sometimes cost or objective function) is an important part of the model training process @deep_learning_review. The purpose of the loss function is to act as a measure of the effectiveness of the model when acting on a particular batch of examples. In doing this, the loss function gives us a metric to evaluate the performance of the model, compare it against other models, and act as a guide during the training process. In specific cases, it can also act to regularise the model to prevent overfitting or to balance multiple objectives. In supervised learning, the loss function is some measure of the distance between the model's output and the ground truth labels of the examples fed into the model. Distance-based losses are among the most common, but it should be noted that, as long as the result is differentiable, a great many terms can be included as part of the loss function, and indeed some of the more complex architectures have correspondingly complex losses.
In unsupervised learning, autoencoders @autoencoder_unsupervised are a formulation of a regression problem where the target output is the model input; thus, they follow the same principles as the typical regression problem, only using the difference between their output, $uvectorn(y)$, and their input, $vectorn(x)$, in the loss, rather than an external label, $vectorn(y)$. Clustering, on the other hand, attempts to split a data distribution into groups by minimising the distance between elements in a given group while maintaining some balance with the number of groups generated --- there are a variety of different ways to do this depending on your desired outcome. ==== Binary Cross Entropy <binary_cross_entropy_sec> The binary cross entropy loss is used primarily for binary classification problems wherein each class label is independent @binary_cross_entropy. This can be the case either for single-class, single-label tasks (binary classification tasks) or multi-class, multi-label tasks. It is defined by @binary_cross_entropy_eq, $ L(vectorn(y), uvectorn(y)) = - sum_(i=1)^N y_i log(accent(y, hat)_i) - (1 - y_i) log(1 - accent(y, hat)_i) $ <binary_cross_entropy_eq> where $L(vectorn(y), uvectorn(y))$ is the loss function applied to the model output and ground truth vectors, $N$ is the number of elements in the output vector, $y_i$ is the i#super[th] element of the ground truth vector, and $accent(y, hat)_i$ is the i#super[th] element of the model output vector. In the single-class, single-label case, where $N = 1$, @binary_cross_entropy_eq becomes $ L(y, accent(y, hat)) = - y log(accent(y, hat)) - (1 - y) log(1 - accent(y, hat)). $ <binary_cross_entropy_eq_2> Some confusion can arise in the case of binary classification problems @binary_cross_entropy, wherein the examples can either be in a class or not in that class, since this is the same as the situation where there are two distinct classes. As such, these problems can be treated in two ways: either with a single output neuron and an output vector, $vectorn(y)$, of length one (a scalar output value, i.e. $vectorn(y) = y$), where a high value indicates inclusion in the one class and a low value exclusion, or with two output neurons, where each neuron represents a "class", one being inside the class and the other being outside the class. In the first case, we would use a sigmoid activation function and a binary cross-entropy loss, and in the second case, we would use a softmax activation function and a categorical cross-entropy loss. These produce very similar outcomes, with the first method being slightly more straightforward, giving a directly interpretable output and reducing the number of parameters, whereas the second, whilst increasing the model parameter count, can sometimes be more numerically stable. ==== Categorical Cross Entropy <cross_entropy_sec> Categorical cross entropy loss is very similar to binary cross-entropy loss but is used primarily in multi-class, single-label problems, such as the MNIST @mnist classification task we presented earlier @deep_learning_review. It is a highly effective loss function, and it is often much easier to classify data into one class using this method than it would be to find multiple labels in a multi-class, multi-label problem, so this kind of task is often a desirable framing of a problem.
The loss is given by $ L(vectorn(y), uvectorn(y)) = - sum_(i=1)^N y_i log(accent(y, hat)_i), $ <categoricorical_cross_entropy_eq> where $L(vectorn(y), uvectorn(y))$ is the loss function applied to the model output and ground truth vectors, $N$ is the number of elements in the output vector, $y_i$ is the i#super[th] element of the ground truth vector, and $accent(y, hat)_i$ is the i#super[th] element of the model output vector. Both binary cross entropy and categorical cross entropy are loss functions that attempt to measure the difference between probability distributions. Binary cross-entropy treats each output element as a separate probability distribution, whereas for categorical cross-entropy, the entire output vector is treated as one probability distribution. They are derived from the concept of entropy in information theory, which quantifies the expected amount of information from a source. Lower-information states have values closer to one or zero --- in that way, minimising the function forces the output toward values of one or zero, i.e., toward definite yes/no classifications. ==== Mean Square Error <mse_sec> For regression tasks, wherein the output vectors are not limited to Boolean values, we must have more flexible activation and loss functions @mae_and_mae. In these cases, we still want to compare our desired output to our actual output, but we don't want to encourage the output toward values near zero and one. There are a number of options to achieve this goal, the choice of which will depend on the specifics of your problem. One option is the mean square error loss, the sum of the squares of the error, $vectorn(y) - uvectorn(y)$, normalised by the number of elements in the output vector @mae_and_mae. It is defined by $ L_op("MSE") (vectorn(y), uvectorn(y)) = 1/N sum_(i=1)^N (y_i - accent(y, hat)_i)^2, $ <MSE_eq> where $L_op("MSE") (vectorn(y), uvectorn(y))$ is the loss function applied to the model output and ground truth vectors, $N$ is the number of elements in the output vector, $y_i$ is the i#super[th] element of the ground truth vector, and $accent(y, hat)_i$ is the i#super[th] element of the model output vector. Mean square error is a good choice for regression problems; it is fully differentiable, unlike mean absolute error; however, it heavily emphasises outliers, which can be beneficial or detrimental depending on your scenario. ==== Mean Absolute Error <mae_sec> The mean absolute error can be used for the same problems that the mean square error is used for @mae_and_mae. Again, it is normalised by the number of elements in the output vector. It is given by $ L_op("MAE") (vectorn(y), uvectorn(y)) = 1/N sum_(i=1)^N |y_i - accent(y, hat)_i|, $ <mae_eq> where $L_op("MAE") (vectorn(y), uvectorn(y))$ is the loss function applied to the model output and ground truth vectors, $N$ is the number of elements in the output vector, $y_i$ is the i#super[th] element of the ground truth vector, and $accent(y, hat)_i$ is the i#super[th] element of the model output vector. Unlike mean squared error, it has a non-differentiable point at zero, where the gradient must be artificially replaced, which is not a particularly elegant solution. Mean absolute error punishes small errors more than mean squared error, but large errors less, which can be a desired trait in a model training procedure.
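As a concrete companion to the loss definitions above, the following minimal NumPy sketch implements binary cross entropy, categorical cross entropy, mean square error, and mean absolute error, together with a softmax stabilised by max-subtraction as discussed earlier. The function names and the clipping constant are our own illustrative choices; deep learning libraries provide fused, more robust versions.

```python
import numpy as np

def softmax(z):
    # Subtracting the max leaves the result unchanged but avoids overflow.
    e = np.exp(z - np.max(z))
    return e / np.sum(e)

def binary_cross_entropy(y, y_hat, eps=1e-12):
    y_hat = np.clip(y_hat, eps, 1.0 - eps)  # avoid log(0)
    return -np.sum(y * np.log(y_hat) + (1.0 - y) * np.log(1.0 - y_hat))

def categorical_cross_entropy(y, y_hat, eps=1e-12):
    return -np.sum(y * np.log(np.clip(y_hat, eps, 1.0)))

def mse(y, y_hat):
    return np.mean((y - y_hat) ** 2)

def mae(y, y_hat):
    return np.mean(np.abs(y - y_hat))

y = np.array([0.0, 1.0, 0.0])              # one-hot ground truth
y_hat = softmax(np.array([0.3, 2.0, -1.0]))  # model output
print(categorical_cross_entropy(y, y_hat), mse(y, y_hat), mae(y, y_hat))
```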
==== Huber <huber_sec> Huber loss is an attempt to combine the benefits of both mean square error and mean absolute error and remove some of their respective disadvantages @huber_loss. It uses a combination of both methods to achieve differentiability at all points whilst removing mean squared error's large penalty on outliers. It does, however, introduce a new user-tuned hyperparameter, $delta$, which, as has been discussed, is never ideal. It is defined by $ L_delta (vectorn(y), uvectorn(y)) = 1/N sum_(i=1)^N cases( 0.5 (y_i - accent(y, hat)_i)^2 "if" |y_i - accent(y, hat)_i| ≤ delta, delta |y_i - accent(y, hat)_i| - 0.5 delta^2 "if" |y_i - accent(y, hat)_i| > delta ), $ <huber_eq> where $L_delta (vectorn(y), uvectorn(y))$ is the loss function applied to the model output and ground truth vectors, $N$ is the number of elements in the output vector, $delta$ is a user-tuned hyperparameter which controls how much of the loss function obeys mean squared error and how much obeys mean absolute error, $y_i$ is the i#super[th] element of the ground truth vector, and $accent(y, hat)_i$ is the i#super[th] element of the model output vector. The choice of loss function for regression problems is very much problem-dependent, discoverable only through intuition about the dataset or, failing that, through experimentation. === Network Design The choice of loss function is largely down to the problem being attempted and, as such, is often correlated with an associated output-layer activation function; see @loss_function_table. #figure( table( columns: (auto, auto, auto, auto), inset: 10pt, align: horizon, [*Problem*], [*Example Label*], [*Activation Function*], [*Loss Function*], [Single-class \ Single-label \ Classification ], $[1]$ + " or " + $[0,1]$, link( label("sigmoid-sec") )[Sigmoid] + " or " + link( label("softmax-sec") )[Softmax], link( label("binary_cross_entropy_sec") )[Binary] + " or " + link( label("cross_entropy_sec") )[Categorical Cross Entropy], [Multi-class \ Single-label \ Classification ], $[0, 1, 0, 0]$, link( label("softmax-sec") )[Softmax] , link( label("cross_entropy_sec") )[Categorical \ Cross Entropy], [Multi-class \ Multi-label \ Classification ], $[0, 1, 0, 1]$, link( label("sigmoid-sec") )[Sigmoid] , link( label("binary_cross_entropy_sec") )[Binary Cross Entropy], [Regression including \ Autoencoders], $[0.12, -1.34]$, "Often " + link( label("relu-sec") )[ReLU] + " or " + link( label("linear-sec") )[Linear], "Often " + link(label("mse_sec") )[MSE] + ", " + link( label("mae_sec") )[MAE] + ", or " + link( label("huber_sec") )[Huber] ), caption: [Problems often solvable by artificial neural networks and their associated activation and loss functions. This table lists the most commonly used activation and loss functions for several common problem types that machine learning attempts to solve. The activation functions listed are described in @activation_functions_sec, whereas the loss functions are described in this section, @loss_functions_sec. MSE is an abbreviation of Mean Squared Error, and MAE is an abbreviation of Mean Absolute Error.] ) <loss_function_table> == The Gradients Must Flow <gradient-descent-sec> Without a method to find useful parameters, artificial neural networks are good for little more than hopelessly scrambling input data. As mentioned previously, this method is gradient descent @deep_learning_review @stocastic_gradient_descent_ref, using the local gradient of the model parameters to find a path toward the minimum loss.
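Before developing the idea further, a minimal sketch of the naive update rule itself may be a useful reference. The quadratic toy loss, the starting point, and the learning rate below are illustrative assumptions only, not values used elsewhere in this thesis.

```python
import numpy as np

def loss(theta):
    # A toy convex loss with its minimum at theta = 3.
    return (theta - 3.0) ** 2

def grad(theta):
    # Analytic gradient of the toy loss.
    return 2.0 * (theta - 3.0)

theta = 0.0    # an arbitrary starting parameter
eta = 0.1      # learning rate
for step in range(50):
    theta -= eta * grad(theta)  # step downhill along the local slope
print(theta, loss(theta))  # theta approaches 3, loss approaches 0
```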
It is useful to imagine the entirety of all possible model parameters forming a surface defined by the model's loss function. Every combination of parameters is a coordinate on this highly dimensional landscape, where the corresponding loss function tells us the height of the land. We can abstract away all this high dimensionality and reduce the problem to a two-dimensional mountain range, as long as we keep in mind that, in actuality, the number of dimensions of our surface equals the number of parameters we are fitting. Imagine we are lost in these Parameter Peaks. Our goal is to find the lowest point in this mountain range, for that will be the location of our helicopter rescue. In the same way that we can only calculate the gradient for one set of parameters at a time, we can only see the ground right beneath our feet; it is a very foggy day on the peaks. If we were to try to get to the lowest point, naturally, what we would do is look at the ground, examine the slope, and walk downhill; the same is true for gradient descent. #figure( image("gradient_descent_examples.png", width: 90%), caption: [_ Left: _ An idealised gradient descent path. This gradient descent process quickly reaches the true function minimum where, in this case, the loss is close to zero. However, this is a constructed example, made by first finding a point near the function minimum and then performing a gradient ascent operation. _ Right: _ A more realistic gradient descent path. This example shows a simple but real gradient descent process running on the cost function. As can be seen, it takes many more steps and has not yet converged on the true minimum; in fact, the process might be at risk of getting stuck in a local minimum. Both examples were generated using this notebook: https://tinyurl.com/3ufb5my3.] ) <gradient_descent_examples> This works perfectly if all gradients point toward the bottom, in what is called a convex parameter space, with one global minimum and no local minima. Parameter Peaks, however, can often have some nasty tricks in store. Just like real-life mountain ranges, there can be local minima. Divots and valleys look like they might be the bottom of the mountain, but without some global information, it is impossible to tell: the parameter space is non-convex. Thus, we must explore before we settle on our final choice, moving up and down smaller hills, generally moving toward lower regions, but always searching for better outcomes --- that is, until our time runs out, model training ends, and we must make a final decision about where to await the helicopter. Although perhaps if our training lasts multiple epochs, we'll have multiple days to figure it out, taking new paths each time. There are, perhaps unsurprisingly, a number of different algorithms to achieve this purpose beyond the naive descent suggested by @gradient_decent_step, which could leave you stuck blindly in a divot on the top of a mountain whilst wide chasms stretch unseen before you. The first misconception to correct, beyond what has been previously discussed, is that gradient descent is not usually performed one example at a time but rather in batches of $N_op("batch")$ examples, the gradients of which are calculated simultaneously. This adds a further user-adjustable hyperparameter, the batch size, $N_op("batch")$, to our growing collection. It also creates a distinction between three distinct gradient descent modes.
*Stochastic gradient descent* is the method that was illustrated in @training_arificial_neurons. This involves updating the model parameters based on the gradient of a single example at a time, looping through every example in the dataset one at a time, $N_op("batch") = 1$ @stocastic_gradient_descent_ref. Stochastic gradient descent can converge faster than other methods, as it updates the parameters as frequently as possible. This is most useful with larger datasets that cannot fit into memory, as it can make progress long before it has seen all the data, and ample data will help it converge on a correct solution. Stochastic gradient descent does introduce a lot of noise into the descent process, as the smoothing effects from averaging across examples are not present. This has advantages and disadvantages: the noise can help prevent the descent from getting stuck in local minima, but by the same process, it can struggle to settle even in the true minimum, and convergence can take a long time. It can also be slow, as gradients need to be calculated for each example sequentially. *Mini-batch descent* is perhaps the most commonly used of the gradient descent paradigms @mini_batch_descent. In contrast to stochastic gradient descent, the gradient is calculated for multiple examples simultaneously. Unlike batch descent, however, it does not calculate the gradient for the entire dataset at once. The only restraints, therefore, are that the batch size must be larger than one and smaller than the number of elements in your training dataset: $N_op("batch") > 1$ and $N_op("batch") < N_op("dataset")$. The batch size is usually much smaller than the size of the entire dataset, with powers of two around 32 being commonplace. This method can produce a more stable convergence than stochastic descent: because it averages the gradient over many examples at once, there is less noise. It is a compromise between batch and stochastic descent, and its strengths and weaknesses depend largely on the batch size you select. This is also one of its largest downsides; any additional hyperparameter is one more factor that has to be tuned by some other external method. *Batch descent* occurs when the gradient is calculated for all examples in our training dataset simultaneously, $N_op("batch") = N_op("dataset")$; it is, therefore, in some ways, the purest form of gradient descent, as the gradient is calculated with all available data included @batch_descent. In theory, it will have the most stable and direct convergence of all the methods, although in practice this is often not the case @batch_descent. Whilst sometimes producing good results, it can get stuck in local minima, as there is no capacity for exploration. It also has the considerable downside of being very computationally expensive: for very large training datasets, it quickly becomes infeasible in both time and computation. This method is rarely used in modern machine learning due to infeasibly large training datasets. What follows is a brief explanation of various optimisation algorithms that can be used during the training process. The choice of optimiser can again be considered another hyperparameter that must be externally selected. === Momentum In order to avoid local minima and introduce more exploration to our training process, many optimisers introduce the concept of "momentum" to the descent process @momentum_ref @gradient_descent_algorithms @gradient_descent_algorithms_2.
This cannot be applied to batch gradient descent, since there is only one step in the process. Adding momentum to a descent algorithm is quite literally what it sounds like: if we consider the descent process to be a ball rolling down a hill, momentum is a property that changes more slowly than the gradient of the terrain beneath it. In that way, it acts to smooth the inherent noise generated by gradient descent by adding a proportion of the previous gradient to the determination of the next parameter-space step. This can help improve convergence and prevent progress from getting stuck in a local minimum. In order to describe this process mathematically, we introduce the concept of a parameter-space velocity, $vectorn(v)_theta (t)$, which is recorded independently of the parameter-space position, i.e. the parameter values themselves, $vectorn(theta)$. The two equations that fully describe the descent are $ bold(vectorn(v)_theta (t)) = alpha bold(vectorn(v)_theta (t - 1)) + eta vectorn(gradient) bold(L_( M vectorn(x)_t vectorn(y)_t )) $ <descent_momentum_velocity> and $ vectorn(theta)_bold(t + 1) = vectorn(theta)_bold(t) - bold(vectorn(v)_theta (t)), $ <descent_momentum_position> where $t$ is the current batch index, $bold(vectorn(v)_theta (t))$ is the parameter velocity at the current batch, $bold(vectorn(v)_theta (t - 1))$ is the parameter velocity at the previous batch (initialised to $0$ at $t - 1$), $alpha$ is the momentum parameter, $eta$ is the learning rate, $vectorn(gradient) bold(L_( M vectorn(x)_t vectorn(y)_t ))$ is the gradient of the model parameters with respect to the loss function, $vectorn(theta)_bold(t+1)$ are the updated model parameters, and $vectorn(theta)_bold(t)$ are the model parameters at the current step. As with the previous training steps, this process can be used for either stochastic or mini-batch descent and will be repeated across all training examples, or batches of training examples, in the training dataset. The momentum parameter is a newly introduced hyperparameter that must be set before the initiation of training. The momentum value indicates what fraction of the previous parameter velocity is added to the current velocity; for any valid descent algorithm, this must be below one, $alpha < 1$, as otherwise the velocity will grow unbounded with each step. Common choices for momentum values hover around 0.9. Momentum can be combined with stochastic or mini-batch descent and is an important aspect of other gradient techniques, including RMSProp and Adam @gradient_descent_algorithms @gradient_descent_algorithms_2. === AdaGrad (Adaptive Gradient Algorithm) In standard gradient descent, every parameter, $theta_i$, within your parameter vector, $vectorn(theta)$, is treated equally by the descent algorithm. We can, however, imagine scenarios where treating all parameters equally is not the ideal method. A given training dataset may not contain an equal representation of all features present in that dataset. Indeed, even individual examples may have some features that are much more common than others. Often, these rarer features can be crucial to the efficient tuning of the network; however, the parameters that represent them might see far fewer updates than other parameters, leading to long and inefficient convergence. To combat this problem, AdaGrad, or the adaptive gradient algorithm, was introduced @adagrad @gradient_descent_algorithms @gradient_descent_algorithms_2.
This method independently modifies the learning rate for each parameter depending on how often it is updated, allowing sparsely-updated parameters more opportunity to train. It achieves this by keeping a running sum of the squared gradients and adjusting each learning rate independently using this record. This is equivalent to normalising the learning rate by the L2 norm of the previous gradients. This approach is defined by $ vectorn(g)_bold(t) = vectorn(g)_bold(t - 1) + vectorn(gradient) bold(L_( M vectorn(x)_bold(t) vectorn(y)_bold(t) ))^(compose 2) $ <adagrad_sum> and $ vectorn(theta)_bold(t + 1) = vectorn(theta)_bold(t) - (eta/(vectorn(g)_bold(t) + epsilon)^(compose 1/2) ) dot.circle vectorn(gradient) bold(L_( M vectorn(x)_bold(t) vectorn(y)_bold(t)) ) $ <adagrad_iteration> where $t$ is the current batch index, $vectorn(g)_bold(t)$ is a vector containing the sum of the squares of all parameter gradients up to the training iteration, $t$, $vectorn(g)_bold(t-1)$ is the same sum excluding the current squared gradients, $vectorn(gradient) bold(L_( M vectorn(x)_bold(t) vectorn(y)_bold(t) ))$ is a vector containing the gradients for each parameter at the current iteration, $vectorn(theta)_bold(t + 1)$ are the parameters at the next iteration, $vectorn(theta)_bold(t)$ are the parameters at the current iteration, and $epsilon$ is a very small value to prevent division-by-zero errors in the calculation. The $dot.circle$ notation is known as the Hadamard product and represents the element-wise multiplication of two vectors, i.e. $vectorn(a) dot.circle vectorn(b) = [a_1 b_1, ..., a_i b_i, ..., a_N b_N]$. Similarly, $vectorn(a)^(compose 2)$ refers to the element-wise square of the vector $vectorn(a)$, i.e. $vectorn(a)^(compose 2) = [a_1^2, ..., a_i^2, ..., a_N^2]$, and $vectorn(a)^(compose 1/2)$ represents the element-wise square root of $vectorn(a)$, i.e. $vectorn(a)^(compose 1/2) = [sqrt(a_1), ..., sqrt(a_i), ..., sqrt(a_N)]$. This method has the advantage of self-tuning the learning rate for individual parameters, removing the need for manual per-parameter tuning, and it helps the model update sparse parameters more quickly by increasing the learning rate for parameters which learn more rarely seen features @adagrad @gradient_descent_algorithms @gradient_descent_algorithms_2. These small features are often very important for whatever operation is being optimised. AdaGrad still leaves the global learning rate, $eta$, as an open hyperparameter which must be user-defined, and it can lead to problems when training deep networks with many layers, where the vanishing gradient problem can already result in tiny parameter updates @adagrad @gradient_descent_algorithms @gradient_descent_algorithms_2. The vanishing learning rate problem can arise when training using AdaGrad with very large training datasets. In models with large numbers of parameters, it is crucial that the parameters continue to be updated throughout the training process to ensure that all of the many parameters reach optimally tuned values. However, if the normalisation factor, $vectorn(g)$, grows too big for some parameters over the training process, the gradient updates can become very small, and training can slow to a crawl. Root mean square propagation is a proposed solution to this problem.
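A minimal NumPy sketch of @adagrad_sum and @adagrad_iteration follows; the toy loss (a simple quadratic bowl), its gradient, and the learning rate are assumptions made purely for illustration.

```python
import numpy as np

def adagrad_step(theta, grads, g_sum, eta=0.5, eps=1e-8):
    # Accumulate the element-wise square of every gradient seen so far...
    g_sum += grads ** 2
    # ...and shrink each parameter's learning rate by the root of that sum.
    theta -= (eta / np.sqrt(g_sum + eps)) * grads
    return theta, g_sum

theta = np.array([1.0, -2.0])
g_sum = np.zeros_like(theta)   # running sum of squared gradients
for _ in range(100):
    grads = 2.0 * theta        # gradient of the toy loss sum(theta**2)
    theta, g_sum = adagrad_step(theta, grads, g_sum)
print(theta)  # both parameters creep toward 0, each at its own rate
```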
=== RMSProp (Root Mean Square Propagation) RMSProp, or root mean square propagation, is an alternative method to solve the adaptive learning rate issue, which attempts to alleviate the vanishing learning rate problem by normalising the learning rate less aggressively @rmsprop. Instead of using the L2 norm of all previous gradients to normalise each parameter's learning rate, like AdaGrad, it uses a moving average of the squared gradients. This also deals with non-convex scenarios better, as it allows the gradient descent to escape without the learning rate falling to tiny values. This process is described by $ vectorn(E)_bold(g^2) (t) = beta vectorn(E)_bold(g^2) (t-1) + (1 - beta) (vectorn(gradient) bold(L_( M vectorn(x)_t vectorn(y)_t )))^(compose 2) $ <rms_sum> and $ vectorn(theta)_bold(t + 1) = vectorn(theta)_bold(t) - (eta/(vectorn(E)_bold(g^2) (t) + epsilon)^(compose 1/2)) dot.circle vectorn(gradient) bold(L_( M vectorn(x)_t vectorn(y)_t )), $ <rms_iteration> where $t$ is the current batch index, $vectorn(E)_bold(g^2) (t)$ is the moving average of the squared parameter gradients with respect to the loss function, $beta$ is the decay rate for the moving average, which controls how quickly the effect of previous gradients on the current learning rate falls off, $vectorn(gradient) bold(L_( M vectorn(x)_t vectorn(y)_t ))$ is a vector containing the gradients for each parameter at the current iteration, $vectorn(theta)_bold(t + 1)$ are the parameters at the next iteration, $vectorn(theta)_bold(t)$ are the parameters at the current iteration, and $epsilon$ is a very small value to prevent division-by-zero errors in the calculation. This is a similar method to AdaGrad, so it has many of the same strengths and weaknesses, but it alleviates the vanishing learning rate problem @gradient_descent_algorithms @gradient_descent_algorithms_2. It also introduces one new hyperparameter, the decay rate, $beta$, which must be decided, and it does not necessarily eradicate the vanishing learning rate problem in all situations. === Adam (Adaptive Moment Estimation) Adam (Adaptive Moment Estimation) combines the advantages of AdaGrad and RMSProp @adam_optimiser. Instead of normalising by the L2 norm alone, like AdaGrad, or the moving squared average alone, like RMSProp, it uses exponentially decaying moving averages of both the gradient, $vectorn(E)_bold(g) (t)$, and the squared gradient, $vectorn(E)_bold(g^2) (t)$, and uses the parameters $beta_1$ and $beta_2$ to control the decay rates of these averages respectively. The moving average of the gradient and the moving average of the squared gradient are $ vectorn(E)_bold(g) (t) = beta_1 vectorn(E)_bold(g) (t-1) + (1-beta_1) vectorn(gradient) bold(L_( M vectorn(x)_bold(t) vectorn(y)_bold(t) )) $ <adam_average> and $ vectorn(E)_bold(g^2) (t) = beta_2 vectorn(E)_bold(g^2) (t-1) + (1-beta_2) vectorn(gradient) bold(L_( M vectorn(x)_t vectorn(y)_t ))^(compose 2). $ <adam_moving_average> As with previous methods, both moving average values are initialised to vectors of zeros at the start of the descent @adam_optimiser. This poses an issue, as early steps would be weighted toward zero. In order to solve this, the algorithm introduces two bias-corrected terms, $uvectorn(E)_bold(g) (t)$ and $uvectorn(E)_bold(g^2) (t)$, to correct this issue: $ uvectorn(E)_bold(g) (t) = vectorn(E)_bold(g) (t) / (1 - (beta_1)^t) $ <adam_average_corrected> and $ uvectorn(E)_bold(g^2) (t) = vectorn(E)_bold(g^2) (t) / (1 - (beta_2)^t).
$ <adam_squared_average_corrected> These terms are then combined in @adam_iteration, $ vectorn(theta)_bold(t + 1) = vectorn(theta)_bold(t) - (eta/((uvectorn(E)_bold(g^2) (t) + epsilon)^(compose 1/2))) dot.circle uvectorn(E)_bold(g) (t) $ <adam_iteration> where $t$ is the current batch index, $vectorn(E)_bold(g) (t)$ is the moving average of the parameter gradients with respect to the loss function, $vectorn(E)_bold(g^2) (t)$ is the moving average of the squared parameter gradients with respect to the loss function, $beta_1$ and $beta_2$ are the decay rates for the moving average and the moving squared average respectively, which control how quickly the effect of previous gradients on the current learning rate falls off, $vectorn(gradient) bold(L_( M vectorn(x)_t vectorn(y)_bold(t) ))$ is a vector containing the gradients for each parameter at the current iteration, $vectorn(theta)_bold(t + 1)$ are the parameters at the next iteration, $vectorn(theta)_bold(t)$ are the parameters at the current iteration, and $epsilon$ is a very small value to prevent division-by-zero errors in the calculation. The Adam optimiser can intuitively be thought of as combining the adaptive learning rate methods with a form of momentum @adam_optimiser. $vectorn(E)_bold(g) (t)$ carries the first moment, the momentum of the past gradients, which, like momentum, will keep you moving in the general direction that you have been travelling, moderated by the $beta_1$ parameter. $vectorn(E)_bold(g^2) (t)$ carries information about the second moment, which remembers the magnitude of the gradients. This will make the algorithm move more cautiously if it has been encountering steep gradients, which would otherwise cause large steps and make the optimiser overshoot; it can act as a brake on the momentum built up in the first moment. The $beta_2$ parameter moderates this aspect. The Adam optimiser is perhaps the most widely known and widely used optimiser in modern artificial neural network training, due in large part to its efficacy @gradient_descent_algorithms @gradient_descent_algorithms_2. Although there have been many adaptations and variants of the Adam optimiser which have tried to improve its operation, none have been so successful as to overthrow its position as the standard choice of gradient descent algorithm. === Backpropagation <backpropagate-sec> So far, we have been using the parameter gradient vector, $vectorn(gradient) bold(L_( M vectorn(x)_t vectorn(y)_t))$, without considering how we might calculate this value. In the case of a single-layer perceptron, this process is not particularly difficult. As discussed before, first, we must pass an example (or batch of examples) through a randomly initiated network. This network, though untuned, will still produce an output vector, $accent(y, hat)$, albeit a useless one. We can then work backwards from the model output, $accent(y, hat)$, and, in the case of supervised learning, compare it to our desired output, $y$, using the loss function, $L$. We can do this by applying the chain rule to the weights @backpropogation_ref. Let's work through an example of how we might do this for a simple single-layer perceptron, with parameters, $vectorn(theta)$, split into a weights matrix, $matrixn(W)$, and a bias vector, $vectorn(b)$. The action of the model is defined by $ M(vectorn(x))= f(vectorn(z)) $ where $vectorn(z) = matrixn(W) vectorn(x) + vectorn(b)$ is the raw input to the activation function, and $f$ is the activation function.
The $i^op("th")$ element of the output is given by the softmax function of the raw input, $vectorn(z)$: $ accent(y, hat)_i = e^(z_i) / (sum_(j=1)^N e^(z_j)) $ and the loss function is given by $ L = -sum_(i=1)^N y_i log(accent(y, hat)_i), $ where $L$ is the loss function, $N$ is the number of elements in the output vector, and $accent(y, hat)_i$ is the $i^op("th")$ element of the output vector. We want to find the gradients of the model parameters with respect to the loss function, in this case, $(partial L) / (partial matrixn(W))$ and $(partial L) / (partial vectorn(b))$. We can start by using the chain rule to compute $(partial L) / (partial z_i)$, the derivative of the loss with respect to the $i^op("th")$ component of $vectorn(z)$: $ (partial L) / (partial z_i) = sum_(j=1)^N (partial L) / (partial accent(y, hat)_j) (partial accent(y, hat)_j) / (partial z_i) $ <chain_rule_1> Here, $(partial L) / (partial accent(y, hat)_j)$ is the derivative of the loss with respect to the $j^op("th")$ output, and $(partial accent(y, hat)_j)/ (partial z_i)$ is the derivative of the $j^op("th")$ output with respect to the $i^op("th")$ input before activation. In our case, because we are using categorical cross-entropy loss: $ (partial L) / (partial accent(y, hat)_j) = - y_j / accent(y, hat)_j $ <chain_rule_2> And, due to the softmax activation function, in which the value of every output neuron affects the gradient of all the others, $ (partial accent(y, hat)_j) / (partial z_i) = cases( accent(y, hat)_j (1 - accent(y, hat)_j) "if" i = j, - accent(y, hat)_j accent(y, hat)_i "if" i ≠ j ) $ <chain_rule_3> Substitution of @chain_rule_2 and @chain_rule_3 into @chain_rule_1 gives $ (partial L) / (partial z_i) = - y_i / accent(y, hat)_i accent(y, hat)_i (1 - accent(y, hat)_i) + sum_(j ≠ i) (- y_j / accent(y, hat)_j) (-accent(y, hat)_j accent(y, hat)_i). $ Simplifying gives: $ (partial L)/ (partial z_i) = - y_i (1 - accent(y, hat)_i) + sum_(j ≠ i) y_j accent(y, hat)_i. $ We can simplify this further because $sum_(j) y_j = 1$, as the ground truth label is a one-hot vector and will always sum to one: $ (partial L)/ (partial z_i) = accent(y, hat)_i - y_i. $ This shows that the derivative of the loss with respect to the sum of the weighted inputs and bias values, $(partial L)/ (partial z_i)$, is equal to the difference between the model output value and the ground truth label value. This provides us with another insight into the design of the softmax function and its use of exponentials. We can then again use the chain rule to find the gradients of the weights and biases $ (partial L) / (partial matrixn(W)) = (partial L) / (partial vectorn(z)) (partial vectorn(z)) / (partial matrixn(W)) = (uvectorn(y) - vectorn(y)) vectorn(x)^T $ <weights_chain> and $ (partial L)/ (partial vectorn(b)) = (partial L)/(partial vectorn(z)) (partial vectorn(z))/ (partial vectorn(b)) = uvectorn(y) - vectorn(y). $ <bias_chain> Both of the gradients, @weights_chain and @bias_chain, are quite intuitively what you might expect from a single-layer network. There is no non-linear behaviour, and, as we previously speculated, the network is just training to find pixels that are most often activated by certain classes. We can use a similar method for artificial neural networks of all complexities and depths.
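The identity $(partial L) / (partial z_i) = accent(y, hat)_i - y_i$ is easy to verify numerically. The short sketch below compares the derived gradient against a central finite-difference estimate; the particular logits and one-hot label are arbitrary assumptions for illustration.

```python
import numpy as np

def softmax(z):
    e = np.exp(z - np.max(z))
    return e / np.sum(e)

def loss(z, y):
    # Categorical cross entropy applied to softmax outputs.
    return -np.sum(y * np.log(softmax(z)))

z = np.array([0.5, -1.2, 2.0])  # arbitrary raw inputs (logits)
y = np.array([0.0, 0.0, 1.0])   # one-hot ground truth

analytic = softmax(z) - y       # the derived gradient, y_hat - y

# Central finite-difference estimate of the same gradient.
eps = 1e-6
numeric = np.array([
    (loss(z + eps * np.eye(3)[i], y) - loss(z - eps * np.eye(3)[i], y))
    / (2 * eps)
    for i in range(3)
])
print(np.allclose(analytic, numeric, atol=1e-5))  # True
```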
For a feed-forward dense network with $N$ layers, let us denote the weighted sums of the inputs plus the biases of the layer with index $i$ as $vectorn(z)_bold(i)$, the output of the activation function, $f$, of layer $i$ as $vectorn(a)_bold(i) = f(vectorn(z)_bold(i))$, the weights matrix and biases vector of layer $i$ as $matrixn(W)_bold(i)$ and $vectorn(b)_bold(i)$, and the loss function again as $L$. First, we compute the forward propagation by running an input vector, $vectorn(x)$, or a batch of input vectors, through the network to produce an output vector $uvectorn(y)$. Then we perform the following procedure. + Compute the derivative of the loss function with respect to the final output values: $ (partial L)/( partial a_N) = (partial L)/( partial accent(y, hat))$. + Compute $(partial L)/ (partial z_N) = (partial L) / (partial a_N) (partial a_N)/ (partial z_N) $, where $(partial a_N)/(partial z_N)$ is the derivative of the activation function in the final layer. This gives the gradient of the loss function with respect to the final raw outputs, $vectorn(z)_bold(N)$. + Compute $(partial L)/ (partial matrixn(W)_bold(N)) = (partial L) / (partial z_N) (partial z_N) / (partial matrixn(W)_bold(N)) $ and $(partial L) / (partial b_N) = (partial L) / (partial z_N) (partial z_N)/ (partial b_N) $. This gives the gradients with respect to the final layer's weights and biases. + To propagate the error back to the previous layer, compute $(partial L)/ (partial a_(N-1)) = (partial z_N)/ (partial a_(N-1)) (partial L) / (partial z_N) = W_(N)^T (partial L) / (partial z_N)$. + Recursively repeat steps 2 to 4 for each earlier layer until you reach the input layer and have gradients for all parameters. This method is known as backpropagation because you work backward from the output of the model toward the input vector @backpropogation_ref. == Overfitting and Regularisation Thus far, we have been partaking in perhaps one of the most heinous sins when developing a machine learning method --- we have not made a distinction between the dataset we use to train our model, our *training* dataset, and the dataset we use to test our model, our *testing* dataset. It is vital that, whenever possible, we produce these datasets independently and keep them entirely separate, so that a model never has the chance to use any information present in the testing dataset to adjust its weights, ensuring that the first time the model sees any of the testing examples is when it is being validated with them. This hard segregation acts as a test to see if our model has *overfit* @overfitting. If the model learns the features of each of our training examples to the point where it can remember each specific example and match that example to a label, then our model may just associate each training example individually with a label rather than learning the general features of the dataset that allow for the classification of new, unseen examples. If this is the case, when presented with new examples, such as examples from our testing dataset, the classifier will fail, because it does not know what class these examples belong to. Thus, keeping a separate dataset for testing is crucial to accurately assess model performance; without such a distinct dataset, we cannot make any claims about the efficacy of our model. Whether, and to what degree, it is possible for a model to overfit a particular dataset depends on the size and complexity of the training dataset and the size and complexity of the model.
The larger the training dataset, the less likely overfitting is to occur; however, a larger model can "remember" more data, which increases its capacity to overfit its training dataset. Often, a further distinction is made between testing, training, and validation datasets, although the nomenclature is often mixed up between the three. The purpose of introducing a third, *validation*, dataset is to act as a final check of generality. Since the training procedure and model architecture are often altered throughout the development of the model, it is important to make sure that these alterations are not also accidentally tailored to our testing dataset. This third, validation dataset is set aside, ideally until the finalisation of the data analysis method, to act as a final test of performance. Ideally, this dataset would have been created prior to the commencement of the project, to ensure that there is no possibility that the validation dataset is generated with the designed method in mind. There may also be a need for more than three datasets. For example, you might want a test dataset that a training model is compared against every epoch, then another test dataset used after a full training procedure has completed but before the final method has been selected, which would necessitate a fourth dataset. The term validation dataset will be used throughout this thesis for any dataset that is not the training dataset. Overfitting is one of the most prominent and difficult problems in artificial neural network development, and thus a large body of methods has developed to try to ameliorate the issues it causes @overfitting. These methods are known as regularisation methods; the following few sections will briefly describe some of them. === Dropout One of the most powerful ways to deal with overfitting is to employ dropout layers within your network @dropout_ref. Dropout layers can prevent overfitting by ensuring that the model does not rely on any one given neuron (or any given set of neurons) in order to produce its final output. Dropout layers do this by randomly setting a certain percentage of the previous layer's outputs to zero, ensuring that information from those neurons cannot be used to produce the model's output during this inference. The choice of neurons that are zeroed is randomised between each training instance; this teaches the model to explore different feature-recognition pathways in each training batch. The percentage of outputs dropped is a user-selected hyperparameter that must be decided before model training is initiated, and can be anywhere from 0 (equivalent to no dropout layer) to 1 (which would stop all information flowing through the network and make training impossible); typical dropout values lie between 0.1 and 0.5 @dropout_ref. Dropout layers are only active during model training; when in use for inference proper, they are not employed and can be removed without affecting model function. By randomly dropping out neurons during model training, dropout reduces the information that a model can rely on to produce its final output @dropout_ref @dropout_regularisation. Because, in almost all cases, remembering the exact form of each training example takes a larger amount of information than remembering only general features, the network is incentivised to learn input features rather than memorising specific training examples.
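As an illustration of the mechanism just described, a minimal NumPy sketch of a dropout mask follows. The rescaling of the surviving activations ("inverted dropout") is an implementation detail assumed here, borrowed from common library behaviour rather than from the text; it keeps the expected magnitude of the layer output unchanged between training and inference.

```python
import numpy as np

rng = np.random.default_rng(0)

def dropout(a, rate=0.5, training=True):
    # During training, zero each activation with probability `rate` and
    # rescale the survivors so the expected output is unchanged.
    if not training or rate == 0.0:
        return a  # at inference the layer is a no-op
    mask = rng.random(a.shape) >= rate
    return a * mask / (1.0 - rate)

a = np.ones(10)
print(dropout(a))                  # roughly half the entries zeroed
print(dropout(a, training=False))  # unchanged
```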
Dropout can sometimes slow down model convergence, and it is not a complete solution to overfitting, but it finds use very commonly across a wide range of modern neural network architectures @dropout_regularisation. === Batch normalisation Another layer type that can be added to a network to act as regularisation, as well as provide other benefits, is the batch normalisation layer @batch_normalisation_ref. As data moves through deep networks, a phenomenon known as internal covariate shift can take place. This describes the scenario wherein values flowing through the network occupy a wide range of distributions that vary dramatically between layers, usually increasing in magnitude as they move through the network. This can be a problem, as the activation functions present in a network are designed to act on specific data distributions, so large values in the network can saturate their non-linearities and remove much of the potential for nuance, which can increase model convergence time and degrade model performance. Batch normalisation layers offer a solution to this problem by normalising the mean and standard deviation of a layer's output distribution to certain values, normally a mean of zero and a variance of one. To ensure that this normalisation does not reduce the information content of the layer output, the layer has two tunable weight parameters per input neuron, one to scale the neuron's output after normalisation, and the other to shift it after normalisation. Finally, batch normalisation ensures that the gradients stay within a reasonable range, which also improves model convergence. Like dropout, batch normalisation is applied differently during training and when in use in production, in the inference phase of the model @batch_normalisation_ref. During training, the normalisation is based on the mean and variance of the current training batch, whereas during inference the normalisation uses the moving average and moving variance computed during the training phase, primarily to ensure that the model's output is deterministic, which is often a desired characteristic. Batch normalisation serves many purposes within the network, improving convergence and allowing for faster training times, but it can also help to prevent overfitting, because it dramatically reduces the range of possible states that can occur in inference and training. === Early Stopping One simple and often-employed way to prevent overfitting is to halt the training procedure before overfitting can occur @early_stopping. Generally, if it is possible for a given model to overfit a given training distribution, then it will overfit more the more often it has seen that training dataset, i.e. the more epochs that have been used in the training procedure. Shuffling the dataset each epoch will reduce this problem slightly by generating unique batch combinations and altering the path that gradient descent takes through the parameter space, but at each iteration the model is still adjusting its parameters based on the training examples, potentially closing in on a fit that is too tight. This can be alleviated by halting the training early based on the model's post-epoch performance when validated on your validation dataset, generated as independently as possible from your training dataset. If your model begins to overfit your training dataset, then, almost by definition, validation performance will begin to degrade or at least saturate.
The model training procedure can be configured to detect this automatically, relying on a user-defined hyperparameter known as patience, which determines the number of epochs with no improvement in validation performance to wait before halting the training. Stopping model training early and restoring the model parameters that achieved the best validation performance can be an effective method to stop the model from converging on a fit that is too closely tailored to the training dataset. == Infrastructure Layers <flatten-sec> Most GPU vector libraries, including TensorFlow @tensorflow, have strict requirements about the shapes of the vectors that flow through them. Within artificial neural network models, there is often a need to change the shape and/or dimensionality of the vectors as they flow through the network -- for example, when moving from a 2D image to a 1D vector, as we saw when feeding 2D MNIST images into the 1D perceptron architecture, we must employ a *flattening layer*, which takes an input vector of whatever dimensionality and reduces it to a 1D vector. We can also use reshape layers to perform more complex reshapings between vector shapes, as long as the requested resultant vector contains the same number of elements as the input vector; see @flattening_diagram. #figure( image("flattening.png", width: 80%), caption: [A flattening layer. This layer takes a 2D input matrix $X = mat(x^1_1, x^1_2; x^2_1, x^2_2)$ and converts it into a 1D vector, $ vectorn(y) = [y_1, y_2, y_3, y_4]$, without using any learned parameters or altering the values of the data. It simply rearranges the indices and removes all but one dimension. Reshaping layers are a more general version of a flattening layer, where an input vector or matrix can be transformed into any equivalently sized output vector or matrix.], ) <flattening_diagram> These kinds of "infrastructure" layers will typically not be discussed nor included in network diagrams if their existence is implied by the network construction. They do not have any trainable parameters and perform no transformation on the passing data other than to change the data layout. They are only noted when newly introduced or of special interest.
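A flattening layer corresponds to nothing more than a reshape with no learned parameters; a short NumPy illustration, using an assumed 2 × 2 input matching @flattening_diagram, follows.

```python
import numpy as np

x = np.array([[1, 2],
              [3, 4]])  # a 2D input, as in the figure
y = x.reshape(-1)       # flatten: [1, 2, 3, 4]
z = y.reshape(2, 2)     # a reshape layer can restore any equally-sized
                        # shape without altering the values themselves
print(y, z.shape)
```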
https://github.com/michidk/cv
https://raw.githubusercontent.com/michidk/cv/main/src/resume.typ
typst
#import "template/mod.typ": * #show: template.with( data: json("../data/resume.json"), displayTagline: true, displaySummary: true, importanceFilter: 2, // filter everything that is less important than 2 debug: false )
https://github.com/typst/packages
https://raw.githubusercontent.com/typst/packages/main/packages/preview/unichar/0.1.0/ucd/block-11200.typ
typst
Apache License 2.0
#let data = ( ("KHOJKI LETTER A", "Lo", 0), ("KHOJKI LETTER AA", "Lo", 0), ("KHOJKI LETTER I", "Lo", 0), ("KHOJKI LETTER U", "Lo", 0), ("KHOJKI LETTER E", "Lo", 0), ("KHOJKI LETTER AI", "Lo", 0), ("KHOJKI LETTER O", "Lo", 0), ("KHOJKI LETTER AU", "Lo", 0), ("KHOJKI LETTER KA", "Lo", 0), ("KHOJKI LETTER KHA", "Lo", 0), ("KHOJKI LETTER GA", "Lo", 0), ("KHOJKI LETTER GGA", "Lo", 0), ("KHOJKI LETTER GHA", "Lo", 0), ("KHOJKI LETTER NGA", "Lo", 0), ("KHOJKI LETTER CA", "Lo", 0), ("KHOJKI LETTER CHA", "Lo", 0), ("KHOJKI LETTER JA", "Lo", 0), ("KHOJKI LETTER JJA", "Lo", 0), (), ("KHOJKI LETTER NYA", "Lo", 0), ("KHOJKI LETTER TTA", "Lo", 0), ("KHOJKI LETTER TTHA", "Lo", 0), ("KHOJKI LETTER DDA", "Lo", 0), ("KHOJKI LETTER DDHA", "Lo", 0), ("KHOJKI LETTER NNA", "Lo", 0), ("KHOJKI LETTER TA", "Lo", 0), ("KHOJKI LETTER THA", "Lo", 0), ("KHOJKI LETTER DA", "Lo", 0), ("KHOJKI LETTER DDDA", "Lo", 0), ("KHOJKI LETTER DHA", "Lo", 0), ("KHOJKI LETTER NA", "Lo", 0), ("KHOJKI LETTER PA", "Lo", 0), ("KHOJKI LETTER PHA", "Lo", 0), ("KHOJKI LETTER BA", "Lo", 0), ("KHOJKI LETTER BBA", "Lo", 0), ("KHOJKI LETTER BHA", "Lo", 0), ("KHOJKI LETTER MA", "Lo", 0), ("KHOJKI LETTER YA", "Lo", 0), ("KHOJKI LETTER RA", "Lo", 0), ("KHOJKI LETTER LA", "Lo", 0), ("KHOJKI LETTER VA", "Lo", 0), ("KHOJKI LETTER SA", "Lo", 0), ("KHOJKI LETTER HA", "Lo", 0), ("KHOJKI LETTER LLA", "Lo", 0), ("KHOJKI VOWEL SIGN AA", "Mc", 0), ("KHOJKI VOWEL SIGN I", "Mc", 0), ("KHOJKI VOWEL SIGN II", "Mc", 0), ("KHOJKI VOWEL SIGN U", "Mn", 0), ("KHOJKI VOWEL SIGN E", "Mn", 0), ("KHOJKI VOWEL SIGN AI", "Mn", 0), ("KHOJKI VOWEL SIGN O", "Mc", 0), ("KHOJKI VOWEL SIGN AU", "Mc", 0), ("KHOJKI SIGN ANUSVARA", "Mn", 0), ("KHOJKI SIGN VIRAMA", "Mc", 9), ("KHOJKI SIGN NUKTA", "Mn", 7), ("KHOJKI SIGN SHADDA", "Mn", 0), ("KHOJKI DANDA", "Po", 0), ("KHOJKI DOUBLE DANDA", "Po", 0), ("KHOJKI WORD SEPARATOR", "Po", 0), ("KHOJKI SECTION MARK", "Po", 0), ("KHOJKI DOUBLE SECTION MARK", "Po", 0), ("KHOJKI ABBREVIATION SIGN", "Po", 0), ("KHOJKI SIGN SUKUN", "Mn", 0), ("KHOJKI LETTER QA", "Lo", 0), ("KHOJKI LETTER SHORT I", "Lo", 0), ("KHOJKI VOWEL SIGN VOCALIC R", "Mn", 0), )
https://github.com/N3M0-dev/Notes
https://raw.githubusercontent.com/N3M0-dev/Notes/main/CS/CO/Ch_1/ch1.typ
typst
#import "@local/note_template:0.0.1": * #set page(numbering: "1", number-align: center) #set heading(numbering: "1.1") #set par(justify: true) #set text(12pt) #set outline(indent: true) #frontmatter(authors: ("Nemo",), title: "Computer Abstractions and Technology",date: "2024.4.15-2024.4.--") #outline() #pagebreak() = Eight Great Ideas in Computer Architecture + Design for Moore's Law Every 18-24 months, integrated circuit resources double. + Use Abstraction to Simplify Design Use abstraction to characterize the design at different levels of representation. Like desipline, we don't really see the lower details when doing high level design. However, we need to keep the low level details in mind to accompilsh the best design. + Make the Common Case Fast Like _quick sort_. + Performance via Parallelism + Performance via Pipelining + Performance via Prediction + Hierarchy of Memories Smaller, faster cache to bigger, slower virtual memory. + Dependability via Redundancy = Below the Program #def[Machine Language:] #def[Assembly Language:] #def[High-Level Programming Language:] #def[Compiler:] = Under the Covers: the Hardware _*The five classic components of a computer are, input, output, memory, datapath and control.*_
https://github.com/floriandejonckheere/utu-thesis
https://raw.githubusercontent.com/floriandejonckheere/utu-thesis/master/thesis/figures/07-proposed-solution/louvain-4.typ
typst
#import "@preview/cetz:0.2.2": canvas, draw, angle #v(2em) #let vertex = (name, x, y, stroke: black, fill: white) => { draw.circle((x, y), radius: .35, stroke: stroke, fill: fill, name: name) draw.content((x, y), eval(name, mode: "math")) } #let edge = (start, end, name) => { let (a, b) = (start, end) draw.line((a, .35, b), (b, .35, a), name: name) } #canvas(length: 1cm, { import draw: * set-style(content: (padding: .2), stroke: black) content((1.5, -3.5), "Second iteration") vertex("", -.35, .35) vertex("v_26", 0, 0, fill: green.lighten(80%)) vertex("", 3.35, .35) vertex("v_24", 3, 0, fill: yellow.lighten(80%)) edge("v_26", "v_24", "e_1") }) #v(2em)
https://github.com/akrantz01/resume
https://raw.githubusercontent.com/akrantz01/resume/main/template/awards.typ
typst
MIT License
#import "common.typ": format-date, icon, parse-date, section #let entry( name, organization, on, description: none, url: none, settings: (:), ) = { let link = if url != none { let item = if settings.full-links [ #url.text (#link(url.href)) ] else { link(url.href, url.text) } box( move(dx: 0.5em)[ #icon("website") #item ], ) } set block(above: 0.7em, below: 1em) grid( columns: (90%, 10%), align(left)[ #strong(name), #emph(organization) #link ], align(right, format-date(on)), ) if description != none { pad( left: 1.25em, right: 1.25em, top: -0.5em, box(eval(description, mode: "markup")), ) } } #let awards(title: "Awards", settings: (:), ..entries) = { section(title) entries.pos().map(((id, name, organization, on, ..rest)) => entry( name, organization, on, settings: settings, ..rest, )).join() }
https://github.com/ymgyt/blog
https://raw.githubusercontent.com/ymgyt/blog/main/content/entry/aws_cdk_conference_japan_2022/index.md
markdown
+++
title = "🦈 Impressions of AWS CDK Conference Japan 2022"
slug = "aws_cdk_conference_japn_2022"
date = "2022-05-22"
draft = false
[taxonomies]
tags = ["event"]
+++

{{ figure(images=["images/aws_cdk_conference_japan_2022.png"]) }}

I watched the [recording of AWS CDK Conference Japan 2022](https://www.youtube.com/watch?v=O2JXUyOBjt8), held on 2022/4/9, so here are my impressions.

## [Keynote: What is CDK v2?](https://www.youtube.com/watch?v=O2JXUyOBjt8&t=2693s)

Speaker[^speaker]: 亀田治伸 (Amazon Web Services Japan)

### The main differences between CDK v1 and v2

CDK v1

```json
"dependencies": {
  "@aws-cdk/core": "1.127.0",
  "@aws-cdk/aws-apigateway": "1.127.0",
  "@aws-cdk/aws-autoscaling": "1.127.0",
  "@aws-cdk/aws-dynamodb": "1.127.0",
  "@aws-cdk/aws-cloudwatch": "1.127.0",
  "@aws-cdk/aws-cloudwatch-actions": "1.127.0",
  "@aws-cdk/aws-eks": "1.127.0",
  "@aws-cdk/aws-events": "1.127.0",
  "@aws-cdk/aws-events-targets": "1.127.0",
  "@aws-cdk/aws-ec2": "1.127.0",
  "@aws-cdk/aws-ecs": "1.127.0",
  "@aws-cdk/aws-iot": "1.127.0",
}
```

```js
import { App, Stack } from "@aws-cdk/core";
import * as s3 from "@aws-cdk/aws-s3";
```

* Each AWS service is an independent library.
* Each library has its own dependencies, so there were painful moments when an update to one library contained breaking changes.
* Non-stable (alpha/beta) releases could also be included.

CDK v2

```json
"dependencies": {
  "aws-cdk-lib": "2.0.0",
  "constructs": "^10.0.0",
  "@aws-cdk/aws-iot-alpha": "2.0.0-alpha.0"
}
```

```js
import { App, Stack } from "aws-cdk-lib";
import * as s3 from "aws-cdk-lib/aws-s3";
```

* The stable modules have been consolidated into `aws-cdk-lib`.
* Not every service is supported (App Runner, Kinesis Data Firehose, ...).
* Non-stable modules are published as separate libraries, such as `@aws-cdk/aws-iot-alpha`.

Many of the CDK articles online were written about v1, so I was confused when I first researched CDK. It was hard to tell that `@aws-cdk/xxx` is a v1 lib while `@aws-cdk/xxx-alpha` is a v2 lib.

### The flow up to deployment

```sh
cdk init
cdk synth
cdk diff
cdk deploy
```

### Differences from CloudFormation regarding parameters

* CloudFormation templates can take parameters, which must be entered when a stack is created.
  * A single template can generate multiple stacks.
* CDK processes parameters at template-generation time.
  * 1 stack, 1 template.

In operating CDK, I still don't really have a standard for how far to parameterise Stacks and Apps.

### Construct Levels

On the distinction between Construct L3+, L2, and L1:

* L3+: operates on multiple resources; derived from L2. E.g. ECS with ALB.
* L2: the standard; not every Cfn parameter can be controlled.
* L1: maps 1:1 to Cfn parameters.

Basically (at first), use L2. If the API provided by L2 cannot satisfy your requirements, use L1. Using L1 sacrifices the high level of abstraction that is CDK's advantage, so start with L2. L3 is the unit of reuse for multiple L2 resources.

## [Looking back on three years of CDK](https://www.youtube.com/watch?v=O2JXUyOBjt8&t=5151s)

Speaker: <NAME>

### Why I recommend CDK to beginners

* The docs are friendlier than CloudFormation's.

Personally, when operating in production I want to understand every configurable property of an AWS resource, so my impression is that the CDK docs alone are not the whole story.

* Static type checking works.

With yaml/json you can write disallowed values, non-existent properties, and wrong data types. This is absolutely true; enums are a real blessing. That said, not everything can be checked, so I have still run into several cases where Stack creation fails. When defining App Runner, which has no L2, the L1 construct defines almost everything as strings, so the benefit was limited.

* Editor support.

You can get some assistance via plugins, but the benefits of it being TypeScript are substantial, and the ecosystem (format, lint, etc.) can be used as-is.

### Successes and failures in operation so far

* Managing as many project resources as possible with CDK.

It was said that avoiding management via Excel protected the project and its members, and I strongly related to that.

* Building an environment for skill development.

CDK's ability to be written in multiple languages is an advantage here. I had never actually considered how far to unify the language when operating CDK as a team. Since being able to use the module ecosystem as-is is one of CDK's biggest advantages, I think it's preferable to unify the language as much as possible to make the most of it.

* Not introducing snapshot tests at the start.

A CDK version upgrade changed default values, which had an impact. I suppose this is the downside of defaults being set "nicely" for you. Personally, I'm in the declare-every-property-explicitly camp.

## [Mass-producing deployment targets with CDK and papering over the differences between environments](https://www.youtube.com/watch?v=O2JXUyOBjt8&t=6859s)

Speaker: 稲葉太一 (inbaa)

### Wanting both production and staging

* Values can be passed with `--context`.

cdk.json

```json
{
  "context": {
    "production": {
      "fqdn": "hoge.example.com"
    },
    "staging": {
      "fqdn": "stg.hoge.example.dev"
    }
  }
}
```

```sh
npx cdk deploy --all --context stage=staging
## [Looking back on three years of CDK](https://www.youtube.com/watch?v=O2JXUyOBjt8&t=5151s)

Speaker: <NAME>

### Why I recommend CDK to beginners

* The docs are friendlier than CloudFormation's.

Personally, when running things in production I want to understand every configurable property of an AWS resource, so my impression is that the CDK docs alone are not enough for that.

* You get static type checking.

With yaml/json you can write disallowed values, nonexistent properties, and wrong data types. I could not agree more; enums in particular are a blessing. That said, not everything can be checked, so I have still run into cases where stack creation fails. When defining App Runner, which has no L2 construct, the L1 construct types almost everything as strings, so the benefit was limited.

* You get editor support.

Plugins give you some assistance either way, but the benefits of everything being TypeScript are substantial; you can reuse the ecosystem (formatters, linters, and so on) as-is.

### Successes and failures in operating CDK so far

* Managing as many of the project's resources as possible with CDK.

They described how avoiding management-by-Excel protected both the project and its members, which resonated with me a lot.

* Building an environment where people can grow their skills.

The fact that CDK can be written in multiple languages became an advantage here. I had never thought about how far to standardize on one language within a team running CDK. Since I consider reusing the module ecosystem to be one of CDK's biggest advantages, I suspect it is preferable to unify on a single language as much as possible.

* Not introducing snapshot tests early on.

They were affected when default values changed along with a CDK version upgrade. I suppose this is the downside of CDK filling in sensible defaults for you. Personally, I am in the camp that wants to declare every property explicitly.

## [Mass-producing deployment targets with CDK and somehow bridging the differences between environments](https://www.youtube.com/watch?v=O2JXUyOBjt8&t=6859s)

Speaker: 稲葉太一 (inbaa)

### I want both production and staging

* Values can be passed with `--context`.

cdk.json

```json
{
  "context": {
    "production": {
      "fqdn": "hoge.example.com"
    },
    "staging": {
      "fqdn": "stg.hoge.example.dev"
    }
  }
}
```

```sh
npx cdk deploy --all --context stage=staging
```

```typescript
const stage = app.node.tryGetContext('stage') // staging
const context = app.node.tryGetContext(stage) // {fqdn: "stg.hoge.example.dev"}
```

In this way, environment-specific configuration values can be supplied at execution time. I found this extremely helpful. The talk also showed a `Context` class they defined for handling per-environment differences, which I intend to imitate.

## [How to read CDK's code, and contributing](https://www.youtube.com/watch?v=O2JXUyOBjt8&t=9212s)

Speaker: 山本達也 (Classmethod)

A talk about implementing the L2 construct for aws-iot.

### CDK background knowledge

* CDK generates the other languages and the docs from the TypeScript code using [jsii](https://github.com/aws/jsii).
* L1 constructs are generated from the CloudFormation definitions.

### Inside CDK's code

In outline, L2 is implemented as a wrapper around L1.

### Toward contributing

* Write unit tests
* Write integ tests
* Write the README
* Open a PR
* Get CI to pass

The details are apparently written up on their blog. (I have not been able to find the post.)

### Parallel deploy support in CDK

* `cdk deploy` may gain support for concurrent deployments.

### Importing existing resources, Terraform-style

* `cdk import` has been merged.

## [Deploying and managing a full-stack SaaS application built with Next.js/Stripe using AWS CDK](https://www.youtube.com/watch?v=O2JXUyOBjt8&t=10685s)

Speaker: 岡本秀高 (Stripe)

I have only done the Next.js [tutorial](https://nextjs.org/learn/foundations/about-nextjs) and do not understand it well enough, so I will skip this one. I did sympathize with the point that configuring IAM with Amplify is painful.

## [Things considered while developing Baseline Environment on AWS (BLEA)](https://www.youtube.com/watch?v=O2JXUyOBjt8&t=12400s)

Speaker: 大村幸敬 (AWS)

[BLEA](https://github.com/aws-samples/baseline-environment-on-aws) is apparently pronounced "Blair". According to the [README_ja](https://github.com/aws-samples/baseline-environment-on-aws/blob/main/README_ja.md):

> Baseline Environment on AWS (BLEA) is a set of reference CDK templates for establishing a secure baseline in a standalone AWS account or in a multi-account environment managed with Control Tower.

Looking at [usecases](https://github.com/aws-samples/baseline-environment-on-aws/tree/main/usecases/), sample CDK implementations are provided.

As for the motivating concerns, they emphasize ease of hand-over with long-term operation in mind, and not letting things become a black box. Since I am usually involved in both development and operations, the operator's point of view is much appreciated.

To make the project approachable for people unfamiliar with the TypeScript/JS ecosystem (npm, lint), they currently support VSCode only as the development environment. I plan to write a separate blog post about BLEA.

### I don't want to hold credentials for cdk deploy

Running `cdk deploy` locally requires AWS credentials with deployment permissions on your machine; AWS SSO can be used to avoid this. The setup is:

1. Configure an SSO login profile in `~/.aws/config`
2. Run `aws sso login` to authenticate
3. Run the cdk command

This was the first time I learned about the aws sso command. It seems hugely important well beyond CDK; not having to keep AWS credentials on a development machine is great.

### Switching between parameter sets

How BLEA solves switching between environments such as prod/staging:

1. Keep groups of configuration values in the context of `cdk.json`

```json
{
  "context": {
    "dev": {
      "key1": "value1"
    },
    "prod": {
      "key1": "value1"
    }
  }
}
```

2. Select a group with the `--context` option

`cdk deploy --context environment=prod`

3. Read and use the parameters in CDK code

```typescript
const envKey = app.node.tryGetContext('environment');
const valArray = app.node.tryGetContext(envKey);
const value1 = valArray['key1'];
```

This approach is simple and easy to follow, so I think I will adopt it myself.

#### Injecting parameters: how BLEA thinks about it

* Using CloudFormation parameters is discouraged; use CDK context and environment variables instead.
* Environment variables are used to obtain the account and region specified by the profile.
* If you want to restrict where a deploy can go, set the environment (AWS account id and region) in context (cdk.json).
  * If nothing is specified, the environment variables are used.
  * If the account and region resolved via `--profile` do not match, raise an error.

```json
{
  "context": {
    "stage": {
      "env": {
        "account": "111111111111",
        "region": "ap-northeast-1"
      }
    }
  }
}
```
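As a sketch of that last guard (my own illustration, not BLEA's actual implementation), the check could look roughly like this; `CDK_DEFAULT_ACCOUNT` and `CDK_DEFAULT_REGION` are the environment variables the CDK CLI fills in from the active profile:

```typescript
import { App, Stack } from "aws-cdk-lib";

const app = new App();
const key = app.node.tryGetContext("environment"); // e.g. "stage"
const envSpec = app.node.tryGetContext(key)?.env;

// Fall back to the account/region resolved from the active profile.
const account = envSpec?.account ?? process.env.CDK_DEFAULT_ACCOUNT;
const region = envSpec?.region ?? process.env.CDK_DEFAULT_REGION;

// Fail fast when the pinned target and the active profile disagree.
if (envSpec?.account && envSpec.account !== process.env.CDK_DEFAULT_ACCOUNT) {
  throw new Error(
    `context pins account ${envSpec.account}, ` +
    `but the current profile resolves to ${process.env.CDK_DEFAULT_ACCOUNT}`
  );
}

new Stack(app, "GuardedStack", { env: { account, region } });
```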
### Parameters, odds and ends

* Arguably, code that defines infrastructure should not contain complex conditional branching in the first place.
  * e.g. "don't send notifications for the dev environment"
* Instead of expressing environment differences as parameters, you can also define a Stack per environment.
  * It increases complexity somewhat, but managing the differences via diff is also an option.
* When managing a public repo, how do you handle information you want to `.gitignore` (internal IP addresses and the like)? BLEA faced this.
  * Departing from best practice, they `.gitignore`d `cdk.context.json` and managed it locally.
  * Other options: `${HOME}/.cdk.json`, SSM Parameter Store, S3, ...
  * There is no single right answer yet.
* Whether to use context at all:
  * It is JSON, so there is no type checking as-is.
  * The default CDK library does not provide this, so you have to implement it yourself.

How to manage parameters and environment differences in CDK is something I have been struggling with too. I learned that while a few recommended approaches are becoming established, you still have to pick the shape that fits your own team. (As with everything.)

### How to design the pipeline

* Example CDK Pipeline implementations are published.
* There are so many possible choices that a definitive answer is hard to reach.
  * What tests of a Stack do you run, and where?
  * Who is the owner? Is there an operations team, and how much does the team manage?
  * Where do you run `cdk deploy`?
    * locally, in CI (GitHub Actions), in a pipeline account, in the target account
  * Where do you run CI (`npm run test`)?

I would love to hear how everyone else handles this.

### How to manage multiple CDK projects

* Most CDK tooling assumes a single project directly under the repo root.
* BLEA cuts a usecase directory and manages multiple projects under it.
* BLEA uses npm workspaces for package management.
* There is also the question of how to separate application code from infrastructure code, but that was out of scope this time.

Although it was not covered, how to separate app code from infra code is exactly where I struggled. As I recall, the CDK documentation presents managing app and infra together as a benefit, but for reasons of CI triggers and permission management we ended up splitting repositories.

### Other considerations

* Circular references between SecurityGroups and IAM Policies
* What to test beyond snapshot tests
* Wanting to stop the ALB from managing its SecurityGroup on its own
* Should Route53 and ACM be managed with CDK?
* CloudFront and WAF have to be configured in us-east-1...

All of these are topics I care about; it feels like the journey of operating CDK has only just begun. I want to look at the implementation and learn from it.

## [AWS CDK and AWS SAM are actually good friends! Shall we develop locally together?](https://www.youtube.com/watch?v=O2JXUyOBjt8&t=14895s)

Speaker: 藤原麻希 (ゆめみ)

### SAM

* SAM can read the CloudFormation yaml generated by cdk synth.

I have never used SAM and have no plans to at the moment; I wondered whether it will fall out of use now that CDK exists. Personally, for testing Lambda I prefer to keep the Lambda layer as thin as possible and write the same unit/integration tests as for a normal application.

## [I still want to give physical names to the resources AWS CDK creates: are CDK's best practices really best?](https://www.youtube.com/watch?v=O2JXUyOBjt8&t=16000s)

Speaker: 佐藤智樹 (Classmethod)

### Problem statement: AWS CDK's best practices recommend automatic resource naming, but aren't auto-named resources painful??

I thought exactly this when I first touched CDK! But I assumed that was simply how IaC resource management works. (I lacked the attitude of questioning even best practices...)

A quote from Fundamentals of Software Architecture was introduced:

> An architect has the important responsibility of questioning assumptions and axioms left over from previous eras.

I have not read Fundamentals of Software Architecture yet, but it seems to be a MUST READ, so I want to.

### What are the AWS CDK best practices?

There is an official blog post on the topic: [Best practices for developing cloud applications with AWS CDK](https://aws.amazon.com/blogs/devops/best-practices-for-developing-cloud-applications-with-aws-cdk/).

### "Use automatically generated resource names, not physical names"

* Drawbacks of using physical names (with the speaker's rebuttals marked "->"):
  * You cannot deploy part of the infrastructure multiple times.
    * -> Auto-generated naming depends on the implementation; with several similarly named resources you cannot tell what relates to what, which is arguably more inconvenient.
    * -> Name collisions can be avoided with a naming convention.
    * -> Some resources collide anyway when Construct ids are the same, even across different Stack names.
  * If a change is destructive, re-creation fails.
    * -> Isn't it better to fail and notice the danger?
  * With `RemovalPolicy.RETAIN`, redeploying after stack deletion fails.
  * On redeploy, the previously used resource names do not collide. -> The old resources stay behind, so unless you delete them diligently you accumulate piles of resources and it becomes hard to tell which ones you are looking for and which are safe to delete.
  * Auto-generated resource names contain hash values, so once sub-resources are involved the cognitive load is high. (Investigation is a pain.)

Having multiple resources whose names contain hashes being a heavy cognitive load matches my own experience; I ended up memorizing the first and last few characters of each hash and toughing it out.

### So what should we do about resource names after all?

* Fix the names of resources you check frequently during development, investigation, and operations:
  * compute services such as ECS and Lambda
  * storage such as DynamoDB and S3
  * dashboard names and alert names
* For everything else, avoid fixing names as much as possible (see the sketch below).
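Here is a minimal sketch of that policy (my own illustration, not from the talk): give an explicit physical name only to the resources you look up daily, and let CDK auto-name the rest.

```typescript
import { Stack, StackProps } from "aws-cdk-lib";
import { Construct } from "constructs";
import * as dynamodb from "aws-cdk-lib/aws-dynamodb";
import * as s3 from "aws-cdk-lib/aws-s3";

export class NamingDemoStack extends Stack {
  constructor(scope: Construct, id: string, props?: StackProps) {
    super(scope, id, props);

    // Frequently inspected during operations -> fixed physical name.
    new dynamodb.Table(this, "OrdersTable", {
      tableName: "orders",
      partitionKey: { name: "pk", type: dynamodb.AttributeType.STRING },
    });

    // Rarely looked at directly -> let CDK generate the name.
    new s3.Bucket(this, "AccessLogBucket");
  }
}
```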
## [For CDK fans: CDK for Terraform as an option](https://www.youtube.com/watch?v=O2JXUyOBjt8&t=17464s)

Speaker: 草間一人 @jacopen (HashiCorp)

When I first learned about CDK, I thought Terraform was enough. I studied Go from Mitchell Hashimoto's code, so I have always liked HashiCorp. The reasons I started to think CDK might be good:

* You can write tests.
* HCL increasingly uses loop and if control flow and data structures such as lists and hashes (dicts), and I had mild doubts about that "more than YAML, less than a programming language" level of expressiveness.

### Cloud Development Kit for Terraform

CDKTF for short. It converts CDK output into JSON that Terraform can consume.

```sh
cdktf init --template=typescript --local
cdktf deploy
cdktf destroy
```

So you can write CDK while letting Terraform do the applying. If you omit `--local` from `cdktf init`, it integrates with Terraform Cloud so you no longer need to keep state files locally. Internally, `cdktf.out/stacks/{stack}/cdk.tf.json` is generated, and you can also run terraform directly against that file; the state in Terraform Cloud is updated as well. Terraform Cloud was new to me, and since it also seems capable of CD, I want to try it. It is also impressive that with cdktf you can write GCP resources in exactly the same style.

### HCL or CDK?

* HCL itself has a fair amount of flexibility and expressiveness, so compared with the YAML -> CDK jump, HCL -> CDK may not bring a dramatic benefit.
* It is a hard call whether to componentize as Terraform modules or as Constructs.
* Do you take CDK's unrestricted expressiveness, or write within HCL's moderate constraints and preserve quality?

This depends a lot on your members and your org structure, so there is no single answer, but I am in the "write it in CDK (TypeScript)" camp. If you cannot read TS you cannot read frontend code anyway, so as someone involved in web development I have come to regard TS as required study now that CDK exists. As other speakers also said, CDK code tends to be straightforward resource declarations written top to bottom without fancy type-level tricks, so I see no particular problem there.

There was also an introduction to [how a Mozilla project used it in production](https://www.hashicorp.com/blog/cdk-for-terraform-in-production-learning-from-pocket).

## [I want to build systems the easy way with AWS Solutions Constructs!](https://www.youtube.com/watch?v=O2JXUyOBjt8&t=19302s)

Speaker: 渡邉洋平 (NTTテクノクロス)

### What is AWS Solutions Constructs?

https://docs.aws.amazon.com/solutions/latest/constructs/welcome.html

> AWS Solutions Constructs (Constructs) is an open-source extension of the AWS Cloud Development Kit (CDK) that provides multi-service, well-architected patterns for quickly defining solutions in code to create predictable and repeatable infrastructure.

In other words, these correspond to L3 constructs. (v2 is supported too.) At the time of the talk, 54 constructs were provided, with especially good coverage of common serverless patterns. `@aws-solutions-constructs` is split into per-service modules, v1-style, and its version still differs from the latest `aws-cdk-lib`. Since CDK already abstracts L1 and L2 for us, I learned that you enjoy the benefits most by using the even higher-abstraction L3 libraries.

### Testing

On testing the resources generated with Solutions Constructs: for time reasons, the approach was to check the configuration values in the generated CloudFormation templates rather than use `@aws-cdk/assertions`. Tools used for testing:

* Security Hub standards
  * using FSBP, the most actively maintained standard
* OSS (linters)

## [Migrating legacy templates across all AWS accounts × all CDK apps](https://www.youtube.com/watch?v=O2JXUyOBjt8&t=20789s)

Speaker: 大越雄太 / 小笠原寛明 (justInCase Technologies)

Remarkably, justInCase was already running cdk v0.33 from Kotlin in June 2019. Developing AWS-CDK-Kotlin-DSL in-house just to write CDK in Kotlin is impressive.

The talk covered a case where, during the v1-to-v2 migration, deploys started failing because of the templates involved in cdk bootstrap. I had not understood this area, and since my current team is doing the v1 -> v2 migration, it is very relevant. I am planning to read [CDK BootstrapのModern templateで何が変わるのか](https://dev.classmethod.jp/articles/cdk-bootstrap-modern-template/).

[justInCase's tech blog](https://team-blog-hub.justincase-tech.com/)

## [How I, once anti-CDK, came to think "CDK... it can actually do it"](https://www.youtube.com/watch?v=O2JXUyOBjt8&t=21915s)

Speaker: 岡智也 (Accenture)

The talk opened with how we tend to become negative toward new things as we get older.

> What? CDK? We already have CFn and Terraform

This was exactly my own reaction too. Another negative factor for me was that managing AWS and GCP would end up on different technology stacks.

I did not know [AWS Cookbook](https://www.oreilly.com/library/view/aws-cookbook/9781492092599/), so I definitely want to read it. The authors are apparently AWS insiders!

## [CDK-ing resources on AWS Outposts](https://www.youtube.com/watch?v=O2JXUyOBjt8&t=23293s)

Speaker: 福田優真 (NTT Communications Innovation Center)

### What is AWS Outposts?

AWS Outposts is AWS's hybrid-cloud product. Hybrid cloud:

* A solution for bringing cloud services into on-premises environments
* Azure/GCP/AWS all offer one
  * Azure Stack Hub / HCI
  * GCP Anthos / Distributed Cloud
* You can use cloud services without sending data anywhere public

Use cases:

* Low-latency computing
* Local data processing
* Data residency
* Modernizing on-premises environments

The talk was about managing resources on AWS Outposts with CDK. How to manage AWS together with on-premises environments is something I am interested in, so I want to try this.

### Custom resources

* A mechanism for managing resources that CloudFormation does not support
* The create/update/delete lifecycle is handled by Lambda
* With CDK, you just prepare the Lambda following the provider framework that AWS supplies

For operations Outposts could not handle, they made full use of custom resources.

## Summary

* Glad I learned about BLEA and cdktf
* Everyone seems to share pretty much the same concerns
* Read AWS Cookbook

[^speaker]: Speaker names follow the YouTube video description.
https://github.com/Jollywatt/typst-fletcher
https://raw.githubusercontent.com/Jollywatt/typst-fletcher/master/tests/mark-state/test.typ
typst
MIT License
#set page(width: auto, height: auto, margin: 1em) #import "/src/exports.typ" as fletcher: diagram, node, edge #diagram(edge("<->")) #fletcher.MARKS.update(m => m + ( "<": (inherit: "stealth", rev: true), ">": (inherit: "stealth", rev: false), )) #diagram(edge("<->"))
https://github.com/typst/packages
https://raw.githubusercontent.com/typst/packages/main/packages/preview/unichar/0.1.0/ucd/block-1E5D0.typ
typst
Apache License 2.0
#let data = ( ("OL ONAL LETTER O", "Lo", 0), ("OL ONAL LETTER OM", "Lo", 0), ("OL ONAL LETTER ONG", "Lo", 0), ("OL ONAL LETTER ORR", "Lo", 0), ("OL ONAL LETTER OO", "Lo", 0), ("OL ONAL LETTER OY", "Lo", 0), ("OL ONAL LETTER A", "Lo", 0), ("OL ONAL LETTER AD", "Lo", 0), ("OL ONAL LETTER AB", "Lo", 0), ("OL ONAL LETTER AH", "Lo", 0), ("OL ONAL LETTER AL", "Lo", 0), ("OL ONAL LETTER AW", "Lo", 0), ("OL ONAL LETTER I", "Lo", 0), ("OL ONAL LETTER IT", "Lo", 0), ("OL ONAL LETTER IP", "Lo", 0), ("OL ONAL LETTER ITT", "Lo", 0), ("OL ONAL LETTER ID", "Lo", 0), ("OL ONAL LETTER IN", "Lo", 0), ("OL ONAL LETTER U", "Lo", 0), ("OL ONAL LETTER UK", "Lo", 0), ("OL ONAL LETTER UDD", "Lo", 0), ("OL ONAL LETTER UJ", "Lo", 0), ("OL ONAL LETTER UNY", "Lo", 0), ("OL ONAL LETTER UR", "Lo", 0), ("OL ONAL LETTER E", "Lo", 0), ("OL ONAL LETTER ES", "Lo", 0), ("OL ONAL LETTER EH", "Lo", 0), ("OL ONAL LETTER EC", "Lo", 0), ("OL ONAL LETTER ENN", "Lo", 0), ("OL ONAL LETTER EG", "Lo", 0), ("OL ONAL SIGN MU", "Mn", 230), ("OL ONAL SIGN IKIR", "Mn", 220), ("OL ONAL SIGN HODDOND", "Lo", 0), ("OL ONAL DIGIT ZERO", "Nd", 0), ("OL ONAL DIGIT ONE", "Nd", 0), ("OL ONAL DIGIT TWO", "Nd", 0), ("OL ONAL DIGIT THREE", "Nd", 0), ("OL ONAL DIGIT FOUR", "Nd", 0), ("OL ONAL DIGIT FIVE", "Nd", 0), ("OL ONAL DIGIT SIX", "Nd", 0), ("OL ONAL DIGIT SEVEN", "Nd", 0), ("OL ONAL DIGIT EIGHT", "Nd", 0), ("OL ONAL DIGIT NINE", "Nd", 0), (), (), (), (), ("OL ONAL ABBREVIATION SIGN", "Po", 0), )
https://github.com/kirbyaguilar/resume
https://raw.githubusercontent.com/kirbyaguilar/resume/main/resume.typ
typst
#set page(paper: "a4")
#set text(font: "Garamond")
#set par(justify: true, leading: 0.55em, linebreaks: "optimized")
#set list(indent: 8pt)

// header
#align(center)[
  = <NAME>
  (+63) 916 567 3903 | <EMAIL> | #link("https://www.linkedin.com/in/samuel-kirby-aguilar-0ab0551b4/")[LinkedIn] | #link("https://github.com/kirbyaguilar")[GitHub]
]

== Experience
#line(length: 100%)

// deya
#columns(2)[
  #align(left)[
    *Deya PH* \
    _Senior Software Engineer_
  ]
  #colbreak()
  #align(right)[
    Makati City, Philippines \
    December 2023 - October 2024
  ]
]
- Developed an HRIS and Payroll web app that currently supports millions of pesos monthly alongside company\'s founders
- Set up company\'s deployment infrastructure and CI/CD pipeline from scratch

// baton
#columns(2)[
  #align(left)[
    *Baton* \
    _Implementation Associate_
  ]
  #colbreak()
  #align(right)[
    USA \
    February 2023 - October 2023
  ]
]
- Built automated LinkedIn + email lead generation system and workflow
- Optimized client company workflows through automation, software solutions, and product meetings

// TM
#columns(2)[
  #align(left)[
    *Thinking Machines* \
    _Enterprise Solutions Engineering Intern_
  ]
  #colbreak()
  #align(right)[
    Taguig City, Philippines \
    June 2023 - August 2023
  ]
]
- Worked with project infrastructure and business development initiatives
- Designed POCs for internal GPT solutions

// Technomancer
#columns(2)[
  #align(left)[
    *Technomancer* \
    _Full Stack Developer_
  ]
  #colbreak()
  #align(right)[
    Quezon City, Philippines \
    June 2020 - September 2022
  ]
]
- Spearheaded company\'s framework shift from CI3 to Laravel, including internal training, coding standards, and version control/code review standardization
- Trained company interns and implemented 1-on-1 meetings and guidance programs from scratch
- Developed, maintained and deployed web-based solutions for clients

// Freelancer
#columns(2)[
  #align(left)[
    *Freelancer Ltd* \
    _Software Engineering Intern_
  ]
  #colbreak()
  #align(right)[
    Taguig City, Philippines \
    July 2021 - September 2021
  ]
]
- Wrote and reviewed code for the frontend web app, API, and tests (unit, e2e, A/B)

== Education
#line(length: 100%)
#columns(2)[
  #align(left)[
    *University of the Philippines - Diliman* \
    Degree Program: BS Computer Science
  ]
  #colbreak()
  #align(right)[
    \
    2018 - 2023
  ]
]

== Skills
#line(length: 100%)
*Frontend, CSS and JavaScript:* NextJS/React, Vue.js, TypeScript, Vite, Tailwind CSS, Bootstrap \
*Backend Languages & Frameworks:* PHP Laravel, Ruby on Rails, Python Django, Golang, SQL \
*Infrastructure, Deployment & DevOps:* AWS, CircleCI, Bash, Linux, Docker, Dokku, Automated testing \
*Automation, Operations, Marketing and Low-code:* Airtable, Zapier, LinkedIn Sales Navigator, Apollo, Coda, Notion, Phantombuster, Clay, Shopify, Retool

== Events and Projects
#line(length: 100%)
*Diliman Solutions Challenge 2023 by Google DSC-UPD* \
Grand champion

*DigiReady Philippines: Campus Edition 2023* \
Social Media Director. Worked with ASEAN to create an online campaign and summit-type event
https://github.com/unb3rechenbar/TypstPackages
https://raw.githubusercontent.com/unb3rechenbar/TypstPackages/main/styles/LaTeX/0.1.0/src/lib.typ
typst
#set text( fallback: true, weight: "light", font: "New Computer Modern", lang: "de", size: 11pt, ) #set par( justify: true, linebreaks: "optimized", leading: 0.65em, ) #show heading: set block(above: 1.4em, below: 1em)
https://github.com/sofianedjerbi/Resume
https://raw.githubusercontent.com/sofianedjerbi/Resume/main/modules/skills.typ
typst
Apache License 2.0
#import "../brilliant-CV/template.typ": * #cvSection("Key Skills") #cvSkill( type: [Languages], info: [*French* (Native) #hBar() *English* (C1) #hBar() *German* (Learning)] ) #cvSkill( type: [Cloud Stack], info: [AWS #hBar() EC2 #hBar() S3 #hBar() Lambda #hBar() Glue #hBar() Redshift #hBar() CloudFormation] ) #cvSkill( type: [DevOps Stack], info: [Databricks #hBar() Spark #hBar() Airflow #hBar() Docker #hBar() Jenkins #hBar() Git #hBar() Terraform] ) #cvSkill( type: [Developer Stack], info: [Python #hBar() PySpark #hBar() SQL #hBar() PostgreSQL #hBar() MySQL #hBar() SQLAlchemy #hBar() Pandas #hBar() FastAPI] )
https://github.com/avonmoll/ifacconf-typst
https://raw.githubusercontent.com/avonmoll/ifacconf-typst/main/lib.typ
typst
MIT No Attribution
//============================================================================== // lib.typ 2023-11-17 <NAME> // Template for IFAC meeting papers // // Adapted from ifacconf.cls //============================================================================== // This function gets your whole document as its `body` and formats // it as an article in the style of the IEEE. #let ifacconf( // The paper's title. title: "Paper Title", // An array of authors. For each author you can specify a name, email // (optional), and affiliation. The affiliation must be an integer // corresponding to an entry in the 1-indexed affiliations list (or 0 for no // affiliation). authors: (), // An array of affiliations. For each affiliation you can specify a // department, organization, and address. Everything is optional (i.e., an // affiliation can be an empty array). affiliations: (), // The paper's abstract. Can be omitted if you don't have one. abstract: none, // A list of index terms to display after the abstract. keywords: (), // Sponsor or financial support acknowledgment sponsor: none, // The paper's content. body ) = { // Set document metadata. set document(title: title, author: authors.map(author => author.name)) // Set the body font. set text(font: "New Computer Modern", size: 10pt) // Configure the page. set page( paper: "a4", // The margins depend on the paper size. margin: (x: 1.5cm, y: 2.5cm) ) // Set line spacing set par(leading: 0.4em) // Configure equation numbering and spacing. set math.equation(numbering: "(1)") show math.equation: set block(spacing: 0.65em) // Configure appearance of equation references show ref: it => { if it.element != none and it.element.func() == math.equation { // Override equation references. link( it.element.label, numbering( it.element.numbering, ..counter(math.equation).at(it.element.location()) ) ) } else { // Other references as usual. it } } // Configure lists. set enum(indent: 10pt, body-indent: 9pt) set list(indent: 10pt, body-indent: 9pt) // Configure Figures set figure.caption(separator: ". ") show figure.caption: set align(left) show figure.caption: set par(hanging-indent: 8mm) set figure(numbering: "1", supplement: [Fig.]) // Configure Tables show figure.caption.where(kind: "table"): set align(center) show figure.where(kind: "table"): set figure.caption(position: top) set table.hline(stroke: 0.5pt) // Configure Footnotes set footnote(numbering: "1") set footnote.entry(indent: 0mm, separator: line(length: 60%, stroke: 0.4pt), clearance: 0.35em) // Configure headings. set heading(numbering: "1.1.1") show heading: it => locate(loc => { // Find out the final number of the heading counter. let levels = counter(heading).at(loc) let deepest = if levels != () { levels.last() } else { 1 } set text(10pt, weight: "regular") if it.level == 1 [ // First-level headings are centered caps. // We don't want to number of the acknowledgment section. #let is-ack = it.body in ([Acknowledgments], [Acknowledgements], [REFERENCES]) #set align(center) // #show: upper #v(20pt, weak: true) #if it.numbering != none and not is-ack { numbering("1.", deepest) h(5pt, weak: true) } #upper(it.body) #v(13pt, weak: true) ] else if it.level == 2 [ // Second-level headings left-aligned and italic. #set text(style: "italic") #v(16pt, weak: true) #if it.numbering != none { numbering("1.1", ..levels) h(6pt, weak: true) } #it.body #v(16pt, weak: true) ] else { // Third level headings are run-ins too, but different. 
if it.level == 3 { set text(style: "italic") it.body h(8pt) } } }) let star = [\u{1F7B1}] pad( x: 3.5cm, [ #set align(center) // Display the paper's title. #v(1cm) #let title-font-size = 14pt #if sponsor == none { text(title-font-size, strong(title)) } else { set footnote(numbering: "*") text(14pt, [*#title*#h(-2pt)#text(20pt, super[ #star])]) } #v(2mm) // Display the authors list. #let alist = () #for (i, a) in authors.enumerate() { let mark = text(8pt, [\*] * (a.affiliation)) alist.push(box([#strong(a.name)#h(2pt)#mark])) } #alist.join(h(4pt)) #v(1mm) // Display the affiliations list #for (i, af) in affiliations.enumerate() { let mark = text(8pt, [\*] * (i + 1)) let email-array = () for au in authors { if "affiliation" in au and au.affiliation == i + 1 and "email" in au { email-array.push(au.email) } } let emails = "" if email-array.len() > 0 { emails = "(email: " + email-array.join(", ") + ")" } let affil-array = () if "department" in af { affil-array.push(af.department) } if "organization" in af { affil-array.push(af.organization) } if "address" in af { affil-array.push(af.address) } let affil = affil-array.join(", ") [#mark #emph(affil) #emph(emails)] if i != affiliations.len() - 1 [ \ ] } #v(3mm, weak: false) ], ) // Display abstract and keywords. if abstract != none { grid( columns: (1.7cm, 1fr, 1.7cm), [], [ #set par(justify: true) #line(length: 100%, stroke: 0.4pt) #v(-1.5mm) *Abstract:* #abstract #v(2mm) #if keywords != () [ _Keywords:_ #keywords.join(", ") ] #v(-2.5mm) #line(length: 100%, stroke: 0.4pt) ], [] ) v(0mm, weak: false) } // Start two column mode and configure paragraph properties. show: columns.with(2, gutter: 5mm) // show: columns.with(2, gutter: 3.5mm) set par(justify: true, leading: 0.4em) show par: set block(spacing: 3.5mm) if sponsor != none { scale(x: 0%, y:0%)[#footnote(numbering: (..nums) => super(text(7pt, star)))[#h(5pt)#sponsor]] v(-9mm) counter(footnote).update(0) } // Display the paper's contents. body } #import "@preview/ctheorems:1.1.0": * #let ifacconf-rules(doc) = { show bibliography: set block(spacing: 5pt) show: thmrules doc } #let tablefig = figure.with(supplement: [Table], kind: "table") #let appendix-counter = counter("appendix") #let appendix = it => { appendix-counter.step() heading(numbering: none, supplement: "Appendix")[ A#lower[ppendix] #appendix-counter.display("A.")#h(5pt)#it ] } #let bibliography = bibliography.with(title: "References", style: "CSL/ifac-conference.csl") // Support for numbered Theorems, etc. 
// NOTE: these definitions may be able to be cleaned up and compressed in the future #let theorem = thmenv( "theorem", none, none, (name, number, body, ..args) => { set align(left) set par(justify: true) block(inset: 0mm, radius: 0mm, breakable: false, width: 100%)[_Theorem #number#if name != none [ (#name)]._#h(2pt)#body] }, ).with( supplement: "Theorem", ) #let lemma = thmenv( "lemma", none, none, (name, number, body, ..args) => { set align(left) set par(justify: true) block(inset: 0mm, radius: 0mm, breakable: false, width: 100%)[_Lemma #number#if name != none [ (#name)]._#h(2pt)#body] }, ).with( supplement: "Lemma", ) #let claim = thmenv( "claim", none, none, (name, number, body, ..args) => { set align(left) set par(justify: true) block(inset: 0mm, radius: 0mm, breakable: false, width: 100%)[_Claim #number#if name != none [ (#name)]._#h(2pt)#body] }, ).with( supplement: "Claim", ) #let conjecture = thmenv( "conjecture", none, none, (name, number, body, ..args) => { set align(left) set par(justify: true) block(inset: 0mm, radius: 0mm, breakable: false, width: 100%)[_Conjecture #number#if name != none [ (#name)]._#h(2pt)#body] }, ).with( supplement: "Conjecture", ) #let corollary = thmenv( "corollary", none, none, (name, number, body, ..args) => { set align(left) set par(justify: true) block(inset: 0mm, radius: 0mm, breakable: false, width: 100%)[_Corollary #number#if name != none [ (#name)]._#h(2pt)#body] }, ).with( supplement: "Corollary", ) #let fact = thmenv( "fact", none, none, (name, number, body, ..args) => { set align(left) set par(justify: true) block(inset: 0mm, radius: 0mm, breakable: false, width: 100%)[_Fact #number#if name != none [ (#name)]._#h(2pt)#body] }, ).with( supplement: "Fact", ) #let hypothesis = thmenv( "hypothesis", none, none, (name, number, body, ..args) => { set align(left) set par(justify: true) block(inset: 0mm, radius: 0mm, breakable: false, width: 100%)[_Hypothesis #number#if name != none [ (#name)]._#h(2pt)#body] }, ).with( supplement: "Hypothesis", ) #let proposition = thmenv( "proposition", none, none, (name, number, body, ..args) => { set align(left) set par(justify: true) block(inset: 0mm, radius: 0mm, breakable: false, width: 100%)[_Proposition #number#if name != none [ (#name)]._#h(2pt)#body] }, ).with( supplement: "Proposition", ) #let criterion = thmenv( "criterion", none, none, (name, number, body, ..args) => { set align(left) set par(justify: true) block(inset: 0mm, radius: 0mm, breakable: false, width: 100%)[_Criterion #number#if name != none [ (#name)]._#h(2pt)#body] }, ).with( supplement: "Criterion", ) #let proof = thmbox( "proof", "Proof", inset: 0mm, base: none, bodyfmt: body => [#body #h(1fr) $square$], separator: [.#h(2pt)] ).with(numbering: none) #let footnote = it => footnote[#h(4pt)#it] #let citep(it) = { cite(it, style: "CSL/ifac-conference-citep.csl") }
https://github.com/RaphGL/ElectronicsFromBasics
https://raw.githubusercontent.com/RaphGL/ElectronicsFromBasics/main/DC/chap3/8_safe_circuit_design.typ
typst
Other
#import "../../core/core.typ" === Safe circuit design As we saw earlier, a power system with no secure connection to earth ground is unpredictable from a safety perspective: there\'s no way to guarantee how much or how little voltage will exist between any point in the circuit and earth ground. By grounding one side of the power system\'s voltage source, at least one point in the circuit can be assured to be electrically common with the earth and therefore present no shock hazard. In a simple two-wire electrical power system, the conductor connected to ground is called the #emph[neutral], and the other conductor is called the #emph[hot], also known as the #emph[live] or the #emph[active]: #image("static/00070.png") As far as the voltage source and load are concerned, grounding makes no difference at all. It exists purely for the sake of personnel safety, by guaranteeing that at least one point in the circuit will be safe to touch (zero voltage to ground). The \"Hot\" side of the circuit, named for its potential for shock hazard, will be dangerous to touch unless voltage is secured by proper disconnection from the source (ideally, using a systematic lock-out/tag-out procedure). This imbalance of hazard between the two conductors in a simple power circuit is important to understand. The following series of illustrations are based on common household wiring systems (using DC voltage sources rather than AC for simplicity). If we take a look at a simple, household electrical appliance such as a toaster with a conductive metal case, we can see that there should be no shock hazard when it is operating properly. The wires conducting power to the toaster\'s heating element are insulated from touching the metal case (and each other) by rubber or plastic. #image("static/00071.png") However, if one of the wires inside the toaster were to accidently come in contact with the metal case, the case will be made electrically common to the wire, and touching the case will be just as hazardous as touching the wire bare. Whether or not this presents a shock hazard depends on #emph[which] wire accidentally touches: #image("static/00072.png") If the \"hot\" wire contacts the case, it places the user of the toaster in danger. On the other hand, if the neutral wire contacts the case, there is no danger of shock: #image("static/00073.png") To help ensure that the former failure is less likely than the latter, engineers try to design appliances in such a way as to minimize hot conductor contact with the case. Ideally, of course, you don\'t want either wire accidently coming in contact with the conductive case of the appliance, but there are usually ways to design the layout of the parts to make accidental contact less likely for one wire than for the other. However, this preventative measure is effective only if power plug polarity can be guaranteed. If the plug can be reversed, then the conductor more likely to contact the case might very well be the \"hot\" one: #image("static/00074.png") Appliances designed this way usually come with \"polarized\" plugs, one prong of the plug being slightly narrower than the other. Power receptacles are also designed like this, one slot being narrower than the other. Consequently, the plug cannot be inserted \"backwards,\" and conductor identity inside the appliance can be guaranteed. Remember that this has no effect whatsoever on the basic function of the appliance: its strictly for the sake of user safety. 
Some engineers address the safety issue simply by making the outside case of the appliance nonconductive. Such appliances are called #emph[double-insulated], since the insulating case serves as a second layer of insulation above and beyond that of the conductors themselves. If a wire inside the appliance accidentally comes in contact with the case, there is no danger presented to the user of the appliance.

Other engineers tackle the problem of safety by maintaining a conductive case, but using a third conductor to firmly connect that case to ground:

#image("static/00075.png")

The third prong on the power cord provides a direct electrical connection from the appliance case to earth ground, making the two points electrically common with each other. If they\'re electrically common, then there cannot be any voltage dropped between them. At least, that\'s how it is supposed to work. If the hot conductor accidentally touches the metal appliance case, it will create a direct short-circuit back to the voltage source through the ground wire, tripping any overcurrent protection devices. The user of the appliance will remain safe.

This is why it\'s so important never to cut the third prong off a power plug when trying to fit it into a two-prong receptacle. If this is done, there will be no grounding of the appliance case to keep the user(s) safe. The appliance will still function properly, but if there is an internal fault bringing the hot wire in contact with the case, the results can be deadly. If a two-prong receptacle #emph[must] be used, a two- to three-prong receptacle adapter can be installed with a grounding wire attached to the receptacle\'s grounded cover screw. This will maintain the safety of the grounded appliance while plugged in to this type of receptacle.

Electrically safe engineering doesn\'t necessarily end at the load, however. A final safeguard against electrical shock can be arranged on the power supply side of the circuit rather than the appliance itself. This safeguard is called #emph[ground-fault detection], and it works like this:

#image("static/00076.png")

In a properly functioning appliance (shown above), the current measured through the hot conductor should be exactly equal to the current through the neutral conductor, because there\'s only one path for electrons to flow in the circuit. With no fault inside the appliance, there is no connection between circuit conductors and the person touching the case, and therefore no shock.

If, however, the hot wire accidentally contacts the metal case, there will be current through the person touching the case. The presence of a shock current will be manifested as a #emph[difference] of current between the two power conductors at the receptacle:

#image("static/00077.png")

This difference in current between the \"hot\" and \"neutral\" conductors will only exist if there is current through the ground connection, meaning that there is a fault in the system. Therefore, such a current difference can be used as a way to #emph[detect] a fault condition. If a device is set up to measure this difference of current between the two power conductors, a detection of current imbalance can be used to trigger the opening of a disconnect switch, thus cutting power off and preventing serious shock:

#image("static/00078.png")

Such devices are called #emph[Ground Fault Current Interruptors], or GFCIs for short.
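To put an illustrative number on it: if the hot conductor carries 5.00 amps of current while the neutral conductor returns only 4.97 amps, the missing 0.03 amps (30 milliamps) must be returning to the source through some other path, quite possibly through a person touching the case, and it is precisely this difference that the device senses. The figures here are made up for illustration; real GFCIs designed for personnel protection trip at imbalances of only a few milliamps.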
Outside North America, the GFCI is variously known as a safety switch, a residual current device (RCD), an RCBO or RCD/MCB if combined with a miniature circuit breaker, or earth leakage circuit breaker (ELCB). They are compact enough to be built into a power receptacle. These receptacles are easily identified by their distinctive \"Test\" and \"Reset\" buttons. The big advantage with using this approach to ensure safety is that it works regardless of the appliance\'s design. Of course, using a double-insulated or grounded appliance in addition to a GFCI receptacle would be better yet, but it\'s comforting to know that something can be done to improve safety above and beyond the design and condition of the appliance.

The #emph[arc fault circuit interrupter (AFCI)], a circuit breaker designed to prevent fires, is designed to open on intermittent resistive short circuits, similar to arc-welding. For example, a normal 15 A breaker is designed to open circuit quickly if loaded well beyond the 15 A rating, more slowly a little beyond the rating. While this protects against direct shorts and several seconds of overload, respectively, it does not protect against arcs. An arc is a highly variable load, repetitively peaking at over 70 A, open circuiting with alternating current zero-crossings. Though the average current is not enough to trip a standard breaker, it is enough to start a fire. This arc could be created by a metallic short circuit which burns the metal open, leaving a resistive sputtering plasma of ionized gases.

The AFCI contains electronic circuitry to sense this intermittent resistive short circuit. It protects against both hot to neutral and hot to ground arcs. The AFCI does not protect against personal shock hazards like a GFCI does. Thus, GFCIs still need to be installed in kitchen, bath, and outdoor circuits. Since the AFCI often trips upon starting large motors, and more generally on brushed motors, its installation is limited to bedroom circuits by the U.S. National Electrical Code. Use of the AFCI should reduce the number of electrical fires. However, nuisance trips when running appliances with motors on AFCI circuits are a problem.

#core.review[
  - Power systems often have one side of the voltage supply connected to earth ground to ensure safety at that point.
  - The \"grounded\" conductor in a power system is called the #emph[neutral] conductor, while the ungrounded conductor is called the #emph[hot].
  - Grounding in power systems exists for the sake of personnel safety, not the operation of the load(s).
  - Electrical safety of an appliance or other load can be improved by good engineering: polarized plugs, double insulation, and three-prong \"grounding\" plugs are all ways that safety can be maximized on the load side.
  - #emph[Ground Fault Current Interruptors] (GFCIs) work by sensing a difference in current between the two conductors supplying power to the load. There should be no difference in current at all. Any difference means that current must be entering or exiting the load by some means other than the two main conductors, which is not good. A significant current difference will automatically open a disconnecting switch mechanism, cutting power off completely.
]
https://github.com/Woodman3/modern-ysu-thesis
https://raw.githubusercontent.com/Woodman3/modern-ysu-thesis/main/layouts/preface.typ
typst
MIT License
#import "../utils/custom-heading.typ": heading-display, active-heading, current-heading,header-display // 前言,重置页面计数器 #let preface( // documentclass 传入的参数 twoside: false, // 其他参数 spec: (front: "- I -", inner: "- 1 -", back: "I"), ..args, it, ) = { // 分页 if (twoside) { pagebreak() + " " } set page(numbering: "I", footer:[ #set align(center) #counter(page).display("- I -") ], // 从mainmatter那里复制过来的,区别是奇偶页的标题不同 header:{ locate(loc => { let cur-heading = current-heading(level: 1, loc) let first-level-heading = if calc.rem(loc.page(), 2)==1 { heading-display(active-heading(level: 1,prev:false, loc)) } else { "燕山大学本科生毕业设计(论文)" } header-display(first-level-heading) v(0em) }) }) counter(page).update(1) it }
https://github.com/polarkac/MTG-Stories
https://raw.githubusercontent.com/polarkac/MTG-Stories/master/stories/007%20-%20Theros/008_The%20Sea%20God's%20Labyrinth%2C%20Part%201.typ
typst
#import "@local/mtgstory:0.2.0": conf #show: doc => conf( "The Sea God's Labyrinth, Part 1", set_name: "Theros", story_date: datetime(day: 13, month: 11, year: 2013), author: "<NAME>", doc ) #emph[Diary of Solon—Scholar, Athlete, and Champion of Oxus] Ihave travelled seven days south of my home, a small city on the outskirts of Meletis, at first following the coastline of the Siren Sea, but gradually veering inland for the past two days. I left the last remains of civilization yesterday, when the road ended at the small village of Phaela. I traded four pieces of copper for a bag of dried meat and two loaves of dense black bread. From there, I walked a footpath southwest into the hills. As the sun set, I crested a grassy hill and came upon the great labyrinth. #figure(image("008_The Sea God's Labyrinth, Part 1/01.jpg", width: 100%), caption: [Plains | Art by Steven Belledin], supplement: none, numbering: none) The maze stands arrayed before me, completely filling the dell to the south, and stretching across the plains as far as my eyes can make out. An ancient road leads to the entrance of the labyrinth, laid unerringly straight, stretching to the west. The entrance itself is a stone arch of fine work, well maintained, but by whom, I could not say. The roadway is broken in places, with grasses growing up between the stones. A high hedge stretches both north and south of the stone entranceway. As it approaches the base of the hill it turns to the east and follows the line of hills to the horizon. It is my duty, as the chosen champion of Oxus, to venture inside, find the wondrous treasure hidden within, and return, victorious. The sages say that Thassa's Dekella lies at the center of the labyrinth. I intend to find out. I have trained seven years for this journey—as a scholar and an athlete. I am prepared for the task ahead, armed with knowledge, training, and—most importantly—faith in the gods. My pack holds a parchment map of the maze—at least as far as anyone from my polis has ever dared to venture and make it back out alive. I have made my camp beneath a tree halfway down the hillside, out of the wind. In the morning, I shall enter the labyrinth and begin the ultimate test of my life's work. Below is an inventory of my belongings: - Leather pack - Flint - Small knife - Ox wool blanket - Beeswax candle - Parchment scroll with map of the known maze - Leather-bound book of parchments for my diary - Oiled sealskin pouch to keep my parchments dry - Quill - Two glass vials of blue ink - One glass vial of silver dye for marking my path - A pearl, to offer up to Thassa in an hour of need - Small hammer - Two skins of water - Long knife in a leather scabbard - Yew recurve bow - One score of arrows in a woven grass quiver - Long walking pole - Sack of dried meat - Sack of dried fruit - Wheel of waxed cheese - Two loaves of dense black bread I wear a heavy cloth skirt, tunic and leather sandals. Over my skirt and tunic I wear light armor of leather with bronze buckles. With these provisions, I can survive, fight, record my journey, and—gods willing—return to my polis with invaluable treasure and a tale for the ages. === Day One I entered the labyrinth today, shortly after dawn. The sun was out, making my travels not unpleasant. I followed my map the entire day. It has proven to be accurate so far. The labyrinth is composed of thick hedges, as high as two tall men. The path between them is wide enough to walk three abreast. The ground is mostly green grass. 
Since I have encountered no one all day, I can only assume it is maintained by the gods themselves, or some sort of magic. I passed signs of explorers who came before me—a pile of stones to mark the way, the ashes of an old fire, silver dye on the corner of a hedge, or lengths of string denoting a path. The sun is setting. Already the entire maze is cast in shadow and soon it will be too dark to continue writing. I will eat my supper and make my bed. There is no shelter in the maze, nor any place to hide. I will curl up as far as I can manage beneath the hedge and pray I remain alone all night. === Day Two Last night passed fitfully, as if I slept not at all. Thank Thassa for the dawn. Lying exposed on the ground a full day into the maze turned sleep into nothing but fear. The merest fluttering of an owl overhead or rustling of hedge in the breeze caused instant panic and wakefulness. I held my long knife in my hand all night and tried to breathe silently. Now I must continue on. It is still mostly dark inside the canyon walls of the labyrinth, but sleep is useless. #v(0.35em) #line(length: 100%, stroke: rgb(90%, 90%, 90%)) #v(0.35em) I have followed my map the entire day. It led me past a stream of sweet water that cut across the path. I drank with abandon and refilled my water skins. I startled a hare as I turned a corner and thought to have it for my dinner, but it squeezed under the hedge and was gone before I could draw my bow. So tonight, again, I eat bread, cheese, and a bit of dried meat and fruit. I will be more alert in the future. Supplementing my rations could be the difference between life and death. Once I reach the end of my map, the going will be much slower. #figure(image("008_The Sea God's Labyrinth, Part 1/04.jpg", width: 100%), caption: [Surveyor’s Scope | Art by Daniel Ljunggren], supplement: none, numbering: none) Once again I face the terror of trying to sleep, exposed on the ground. This night, I have found a dead end off of a side path where I will camp. I am not sure if this is a good strategy. I may be less likely to be found by anything nefarious, but if I am, I will have nowhere to run. === Day Three The labyrinth is becoming rougher, less manicured. As the day wore on, the grass grew higher and the hedge wilder. Some paths were almost blocked by tangled branches growing outwards, while I waded through grasses that grew as high as my waist in places. Several times, I noticed flattened patches of grass where an animal, or person, had made a nest for the night, although I saw no signs of what the creatures may have been. My most interesting discovery of the day was the body of Praxitelius, a former champion of Oxus. When he did not return to the polis two years ago, we presumed him dead, and now I can verify that fact. I found his remains leaning against the hedge. The grass was grown so high I almost walked right past him. I could not determine the cause of death from his bones, although he was still fully armored in rotting leather. He had nothing of use left on his person except an intriguing dagger. When I pulled it from its decaying scabbard, it still shone bright as new. The blade is etched with faint patterns and I am certain it is blessed by a god. I have almost reached the end of my map. Tomorrow I walk into unknown territory, where my real trial will begin. Tonight, I hope to have restful sleep, hidden in the deep grass. === Day Four Disaster. 
I ventured beyond my map by late morning so I began marking my turns with silver dye on the corners of the hedges, always turning in the same direction, as I have been trained. I attempted to climb the hedge in order to gain a vantage point. Unfortunately, the branches of the hedges are not strong enough to hold me up, although they are too dense and tangled to penetrate. As I stopped for a rest and my lunch, I heard a shuffling sound in the hedge and then a deep growl, as if from a predator. I quickly gathered my belongings and snuck away as silently as I was able. But the creature stalked me for the next several hours. Always I could hear its snuffling and growling, sometimes from the other side of the hedge and sometimes from somewhere behind me. Eventually it caught up to me and I heard it begin earnest pursuit. Although I never saw the beast, I knew that I was unlikely to outrun it in a footrace. Several times I shot an arrow at it, blindly, through the corner of the maze. In a panic, I ran through turn after turn, all the while hearing the beast in close pursuit. Wild branches of the hedge whipped at my face and body. Suddenly I found myself confronted with a dead end. As I was about to turn and fight, fearing my journey had been cut short, I noticed that part of the hedge had collapsed and I caught a glimpse of daylight from the other side. I scrabbled through the collapsed hedge, worming my way to the other side, branches and thorns tearing at my clothing and skin. My backpack caught on a branch and nearly kept me from exiting the far side, but with all my strength I pushed through and ran. After many more turns of the maze I stopped to listen for the beast. All I could hear for a time was my own heavy breathing, but eventually... nothing. I had lost the creature, whatever it was. Perhaps it was too large to fit through the collapsed hedge. #figure(image("008_The Sea God's Labyrinth, Part 1/05.jpg", width: 100%), caption: [Nylea's Emissary | Art by <NAME>ley], supplement: none, numbering: none) After calming down I took stock of myself and that's when I discovered that I had not only lost the beast, I had also lost my quiver of arrows and bags of dried fruit and meat. I am afraid to go back to try and find them. Not only am I thoroughly lost, but the beast is still out there. Any backtracking will only bring me closer to it. I am tired and sore and scraped, but not injured, although none of that will matter if I starve to death, lost in the labyrinth. I would go on lamenting, but I must save some ink and parchment for later. === Day Six Wandering lost for two days. The sun has been beating and I am nearly out of water. Am down to half a wheel of cheese, one loaf of bread. Trying to keep to the shady side of the maze. Exposed skin is burned. Soaked in sweat. Roasting under armor. This section of labyrinth is a mixed hedge of tall, thick, leafy bushes, and thorny brambles. Varied height, but all overhead. I have walked for half a day, turning this way and that, without encountering a single outlet or side passage. Feeling exposed and claustrophobic. Worse, at the end of this horrendous path, with no way out but half a day's walk back, sits a door, mounted in stone. The hedge grows right up to the doorway, with no way around. Inside the door, a stone staircase leads down, into the dark. As horrific as the idea of taking that staircase is, the idea of trudging back the way I have come, in the blazing heat, risking death by exposure, actually seems more horrific to me right now. 
After a much-needed sleep, I am going to light my candle and brave the dark. I hope the citizens of my polis are praying for me, and Thassa will show me mercy, or all is lost. === Day Seven or Eight Sunlight! Praise the gods. I'm still alive. Sweet fresh air, sweet smell of green life. My hand shakes at the thought of recounting the horrors of that underground maze. The smell was death itself. The masonry walls dripped with slimy water. Cobwebs, and worse, covered my hands and face, dripped down my back, entered my nose and mouth. Skeletal corpses of men, beasts, and strange creatures littered the horrific tomb. Rats, centipedes, worms and every kind of creeping thing covered the floors, skittering and oozing over my feet. How I wished I had high boots instead of these sandals down there in the dark. The only light, the fragile, tiny flame of my candle, hoarded like the most precious jewel. Every minute of light like a clock counting down to my doom as the candle burned lower and lower. And when I blew it out to rest, even worse, as all the foul creatures of darkness brushed against me, and crawled all over me in the infinite blackness. I could do naught but wave my long knife in front of my body hoping to keep them at bay. #figure(image("008_The Sea God's Labyrinth, Part 1/06.jpg", width: 100%), caption: [Lost in a Labyrinth | Art by <NAME>], supplement: none, numbering: none) At times I prayed for the gods to take me. I even considered doing the deed myself. Yet I pressed on and eventually came to another stone staircase leading up and, finally, outside. I was afraid I had become turned around in the dark and had exited the same way I entered, but once outside, I knew that was not true. I am unsure how many days I was underground, but judging by the amount I ate and drank it was one or two. Here the maze looks different again. Vines wind and twist around more substantial branches and trunks of hedge. Trees grow through the walls in places. The path ahead of me is completely overhung with willows, shading out the sun. The ground is soft and more muddy than grassy. Hopefully this is a sign of water, as I grow lightheaded from thirst. No water, no arrows, little food, sleep deprived, and weary, I am still doomed. But at least I will die above ground, and not in that dark hell beneath my feet. === Day Nine The gods have sent a storm. Never have I seen such a tempest. Hail pelts me so hard I am afraid I will break a bone. I have taken what passes for shelter in this endless maze and covered my head with my wool blanket to soften the blows of ice being thrown from the sky, as well as provide a dry lap in which to write. Lightning forks everywhere, followed by cracks of thunder so powerful they shake the water off the trees. Water runs down the paths of the maze in rivers. My armor has soaked up so much moisture I can barely wear it. I am shivering. My stomach is so empty it is eating itself. At least, finally, I am able to quench my thirst. But I am not afraid. I am elated. With each rip of lightning I can actually see the gods themselves in the black sky, wrestling among the clouds. They are fighting over me! They know that I am on the right path. Erebos and Nylea contrive to end my journey here, while Thassa herself defends me. When this storm ends, I will continue forward, on the path that I now know brings me closer to the center of the labyrinth.
https://github.com/LugsoIn2/typst-htwg-thesis-template
https://raw.githubusercontent.com/LugsoIn2/typst-htwg-thesis-template/main/chapters/ipsum.typ
typst
MIT License
#import "../lib/utils.typ": todo #import "@preview/glossarium:0.2.6": gls, glspl = Chapter 2 Ipsum <chap2> #lorem(150) == Ipsum #lorem(100) === Nonumy #lorem(280) === Dolor #lorem(100) == #lorem(3) #lorem(150)
https://github.com/Myriad-Dreamin/typst.ts
https://raw.githubusercontent.com/Myriad-Dreamin/typst.ts/main/fuzzers/corpora/meta/counter_02.typ
typst
Apache License 2.0
#import "/contrib/templates/std-tests/preset.typ": * #show: test-page // Count headings. #set heading(numbering: "1.a.") #show heading: set text(10pt) #counter(heading).step() = Alpha In #counter(heading).display() == Beta #set heading(numbering: none) = Gamma #heading(numbering: "I.")[Delta] At Beta, it was #locate(loc => { let it = query(heading, loc).find(it => it.body == [Beta]) numbering(it.numbering, ..counter(heading).at(it.location())) })
https://github.com/WannesMalfait/vub-huisstijl-typst
https://raw.githubusercontent.com/WannesMalfait/vub-huisstijl-typst/main/README.md
markdown
MIT License
# VUB Typst huisstijl An unofficial template to get the look of the [Vrije Universiteit Brussel (VUB)](https://www.vub.be) huisstijl in Typst based on [this LaTeX template](https://gitlab.com/rubdos/texlive-vub) ## Getting Started You can choose "Start from template" in the web app, and search for `vub-huisstijl`. If you are running Typst locally, you can use the following command to initialize the template: ```shell typst init @preview/stv-vub-huisstijl:0.1.0 ``` ### Fonts The package makes use of the "TeX Gyre Adventor" font, with "Roboto" as a fallback. These should be installed for the title page to look right. They are available for free, and also come bundled with texlive. ## Note This only provides a template for a thesis title page, not for slides. That can be added in the future. ## About the name St V ([Saint Verhaegen](https://en.wikipedia.org/wiki/Saint_Verhaegen)) is an important part of the folklore of the VUB and the ULB.
https://github.com/npikall/vienna-tech
https://raw.githubusercontent.com/npikall/vienna-tech/main/template/sections.typ
typst
The Unlicense
#import "@preview/vienna-tech:0.1.1": * = Bachelorarbeit Nach der Einleitung kommt der weitere Text: #lorem(22) == Textbereich Der Text sollte die ganze Breite ausfüllen, also im Blocksatz stehen. Die Silbentrennung soll aktiviert sein. Der erste Absatz ist nicht eingerückt, die folgenden dann schon. Typst generiert dieses Einrücken automatisch. #lorem(25) == Layout, Schriftart, Schriftgrad und Nummerierung Für diese Arbeit wird das Template `vienna-tech` verwendet, die auf den Stil der LaTeX Klasse `BachelorBUI.cls` aufbaut, welche auf der TU Website zu finden ist #link("https://www.tuwien.at/cee/edvlabor/lehre/vorlagen"). Der Haupttext ist in 11-Punkt-Schrift und einzeiligem Abstand geschrieben. Die anderen Schriftgrade sind in der Vorlage definiert. Die Formatierung Kursiv kann für Hervorhebungen verwendet werden. Von der Formatierung Fett und Unterstrichen wird jedoch abgeraten. Es sollten in keinem Fall mehr als drei Überschriftsebenen verwendet werden. #figure( table( columns: 4, align: (left, left, left, left), table.hline(), table.header([Überschriftenebene], [Beispiel], [LaTeX - Befehle], [Typst - Befehle]), table.hline(), [Titel (zentriert)], [Instruktionen], "\ title", [], [Überschrift 1], [1 Einleitung], "\section", "= ", [Überschrift 2], [2.1 Textbereich], "\subsection", "== ", [Überschrift 3], [2.1.1 Unterlagen], "\subsubsection", "===", table.hline(start: 0), ), caption: [Die Tabellenbeschriftung ist normalerweise über der Tabelle. Tabellen sollen durchgehend nummeriert sein. Der letzte Satz der Tabellenbeschriftung endet ohne Punkt] )<tab:überschriften> Die Tabelle sowie deren Beschriftung werden innerhalb der `#figure` Umgebung erstellt. Die Beschriftung der Tabelle wird als Argument `caption` an die `#figure` Funktion übergeben. Das Erstellen der Tabelle erfolgt innerhalb der Funktion `#table`. == Abbildungen und Fotos Abbildungen sollten digital erstellt sein (keine Handzeichnungen). Diese müssen dann in der Bachelorarbeit integriert sein. Diagramme sollen gut lesbar erstellt werden. Der Schriftgrad innerhalb der Abbildungen soll 10-Punkt sein. Abbildungen sollen durchgehend nummeriert sein. Die Abbildungsbeschriftung ist immer unterhalb der Abbildung. Der letzte Satz der Abbildungsbeschriftung endet ohne Punkt. Die Abbildungsbeschriftung soll 10-Punkt-Schrift besitzen. Ein Beispiel sehen Sie in @fig:winston. #figure( image("assets/tuw.jpg", width: 40%) , caption: [Dies ist ein Beispiel für eine Abbildungsbeschriftung] )<fig:winston> Das Bild sowie dessen Beschriftung werden innerhalb der Funktion `#figure` geschrieben. Das Einfügen des Bildes erfolgt durch den Befehl `#image`. Die Beschriftung der Abbildung wird als Argument `caption` an die `#figure` Funktion übergeben, diese ist nun nach bzw. unterhalb der Abbildung positioniert. == Formeln Gleichungen und Formeln sollen generell zentriert in einer eigenen Zeile platziert werden. Die Gleichungen sollen durchnummeriert werden, wobei die Gleichungsnummer in Klammer zu stehen hat. Formeln werden einfach in die zwischen \$ Zeichen gesetzt. Dabei wird die Nummerierung automatisch übernommen. $ K_t = (1 - (R^2 dot tau)/(c_a + nu dot tan delta))^4 dot k_1 $ == Fußnoten Die Fußnotenziffer ist entweder direkt nach dem zu beschreibenden Wort oder nach einem Satzzeichen angeordnet. Fußnoten werden in den Befehl `#footnote` geschrieben. 
#footnote([This is a footnote])

== References and Bibliography
The list of references is titled „Literatur" and is placed at the end of the thesis without its own page break (but still before any appendix). Its heading has no heading number. When this template is used, Typst creates the bibliography automatically, provided a _BibTeX_ file was specified in the configuration function. The bibliography is then inserted automatically at the end (but before the appendix). The references should be listed with numbers; the number itself is placed inside square brackets, e.g. @dirac @einstein @knuthwebsite or @dirac[p.~27].

//#pagebreak()
The citation can also be placed after the quotation or claim, as follows: (name of the first author et al. (year)), e.g. (Alberty et al. (1999)). In that case a suitable bibliography style must be specified. Typst offers a few options here: the `form` argument can be passed to the `#cite` command, and the style can be changed individually via the `style` argument.\
#for elem in ("normal", "prose", "author", "year"){
[#h(1cm)"#elem" $-->$ #h(1em) #cite(<dirac>, form: elem, style: "copernicus")\
]}
In this example the citation style `copernicus` was used, but many others can be used as well.

== Header
The bachelor's thesis is formatted for double-sided printing. The header therefore carries the page number on the outside (left on even pages, right on odd pages) and the thesis title on the inside (right on even pages, left on odd pages). The configuration handles this automatically.

== Displaying Code
The `#codecell` function is well suited for displaying code in the thesis.

#codecell(```python
# Plotting a sine curve
import numpy as np
import matplotlib.pyplot as plt

# define x and calculate y values
x = np.linspace(0,10)
y = np.sin(x)

# Create plot
fig, ax = plt.subplots()
ax.plot(x,y)
plt.show()
```)
https://github.com/ludovic-esperce/afpa-typst-template
https://raw.githubusercontent.com/ludovic-esperce/afpa-typst-template/main/README.md
markdown
# Introduction

[Typst](https://typst.app/) template for generating internal [Afpa](https://www.afpa.fr/) documents.

To learn the basics of writing a document, you can refer to these tutorials:

- [official tutorial](https://typst.app/docs/reference/syntax/)
- [typst by example](https://sitandr.github.io/typst-examples-book/book/basics/tutorial/markup.html)

# Using the template

## Installing typst

### Windows

Via Winget:

```bash
winget install --id Typst.Typst
```

### Linux

You can check the following site to see whether typst is available for your distribution: [repology.org](https://repology.org/project/typst/versions).

### MacOS

Via Brew:

```bash
brew install typst
```

## Adding the "package" to your system

This template is available as a ["typst package"](https://github.com/typst/packages).

To make the package usable on your computer, one option is to clone this repository into the following folder (on Windows):

```bash
%AppData%\Local\typst\packages\local\afpa-template\0.1.0
```

where `%AppData%` is located inside your user folder.

## Creating a new document

Once the package is in the folder mentioned above, you can use the following command to create a new document:

```bash
typst init @local/afpa-document:0.1.0 <document-name>
```

This command will create a new folder with the document's name.
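The generated main file presumably imports the local package. Note that the README clones the repository under `afpa-template` while the init command references `afpa-document`, so the exact package name may differ; the line below follows the init command and is an assumption:

```typ
// Assumed entry point of the generated document; the package name
// follows the `typst init` command above and may differ in practice
#import "@local/afpa-document:0.1.0": *
```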
https://github.com/Mufanc/hnuslides-typst
https://raw.githubusercontent.com/Mufanc/hnuslides-typst/master/templates/catalog.typ
typst
#import "/configs.typ" #import "/utils/colors.typ" #import "/utils/fonts.typ": font #import "/utils/background.typ": bgimage #let catalog() = context { bgimage("/assets/images/bg-catalog-2.png") let headers = query(<title>) let count = counter("catalog") count.update(1) let items = headers.map(hdr => context { count.step() let index = count.display("壹") // Typst 似乎尚不支持透明渐变,暂时使用与背景接近的颜色 dirty fix // 待 https://github.com/typst/typst/issues/2546 解决后,还需要将括号透明度改为 50% let paren-color = color.mix( colors.primary, colors.background, space: rgb // 在 rgb 空间混合 primary-color 与背景色来模拟透明度 ) let steps = 5 let border-color = gradient.linear(paren-color, ..(colors.background,) * (steps - 2), paren-color).sharp(steps) let number = pad(top: -0.2em, text(font: font.HYZK, fill: colors.primary, index)) box( width: 2em, height: 2em, baseline: (100% - measure(number).height) / 2, stroke: 1pt + border-color, radius: 0.5em, align(center + horizon, number) ) h(0.8em) text(font: font.FZKT, hdr.body) }) set text(size: 1.5em) show table.cell: cell => box(width: 100%, height: 100%, align(left + horizon, cell)) pad(left: 10%, box( width: 80%, height: 60%, table(columns: (1fr, 1fr), rows: 1fr, stroke: none, ..items) )) pagebreak() }
https://github.com/dyc3/senior-design
https://raw.githubusercontent.com/dyc3/senior-design/main/visualization-requirements.typ
typst
#import "lib/requirements.typ": * = Visualization The load balancer will be showcased at the innovation exposition in April. To help communicate functionality, a graph visualizer will be developed to accompany the load balancer. The visualization will be a single, multiscreen web-application for ease of navigation. The visualization will also be interactive for seamless setup on the day of the exposition to gather/retain attention at the booth. == Requirements #figure( table( columns: 1, [#req("Should be eye catching to get more people to stop at the booth", shouldHave)], [#req("Must run on a production environment", mustHave)], [#req("Must gather number of monoliths, rooms within each monolith, and users in each room from the balancer", mustHave)], [#req("Must have options for different views", mustHave)], [#req("Should be able to interact with the state of the balancer", wouldBeNiceToHave)], [#req("Should support multiple balancers", shouldHave)], [#req("Should play an animation when a new user joins through the balancer", wouldBeNiceToHave)], [#req("Must be able to configure the size of nodes", mustHave)], [#req("Must be configurable to fetch from self-hosted instance or fly.io deployment of OpenTogetherTube", mustHave)], [#req("Must effectively communicate functionality to a non-technical audience", mustHave)], [#req("Must function as a useful debugging tool", mustHave)], [#req("Must assign unique colors to each type of node", mustHave)], [#req("Must have a legend", mustHave)], [#req("Must not display offensive material from user generated content during the Expo", mustHave)], ), caption: "General Visualization Requirements" ) <Table::visualization-requirements> == Types of Visualizations There will be multiple types of visualizations to show different aspects of the system. They are as follows: === Region View The purpose of this visualization is to how different resources are spread across different regions. #figure( image("figures/vis/region-view.png"), caption: "The Region View displaying sample data showing 2 Balancers and 2 Monoliths in ewr, 1 Balancer and 1 Monolith in cdg." ) <Figure::region-view> === Tree View The purpose of this visualization is to show the logical hierarchy of resources in the system. #figure( image("figures/vis/tree-view.png"), caption: "The Tree View displaying sample data showing 3 Balancers (center, orange) and 3 Monoliths (green), with rooms (purple) attached to each Monolith, and clients (red) attached to each room." ) <Figure::tree-view> === Topology View The purpose of this visualization is to show the physical layout of resources in the system. #figure( image("figures/vis/topology-view.png", width: 80%), caption: "The Topology View displaying sample data showing 2 Balancers and 2 Monoliths in ewr, 1 Balancer and 1 Monolith in cdg." ) <Figure::topology-view> === Traffic Heat Map The purpose of this visualization is to show where user traffic is coming from. #figure( image("figures/vis/traffic-heat-visualization.png"), caption: "Figma Mockup of Traffic Heat Map Visualization" ) <Figure::traffic-heat-visualization> This visualization will display a world map with heat circles in regions where there is user traffic. Higher concentrations of users will correspond to a bubble with a warmer color. === User Traffic Graph The purpose of this visualization is to show the total amount of user traffic to OpenTogetherTube over a selected period of time. 
#figure(
  image("figures/vis/user-traffic-visualization.png"),
  caption: "Figma Mockup of User Traffic Visualization"
) <Figure::user-traffic-visualization>

The x-axis represents time, and the y-axis represents OTT traffic.

=== Latency Graph

The purpose of this visualization is to show average server latency in real time.

#figure(
  image("figures/vis/latency-graph-visualization.png"),
  caption: "Figma Mockup of Latency Graph Visualization"
) <Figure::latency-graph-visualization>

The x-axis represents time, and the y-axis represents server latency.
https://github.com/alberto-lazari/cns-report
https://raw.githubusercontent.com/alberto-lazari/cns-report/main/critical-aspects.typ
typst
= Critical Aspects <critical_aspects>

The architecture presented in the previous section is not scalable, since running multiple tests in parallel would require using multiple smartphones and monitors at the same time. Moreover, setting up all the equipment is not trivial, and the results of the tests may differ because different devices have been used. These limitations slow the process of finding new vulnerabilities in the apps under test.

Conducting extended tests on applications using a physical smartphone revealed a sporadic issue of camera overheating, which significantly impeded the continuity of the entire testing process. This overheating problem, in addition to slowing down the testing process, also made it necessary to distinguish between crashes caused by QRs and crashes caused by camera overheating.

Due to the different programming languages used in the development of the QR Code Fuzzer and the QR Generator, coordinating them is not a straightforward task. To overcome this challenge, a JSON file is used as a means of communication and synchronization between the QR Code Fuzzer and the QR Code Generator. However, relying solely on a JSON file for communication introduces potential drawbacks such as increased latency and complexity, as well as the risk of data inconsistency or loss due to manual handling.
https://github.com/yhtq/Notes
https://raw.githubusercontent.com/yhtq/Notes/main/经济学原理/微观部分.typ
typst
#import "../template.typ": * // Take a look at the file `template.typ` in the file panel // to customize this template and discover how it works. #show: note.with( title: "经济学原理(微观部分)", author: "YHTQ", date: none, logo: none, ) = 前言 本门课程上下分两个学期,主要是经济学入门课程,介绍经济学的基本原理,微观经济学基础。 教师:田巍\ 讨论班:25,26为通选课\ 考核:平时20%,期中(11月13日)30%,期末(1月8日)50% (不会有概念默写类的题目)\ 注意作业题和 PPT 上题目\ 参考书:(无教材,作业考试以上课要求为准,仅参考) - 平狄克、鲁宾费尔德:《微观经济学》,中国人民大学出版社 - 范里安:《微观经济学:现代观点》,格致出版社 - 平新乔:《微观经济学十八讲》,北京大学出版社 == (微观)经济学的研究内容 - 经济学研究在资源稀缺的情况下,个体(消费者、厂商、资本等要素)如何做出选择,以实现尽可能大的经济收益。 - 稀缺性指社会资源是有限的,是经济学研究的前提假设。 - 个体行为(或如何配置资源)是经济学研究的核心,经济效率是经济学研究的目标。 本学期基本内容: - 供给和需求的基本原理 - 消费者理论:消费者行为,个人需求与市场需求 - 生产者理论或厂商理论:企业或生产者如何做出决策 - 市场理论: - 产品市场:交换行为 - 要素市场:购买生产要素行为 经济学理论难以完美刻画社会运行规律,仅能提供方向参考 == 生产要素 生产要素: - 自然资源 - 资本($K$) - 有形资本(机器厂房,股票债券) - 无形资本(专利,商标,知识产权) - 劳动力($L$):劳动力不等价于人,而是指抽象的人的劳动能力 生产要素是是流量概念而非存量概念,购买生产要素也是单位时间内的行为。 生产要素的稀缺性决定了产品也是稀缺的,因此生产要素和产品都需要支付价格购买。 == 微观经济学是经济学的基础 - 对企业而言,决策的目的是 _最大化利润_ - 对消费者而言,决策的目的是 _最大化效用_ 政府会对生产交易的每个环节进行约束干扰,从而影响个体的行为,比如征税 经济学还有其他分支,例如宏观经济学,金融学,企业理论,产业组织,国际经济学 == 个体决策的关键词 - 权衡取舍 trade-offs - 机会成本 opportunity cost:资源是有限的,如果一定量资源用于决策A的收益低于决策B,则将会做出B的决策(本课程中,机会成本被定义为成本 + 放弃的最大收益) - 理性自利 rationality:按照对自己最有利的方式决策,$eq.not$ 自私 - 激励 incentives == 市场、交易和价格 个体之间的交易改进社会总体福利,因此交易行为会发生。换言之,专业化生产提高了效率,但是需要交易才能实现。当然专业化未必是最优的,在有些场景下确实需要自给自足。 - 市场:产品的买卖双方的集合,包括买方、卖方和产品 - 范围:市场的范围包括地理范围,产品范围等等。地理范围多数时候无明确界限,产品之间也不可能一摸一样,因此现实中许多市场难以界定范围。在这门课中我们往往不会讨论市场的范围,而是直接给定市场。 - 结构:不同的产品,买卖方产生了不同的市场结构。经济学理论中常常讨论的有完全竞争市场,垄断市场,垄断竞争市场,寡头垄断市场等等。其核心差别在于价格形成机制的不同。 - 价格:市场中最核心的要素(看不见的手),是市场经济中调配资源配置的手段,直接影响成本和收益从而影响个体决策,由买卖双方通过一定过程形成(_在市场经济中,价格在任何情况下都是由市场决定的_)。事实上,在非常理想的情况下,价格可以看作参与市场的个体需要的唯一信息。 \ 注:事实上当今所谓的市场经济国家几乎不可能是完美的市场经济,当代市场经济国家的政府往往都会对市场进行强力调控。当然,经济学理论中往往假设市场经济是完美的。 - 免费的物品没有稀缺性,但没有稀缺性的物品不能称为商品。换言之,商品的价格不可能为零。 - 价格总是在不断变化的。CPI(居民消费价格指数)是衡量价格变化的指标。 - 当买方购买量和卖方的生产量达到平衡时,称为市场达到均衡。此时价格一定,个体的决策也一定。 - 如果所有考虑的市场都达到均衡,称为一般均衡(general equilibrium)。显然这是一种理想状态。 - 帕累托效率:指资源分配的一种理想状态,即在一定市场均衡下,不存在其他资源分配方法使得任何人境况都不变差的情况下,有人境况变好。 - 帕累托有效:指资源分配所能达到的最优资源配置。最优效率并不意味着最公平。 - 当市场均衡不能达到帕累托有效时,称为市场失灵。市场失灵的原因有很多,例如外部性,公共物品,不完全竞争等等。 = 供给和需求 当进行供需分析时,我们假设市场是完全竞争的或者至少竞争程度很高: + 产品完全同质化 + 潜在无穷多的竞争者(单个个体的供给和需求对市场微乎其微,市场进出无成本) + 买卖双方都是价格接受者(价格接受者:价格不受单个个体影响) 消费者和生产者分别产生需求和供给曲线,换言之供给和需求完全由价格决定。供给和需求的交点即为市场均衡点。此时看不见的手完全决定资源分配。\ 在一般而言,我们讨论供给/需求曲线当且仅当厂商/消费者是价格的接受者 == 需求 消费者在一定时期内,在一定价格水平下愿意而且能够购买的商品或服务的数量,是购买意愿和支付能力的统一。 - 需求定律:需求和价格负相关,其函数关系构成需求曲线。为了研究方便,在交易量极大时可以看作连续曲线 影响需求的因素: + 价格 + 消费者收入 - 正常商品:收入增加需求增加 - 低档商品:收入增加需求减少 + 相关商品价格 - 替代品:替代品价格增加导致价格增加 - 互补品:互补品价格增加导致价格减少 + 消费者偏好 + 消费者对未来的期望 + 人口数量/结构 + ...... 所谓需求曲线是以上所有因素不变时,价格和需求的关系。显然需求曲线的参数是其他影响因素的函数。 - 保留价格:愿意为某商品支付的最高价格(降低购买数量的阈值),显然它就是需求曲线上的对应点 - 消费者剩余:每单位商品的保留价格和实际价格之间的差值综合总和,往往用于近似替代消费者福利 == 供给 供给是指在一定时期内,一定价格水平下,生产者愿意而且能够提供的商品或服务的数量,是生产者的供给意愿和供给能力的统一。 - 供给定律:供给和价格正相关 影响供给的因素: + 价格 + 生产成本 - 生产要素价格 - 生产技术 + 生产者技术水平 + 生产者对未来的期望 + 相关产品价格 + 政府的产业政策 + ...... 
== 均衡 - 供给等于需求的状态称为平衡 - 供给过量/不足称为过剩、短缺 - 均衡状态会被其他因素的改变而打破,一段时间后达到新的平衡2 == 限价 如果政府认为均衡价格过高,就可能规定低于均衡价格的最高限价,但此时由于供需关系,供给减少,需求增加,自然会出现资源短缺。若要达到福利的目的,必须对生产者进行补贴。\ 类似的,有些商品会规定最低限价以保护生产者,比如典型的粮食往往会同时有最低和最高限价。单纯的最低限价会导致生产过剩,往往会结合政府收购等模式。\ 从以上例子可以看出,对于正常运转的市场进行政府干预往往要付出很大的成本。 == 贸易条件 称一个国家出口某商品的世界价格 / 进口某商品的世界价格\ 对于经济体量极大的大国,大量进口,出口商品都会导致贸易条件恶化,从而导致福利下降。\ 这可能可以解释一些发展中国家 “贫困化增长”,越进行贸易越发贫困。 = 需求和供给弹性 弹性表示了价格影响供给/需求的程度。\ 由于供需曲线横纵轴的选取,越平坦的曲线表示商品供需受价格影响越剧烈。\ 定义: $ (delta x(p)) / (delta p) \/ (x(p) / p) $ 为商品 $x$ 关于价格 $p$ 的(点)弹性。 有时也有弧弹性,用一段弧的割线替代$(delta x(p)) / (delta p)$,用中点替代 $(x(p) / p$ == 需求的价格弹性 简称需求弹性\ 一般而言,需求随价格增加而减少,因此 $epsilon <0$\ 有时也会不带符号直接考虑绝对值\ - 若 $|epsilon| = 1$,称需求是单位弹性 $ (delta x(p)) / (delta p) = -x(p) / p \ ln (x(p)) = ln (1/p) + c\ x(p) = A / p $ - 若 $|epsilon| > 1$,称需求是有弹性(也称商品是奢侈品) - 若 $|epsilon| < 1$,称需求是缺乏弹性(也称商品是必需品) 往往花费占收入更大的商品弹性更大,难以替代的需求弹性更小,变化速度越快的价格弹性越小。\ 注意到商家微调价格的收入(这里没有考虑利润)约为: $ (P + d P)(Q + d Q) = P Q + P d Q + Q d P + d P d Q = P Q + P d Q + Q d P \ = P Q + (P d Q + Q d P) = P Q + Q d P (1 + epsilon) $ 从而: - 若 $|epsilon| > 1$,降低价格收入增加 - 若 $|epsilon| < 1$,降低价格收入降低 == 需求的交叉价格弹性 称: $ (delta q_1 / delta p_2) / (q_1 / p_2) $ 为商品 $1$ 对商品 $2$ 的价格的交叉价格弹性。\ 显然替代品的交叉价格弹性为正,互补品的交叉价格弹性为负。\ = 消费者偏好 == 商品束 消费束或者商品束是一种或多种商品的组合,一个长度为商品数量,分量为商品数量的向量。\ 消费者在不同的商品束之间的选择称为消费者的偏好。 == 消费者偏好的假设 + 完备性:消费者能够对任意两个商品束进行比较得出偏好 + 传递性:若 $A$ 比 $B$ 好,$B$ 比 $C$ 好,则 $A$ 比 $C$ 好 + 反身性:$A$ 和 $A$ 一样好 以下是一些额外假设,满足的称为良性偏好 + 非饱和性 + 单调性 + 凸性 == 边际替代率 衡量消费者为了获得额外一单位商品愿意放弃的另一种商品的数量。\ $ "MRS" = - (delta x_1) / (delta x_2) $ 无差异曲线都是直线或者边际替代率都是常数时称为完全替代。 无差异曲线出现直角,边际替代率只取0或无穷时称为完全互补。 == 偏好函数 用偏好函数是一种将商品束映射到实数的函数。\ 称只关心排序的效用函数为序数效用,同时需要考虑数的大小的为基数效用。\ == 预算限 消费者可用于消费的收入是有限的。恰好花完收入的商品束构成一个集合,称为预算限。\ 对于理性的消费者,应当在预算限上最大化效益。几何上看,就是无差异曲线与预算曲线相切状态。 == 案例:税收机制讨论 + 从量税/从价税:按照购买商品的数量/价格的一定比例收取税款 + 所得税:按照收入的一定比例收取税款 == 替代效应与收入效应 在个人需求曲线上,每一点都是消费者效用最大化的结果。因而随着价格下降,由于消费者选择多买,效益一定是提高的。\ 在消费者理论的基础上,可以将需求的变换分为两种: - 替代效应:相对价格变动,效用不变引发的需求变化称为替代效应 - 收入效应:价格降低导致实际购买力的变动引发效用上升称为收入效应 衡量购买力不变有不同方法。以效用水平不变表示购买力不变称为 Hicks 分解,以依旧购买原商品称为 Slustsky 分解。 对于低档品,价格下降会引起正的替代效应和负的收入效应。一般而言大部分低档品都有替代效应强于收入效应,收入效应更强,也即价格下降会导致最终需求下降到商品称为吉芬品。 现实中,吉芬品是否存在仍是争议问题。 当效用函数形如: $ u = X + f(Y) $ 也即对商品 $X$ 线性时,效用最大化条件为: $ p_1 / p_2 = 1/f'(Y) $ 此时意味着 $Y$ 与收入是无关的,只与相对价格有关。换言之收入改变对 $Y$ 的消费量没有影响 = 生产者理论 企业追求利润最大化,面临的主要决策因素为: + 生产技术:描述投入品如何转化为产出 + 成本约束:投入品价格与数量的约束 + 生产则选择:在一定的生产技术和投入品价格下,企业如何选择每种投入品以及产出的数量,使得利润最大化。 == 短期生产函数 短期而言,一般来说只有劳动投入是容易改变的。假设在短期、资本固定不变,而劳动投入可变,厂商如何选择劳动投入以提高产量,这被称为短期生产函数。 在资本不变的前提下,典型的随着劳动力投入数量的变化时,总产出、边际产出、平均产出都应该是先增加后下降,归根到底是劳动力数量与资本数量的匹配 边际产出递减:在生产技术不变的前提下,随着投入要素的增加,最终的边际产出一定会下降(这并不是因为劳动力素质的变化而是劳动力与资源的配比) == 等产量线 长期而言,产出将是各项投入的函数。往往考虑 K 为资本,L 为劳动力。实现相同产出的不同生产要素投入的配比称为等产量线。 由于一般而言平衡优于极端,因此我们一般假定生产函数是单调的,凸的,也就是平均生产要素的产出大于产出的平均。 注意:显然产值的值是有意义的,因此生产函数是基数函数。 等产量线的导数称为边际技术替代率: $ "MRTS"_(L K) = (delta K)/(delta L) $ 生产函数中也可以有完全替代和完全互补两种极端情形。 规模报酬:扩张规模带来的报酬,也即: $ log_k (F(k overline(x))/F(overline(x))) $ 它表示扩张资本、劳动力的效果,大于一时称规模报酬递增,小于一时称规模报酬递减,等于一时称为规模效应不变 规模报酬递增的原因: + 生产要素的专用性 一台机器的一百倍不一定意味着一百台机器,而可能是一百倍价值的流水线,每个劳动力可以专业化分工 + 生产经营的不可分性 不能买半条流水线 规模报酬递减的原因: + 管理成本膨胀 规模报酬随规模变化往往也会呈现先上升后下降的趋势。如果把管理资源也看成一种生产要素,管理部门的扩张往往远远慢于正常部门的扩张,因此也可以看成边际报酬递减的特例。 == 成本函数 生产成本往往会分成多部分 + 会计成本 账面上花出的钱 + 机会成本 机会成本是指企业为了生产而放弃的最高收益,也即机会成本 = 账面成本 + 放弃的最高收益 + 沉没成本 已经花出的成本。注意沉没成本已经沉没了,决策时不应该考虑已经花出的成本。 + 固定成本 (在模型内)无法改变的成本,与沉没成本的差距在于可以(部分)收回 + 可变成本 (在模型内)可以改变的成本 边际成本:单位产出的成本,仅对可变成本有效,对固定成本无效。\ 边际成本往往先下降后上升,它和边际产出先上升后下降是相对应的,也有类似的原因: - 投入较低时,增加投入增加专业化,边际成本下降 - 投入较高时,再增加投入时管理成本膨胀,边际成本上升。 == 利润最大化/成本最小化 一般而言,我们假设企业是要素市场价格的接收者,也即企业的决策不改变要素市场价格。\ 
追求利润最大化和成本最小化是等效的,可以理解为在等成本线上找到最大产量或者在等产量线上找到最小成本。\ 因此,厂商利润最大化的根本条件是边际收益等于边际成本。\ == 规模经济 它是规模报酬的一种推广,用当产出增加一定倍数时,成本的增加倍数来衡量。\ 当成本增加倍数小于产出增加倍数时,称为规模经济,反之称为规模不经济。\ 规模经济经常用成本产出弹性: $ (partialDer(c, q))/(c/q) $ ( c 表示成本,q 表示产出) 衡量。小于一时称为规模经济,大于一时称为规模不经济。 == 完全竞争市场的生产者理论 复习三个假定: - 产品完全同质化 - 潜在无穷多的竞争者,进出没有成本 - 买卖双方都是价格接受者 生产者是价格的接受者,因此才会有供给-价格曲线的存在 短期和长期的区别: - 长期所有生产要素可以改变,短期只有少数生产要素可以改变 - 长期完全竞争市场中,所有厂商可以自由进出,短期则无法进出 长期而言,企业有退出的选项,因此若无论如何都会亏损,则企业当然会退出\ 而短期而言,需要考虑固定成本的问题。企业此时无法退出,不生产时也需要支付固定成本。因此若平均可变成本曲线低于价格,企业虽然生产也会亏损,但不生产亏损更大,因此会选择生产;反之会选择停产。\ 完全竞争市场的长期状态: - 每个生产者都是利益最大化 $=> P = "边际成本"$ - 利润为零(否则厂商会进入/退出)$=> P = "平均成本"$ - 市场出清(供给等于需求) 这表明完全竞争市场中,所有生产者应当都处边际成本曲线与平均成本曲线的交点,也即平均成本曲线的最低点。\ 同时,这表明如果平均成本一直下降,则这样的厂商不能存在。\ 完全竞争市场实现了所有可能的交换,因此某种意义上是最大的福利。\ 既然在长期竞争市场中,所有生产者利润都为零,那么为什么还有人愿意进入市场呢?\ - 短期内企业得到利润,这个短期在现实生活中可能很长 - 我们所谈的都是机会成本,而会计成本小于机会成本,因此最终还是能够盈利。 - 若某个企业拥有某项技术能够降低成本,那么它就能够获得利润。但是从机会成本的角度来说,技术带来的收益就是它隐含的(转卖给别人)的机会成本,因此总利润仍然为零。 短期供给曲线就是每个厂商的供给曲线加总,长期则并不然,它取决于生产要素受产业扩张的影响程度。例如: - 若行业扩张对要素价格不产生影响 - 若行业扩张导致要素价格降低,则更多企业进入,价格更低 == 垄断市场 假设某个市场中,进入成本很大导致只有一个生产者,而消费者仍然潜在无穷多。此时定价权实际由唯一的生产者决定,此种市场称为完全竞争市场。\ 在完全竞争市场中,厂商的定价策略当然应该让总利润,也即: $ P Q - "Cost" (Q) " ,where" Q = "Demand"(P) $ 其中 $"Demand" (Q)$ 就是市场的需求函数 最大化 == 价格歧视 在垄断市场下,商家可以采用称作价格歧视的策略,不将价格制定为统一价格,而将商品卖给更愿意付出高价的买家。 + 一级价格歧视:同时依据买家和数量制定价格,完全状态是每一个商品都恰好以最高的保留价格卖出,消费者剩余为零,社会总福利全部由生产者占有。 + 二级价格歧视:只根据数量,例如最典型的套餐销售、两步定价(先交固定价格,再交可变价格),实践上成本较小,因此更为常见。 + 三级价格歧视:只根据人群定价,要求人群身份不能随便冒充,例如不同地区的连锁店定价不同 == 垄断竞争市场 介于垄断市场与竞争市场之间,假设为: - 产品间有差异性和一定程度的可替代性 - 自由进入和退出,长期而言利润为零 厂商的定价策略当然应该让总利润,也即: $ P Q - "Cost"(Q) " ,where" Q = "Demand"(P)\ $ 最大化。\ 与垄断市场不同,此时 $"Demand"(P)$ 是厂商所面临的需求函数,它的弹性比整个市场面临的需求函数量值更小,弹性更大。\ 短期而言,厂商可以通过这样的定价策略获得利润。长期而言,由于自由进入,单个厂商面临的需求函数会萎缩,导致利润降低,最终降为零。\ 换言之,长期而言边际收益曲线,边际成本曲线交点的 $Q$ 恰好就是平均成本曲线与需求曲线交点的 $Q$ 。同时,由于利润不能为正,因此平均成本不能低于需求曲线,最终导致平均成本曲线应该与需求曲线相切。(此时平均成本事实上是下降的)\ == 寡头垄断市场 比垄断竞争市场垄断性更强,较垄断市场多了不同企业间的策略性行文。假设为: - 市场有很强的进入壁垒 - 厂商数量较少,拥有大部分或者全部的产量 - 产品之间可能有差异性可能没有 在寡头垄断市场中,不能只简单地最优化自己的利润,因为一方的决策可能对其他寡头造成影响,导致市场条件发生变化,这在之前的市场结构是没有出现过的。因此,某种意义上寡头垄断市场是一大类市场结构而不是一种市场结构。\ 在具体的决策中,若干寡头之间的决策可能会产生博弈论问题。在多方博弈中,如果每个人都已经实现了当前它人决策下的最优决策,则称为一个纳什均衡。\ 注意纳什均衡可能有多个,也可能没有。同时,纳什均衡即使存在也未必能实现。\ 古诺模型:简单的双寡头模型,两个垄断寡头在同一个市场售卖完全相同的商品,并且知道市场需求信息。一方扩张产量会压低市场价格从而影响他方的利润。\ 在这个模型下,一方的所有决策就是产量。他方可以通过假设对方的产量决定自己的产量(这个曲线称为反应曲线),最终求解出纳什均衡状态。
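The Cournot model sketched above has a standard closed-form solution. As a worked instance (a textbook setup, not taken from this text): with inverse demand $P = a - b(q_1 + q_2)$ and zero marginal cost, firm 1 maximizes $pi_1 = (a - b(q_1 + q_2)) q_1$, which yields its reaction curve, and by symmetry the Nash equilibrium:

$ (diff pi_1)/(diff q_1) = a - 2 b q_1 - b q_2 = 0 ==> q_1 = (a - b q_2)/(2 b) $
$ q_1^* = q_2^* = a/(3 b), quad P^* = a/3 $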
https://github.com/typst-doc-cn/tutorial
https://raw.githubusercontent.com/typst-doc-cn/tutorial/main/src/figures.typ
typst
Apache License 2.0
#import "@preview/fletcher:0.4.0" as fletcher: node, edge #import "/typ/templates/page.typ": main-color, is-light-theme #import "mod.typ": typst-func #let figure-typst-arch( stroke-color: main-color, light-theme: is-light-theme, ) = { let node = node.with(stroke: main-color + 0.5pt) let xd = align.with(center) fletcher.diagram( node-outset: 2pt, axes: (ltr, btt), // nodes node((0, 0), xd[文件解析\ (Parsing)]), node((1.5, 0), xd[表达式求值\ (Evaluation)]), node((3, 0), xd[内容排版\ (Typesetting)]), node((3, -1), xd[结构导出\ (Exporting)]), // edges edge((0, 0), (1.5, 0), "..}>", bend: 25deg), edge((1.5, 0), (0, 0), xd[`import`或\ `include`], "..}>", bend: 25deg), edge((1.5, 0), (3, 0), "..}>", bend: 25deg), edge((3, 0), (1.5, 0), xd[`styled`等], "..}>", bend: 25deg), edge((3, 0), (3, -1), "..}>", bend: 25deg), ) } #let figure-content-decoration( stroke-color: main-color, light-theme: is-light-theme, ) = { // let node = node.with(stroke: main-color + 0.5pt) let xd = align.with(center) fletcher.diagram( node-outset: 2pt, axes: (ltr, btt), // nodes node((0, 0), xd[```typ 左#[一段文本]右```]), node((1.5, 0), xd(```typc text(blue)```)), node((3, 0), xd([左]+text(blue)[一段文本]+[右])), node((0, -0.5), xd[选中内容]), node((1.5, -0.5), xd[对内容块应用#typst-func("text")函数]), node((3, -0.5), xd[最终效果]), // edges edge((0, 0), (1.5, 0), "..}>"), edge((1.5, 0), (3, 0), "..}>"), ) }
https://github.com/ice1000/website
https://raw.githubusercontent.com/ice1000/website/main/lnl-modal/tizart.typ
typst
#import "@preview/ctheorems:1.1.2": * #let alpha = "33" #let theorem = thmbox("theorem", "Theorem", breakable: true, fill: rgb("#eeffee" + alpha)) #let lemma = thmbox("lemma", "Lemma", breakable: true, fill: rgb("#eeffee" + alpha)) #let proof = thmproof("proof", "Proof")
https://github.com/AlvaroRamirez01/Analisis_de_Algoritmos_2024-1
https://raw.githubusercontent.com/AlvaroRamirez01/Analisis_de_Algoritmos_2024-1/master/Tarea_01_Problemas_y_Algoritmos/main.typ
typst
#import "conf.typ": * #show: doc => conf( materia: "Análisis de Algoritmos", tarea: "Tarea 01: Problemas y Algoritmos", profesor: ( nombre: "<NAME>", sexo: "F", ), ayudantes: ( "<NAME>", "<NAME>" ), alumnos: ( ( nombre: "<NAME>", cuenta: "316276355", email: "<EMAIL>" ), ), fecha: datetime.today(), encabezado: "Problema a desarrollar", doc, ) #let colors = (black, gray, silver, white, navy, blue, aqua, teal, eastern, purple, fuchsia, maroon, red, orange, yellow, olive, green, lime) #text(12pt)[ #par(justify: true)[ = Problema a desarrollar Francisco necesita comprar clavos para un proyecto personal, le recomendaron que si los compraba en la tienda de _The Home Depot_ podría encontrar el clavo adecuado a su proyecto. Llegando a la tienda, fue a la sección de construcción y encontró una máquina que dispensa clavos, para que la maquina le dé el clavo que quiere solo debe especificar ciertas cosas como lo es: - Tipo de cabeza (ovalada o sin cabeza, ancha, plana) - Diámetro del clavo (expresado en pulgadas o milímetros) - Longitud del clavo (expresado en pulgadas o milímetros) - Acabado del clavo (acero, acero inoxidable, cobre y latón) Francisco necesita clavos de cabeza plana, de 3 milímetros de diámetro, 50 milímetros de longitud y de acero inoxidable. = Datos de entrada + Una maquina despachadora de clavos $M$ + Tipo de cabeza de clavo $T$ + Diámetro de clavo $D$ + Longitud de clavo $L$ + Acabado del clavo $A$ == Pre-condición La máquina $S$ es finita. Los datos $T,D,L,A$ no nulos. = Proceso + La máquina $M$ recibe los datos $T,D,L,A$ que ingresa la persona + $M$ se conecta a su base de datos a la que llamamos $B\D$ + Si $B\D$ tiene en existencia un clavo de características $T,D,L,A$ entonces: - Expulsamos un clavo de características $T,D,L,A$ y en $B\D$ descontamos los artículos que fueron despachados. - En otro caso, informamos al usuario que no contamos con el clavo que solicito. 4. Vuelve a la pantalla principal. == Post-condición $M$ sigue siendo finita y dependiendo de su $B\D$ puede ser no vacia o vacía si es que se le acabo su inventario de clavos = Datos de salida Se obtiene el clavo de acuerdo a las características que se dio a la maquina o se obtiene un mensaje diciendo que no se cuenta con el articulo disponible. ]] /* == Primera pregunta #lorem(40) #for c in colors { solucion(color: c)[ #lorem(40) ] } #pagebreak() == Segunda pregunta #lorem(40) @Typst */
https://github.com/Mc-Zen/zero
https://raw.githubusercontent.com/Mc-Zen/zero/main/docs/figures/table.typ
typst
MIT License
#import "/src/zero.typ": * #set page(width: auto, height: auto, margin: 4pt) #let clr = if "dark" in sys.inputs { white } else { black } #set page(fill: white) if clr == black #set text(fill: clr) #set text(1.2em) #set table(stroke: clr + .5pt) #set table(stroke: (x, y) => { if y == 0 { (bottom: clr + .6pt) } if x > 0 { (left: clr + .6pt) } }) #let table1 = ztable( columns: 3, align: center, format: (none, auto, auto), $n$, $α$, $β$, [1], [3.45], [-11.1], [2], [4.568], [-0.1], [3], [12.2], [-.02], [...], [24.1], [-1.1], [10], [234], [-0.00234], ) #let table2 = ztable( columns: 4, align: center, format: (none, auto, auto, (digits: 1)), $n$, $α$, $β$, $γ$, [1], [3.45e2], [-11.1+-3], [0], [2], [4.568e1], [-0.1+-1e4], [1.23], [3], [12.2e4], [-.02+-.02], [1], [...], [24.1e6], [-1.1+-5], [9.1], [10], [234e200], [-0.00234+-.3e3], [1.1] ) #table1 #pagebreak() #table2
https://github.com/ayoubelmhamdi/typst-phd-AI-Medical
https://raw.githubusercontent.com/ayoubelmhamdi/typst-phd-AI-Medical/master/chapters/ch17.typ
typst
MIT License
#import "../functions.typ": heading_center, images, italic,linkb #let finchapiter = text(size: 24pt, fill:rgb("#1E045B"),[■]) = DETECTING LUNG CANCER NODULES. == Introduction Lung cancer ranks among the top contributors to cancer-related mortality worldwide@national2011reduced. Early recognition and diagnosis of lung nodules, small tissue masses in the lungs, can significantly elevate survival rates and treatment success for individuals with lung cancer. However, detecting and classifying these lung nodules poses a worthy challenge due to their varying size, shape, location, and physical characteristics@SetioTBBBC0DFGG16. Furthermore, the majority of lung nodules are benign or non-cancerous, with only a scant percentage classified as malignant or cancerous@dou2017automated. These conditions create complications for automated lung nodule detection and classification through machine learning models. In this study, we put into action a machine learning experiment utilizing a CNN model to determine benign or malignant lung nodules from CT scan images. We employed the publicly accessible LUNA16 dataset@SetioTBBBC0DFGG16 comprising 888 annotated nodule CT scans. A total of 10862 nodules were singled out from these scans where only 25 were malicious. The nodules were partitioned into a training set consisting of 8152 nodules and a validation set of 2709 nodules@armato2011lidc. We facilitated training of the CNN model using the training set and gauged its performance on the validation set@lin2017feature. Providing accuracy and recall as performance indicators. === CT Scans with Lung Nodules To read, process, and visually represent CT scans depicting lung nodules@SetioTBBBC0DFGG16@dou2017automated@ding2017accurate, we implemented two Python libraries: SimpleITK and matplotlib. SimpleITK furnishes a simplified access point to the Insight Segmentation and Registration Toolkit (ITK), a framework built for image analysis and processing. Matplotlib, on the other hand, offers functionalities for image visualization and enhancement. With SimpleITK, we read the CT scan files from the LUNA16 dataset@SetioTBBBC0DFGG16, converting these images from their DICOM or NIfTI format into manipulable multidimensional numerical arrays, referred to as numpy arrays. Additionally, SimpleITK was utilized to obtain the images' origin and spacing, defined as the image coordinates and the voxel size, respectively@SetioTBBBC0DFGG16. Afterward, we resampled the images using SimpleITK, achieving a uniform voxel size of 1 mm x 1 mm x 1 mm, normalized pixel values to a range of -1000 to 320 Hounsfield Units (HU), and applied a lung segmentation algorithm to isolate the lung regions from the images. We utilized matplotlib for plotting and displaying the CT scan slices containing nodules, supplementing these images with white lines marking the boundaries around each nodule to emphasize their location and dimensions@SetioTBBBC0DFGG16@dou2017automated@ding2017accurate. A function was developed, accepting as its input a CT scan array, a numpy array consisting of nodule coordinates and diameters, the image's origin and spacing, and some optional parameters. This function iterates over the nodule array, computing the voxel coordinates for each nodule based on the image's physical coordinates, origin, and spacing. Afterward, it alters the CT scan array, incorporating the white lines around each nodule, and concludes with creating a plot to display the CT scan slices housing the nodules using matplotlib. 
Figure 1 offers an example of a CT scan slice with a nodule highlighted by white lines.

#figure(
  image("images/seg4.png"),
  caption: [Example of a CT scan slice with a nodule highlighted by white lines]
)

== Methodology

=== Resources

Our study used CT scans and annotations sourced from the LUNA16 dataset@SetioTBBBC0DFGG16. LUNA16, a publicly accessible CT scan set from the Lung Image Database Consortium (LIDC) and Image Database Resource Initiative (IDRI), comprises 888 CT scans with a slice thickness of less than 3 mm and a pixel spacing of less than 0.7 mm. The set also provides two separate CSV files containing candidate and annotation details.

The candidates.csv file has five columns: seriesuid, coordX, coordY, coordZ, and class. Here, seriesuid is a unique identifier for each scan; coordX, coordY, and coordZ are the spatial coordinates of each candidate in millimeters; and class is a binary label indicating whether the candidate is a nodule (1) or not (0).

The annotations.csv file consists of five columns: seriesuid, coordX, coordY, coordZ, and diameter_mm, giving the scan's unique identifier, the spatial coordinates of each annotation in millimeters, and each annotation's diameter in millimeters, respectively. These annotations were marked manually, based on the identification of nodules larger than 3 mm in diameter by four independent radiologists@<EMAIL>@<EMAIL>.

=== Procedural Outline

Our study comprised three principal stages: data preprocessing, nodule detection algorithm development, and performance evaluation@<EMAIL>@<EMAIL>.

==== Data Preprocessing

In the data preprocessing phase, the CT scans were converted from DICOM format to arrays (tensors). This was followed by resampling the images to uniform voxel dimensions of 1 mm x 1 mm x 1 mm, normalizing pixel values to a range of -1000 to 320 Hounsfield Units (HU), and finally applying a lung segmentation algorithm to extract the lung regions from the images. The segmentation algorithm combined thresholding, morphological operations, and connected component analysis, producing a set of lung masks for each scan@<EMAIL>@<EMAIL>.

==== Development of Nodule Detection Algorithm

The construction of the nodule detection algorithm was divided into several stages. At its foundation, the algorithm relies on a Convolutional Neural Network (CNN) model tasked with identifying nodules in CT scan images@lin2017feature. The model consists of:

- An _Input Layer_ that receives a 3D grayscale image.
- A first convolutional layer (Convolutional Layer 1) with 32 filters and a 3x3x3 kernel. Padding was set to 1 to preserve the image's spatial dimensions.
- The output of Convolutional Layer 1 passes through a _ReLU Activation Function_ (Activation Layer 1).
- A second convolutional layer (Convolutional Layer 2) with 32 filters, a 3x3x3 kernel, and a padding of 1.
- The output of Convolutional Layer 2 passes through another ReLU activation function (Activation Layer 2).
- 3D max pooling (Max Pooling Layer 1) with a 2x2x2 kernel and a stride of 2, halving the spatial dimensions.
- This structure is then repeated with Convolutional Layers 3 and 4, Activation Layers 3 and 4, and Max Pooling Layer 2, except that these convolutional layers use 64 filters.
- The output of the final max pooling layer is flattened into a 1D tensor (Flatten Layer) using PyTorch's 'view' method before being passed to the fully connected layer.
- The flattened tensor is processed by a fully connected (dense) layer, the _Fully Connected Layer_. This layer has two output neurons, corresponding to the presence or absence of a nodule@lin2017focal.
- Lastly, a softmax function is applied to the output of the fully connected layer (Softmax Layer). This step yields a probability distribution over the two classes, as required for binary classification@lin2017feature.

To train the model, the *Adam optimizer* was used with a learning rate of 0.001, a batch size of 40, and the binary cross-entropy loss function. Training ran for 100 epochs@SetioTBBBC0DFGG16.

== Results

=== Assessment of Model Performance

We assessed the model by its accuracy on both the training and validation datasets, recorded at each stage of the learning process@SetioTBBBC0DFGG16. Here, *accuracy* refers to how well the model predicts the labels of the training data, while *validation accuracy* measures how well the model generalizes its predictions to new, unseen data, i.e., the validation data.

The accuracy and validation accuracy values over the course of training indicate that the model is learning, as both improve gradually. The model starts with comparatively low accuracies, around 0.64, and improves to above 0.89 by the end of training, showing that it learns to classify a considerable proportion of cases correctly. An accuracy of 89% was achieved on the validation set, implying that the model correctly predicted the class of 2654 of the 2709 nodules@SetioTBBBC0DFGG16.

=== Evaluation Metrics: Precision, Recall and F1-Score

The model's performance was additionally evaluated using *precision*, *recall*, and *F1-score* alongside accuracy. These measures give broader insight into performance, especially when the classes are imbalanced@lin2017focal.

- *Precision* is the fraction of correct positive predictions (specifically, when the model accurately identifies a nodule) out of all positive predictions made by the model. High precision indicates a low false positive rate. The model achieved a precision of 0.91 for class 0 and 0.86 for class 1.
- *Recall*, synonymous with sensitivity or true positive rate, is the ratio of correct positive predictions to all actual positives. High *recall* indicates that the model correctly identified a majority of the actual positive cases. The model achieved a recall of 0.91 for class 0 and 0.86 for class 1 @lin2017focal.
- *F1-score* is the harmonic mean of precision and recall, providing a single measure that balances the two. The model scored an _F1-score_ of 0.91 for class 0 and 0.86 for class 1 @lin2017focal.

The results show that the model performed well on both classes, with slightly better performance on class 0 (no nodule) than on class 1 (nodule present). Overall, the model performed strongly in terms of precision, recall, and _F1 score_@SetioTBBBC0DFGG16.
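As a quick arithmetic check of the harmonic-mean definition, using the class 1 figures reported above:

$ F_1 = (2 dot P dot R)/(P + R) = (2 dot 0.86 dot 0.86)/(0.86 + 0.86) = 0.86 $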
== Discussion

A significant barrier to improving performance is that our dataset is highly imbalanced@dou2017automated: the benign class is more than 400 times more prevalent than the malignant class. This imbalance hinders the model's ability to learn to distinguish between the classes, and the model may simply default to predicting the most frequent class@lin2017focal.

Furthermore, because of this high imbalance, accuracy is not an appropriate performance measure, as it can be deceptively high even when the model consistently mispredicts the minority class@lin2017focal. Addressing this issue requires a better-suited training strategy and a performance indicator better than accuracy. Potential solutions include:

- Applying data augmentation techniques to increase the number of malignant samples in our dataset@SetioTBBBC0DFGG16.
- Employing oversampling or undersampling techniques to balance the classes in our dataset@lin2017focal.

In future work, we aim to incorporate some of these solutions and expect to improve our model's performance on lung nodule classification@SetioTBBBC0DFGG16@dou2017automated@ding2017accurate.
https://github.com/xrarch/books
https://raw.githubusercontent.com/xrarch/books/main/xrcomputerbook/chapkinnow.typ
typst
#import "@preview/tablex:0.0.6": tablex, cellx, colspanx, rowspanx = Kinnow Framebuffer == Introduction The Kinnow framebuffer card provides a very simple linear 1024x768 framebuffer with 8-bit pixels. The slot space is laid out as follows: #tablex( columns: (1fr, 5fr), cellx([ #set text(fill: white) #set align(center) *Offset* ], fill: rgb(0,0,0,255)), cellx([ #set text(fill: white) #set align(center) *Purpose* ], fill: rgb(0,0,0,255)), [+0x000000], [SlotInfo], [+0x003000], [Kinnow Registers], [+0x100000], [Framebuffer Memory] ) There are two 32-bit read-only device registers: #tablex( columns: (1fr, 5fr), cellx([ #set text(fill: white) #set align(center) *Offset* ], fill: rgb(0,0,0,255)), cellx([ #set text(fill: white) #set align(center) *Purpose* ], fill: rgb(0,0,0,255)), [+0x0], [Display Size], [+0x4], [Framebuffer Memory Size], ) The display size register contains two 12-bit fields. The low 12 bits [0:11] contain the width of the display in pixels, and the next 12 bits [12:23] contain the height of the display in pixels. The framebuffer memory size register contains the size in bytes of the framebuffer memory. It will always be at least large enough to contain the display pixel data, calculated by a simple multiplication of width by height as each pixel occupies only 1 byte. The framebuffer memory is laid out in row-major order. That is, the first row of a 1024 pixel wide display is stored as a contiguous sequence of 1024 bytes, the next row is the next 1024 bytes, and so on. Therefore, the offset for a particular (X,Y) pair can be calculated by: ``` Offset := (y * Width) + x ``` And, likewise, an offset can be converted to an (X,Y) pair by: ``` Y = Floor(Offset / Width) X = Offset % Width ``` #box([ == Palette The pixel color is computed by looking up the 8-bit pixel value in a hardwired palette of 256 colors: #set align(center) #image("palette.png", width: 50%) #set align(left) ]) The RGB values are reproduced below: 000000 220022 000044 220066 000088 2200aa 0000cc 2200ee 440000 660022 440044 660066 440088 6600aa 4400cc 6600ee 880000 aa0022 880044 aa0066 880088 aa00aa 8800cc aa00ee cc0000 ee0022 cc0044 ee0066 cc0088 ee00aa cc00cc ee00ee 002200 222222 002244 222266 002288 2222aa 0022cc 2222ee 442200 662222 442244 662266 442288 6622aa 4422cc 6622ee 882200 aa2222 882244 aa2266 882288 aa22aa 8822cc aa22ee cc2200 ee2222 cc2244 ee2266 cc2288 ee22aa cc22cc ee22ee 004400 224422 004444 224466 004488 2244aa 0044cc 2244ee 444400 664422 444444 664466 444488 6644aa 4444cc 6644ee 884400 aa4422 884444 aa4466 884488 aa44aa 8844cc aa44ee cc4400 ee4422 cc4444 ee4466 cc4488 ee44aa cc44cc ee44ee 006600 226622 006644 226666 006688 2266aa 0066cc 2266ee 446600 666622 446644 666666 446688 6666aa 4466cc 6666ee 886600 aa6622 886644 aa6666 886688 aa66aa 8866cc aa66ee cc6600 ee6622 cc6644 ee6666 cc6688 ee66aa cc66cc ee66ee 008800 228822 008844 228866 008888 2288aa 0088cc 2288ee 448800 668822 448844 668866 448888 6688aa 4488cc 6688ee 888800 aa8822 888844 aa8866 888888 aa88aa 8888cc aa88ee cc8800 ee8822 cc8844 ee8866 cc8888 ee88aa cc88cc ee88ee 00aa00 22aa22 00aa44 22aa66 00aa88 22aaaa 00aacc 22aaee 44aa00 66aa22 44aa44 66aa66 44aa88 66aaaa 44aacc 66aaee 88aa00 aaaa22 88aa44 aaaa66 88aa88 aaaaaa 88aacc aaaaee ccaa00 eeaa22 ccaa44 eeaa66 ccaa88 eeaaaa ccaacc eeaaee 00cc00 22cc22 00cc44 22cc66 00cc88 22ccaa 00cccc 22ccee 44cc00 66cc22 44cc44 66cc66 44cc88 66ccaa 44cccc 66ccee 88cc00 aacc22 88cc44 aacc66 88cc88 aaccaa 88cccc aaccee cccc00 eecc22 cccc44 eecc66 cccc88 eeccaa cccccc eeccee 00ee00 22ee22 00ee44 22ee66 
00ee88 22eeaa 00eecc 22eeee 44ee00 66ee22 44ee44 66ee66 44ee88 66eeaa 44eecc 66eeee 88ee00 aaee22 88ee44 aaee66 88ee88 aaeeaa 88eecc aaeeee ccee00 eeee22 ccee44 eeee66 ccee88 eeeeaa cceecc ffffff
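Returning to the offset formulas above, a short worked example with illustrative coordinates: on the 1024-pixel-wide display, the pixel at (X = 3, Y = 2) maps to

```
Offset = (2 * 1024) + 3 = 2051
Y = Floor(2051 / 1024) = 2
X = 2051 % 1024 = 3
```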
https://github.com/binhtran432k/ungrammar-docs
https://raw.githubusercontent.com/binhtran432k/ungrammar-docs/main/contents/evaluation/index.typ
typst
= Evaluation and Results #include "./technology.typ" #include "./plan.typ" #include "./method.typ"
https://github.com/typst/packages
https://raw.githubusercontent.com/typst/packages/main/packages/preview/chordx/0.2.0/src/chart.typ
typst
Apache License 2.0
#import "./utils.typ": parse-input-string, top-border-normal, top-border-round // Draws a horizontal border that indicates the starting of the fretboard #let draw-nut(self) = { let size = ( width: self.grid.width, height: 1.2pt * self.scale ) if self.fret-number in (none, 1) { if self.style == "normal" { top-border-normal(size, self.stroke, self.scale) } else { top-border-round(size, self.stroke, self.scale) } } } // Draws a grid with a width = (length of tabs) and height = (number of frets) #let draw-grid(self) = { let radius = (bottom: 1pt * self.scale, top: 1pt * self.scale) place( rect( width: self.grid.width, height: self.grid.height, radius: if self.style == "normal" {0pt} else {radius}, stroke: self.stroke ) ) // draws the vertical lines for i in range(self.grid.cols - 1) { let x = (i + 1) * self.step place( line( start: (x, 0pt), end: (x, self.grid.height), stroke: self.stroke ) ) } // draws the horizontal lines for i in range(self.grid.rows - 1) { let y = (i + 1) * self.step place( line( start: (0pt, y), end: (self.grid.width, y), stroke: self.stroke ) ) } } // Draws the tabs over the grid #let draw-tabs(self) = { for (tab, col) in self.tabs.zip(range(self.tabs.len())) { if type(tab) == "string" and lower(tab) == "x" { let offset = col * self.step place( line( start: (offset - 1.5pt * self.scale, -2.5pt * self.scale), end: (offset + 1.5pt * self.scale, -5.5pt * self.scale), stroke: self.stroke ) ) place( line( start: (offset - 1.5pt * self.scale, -5.5pt * self.scale), end: (offset + 1.5pt * self.scale, -2.5pt * self.scale), stroke: self.stroke ) ) continue } if (type(tab) == "string" and lower(tab) == "o") { let radius = 1.7pt * self.scale place( dx: self.step * col - radius, dy: -4pt * self.scale - radius, circle(radius: radius, stroke: self.stroke) ) continue } if type(tab) == "integer" and tab > 0 and tab <= self.frets { let radius = 1.7pt * self.scale place( dx: self.step * col - radius, dy: self.step * tab - radius - 2.5pt * self.scale, circle(radius: radius, stroke: none, fill: black) ) continue } } } // Draws a capo list // // capo = (fret, start, end) // fret: fret position // start: lowest starting string // end: highest ending string #let draw-capos(self) = { let size = self.tabs.len() for (fret, start, end, ..) 
in self.capos { if start > size { start = size } if end > size { end = size } place( dy: fret * self.step - 2.5pt * self.scale, line( start: ((size - start) * self.step, 0pt), end: ((size - end) * self.step, 0pt), stroke: (paint: black, thickness: 3.4pt * self.scale, cap: "round") ) ) } } // Draws the finger numbers below the grid #let draw-fingers(self) = { let size = self.tabs.len() for (finger, col) in self.fingers.zip(range(size)) { if type(finger) == "integer" and finger > 0 and finger < 6 { place( left + top, dx: col * self.step - 1.3pt * self.scale, dy: self.grid.height + 1.5pt * self.scale, text(6pt * self.scale)[#finger]) } } } // Draws the fret start number that indicates the starting position of the fretboard #let draw-fret-number(self) = { place(left + top, dx: -3pt * self.scale, dy: self.step / 2 - 0.2pt * self.scale, place(right + horizon, text(8pt * self.scale)[#self.fret-number]) ) } // Draws the chord name below the grid and finger numbers #let draw-name(self) = { place( dx: self.grid.width / 2, dy: self.grid.height + self.vertical-gap-name, place(center + horizon, text(12pt * self.scale)[#self.name]) ) } // Render the chart #let render(self) = { style(styles => { let fret-number-size = measure(text(8pt * self.scale)[#self.fret-number], styles) let chord-name-size = measure(text(12pt * self.scale)[#self.name], styles) let tabs-height = if "o" in self.tabs or "x" in self.tabs { -(4pt + 1.7pt) * self.scale } else { 0pt } let graph = ( width: self.tabs.len() * self.step, height: self.frets * self.step ) let chart = ( width: graph.width + fret-number-size.width + self.step / 2, height: graph.height + chord-name-size.height / 2 + self.vertical-gap-name - tabs-height ) let canvas = ( width: calc.max(graph.width / 2, chord-name-size.width / 2) + calc.max(chart.width / 2 + fret-number-size.width, chord-name-size.width / 2), height: chart.height, dx: calc.max((chord-name-size.width - graph.width) / 2 + self.step / 2, fret-number-size.width + self.step / 2), dy: -(graph.height + chord-name-size.height / 2 + self.vertical-gap-name), ) box( width: canvas.width, height: canvas.height, place( left + bottom, dx: canvas.dx, dy: canvas.dy, { draw-nut(self) draw-grid(self) draw-tabs(self) draw-capos(self) draw-fingers(self) draw-fret-number(self) draw-name(self) } ) ) }) } /// Return a new function with default parameters to generate chart chords for stringed instruments. /// /// - frets (integer): Presets the number of frets (rows of the grid). *Optional*. /// - scale (integer, float): Presets the scale. *Optional*. /// - style (string): Sets the chart style. *Optional*. /// - ```js "normal```: chart with right angles. /// - ```js "round```: chart with round angles. /// - font (string): Sets the name of the text font. *Optional*. /// -> function #let new-chart-chords( frets: 5, scale: 1, style: "normal", font: "Linux Libertine" ) = { /// Is the returned function by *new-chart-chords*. /// /// - tabs (string): Shows the tabs on the chart. *Optional*. /// - *x*: mute note. /// - *o*: air note. /// - *n*: without note. /// - *number*: note position on the fret. /// /// The string length of tabs defines the number of strings on the instrument. /// #parbreak() Example: /// - ```js "x32o1o"``` - (6 strings - C Guitar chord). /// - ```js "ooo3"``` - (4 strings - C Ukulele chord). /// /// - fingers (string): Shows the finger numbers. *Optional*. 
/// - *n*, *x*, *o*: without finger, /// - *number*: one finger /// #parbreak() Example: ```js "n32n1n"``` - (Fingers for guitar chord: C) /// /// - capos (string): Adds one or many capos on the chart. *Optional*. /// - 1#super[st] digit -- *fret*: fret position. /// - 2#super[nd] digit -- *start*: lowest starting string. /// - 3#super[rd] digit -- *end*: highest ending string. /// #parbreak() Example: ```js "115"``` $\u{2261}$ ```js "1,1,5"``` $=>$ ```js "fret,start,end"``` /// #parbreak() With ```js "|"``` you can add capos: /// #parbreak() Example: ```js "115|312"``` $\u{2261}$ ```js "1,1,5|3,1,2"``` $=>$ ```js "fret,start,end|fret,start,end"``` /// /// - frets (integer): Sets the number of frets (rows of the chart grid). *Optional*. /// - fret-number (integer): Shows the fret number that indicates the starting position of the fretboard. *Optional*. /// - scale (integer): Sets the scale. *Optional*. /// - name (string, content): Shows the chord name. *Required*. /// -> content let chart-chord( tabs: "", fingers: "", capos: "", frets: frets, fret-number: none, scale: scale, name ) = { let tabs = parse-input-string(tabs) let fingers = parse-input-string(fingers) let capos = parse-input-string(capos) if capos.len() != 0 and type(capos.first()) != "array" { capos = (capos,) } let step = 5pt * scale let stroke = black + 0.5pt * scale let vertical-gap-name = 14pt * scale if fingers.len() == 0 { vertical-gap-name = 9pt * scale } set text(font: font) let self = ( scale: scale, step: step, stroke: black + 0.5pt * scale, vertical-gap-name: vertical-gap-name, grid: ( width: (tabs.len() - 1) * step, height: frets * step, rows: frets, cols: tabs.len() - 1, ), tabs: tabs, fingers: fingers, capos: capos, frets: frets, fret-number: fret-number, style: style, name: name, ) render(self) } return chart-chord }
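A minimal usage sketch following the docstrings above, assuming the package re-exports `new-chart-chords` from `src/chart.typ`; the tab and finger strings are the documented C-chord examples:

```typ
#import "@preview/chordx:0.2.0": new-chart-chords

// Build a chart function with round corners; other defaults as documented
#let chart-chord = new-chart-chords(style: "round")

// C major on a 6-string guitar, per the docstring examples
#chart-chord(tabs: "x32o1o", fingers: "n32n1n", [C])
```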
https://github.com/polarkac/MTG-Stories
https://raw.githubusercontent.com/polarkac/MTG-Stories/master/stories/011%20-%20Journey%20into%20Nyx/001_Ajani%2C%20Mentor%20of%20Heroes.typ
typst
#import "@local/mtgstory:0.2.0": conf #show: doc => conf( "Ajani, Mentor of Heroes", set_name: "Journey into Nyx", story_date: datetime(day: 09, month: 04, year: 2014), author: "<NAME>", doc ) A warm breeze, touched lightly with the scent of olives and the distant sea, ruffled Ajani's fur. It whispered through short yellow grass and the bright green leaves of stout, timeworn trees. Of all the worlds he had visited, Theros felt the most like home. Even his own plane, Naya, was no longer the world he had known, having since merged with the other four shard-planes of Alara to form a whole but wounded world. But Theros, protected by powerful, ageless entities, had always seemed unchanging. The locals called them gods, and for once it was difficult to argue. Elspeth seemed to find it comforting, to think that minds greater than any mortal's were watching over her. Ajani found the idea less appealing. Bitter experience had taught him that any being powerful enough to be worshiped as a god was also powerful enough—and at least as likely—to destroy a world rather than protect it. The god-behemoths of Naya were just mindless beasts, their motions forming patterns of mundane migration rather than divine inspiration. The dragon Planeswalker <NAME>, whose power truly was godlike, had manipulated centuries of religious and political development on the shards of Alara to facilitate his own selfish and destructive aims. Still, Ajani could not deny the might and majesty of the gods of Theros, nor the beauty of the plane they safeguarded. Perhaps Elspeth had the right of it after all. Ajani walked the rest of the day, over rolling hills covered in scrub and up into the mountains, until he caught the familiar scent of leonin. His people had been here, recently. He was still hours away from what he remembered as their territory, but it wouldn't surprise him if they'd expanded. #figure(image("001_Ajani, Mentor of Heroes/01.jpg", width: 100%), caption: [Plains | Art by <NAME>], supplement: none, numbering: none) He made camp in the fading light, without stealth or subterfuge. His white fur would shine like a beacon in the darkness, and if there were leonin out here, they would find him. As the sun set, the stars came out, and Ajani gazed up, eager to see the show of light and color that played across the sky every night, stories of gods and mythic figures echoed in slowly moving constellations. The phenomenon was unique to Theros, and it was beautiful. But not tonight. Tonight, the stars were coldly indifferent, the space between them black and empty. Worse, in one corner of the sky, there was simply blankness, a void where no star shone. Ajani wondered what had happened, and what Elspeth would think of it. Elspeth. His thoughts kept returning to her. He knew she had left Dominaria, headed for the metal world of Mirrodin. Ajani knew the leonin there and had planned to visit them—primarily as an excuse to look for her, if he was being perfectly honest. Then other Planeswalkers had spread the word: #emph[Mirrodin is dead. Phyrexia has returned. For the sake of this world and every world, do not set foot there.] He feared for Elspeth, and even more for his friends on Mirrodin. But he kept up hope. Even when worlds died and skies darkened, there was always hope. Elspeth might still be alive. The thought was a comfort he clung to as he drifted off to sleep. 
#v(0.35em) #line(length: 100%, stroke: rgb(90%, 90%, 90%)) #v(0.35em) He awoke, blinking at the dawn, staring up at a massive axe wielded by a young female leonin with gray fur. Her ears were folded tightly back. The faintest rustle in the scrub told him there were two more leonin surrounding him. "Identify yourself," said the young female. "You are Seza," said Ajani, yawning, "and the last time I was here, you were too young to go out on patrol." Seza's eyes widened, and her ears sprang up. "Ajani?" He smiled. "White fur. One eye. Big axe. Who else?" She lowered her axe and smiled shyly. Ajani sat up, rubbing his eyes. "I had to be sure," she said. "There are strange things in the wilds these days." #figure(image("001_Ajani, Mentor of Heroes/02.jpg", width: 100%), caption: [Art by <NAME>], supplement: none, numbering: none) She signaled to her companions. "This is <NAME>, a friend of Oreskos." Two other warriors, also female, stepped out from behind Ajani. "Aletha!" he said, clapping a hand to the shoulder of the taller one. "And... forgive me. You are?" "Koila," said the third leonin. She looked to Seza, who nodded reassurance. "I grew up in the wilds, and came to Tethmos only recently." "Koila," he repeated, fixing the name in his mind, along with the image of her mottled golden fur and the notched scar above her nose. "Come on," said Seza. "Let's get you back to Tethmos. You should see what Brimaz has done with place." "Brimaz!" said Ajani. "Brimaz is king? And Seza leads patrols. Have I truly been away so long?" #v(0.35em) #line(length: 100%, stroke: rgb(90%, 90%, 90%)) #v(0.35em) The four leonin walked together through most of the day. Aletha had always been quiet, and Koila proved even more so, so the conversation was mostly Seza catching Ajani up on the happenings among the leonin of Oreskos. The queen, Omala, had died in battle with humans. Brimaz, her chosen successor, declined to seek vengeance, a controversial choice that had ultimately won him more friends than enemies. When Ajani had been here before, it was apparent that Brimaz might be king someday. Ajani had spent long hours in conversation with the young man about the importance of coexistence with humankind. Perhaps his words had taken root. At the border of the patrol's territory, his companions stepped aside for a brief conference in hushed tones. When it was over, Aletha and Koila waved farewell to him and turned back, while Seza gestured for him to keep walking. "I don't want to take you from your duty," said Ajani, although he was grateful for the company. "I know the way to Tethmos." Seza shook her head and fell into step beside him. "It's not that. Things have been dangerous lately. The minotaurs are ranging more widely than usual, and in greater numbers. And there's no saying what the humans will do, now that..." She gestured to the sunlit sky and faltered. "Do you know anything about that?" he asked. "Not much," said Seza. "One day, the gods just... went away. Angry at mortals, or so the humans say. All I say is good riddance." "And they left that ring of blank sky?" She stared at him for a moment, but on his previous visit he'd already earned a reputation for asking strange questions. "That showed up a few days ago," she said. "Our storyteller calls it the Moat of Oblivion, but he's as clueless as anybody else as to what it actually is." They were now well within the borders of Oreskos, the leonin homeland. 
Nonetheless, the sun had set and the strange, scattered stars had come out by the time they crested a rise and saw the watch-fires of the den of Tethmos, the largest single settlement of leonin on Theros. Most of the leonin were nomadic—most leonin everywhere were at least partially nomadic, enough so that Ajani considered it part of their nature—so even their largest settlement could hardly be called a city. Still, it was bigger than he remembered it, with less open space within its brightly painted walls. Seza and Ajani were no outsiders, and the gates were open. They walked through unchallenged. Seza spoke with a nearby guard, and the two were ushered into the king's hall. There was a fire roaring in the middle of the great hall and a boar roasting over it, the air filled with the succulent smell of seared meat. A group of leonin sat around the fire. The largest and youngest of them sat facing the entryway, and he rose when Ajani and Seza entered. Ajani was tall for his kind, but Brimaz was taller. The gangly adolescent of Ajani's memory had grown broad-shouldered and self-assured, with sharp features and a full, flowing mane. Brimaz wore fine clothes and a thin, jagged crown, concessions to his station, but the tracery of scars across his exposed shoulder marked him as a leader who did not shy from the front lines. "Brimaz, old friend!" Brimaz stepped forward, so that Ajani had to crane his neck to look up at the king's face. "That's King Brimaz, now," he rumbled. There was no sound but the crackle of the fire. #figure(image("001_Ajani, Mentor of Heroes/03.jpg", width: 100%), caption: [Brimaz, King of Oreskos | Art by <NAME>], supplement: none, numbering: none) Ajani stared into Brimaz's golden eyes. Few people could bring themselves to look directly into Ajani's pale blue right eye and the tangle of scar tissue that had once been his left, but Brimaz had never shied away. The corner of Brimaz's mouth twitched. Ajani let himself smile. Brimaz snorted, and soon the two leonin were chortling and embracing. "King, now, is it?" said Ajani, regarding the young monarch at arm's length. "Shall I call you 'your majesty?'" "Fah," said the king. "Brimaz will do. But I did have you worried there, didn't I?" "Not for a moment," said Ajani. "Of course not," said Brimaz, eyes twinkling. "Sit. Eat. I'm sure you've traveled far." Brimaz wasn't fully aware of Ajani's true nature as a Planeswalker, but he surely knew that the older leonin's origins, and his frequent travels, had nothing to do with any place the leonin of Oreskos had ever heard of. "It's good to be back," said Ajani. Ajani and Seza took their seats. One of the king's advisors peeled hunks of slippery meat off the boar roasting in the fire pit and handed them to the newcomers. Ajani gratefully bit into succulent pork, grease dribbling down his chin. "My thanks for your hospitality," he said around the mouthful of food. Beloved visitor or no, business before sustenance was an offense not even he would be forgiven. There were small conversations going on around the fire, none about matters of consequence. Only when Ajani was licking the grease from his paws did he speak. "As I said, it is good to be back." "But that is not why you return to us," said Brimaz, smiling. "You have come to counsel me, or ask me for something, or urge me to action. I recall the ways of <NAME>, stranger and friend." Ajani smiled. "You know me too well," he said. "Someday, perhaps, I will come here simply to see my friends. It would be worth the journey. 
On this occasion, I'm afraid you're right. I'm here looking for your help." "Ajani is a friend of Oreskos," said Brimaz. "What help do you need?" "I'm here looking for a friend of mine, a human," said Ajani. "Her name is Elspeth." "She is, like you, from far away?" Ajani nodded. "And she's in trouble?" "If she's here," said Ajani, "I hope that means she's no longer in trouble." "But if she is like you," said Brimaz, "I'd imagine trouble has a habit of finding her." Ajani nodded again. "You should talk to Lanathos," said Brimaz. "He is a human chronicler who's trying to learn about our ways." "Does that mean you've been taking my advice about reconciliation?" The rest of the leonin around the fire pit had been talking among themselves, content to let the king and his old friend speak in a semblance of privacy. Now they were quiet, ears cocked forward, listening. "I have, where it seems prudent," said Brimaz. "But as it happens, Lanathos came to us. Not everyone trusts his motives,"—a glance, there, to some around the fire—"but he is harmless. He tells us stories, and he listens to ours. I don't know why he finds us worthwhile company, but a human who listens to our grievances is refreshing enough to let him remain. He spoke with human traders recently, so perhaps he's heard news of your friend." "There is much I want to ask," said Ajani. "About your people, old friends, the sky..." "...but you are worried about your friend," said Brimaz. "Go. Speak to Lanathos. Catching up can wait." #v(0.35em) #line(length: 100%, stroke: rgb(90%, 90%, 90%)) #v(0.35em) Lanathos the Chronicler was an older man whose neck and lower face were covered in rippling burn scars. His white hair was cropped close, and he stood in front of a roaring fire and told his tale with the passion and intensity of a master storyteller. "...and so Polukranos returned!" cried Lanathos. "Knocked from his place in Nyx by the careless fury of Purphoros." Lanathos glanced meaningfully at the starry sky and exaggeratedly rolled his eyes. Such irreverence would land him in court in any of the human poleis. But here, the leonin chuckled. Either the man was playing to his audience, or he was every bit the iconoclast they were. Ajani wondered how different this story had been when Lanathos had heard it. "You all know what happened next—how Nylea and Heliod bound Polukranos beneath the earth, to slumber there forever and leave the mortal world in peace." Polukranos was some kind of massive celestial hydra who had to be bound in the stars to keep mortals safe. Ajani had heard the story of his fall and imprisonment the last time he was here. #figure(image("001_Ajani, Mentor of Heroes/04.jpg", width: 100%), caption: [Polukranos, World Eater | Art by Johann Bodin], supplement: none, numbering: none) "But, my friends, the gods have a different definition of 'forever' than you and I do," said Lanathos. "For Polukranos awakened to threaten all of Theros once more!" There were murmurs now of genuine alarm, as though Polukranos might charge over the horizon at any moment. Ajani smiled. The leonin were a good audience, when they deigned to be. "In eons past he devoured an entire city, and no less than a city would sate the hunger of his fifty heads," said Lanathos. "Meletis, now, would be the target of his savage might!" More of the crowd murmured. One cheered, but was swiftly shushed. It was Meletis that had enslaved the leonin, long ago. They considered it their true homeland, and many hoped to one day recover it. 
The thought of the city in peril, even in a story, seemed to cut both ways. "The earth shook with his steps and lakes formed in his footprints. He devoured everything in his path, rampaging from the Nessian Forest to the Plateau of the Four Winds, where the tyrant Agnomakhos was vanquished. Nothing stood between Polukranos and Meletis. Nothing... except the Champion of the Sun!" The crowd cheered. Ajani wasn't surprised to hear that this was a story from the #emph[Theriad] , about the half-mythical Champion of the Sun. Despite being blessed by the foremost of the human gods, the Champion was a popular figure among the leonin, and Ajani had heard many stories from the #emph[Theriad] on his previous visit. Perhaps it was because stories of the Champion, unlike the god-stories popular in human lands, focused on the acts of mortals rather than the quarrels of the gods. The Champion had Heliod's blessing, but her trials were her own. "There she stood!" said Lanathos. "Armor shining, spear held high, white cloak whipping in the four winds. She strode forward and shouted a challenge, and the hydra's fifty heads turned to face her. "Now, the Champion was a stranger to these lands. Meletis was not her city. But no one else was there to defend it, and the Champion would not let it fall. "She was a speck before the great body of Polukranos, whom even the gods could not kill. But she had a weapon, a weapon Polukranos had never faced: a spear, forged by Purphoros, blessed by Heliod, shining with the light of the sun and the will of the Champion. She leveled her spear and charged. "Polukranos saw this lone mortal who dared to face him, and fury awakened in him. He opened wide one slavering maw and swallowed the Champion whole!" The leonin gasped. Ajani frowned. Was this the end of the #emph[Theriad] ? Had they never heard it before? "Ah," said Lanathos, "but the Champion of the Sun was wise as well as mighty, and she proved a difficult morsel for Polukranos to devour. Just as he swallowed her, she lashed out with her spear and sliced the hydra's head nearly off its neck. She jumped to safety, and the head was maimed, but not fully severed. It could not grow back! #figure(image("001_Ajani, Mentor of Heroes/05.jpg", width: 100%), caption: [Art by <NAME>], supplement: none, numbering: none) "For days, they battled. Each time Polukranos, in his mindless rage, tried to swallow the Champion, she cut her way out and left another head useless. The gods watched, helplessly bound by the Silence of Kruphix, as a mortal killed their prized pet one head at a time." Ajani wondered if this "Silence" was something like what was happening in the sky now. It wouldn't surprise him if such things had happened before. The memory of Theros was long, and the gods unruly. "At last, the Champion faced the final head of the mighty Polukranos. He had learned that swallowing her would only lead to pain. He tried to step on her, but she set her spear to skewer his mighty feet. He lashed out with his tail, but she nimbly dodged. When he faltered, she dashed up his tail, ran across his back, climbed his last thrashing head, and drove her spear into his eye. "Polukranos, eater of poleis, beloved of the gods, was no more. All that remained on the silent plateau was the Champion of Heliod, her spear shining in the sunlight." Lanathos bowed expansively, and the leonin murmured appreciation. No doubt the storyteller understood by now that leonin were much more subdued than human audiences. 
As the crowd dispersed, Ajani made his way over to the chronicler. "A fine story," he said. Lanathos bowed. "My name is Ajani. The leonin of Oreskos count me as a friend, and I'm glad to see they consider you the same." Lanathos chuckled. "Friend to most," he said. "Pest, propagandist, and spy to others. As with anything, it depends on who's telling the story. But I seem to have King Brimaz's trust, and that's enough for most people." "It's enough for me," said Ajani. "I was hoping you could help me find a friend of mine, a human. Her name is Elspeth." "That's an unusual name," said Lanathos. "So's Ajani, when it comes to it. Where did you say you're from?" #emph[It's always the storytellers who ask] , thought Ajani. "Far away," said Ajani. "Over the mountains, and then some." He'd thought it an innocuous answer, but he could tell right away he'd said too much. Lanathos's bright eyes widened. "There are people across the mountains? Leonin and human? Have they cities? Temples? Knowledge of the gods?" Ajani held up a hand, and Lanathos paused in his breathless litany. "Later," said Ajani, "perhaps, I will answer your questions." Lanathos's face reddened—something like lowering one's ears in shame, Ajani had learned, although there were nuances of human expression he still didn't grasp. "Of course," said the chronicler. "You're looking for your friend." He rubbed his scar-mottled chin. "I'm sorry to disappoint you, but I'm the only human in all of Oreskos that I'm aware of." "Brimaz said you spoke with human traders recently. Did you hear anything from them about a newcomer to their lands, a solitary woman who spoke strangely and carried a sword?" Lanathos blinked. "Of course I did. You just heard the story they told me." Ajani's ears swiveled back of their own accord. Was the old human making a joke? "I am looking for my friend," he said. "Not the Champion of the Sun." "What makes you think your friend isn't the Champion?" Ajani let his ears fall entirely flat. "I heard stories of the #emph[Theriad] the last time I was here," he said. "About the Champion training with the centaurs, and living among the leonin. My friend is here now, and she never did any of that." "Ajani, you're speaking about the #emph[Theriad] as though it were about a single person." Ajani took a deep breath and let it out. "Yes," he said. "I am. Remember, I am from a distant land. If I've misunderstood, please, explain." "The #emph[Theriad] is about the Champion of the Sun," said Lanathos. "But that's not a person. It's a title, bestowed by Heliod on a worthy mortal in times of great need. The story of the #emph[Theriad] is the story of all of them, from the first, whoever that was, to the latest—the woman who slew Polukranos." #figure(image("001_Ajani, Mentor of Heroes/06.jpg", width: 100%), caption: [Art by <NAME>], supplement: none, numbering: none) Something didn't fit. "The latest," murmured Ajani. "You told that story as if..." His eyes widened. "#emph[How long ago?] " "Recently," said Lanathos. "Very recently. During the Silence of the gods. A month ago, perhaps slightly more." How quickly Theros transformed events into history, and history into myth! "In your story, the Champion fought with a spear," said Ajani. "I've never known Elspeth to carry one." Lanathos shrugged. "Details are like clothing; people change them to keep them fresh. When I heard the tale, it was a spear. When the traders who told it to me first heard it, perhaps it was a sword. These things change in the telling." 
"And what about the setting?" asked Ajani. "The Plateau of the Four Winds. Do you think she was truly there?" "Perhaps not on the plateau itself," said Lanathos. "I'd wager she really was near Meletis, though. Polukranos threatening the City of the Twelve is the soul of the story. But don't think she's there any longer." "What makes you say that?" "Another story I heard," said Lanathos. "After she slew Polukranos, the Champion was spotted at Akros, which has been besieged by a horde of minotaurs. Brimaz is aware of it, as a military situation, but I wasn't going to tell that story yet. I still don't know how it ends." "Thank you," said Ajani. "You've helped a great deal." "What are you going to do?" asked Lanathos. "Find her," said Ajani. "Help her, if she needs it." He smiled. "After all, the Champion always has companions."
https://github.com/Axot017/CV
https://raw.githubusercontent.com/Axot017/CV/master/modules_en/certificates.typ
typst
#import "@preview/brilliant-cv:2.0.2": cvSection, cvHonor #let metadata = toml("../metadata.toml") #let cvSection = cvSection.with(metadata: metadata) #let cvHonor = cvHonor.with(metadata: metadata) #cvSection("Certificates") #cvHonor( date: [2022 - 2025], title: [AWS Certified Developer – Associate], issuer: [Amazon Web Services (AWS)], ) #cvHonor( date: [2023 - 2026], title: [AWS Certified Solution Architect – Associate], issuer: [Amazon Web Services (AWS)], )
https://github.com/EpicEricEE/typst-plugins
https://raw.githubusercontent.com/EpicEricEE/typst-plugins/master/united/examples/ranges.typ
typst
#import "../src/lib.typ": numrange, qtyrange #set raw(lang: "typ") #set text(size: 14pt) #set table( inset: 0.7em, fill: (x, y) => if y == 0 { luma(230) } ) #set page( width: auto, height: auto, margin: 1em, background: pad(0.5pt, box( width: 100%, height: 100%, radius: 4pt, fill: white, stroke: white.darken(10%), )), ) #table( columns: 2, [*Input*], [*Output*], [`#numrange(2, 5)`], [#numrange(2, 5)], [`#numrange[1.2e2][1.8e2]`], [#numrange[1.2e2][1.8e2]], [`#numrange[1.25e2][5.3e3]`], [#numrange[1.25e2][5.3e3]], [`#qtyrange[36(1)][38][celsius]`], [#qtyrange[36(1)][38][celsius]], [`#qtyrange[10][1e8][cm^-1]`], [#qtyrange[10][1e8][cm^-1]], )
https://github.com/Area-53-Robotics/53E-Notebook-Over-Under-2023-2024
https://raw.githubusercontent.com/Area-53-Robotics/53E-Notebook-Over-Under-2023-2024/giga-notebook/entries/wedges-rebuild/identify.typ
typst
Creative Commons Attribution Share Alike 4.0 International
#import "/packages.typ": notebookinator #import notebookinator: * #import themes.radial.components: * #show: create-body-entry.with( title: "Identify: Wedges Rebuild", type: "identify", date: datetime(year: 2023, month: 11, day: 29), author: "<NAME>", witness: "<NAME>", ) At our last tournament we discovered a few issues with our wedges. While they were amazing for pushing other robots, they were not so good for pushing triballs. The wedges also do not cover the full length of the robot, and therefore do not protect the tracking wheels whatsoever. Having the wedges be able to do this would greatly simplify scoring, and would allow us to still score triballs that we don't successfully get across the barrier. #image("./identify.svg") = Design Goals - The design must be able to push triballs over the barrier - The design must cover the full front of the robot = Design Constraints - Design must be completely passive (no actuators) - Design must obey the functional plastic limit
https://github.com/polarkac/MTG-Stories
https://raw.githubusercontent.com/polarkac/MTG-Stories/master/stories/046%20-%20Streets%20of%20New%20Capenna/007_Episode%204%3A%20The%20Font.typ
typst
#import "@local/mtgstory:0.2.0": conf #show: doc => conf( "Episode 4: The Font", set_name: "Streets of New Capenna", story_date: datetime(day: 04, month: 04, year: 2022), author: "<NAME>", doc ) = MUSEUM These familiar halls, polished and carefully curated, were as sacred as a church to Xander. Tonight, on the eve of the Crescendo, he was determined to walk them all one final time and savor their splendor. Moments of peace were fleeting, and if his informants and assumptions were correct, there would be blood before the new year arrived. "This is one of my favorites." Xander paused before a statue of an angel cradling a babe. "Every time I look at it, I think of my own mother." If only he could remember anything about her. "It's beautiful." Anhelo humored him. Xander could feel his general's confusion as he followed Xander about the museum. Anhelo checked his timepiece. "My lord, if we are going to make the Crescendo on time, we should begin preparing for our departure." Xander didn't move. Instead, he continued to stare at the peaceful visage of the woman. Had there ever been a time in New Capenna where such peace and tranquility truly existed? Had there ever been a time in #emph[his] life? He chuckled softly to himself and murmured, "I'm going soft in my old age." "What was that?" "Nothing." Xander folded both hands on the top of his cane. The golden claws he had adorned his fingers with tonight clanked against the metal. "You should go to the Crescendo and check on Elspeth in my stead." As of the last update Xander received from her, she had done well infiltrating the Cabaretti. But no word yet on the Font. Still, he had every faith in her. There was something unique about Elspeth. He'd quickly gleaned she would never #emph[really] be one of the family. There was an air about her of greater purpose. Perhaps that was why he had ultimately decided to open his archives to her. "Are you not attending?" "Not this year." "But the Font—" "I should like to stay here." Xander interjected. "Xander, what troubles you?" Anhelo rested his hand lightly on Xander's elbow. The touch betrayed his deep worry. "You're not yourself tonight." "You fret too much." Xander patted Anhelo's hand. "You will serve me best by going in my stead to the Crescendo. We wouldn't want the Cabaretti mistaking my absence for a Maestro slight." "They will anyway, since I am not the leader of the Maestro." "But someday soon, you will be." "Pardon?" "Go on. We will discuss more on these matters after the new year," Xander encouraged. He had never outright planned succession with Anhelo. It had always been assumed, but in a distant, far-off manner. But there wasn't time any more to prepare, and Xander could only hope Anhelo was ready. "Tonight, you are to go and enjoy yourself at the Crescendo and report back on any strange happenings, that's an order." "You're sure?" Xander looked back to the statue, using the motion to glance out of the corners of his eyes without Anhelo noticing. #emph[They're close.] "I am very sure. Now, #emph[go] , Anhelo. Use the receiving door to leave." #emph[The back door most don't think or know of.] "As you wish." Anhelo bowed his head and set off. Xander watched him leave with a sigh of relief he didn't bother hiding. He stared back up at the statue as he waited. Would he meet his mother again once he had departed this long-occupied and aching mortal coil? Was there even enough soul left in this old vampire-demon that there could even be an "after" for him? Movement ended his musings. 
Xander turned to face the living shadows at the far end of the hall. Into that gloom, he asked, "Have you finally come to kill me, Adversary?" #figure(image("007_Episode 4: The Font/01.jpg", width: 100%), caption: [Art by: Matt Stewart], supplement: none, numbering: none) = CRESCENDO Elspeth stood on the dance floor of the Vantoleone with a platter in hand. Jinnie had kept true to her word. After a few menial tasks—not much different than the tests Xander had put her through—she had given Elspeth a position helping with the Crescendo. As a part of the floor team, Elspeth was well positioned to see and hear everything. Moreover, she could move unbothered. No one paid any of the uniformed staff any mind. They were as unimportant as the pennons hung against the pillars or the overgrown flowerpots at the base of each one. #figure(image("007_Episode 4: The Font/02.jpg", width: 100%), caption: [Art by: Kasia 'Kafis' Zelińska], supplement: none, numbering: none) Well, she didn't go #emph[entirely] unnoticed. "Elspeth," Jinnie approached, her usual entourage of Kitt and Giada flanking her. "I'm glad we ran into you. I hope you're finding the Crescendo worth your effort?" "And more." Elspeth forced a smile. "Just wait, the real fun hasn't even begun yet." Jinnie helped herself to two of the cheese-filled pastries on Elspeth's platter and handed one to Giada. Per usual, the teen remained silent, her eyes hungry for something her mouth couldn't fill. "I caught Kitt's performance already," Elspeth said. "And? What's your review? Spare me no details!" Kitt perked up. "It was lovely." "That's hardly a critique at all." Kitt groaned. "I'm afraid I'm not well versed in music so I can't give you much more than that." Elspeth offered an encouraging smile to placate her. "Speaking of performances, I need to check on something for ours," Jinnie said to Giada then turned to Kitt. "I could use your hands, if you don't mind?" "For you? Never." "Do you mind keeping an eye on Giada for a moment?" Jinnie asked Elspeth. Giada wasn't #emph[that] young to need a constant chaperone. But Elspeth had seen over the past few weeks how Jinnie doted on and coddled Giada. It was a relationship Elspeth had yet to fully make heads or tails of. She'd think it sweet and sisterly, if not for the perpetual discomfort that radiated off the young woman. "It's no trouble," Elspeth said. "Thanks, you're such a doll." Jinnie squeezed her shoulder and started off with Kitt. "Would you like another?" Elspeth asked, holding out the tray for Giada. "No, thank you." #emph[So, she ] could#emph[ speak.] "I'm afraid if I eat anything else I'll be sick." "Are you not feeling well?" Elspeth eased the platter away. "Nervous," Giada admitted. "This performance means so much to Jinnie, the Cabaretti, #emph[everyone] ." "What type of performance is it?" Elspeth worked to sound casual. A harmless question, nothing more. Giada glanced askance. "You'll see soon." "Sorry about that!" Jinnie promptly returned and took Giada's hand. "Are you ready to change the plane?" Giada didn't have a chance to respond before Jinnie tugged her away. Apprehension curdled Elspeth's stomach.#emph[ Something was wrong.] Every fiber of her being told her so. The sensation was further compounded when she saw Maestros begin to move behind Jinnie and Giada. She knew the look in the vampires' pale eyes. They were out for blood. Elspeth quickly stepped away from the main floor and into a back room where she set down the platter. 
A few of the other floor staff gave her questioning looks, but none stopped her as she dashed back out into the main hall of the Vantoleone. Giada was already on stage, Jinnie behind her. Four Cabaretti men manhandled a large empty bottle onto the stage, bringing it before Giada. The vessel was nearly as large as Giada herself. "This year, the Cabaretti promised a Crescendo unlike any other. Now, we will make good on that promise," Jinnie announced. Elspeth reached for the knife Xander had given her, hidden in her coat. It was the only weapon she had, and the only thing she could conceal in the attire the waitstaff were all required to wear. Elspeth hoped she wouldn't need it. Jinnie said something to Giada, and the teen stepped toward the enormous bottle. She took a bracing breath, a look of sheer determination overcoming her. Giada touched it with both hands. Light exploded. The room let out a collective gasp of surprise. Like the rest of them, Elspeth was left blinking away the blue haze that the blindingly bright light left behind. As the stage came back into focus, murmurs rippled across the crowd. None could believe what they saw, Elspeth included. The empty bottle was now filled with Halo. Tiny, golden bubbles rose from a cerulean bottom to a rosy top. Darker currents swirled within, like dusk filtering through water, and a distinct haze clouded the air around it, as if the condensed power within could barely be contained by its glass casing. The horrible truth of what she had just witnessed began to dawn on Elspeth. There was no way the bottle could've been swapped. It was far too massive for that. It would take far longer than a flash of light to fill it. "Your eyes don't deceive you." Jinnie responded to the collective skepticism of the room once she had managed to get Giada stable on her feet, though the young woman was still swaying slightly, visibly weary. "In front of you all, with no tricks or games, we have made Halo from #emph[nothing] . You are the first witnesses of a new order—an order that no longer knows limitations and is no longer held back by diminishing supply." She paused for emphasis; the room was enraptured. Jinnie motioned back to Giada. "Behold, the Font!" The crowd erupted with cheers. These people saw a resource, a tool, a solution to a problem plaguing them. All Elspeth saw was an exhausted and trapped young woman. Their "new era" would come at the cost of Giada's life. = MUSEUM Shadows clung to the man approaching Xander. They hung to every fold in his pristine pinstripe suit, every hammered adornment of the armored plate that framed his shoulders and chest. Former attendants and allies Xander might have once thought of as his own family rushed around the Adversary's bat-like wings, weapons drawn. The first thing he offered the turncoats in return was a bitter smile. Next, he would offer them their own hearts. The last few loyal officers in his employ were already cut down, no doubt. Old wounds and aching bones be damned, he would return that favor in kind, even if it was the last thing he ever did. He had readied himself tonight. Come what may, he wouldn't go down without a fight. The first of his former assassins charged. Xander shifted his weight entirely onto his good leg, threw one hand back, tossed his cane up with the other, and caught it by its end. Steel rang out against steel as he deflected the assassin's dagger. The other vampire's eyes went wide with shock. "#emph[I] gave you that dagger," Xander snarled. 
"Don't think I gave you a tool I didn't know how to best." With a twist of his wrist, he disengaged from the blade and brought the handle of his cane into the man's neck with a satisfying crunch. Another was coming for him. Xander slid his grip forward on the cane. He flicked his thumb and disengaged a secret locking mechanism. The hilt of the hidden blade popped loose of its scabbard. Xander grabbed it, slashing through the air and sending a spray of ash across the room. He used the death as a distraction, strategically retreating into his museum. Others were already giving chase. But Xander knew the pathways better than any of his people. He had been the one who built this place. Its father. Its curator. In the narrow access halls between the galleries, he could prevent himself from being flanked, picking them off one by one as they came to him. He might have them beat in skill, but he was still an old man. He could tolerate pain, but he didn't have the stamina to take them on all at once. Xander crossed through the main hall of the museum. Shots rang out and bolts of magic chased his heels, leaving pockmarks in his marble floors. The quarry for this stone was no longer in operation; his pristine haven would never be the same after tonight. He made it to a stairwell, gritting his teeth and ascending as fast as he could. Assassins scampered behind him, but Xander's knowledge of the museum continued to pay off and he managed to stay ahead. He burst through a door, wind accosting his face the moment he took in the crisp air of Park Heights. Breathless, Xander spun and barricaded the door to the viewing deck. There wasn't much space on the balcony. It was meant to be a small sculpture garden, a reprieve to take in the splendor of the skyline among some pieces from the museum's collection. Xander walked to the edge, catching his breath. #emph[How he loved this view.] Mighty wings buffeted behind him. With a rumble, the Adversary landed. "Did you really think you could escape me?" His voice was gravel rattling against pure hate. "To a balcony!" He howled with laughter, drawing air into his lungs to stretch out his chest and expand his wings for emphasis. "Hardly." Xander turned to face him. "I thought without your hired help we might have a fair fight." "I don't play fair." Xander didn't either. He lunged forward without warning. The Adversary went to deflect the sword with his bare hand, mouth set in a snarling grin of delight. Xander feinted, disengaged underneath the Adversary's wrist, and twisted to bring the point up right in the soft spot underneath the Adversary's jaw—the only vulnerable flesh exposed on his armored body. But the Adversary was faster. Perhaps enhanced by Halo. Perhaps by an evil that was greater than Xander could ever imagine. Xander never saw the Adversary lift his other hand to point at him. The #emph[bang] of magic reverberated through Park Heights. But the last thing Xander heard as he staggered away was the clattering of his trusted sword, the tool of his infamy, slipping from his fingers for the last time. His foot met open air, and he plummeted into the clouds below. #figure(image("007_Episode 4: The Font/03.jpg", width: 100%), caption: [Art by: <NAME>], supplement: none, numbering: none) = CRESCENDO The cheers of the masses turned into screams. All around them, revelers threw off the colors and signets of their family and reached for their weapons. No family was exempt from infiltration by the Adversary. 
Elspeth drew the knife Xander had given her and began pushing through the crowd. The Cabaretti on the stage dropped the large bottle of Halo, the precious substance spilling a rainbow waterfall around Jinnie's feet. The Halo bubbled as if boiling, evaporating as starbursts and rings. "Jetmir!" Jinnie called out over the crowd. Giada, forgotten, inched toward the back of the stage, gloved hands covering her mouth in shock. "Jetmir!" Jinnie's eyes landed on a distant point, halfway through the room. Elspeth followed her gaze, seeing the horned leonin who ruled the Cabaretti, cornered and outnumbered. Jinnie leapt into the fray, weapons drawn, leaving Giada behind. Shock and rage coursed through Elspeth, and she channeled the emotions into a quick jab at a would-be attacker, rendering him harmless in an instant. Giada was their #emph[precious] #emph[Font] . She was giving her life for the Cabaretti and their Halo. And for what? To be abandoned. Sure, Jinnie had left Cabaretti agents with her. But they weren't enough to handle the seven others scrambling up the stage. Throwing elbows, squeezing past people, leaping over bodies, Elspeth pushed her way through the blood and chaos to the stage. She vaulted onto the platform as the last of the Cabaretti guards fell, turned into pincushions by the Adversary's loyalists now advancing on Giada. Elspeth didn't waste a second. She brought the butt of her knife down onto the back of the neck of one on her right. The man on her left tried to jab for her, but Elspeth was prepared for the motion and grabbed him by the wrist, clutching in such a way that his fingers went limp. She spun as the third lunged for her, tossing the man into the new attacker. "Giada," Elspeth said calmly, despite the rising chaos. She knelt down before Giada, looking into her dark eyes, not unlike Elspeth's own. "Would you like me to take you from here?" Giada inhaled slowly, the breath quivering at the end. "Yes." She spoke for the first time in what sounded like a hopeful manner. "I know a back door." Elspeth nodded and stood, glancing over her shoulder at their assailants, rousing with groans. More had noticed them on the stage and were beginning to charge. "This way." Giada tugged her hand, and Elspeth followed her stage right. They ran past the panels of forest-green curtains and deep into the wings. The heavy velvet muffled their steps. But it also concealed those giving chase. Elspeth strained her hearing to listen for their pursuers. "Get down!" Elspeth put her weight in her heels, holding onto Giada's hand. She spun the teen toward her, wrapping an arm around her shoulders and bringing Giada to the ground with her as a blade ripped through the curtain at their right. Elspeth released Giada, rising. Judging from the slash, she could make a confident assessment as to the build of their attacker. And when her dagger sank into a man's gut with a gurgle, she knew she was right. Stepping back, Elspeth gave Giada a nudge. "Keep going." If Giada was shaken, she didn't let it show. She dashed off into the darkness with Elspeth close behind. She supposed Giada had seen far worse throughout her time with the Cabaretti. The thought filled her with a profound sorrow. Giada was just a child. What had her life been like so far? Elspeth doubted it had been anything close to good. She knew a prison when she saw one, even if there were no bars and the wardens were dressed in sequins and gold. 
"Over there," Giada whispered, pointing to the left as they emerged from the curtains that hung along the wings. Elspeth followed her through stacked props and instrument cases. Two more men caught up with them, but Elspeth quickly dealt with them both. If she had time, she would hide the bodies to cover their trail, but it was best for them to keep moving. Once they were out, they could slip into the city proper and get lost among the populace. Giada grunted as she pushed open a heavy backstage door. Elspeth put her shoulder in to help. The door hadn't been used in some time and squealed loudly as it permitted them access to an alleyway. The goons at the far end caught sight of them before they'd taken more than two steps, no doubt alerted to their presence by the noise of the hinges. Elspeth cursed their luck. Had they still been facing the other way, she might have managed to get the drop on them. #figure(image("007_Episode 4: The Font/04.jpg", width: 100%), caption: [Art by: <NAME>], supplement: none, numbering: none) "There's more coming from inside," Giada said, glancing back toward the theater. "I know, stay close." Elspeth held out her knife. What she wouldn't give for a spear or sword. "Looks like we have two escapees," one of the guards said. "The Adversary told us no one is making it out of there alive—sorry, ladies." The other cracked his knuckles. "Should we call for reinforcements?" "No, I think we can handle these two on our own." Elspeth could tell by their smirks that these guards underestimated her. "I'd like to see you try." Without warning, a flash of green blazed through the air from somewhere behind them. Charged with magic and cool green flame, the arrow exploded with a howl into a ghostly viridian wolf. It landed on the back of one of their first would-be attackers, driving him to the ground. The man yelled an obscenity, trying to twist away, but the spectral wolf sunk in its claws. #figure(image("007_Episode 4: The Font/05.jpg", width: 100%), caption: [Art by: <NAME>], supplement: none, numbering: none) "What the—!" His companion didn't have time to finish. Two more shots were fired, summoning two more wolves. The green-hued beasts made quick work of the two goons, their sharp teeth more effective than Elspeth's knife could ever be. Giada clung to Elspeth's side, half hidden as the creatures turned toward the two of them. Emerging from their spectral haze was Vivien. "I told you I would find you again." "Vivien." Elspeth heaved a sigh of relief and then looked to Giada. "Vivien's an ally; she can be trusted." Elspeth hoped. Vivien's expression briefly touched on surprise, but she didn't object to Elspeth's claims. "I have news for you, on the matter we last discussed. But first, let's get somewhere safe." Elspeth and Giada were halfway down the alley, nearly to Vivien, when the door to the theater blasted open behind them, launched off its hinges. The metal echoed loudly as it skidded to a stop. Jinnie, Jetmir, and a group of Cabaretti walked out from the smoking doorway. "Oh, thank goodness, Elspeth," Jinnie heaved a sigh of relief. "Thank you for keeping Giada safe." Elspeth gave a wary nod, continuing to inch backward. Giada followed her lead, moving in tandem with Elspeth. She glanced down at the teen, who gave her a worried look in reply. The same dread that filled Elspeth when Giada had taken the stage returned in force. "Come, we have a secret passage out through the theater. Bring Giada here." #emph[No.] 
The word rose clear and true, resonated from the very depths of Elspeth's being. Under no condition should she ever bring Giada back to these people who would use and imprison her. But it wasn't Elspeth's choice. Elspeth locked eyes with Giada, trying to communicate without words. #emph[I will do what you desire] , she thought, but didn't say so outright. She wasn't going to boldly go against the Cabaretti before knowing Giada's wishes. #emph[What do you want?] Giada's hand slipped into hers and she gave a light tug, much like she had when they'd started her flight. "We're going somewhere safe. We'll return when things are quiet," Elspeth called back. "#emph[We ] are 'somewhere safe.'" Jinnie's usually jovial nature began to slip away. Underneath was the woman who had earned her place as the adoptive daughter of Jetmir and his right hand. No one climbed as high as she did in New Capenna without leaving a trail of blood behind. "It's better if we split up, for now." "Bring Giada to me." Jinnie began to stalk forward. Elspeth's grip tightened on her knife. She didn't want to fight Jinnie. The woman hadn't been unkind to her, but she was also the one who would turn Giada into a tool. Before Elspeth could make up her mind—fight or flight—a green arrow arced overhead. It exploded into a beast far greater than a wolf. Clawing itself out of the viridian fire, the loops of a scaled dragon filled the alleyway. Two mighty wings stretched up to the very tops of the nearby buildings. Elspeth could only see the back of the spectral dragon, but if its front was even half as imposing, there was no way Jinnie was going to get to them. Elspeth looked over her shoulder at Vivien, who lowered her bow. Green-fletched arrows smoldered in her quiver, illuminating the braids on the side of her head. "Shall we?" Vivien gave a slight smirk. The three of them ran into the night. = MUSEUM "Not so smug now, are you?" Ob Nixilis wiped ash off his hands. The mighty head of the Maestros, the infamous Xander, #emph[master assassin] . Not the master of anything now. Ob Nixilis snorted and spit on the mutilated body that lay at his feet. The vampire was hardly recognizable after the fall, especially after Ob Nixilis vented some of his frustrations at the vampire's frustrating tenacity. "Boss." A young man ran out of the building, stopping short when he laid eyes on the carnage. He was a Maestro. Ob Nixilis wished he could hear what was going through the man's head as he laid eyes on the mutilated body of the assassin he'd once so revered. "What?" Ob Nixilis demanded. "I~" The young man swallowed hard and tore his eyes from the remnants of Xander. "I have word from the Crescendo." Ob Nixilis could tell by the man's shifting gaze and uncertain stance that the news would not be good. And if he was going to be given bad news, then he would sweeten it with a bit of torment first by making the kid sweat. "Good. Tell me of my triumph." "Everyone was in place, just as you ordered. We quickly took control of the situation in the Vantoleone." "But?" Ob Nixilis encouraged, allowing his voice to be dangerously quiet. "But~" He fought for words, crumbling under the weight of Ob Nixilis's stare. "The Font escaped." Ob Nixilis grabbed the man by the throat and hoisted him into the air. He swung like a ragdoll, kicking helplessly, clawing instinctively at the bracers Ob Nixilis wore. "Tell me, how did the Font escape if everyone was 'where they should be?'" "We—we don't—there was another—unaccounted for," the man wheezed. 
Ob Nixilis tightened his grip slightly and briefly fantasized about squeezing so hard the man's head would pop off like a cork. But he released his grip. He still needed these worthless amateurs to do his dirty work. The man fell to the ground, clawing at his already bruising neck, gasping for air. Ob Nixilis looked back to the remains of Xander. He grimaced, but not at the carnage he'd wrought. No~he was disgusted with himself. He'd missed something. Even from beyond the grave, Xander had played him. "No matter," he snarled. "I'll find it, even if I must burn this city to the ground." "Boss, the Cabaretti are all but neutralized—" Four other officers rushed out from the museum, stopping a few steps from their gasping friend. "We see you already know." "Yes, I have been informed of your #emph[failure] ." Ob Nixilis forced the words through teeth clenched so tightly his jaw popped. "The Maestros are in hand. What of the others?" "Jetmir escaped but was gravely wounded. The Cabaretti will crumble once he falters—and we'll see he does," one reported. "The Brokers and Obscura have crawled into their holes and shadows, though we pursue them as well. The Riveteers are thoroughly compromised. The fighting is hot in the Caldaia; we'll have the situation in our control soon." Ob Nixilis cracked his knuckles and rolled his head. "I want the families' heads. #emph[All] of them. Bring me every last one, attached at the neck or not." "And the Font?" the only one of his generals still brave enough to speak asked. "Two of you—lead the hunt for Jetmir and Jinnie. If anyone will know how to track down the Font, it'll be them. The other two, form search parties of our own for the Font. And #emph[when] you find it, bring it to me, and your cups will never be empty and your families will want for nothing." = STREETS OF NEW CAPENNA The city was on fire. The families were at war. #figure(image("007_Episode 4: The Font/06.jpg", width: 100%), caption: [Art by: <NAME>], supplement: none, numbering: none) Elspeth, Vivien, and Giada fled through the streets, dashing up iron stairwells and onto rooftops as they raced through the Mezzio. "It never fails to astound me how quickly things spiral out of control once a balance is upset." Vivien appraised as they caught their breath. "Where are we going?" Giada asked. Elspeth wracked her brain. The early places she'd holed up in on arrival were now burning. The Maestros weren't an option, either. "I have an idea." Elspeth recalled her second mission for Xander. "There's a warehouse not too far from here—it's abandoned. It #emph[might] be safe." "I'm not sure why you bother," Vivien said under her breath for only Elspeth to hear. "These family feuds aren't the real threat." "I don't care about the families right now." Elspeth turned to hide her face from Giada so their companion wouldn't hear. "I care about a young woman who's in danger." #emph[Who deserves so much more than she's ever gotten.] Elspeth would've given anything when she was trapped and helpless for the first years of her life to have someone fighting for her. No one had been there, then. Filling that role for Giada now felt like breaking a cycle, like giving hope to the scared, trapped girl that still existed deep within Elspeth's soul. "Then we see her safe and disengage." Vivien folded her arms. "You and I have more pressing matters to deal with. Things that only people like us can handle." Elspeth had a thousand burning questions about what Vivien had found but kept them to herself for now. 
There would be time to ask when they were no longer fleeing for their lives. "Am I clear?" Vivien asked, direct but not unkind. "Perfectly. We see Giada safe and then we move on." "Good. Now, lead on to this warehouse." "I will, but first, I'm going back to the Maestro headquarters." Elspeth started toward a nearby ladder. "I need a better weapon than a knife. You two wait here until I return. If something goes awry, we meet at the bench in Park Heights." "Understood." Vivien knew exactly what bench Elspeth meant. "What?" Giada grabbed her hand. "I'm going where you go." "I don't think it's a good idea. It's too risky to bring you into another family's den," Elspeth said gently. Giada considered it a moment, released Elspeth, then said, "You're right. I'll wait here with Vivien." Elspeth managed to make it to one of the civic elevators without major issue. There were a few opportunists who made the mistake of trying to attack her along the way, but Elspeth could handle them with ease. She knew something was wrong the moment she approached the museum. There were no guards out front. Ash dusted the entry stairwell, turning it as pale as bone. The Adversary's attack wasn't just on the Crescendo tonight. Fortunately, her time cataloging all the various pieces in Xander's collection had given her an in-depth knowledge of the museum. She used a side receiving door, smashing in the lock with her knife and rendering the blade useless. She was committed to finding a new weapon now. Elspeth crept through the dark halls. The stink of death was heavy in the air. She could almost feel an ominous presence stalking the corridors in the form of enemies she had yet to meet and was determined not to. She hid the second she heard footsteps, then used two connected rooms to work her way around the noise and into one of the armories. #emph[What weapon to pick?] Elspeth scanned her choices. There were daggers, whips, #emph[swords] . Voices froze her in place. "You don't think it was really the new recruit, do you? The one Xander seemed to take a shining to?" Elspeth's stomach churned. The way the man spoke about Xander, the state of the museum~she knew what had happened here. Her hands balled into fists. "It looked like her." "The Adversary has deemed her as target number one, and the Cabaretti say she's out, too. Doesn't matter if she's the one who took the Font or not, she's got nowhere to turn; she'll be dead soon enough." They were all after her now, starting with the two quickly approaching the armory. She steeled her resolve and turned toward the weapons. Hiding or playing down her skill and powers from now on would be pointless. A sword it would be. She didn't have time to carefully assess which sword was the best weight for her frame. She ran her hands over the hilts, wondering if one of these was the weapon Xander had alluded to when they'd last spoken. The voices were nearing. She picked one on nothing but instinct and fled into the night. The way those men talked made her sound like an easy target. She was going to prove them all wrong. #figure(image("007_Episode 4: The Font/07.jpg", width: 100%), caption: [Art by: <NAME>], supplement: none, numbering: none) #v(0.35em) #line(length: 100%, stroke: rgb(90%, 90%, 90%)) #v(0.35em) #grid( columns: (1fr, 1fr), gutter: 2em, figure(image("007_Episode 4: The Font/08.png", height: 40%), caption: [], supplement: none, numbering: none), figure(image("007_Episode 4: The Font/09.png", height: 40%), caption: [], supplement: none, numbering: none), )
https://github.com/Myriad-Dreamin/typst.ts
https://raw.githubusercontent.com/Myriad-Dreamin/typst.ts/main/fuzzers/corpora/text/baseline_00.typ
typst
Apache License 2.0
#import "/contrib/templates/std-tests/preset.typ": * #show: test-page Hi #text(1.5em)[You], #text(0.75em)[how are you?] Our cockatoo was one of the #text(baseline: -0.2em)[#box(circle(radius: 2pt)) first] #text(baseline: 0.2em)[birds #box(circle(radius: 2pt))] that ever learned to mimic a human voice.
https://github.com/francescoo22/masters-thesis
https://raw.githubusercontent.com/francescoo22/masters-thesis/main/chapters/5-Annotation-System.typ
typst
#import "../config/utils.typ": * #import "../vars/rules/base.typ": * #import "../vars/rules/relations.typ": * #import "../vars/rules/unification.typ": * #import "../vars/rules/statements.typ": * #import "../vars/kt-to-vpr-examples.typ": compare-grammar-kt #pagebreak(to:"odd") = Annotation System<cap:annotation-system> This chapter formalizes the uniqueness system that was introduced in @cap:annotations-kt. While inspired by prior works @aldrich2002alias @boyland2001alias @zimmerman2023latte, it introduces several significant improvements. This system is designed for being as lightweight as possible and gradually integrable with already existing Kotlin code. The main goal of the system is to improve SnaKt's verification process by adding aliasing control to Kotlin, thereby establishing a connection to separation logic in Viper. == Grammar In order to define the rules of this annotation system, a grammar representing a subset of the Kotlin language is used. This grammar captures the specific syntax and features that the system needs to handle. By focusing on a subset, the rules can be more clearly defined and easier to manage, while many complex features of the language can be supported through syntactic sugar. #frame-box( $ P &::= overline(CL) times overline(M) \ CL &::= class C(overline(f\: alpha_f)) \ M &::= m(overline(x\: af beta)): af {begin_m; s ; ret_m e} | m(overline(x\: af beta)): af \ af &::= unique | shared \ beta &::= dot | borrowed \ p &::= x | p.f \ e &::= null | p | m(overline(p)) \ s &::= var x | p = e | s_1 ; s_2 | fi p_1 == p_2 then s_1 els s_2 | m(overline(p)) $ ) #v(1em) Classes are made of fields, each associated with an annotation $alpha_f$. Methods have parameters that are also associated with an annotation $alpha_f$ as well as an additional annotation $beta$, and they are further annotated with $alpha_f$ for the returned value. The receiver of a method is not explicitly included in the grammar, as it can be treated as a parameter. Similarly, constructors are excluded from the grammar since they can be viewed as methods without a body returning a unique value. Overall, a program is simply made of a set of classes and a set of methods. The annotations are the same that have been introduced in the previous chapter, the only difference is that `Borrowed` is represented using the symbol $borrowed$. Finally, statements and expressions are pretty similar to Kotlin. The runtime semantics of this grammar is not formalized in this work, as it corresponds to the expected semantics for an imperative language. Moreover, annotations do not impact the runtime behavior of the program. #compare-grammar-kt == General #display-rules( M-Type, "", M-Type-2, "", M-Args, "", M-Args-2, "", F-Default, "" ) Given a program $P$, M-Type rules define a function taking a method name and returning its type. Similarly, M-Args rules define a function taking a method name and returning its arguments. In order to derive these rules, the method must be contained within $P$. For simplicity, it is assumed that in $P$, fields within the same class, as well as across different classes, have distinct names. This assumption simplifies the definition of the F-Default rule, which defines a function that returns the type of a given field. 
#example[ Given a method: $ m(x: unique borrowed, y: shared): unique $ The type and the arguments of $m$ are the following: $ mtype(m) = unique borrowed, #h(.5em) shared -> unique \ args(m) = x, y $ ] == Context A context is a list of distinct paths associated with their annotations $alpha$ and $beta$. While $beta$ is defined in the same way as in the grammar, $alpha$ is slightly different. Other than unique and shared, in a context, an annotation $alpha$ can also be $top$. As will be better explained in the following sections, the annotation $top$ can only be inferred, so it is not possible for the user to write it. A path annotated with $top$ within a context is not accessible, meaning that the path needs to be re-assigned before being read. The formal meaning of the annotation $top$ will become clearer when the statement typing rules are formalized. #frame-box( $ alpha &::= unique | shared | top \ beta &::= dot | borrowed \ Delta &::= dot | p : alpha beta, Delta $ ) #v(1em) Apart from $top$, the rest of the annotations are similar to the annotations in the previous section. A reference annotated as unique may either be `null` or point to an object, with no other accessible references to that object. In contrast, a reference marked as shared can point to an object without being the only reference to it. The annotation $borrowed$ (borrowed) indicates that the method receiving the reference will not create additional aliases to it, and upon returning, the fields of the object will have at least the permissions specified in the class declaration. Finally, annotations on fields only indicate the default permissions; to determine the actual permissions of a field, the context must be considered, a concept that will be formalized in the upcoming sections. == Well-Formed Context #display-rules( Not-In-Base, Not-In-Rec, Ctx-Base, Ctx-Rec, ) This first set of rules defines how a well-formed context is structured. The judgment $p in.not Delta$ is derivable when $p$ is not present in the context. If the judgment $Delta ctx$ is derivable, the context is well-formed. In order to be well-formed, a context must not contain duplicate paths and must be finite. #example[ Given a context: $ Delta = x: unique, space x.y: shared $ The following judgments are derivable: $ y &in.not Delta \ x.f &in.not Delta \ x.y.z &in.not Delta $ ] #example[ Given the following contexts: $ Delta_1 &= x: unique, space x.y: shared \ Delta_2 &= x: unique, space x.y: shared, space x: shared \ Delta_3 &= x: unique, space x.y: shared, space x.y: top $ The judgment "$Delta_1 ctx$" is derivable, meaning that $Delta_1$ is a well-formed context. However, the judgments "$Delta_2 ctx$" and "$Delta_3 ctx$" are not derivable, meaning that $Delta_2$ and $Delta_3$ are not well-formed contexts. Indeed, they are not well-formed because $x$ appears twice in $Delta_2$ and $x.y$ appears twice in $Delta_3$. ] == Sub-Paths and Super-Paths === Definition #display-rules( SubPath-Base, SubPath-Rec, SubPath-Eq-1, SubPath-Eq-2, ) This set of rules is used to formally define sub-paths and super-paths. #example[ Given two paths $x.y$ and $x.y.z$, the following judgment is derivable: $ x.y subset.sq x.y.z $ We say that: - $x.y$ is a sub-path of $x.y.z$ - $x.y.z$ is a super-path of $x.y$ ] === Remove #display-rules( Remove-Empty, Remove-Base, Remove-Rec, "", ) Remove rules are used to define a function taking a context and a path and returning a context. 
$ \_without\_ : Delta -> p -> Delta $ The function returns the context without the specified path if the path is contained in the context, and the original context otherwise. #example[ Given a context: $ Delta = x: shared, space x.f: shared $ Remove has the following results: $ Delta without x.f = x: shared \ Delta without x = x.f: shared \ Delta without y = x: shared, space x.f: shared $ ] === Deep Remove #display-rules( Remove-SuperPathsEq-Empty, "", Remove-SuperPathsEq-Discard, "", Remove-SuperPathsEq-Keep, "", ) Deep-Remove rules define a function similar to Remove ($without$) that, in addition to removing the given path from the context, also removes all the super-paths of that path. $ \_minus.circle\_: Delta -> p -> Delta $ #example[ Given a context: $ Delta = x: unique, space x.y: unique, space x.f: unique, space x.y.z: unique $ Deep Remove has the following result: $ Delta minus.circle x.y = x: unique, space x.f: unique $ ] === Replace #display-rules( Replace, "", ) This rule gives the definition of a function that will be fundamental for typing statements. The function takes a context, a path $p$ and a set of annotations $alpha beta$ and returns a context in which all the super-paths of $p$ have been removed and the annotation of $p$ becomes $alpha beta$. $ \_[\_|->\_] : Delta -> p -> alpha beta -> Delta $ #example[ Given a context: $ Delta = x: unique, space x.y: unique, space x.y.z: unique $ Replace has the following result: $ Delta[x.y |-> top] = x: unique, space x.y: top $ ] === Get Super-Paths #display-rules( Get-SuperPaths-Empty, "", Get-SuperPaths-Discard, "", Get-SuperPaths-Keep, "", ) Finally, Get-Super-Paths rules are used to define a function that returns all the super-paths of a given path within a context. This function will also be used in the typing rules for statements. $ \_ tr sp(\_) : Delta -> p -> overline(p : alpha beta) $ #example[ Given a context: $ Delta = x: unique, space x.y: unique, space x.y.z: unique $ Getting super-paths has the following result: $ sp(x.y) = x.y.z: unique $ ] == Relations between Annotations === Partial Ordering<cap:PO> #display-rules( A-id, A-trans, A-bor-sh, A-sh, A-bor-un, A-un-1, A-un-2, "", ) This set of rules defines a partial order between the annotations. This partial order can be represented by the lattice shown in @annotation-lattice. The meaning of these relations is that if $alpha beta rel alpha' beta'$, then $alpha beta$ can be used where $alpha' beta'$ is expected, for example in method calls. Thanks to these rules, it will be correct to pass a unique reference to a method expecting a shared argument, but not vice versa. Moreover, the relations are consistent with the definition of $top$, since it will not be possible to pass an inaccessible reference to any method. #v(1em) #figure(image(width: 35%, "../images/lattice.svg"), caption: [Lattice obtained by Rel rules])<annotation-lattice> === Passing<cap:passing> #display-rules( Pass-Bor, Pass-Un, Pass-Sh, "" ) Pass rules define what happens to the annotations of a reference after passing it to a method. If derivable, a judgment $alpha beta ~> alpha' beta' ~> alpha'' beta''$ indicates that after passing a reference annotated with $alpha beta$ to a method expecting an argument annotated with $alpha' beta'$, the reference will be annotated with $alpha'' beta''$ after the call. 
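To build intuition for how these judgments read, the following instances are illustrative only; the Pass-Bor, Pass-Un and Pass-Sh rules themselves remain the authoritative definitions. #example[ Passing a unique reference where a shared argument is expected leaves the caller with a shared reference; passing it where a non-borrowed unique argument is expected consumes it, leaving it inaccessible; passing it to a unique borrowed parameter preserves uniqueness, since the callee promises not to create further aliases: $ unique ~> shared ~> shared \ unique ~> unique ~> top \ unique ~> unique borrowed ~> unique $ ]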
However, these rules are not sufficient to type a method call statement, since passing the same reference more than once to the same method call is a situation that has to be handled carefully. Nonetheless, the rules are fundamental to express the logic of the annotation system and will be used for typing method calls in subsequent sections. == Paths <cap:paths> === Root #display-rules( Root-Base, Root-Rec, ) This simple function takes a path and returns its root. The function can simplify the preconditions of more complex rules. For example, $root(x.y.z) = x$. $ root : p -> p $ #example[ $ root(x.y.z) = x \ root(y.z) = y \ root(z) = z $ ] === Lookup<cap:lookup> #display-rules( Lookup-Base, Lookup-Rec, Lookup-Default, "", ) Lookup rules define a (partial) function that, given a well-formed context, returns the annotations associated with a given path. When the path is explicitly contained within the context, the function returns the corresponding annotation. If a field access ($p.f$) is not explicitly present in the context, the function returns the annotations specified in the class declaration containing $f$. This concept, formalized by Lookup-Default, is crucial as it ensures that contexts remain finite, even when handling recursive classes. However, if a variable ($x$) is not present in the context, its lookup cannot be derived. It is important to note that the lookup function returns the annotations associated with a path based on the context or the class declaration, rather than determining the actual ownership status of that path. $ \_inangle(\_): Delta -> p -> alpha beta $ #example[ Given a context: $ Delta = x : shared, space x.f : unique $ The result of the lookup for $x.f$ is the following: $ Delta inangle(x.f) = unique $ However, since $x$ is shared, there can be multiple references accessing $x$. This implies there can be multiple references accessing $x.f$, meaning that $x.f$ is also shared. A scenario like this can occur when, starting from a context containing only $x : shared$, a unique value is assigned to the field $x.f$. A function able to determine the actual ownership of a path is defined in the subsequent section. ] #example[ Given class $C$, context $Delta$ and variable $x$ such that: $ class C(f: shared) in P \ Delta = x : unique $ The result of the lookup for $x.f$ is the following: $ Delta inangle(x.f) = shared $ Since $x.f in.not Delta$, the lookup returns the default annotation, which is the one declared in the class signature. ] === Get #display-rules( Get-Var, Get-Path, ) As described in the previous subsection, the lookup function might not return the correct annotation for a given path. The task of returning the right annotation for a path within a context is left to the (partial) function described in this section. $ \_(\_) : Delta -> p -> alpha beta $ If the given path is a variable, the function returns the same annotation as the lookup function. Otherwise, the function returns the least upper bound ($lub$) of the lookups of the given path and all its sub-paths. The LUB of a set of annotations can easily be obtained by using the partial order described in @cap:PO. It is important to note that if $Delta(p) = alpha beta$ is derivable for some $alpha beta$, then the root of $p$ is contained inside $Delta$. 
This is relevant because many rules in the subsequent sections will use the judgment $Delta(p) = alpha beta$ as a precondition, and it thus also guarantees that the root of $p$ is contained inside $Delta$.

Furthermore, in the rule Get-Path, the premise $Delta inangle(p.f) = alpha'$ does not pair a $beta'$ annotation with $alpha'$. This omission is intentional because, by the design of the subsequent typing rules, a field access lookup should never result in a borrowed annotation. In any case, the $beta$ annotation of a field access is determined solely by its root.

#example[
Given a context:
$ Delta = x: unique, space x.y: top, space x.y.z: shared $
The annotation that is returned for the variable $x$ is the same as the one returned by the lookup.
$ Delta(x) = Delta inangle(x) = unique $
The annotation returned for the path $x.y$ is the LUB between the lookup of $x.y$ and that of all its sub-paths.
$ Delta(x.y) &= Lub{Delta inangle(x), Delta inangle(x.y)} \
&= Lub{unique, top} \
&= top $
Finally, the annotation returned for the path $x.y.z$ is the LUB between the lookup of $x.y.z$ and that of all its sub-paths.
$ Delta(x.y.z) &= Lub{Delta inangle(x), Delta inangle(x.y), Delta inangle(x.y.z)} \
&= Lub{unique, top, shared} \
&= top $
]

=== Standard Form

#display-rules(
  Std-Empty, Std-Rec-1,
  Std-Rec-2, "",
)

If the judgment $Delta tr std(p, alpha beta)$ is derivable, then inside the context $Delta$ all the super-paths of $p$ carry the right annotations when $p$ is passed to a method expecting an argument annotated with $alpha beta$. This type of judgment is necessary to verify the correctness of the annotations in a method-modular fashion. Since a called method has no information about $Delta$ when it is verified, all the super-paths of $p$ must have an annotation in $Delta$ that is lower than or equal to ($rel$) the annotation they would have in a context containing just their root annotated with $alpha beta$.

#example[
Given the following program:
$ class C(y: unique) \
m_1(x: unique) : shared \
m_2(x: shared) : shared \ $
Within the context
$ Delta = x: unique, space x.y : shared $
- $Delta tr std(x, unique)$ is not derivable, meaning that $x$ cannot be passed to the method $m_1$. The judgment is not derivable because $Delta(x.y) = shared$, while in a context $Delta' = x: unique$, $Delta'(x.y) = unique$, but $shared lt.eq.curly.not unique$.
- $Delta tr std(x, shared)$ is derivable, meaning that $x$ can be passed to the method $m_2$ if all the preconditions, which will be formalized by the statement typing rules, are also satisfied.
]

== Unification

This section introduces several functions that are essential for managing contexts in control flow constructs such as branching and scope transitions.

=== Pointwise LUB

#display-rules(
  Ctx-Lub-Empty, Ctx-Lub-Sym,
  Ctx-Lub-1, "",
  Ctx-Lub-2, "",
)

The rules in this section describe a function that takes two contexts and returns the context obtained by taking the LUB of the annotations of each path appearing in them. If a variable $x$ is present in only one of the two contexts, it is annotated with $top$ in the resulting context.

$ \_ lub \_ : Delta -> Delta -> Delta $

#example[
$ Delta_1 &= x: shared, space y: shared \
Delta_2 &= x: unique \
Delta_1 lub Delta_2 &= x: Lub {shared, unique}, space y: top \
&= x: shared, space y: top $
]

=== Removal of Local Declarations

#display-rules(
  Remove-Locals-Base, "",
  Remove-Locals-Keep, "",
  Remove-Locals-Discard, "",
)

The function formalized by these rules is used to obtain the correct context when exiting a scope.
When writing $Delta_1 triangle.filled.small.l Delta_2$, $Delta_1$ represents the resulting context of a scope, while $Delta_2$ represents the context at the beginning of that scope. The result of the operation is a context in which all the paths rooted in variables that were locally declared inside the scope have been removed.

$ \_ triangle.filled.small.l \_ : Delta -> Delta -> Delta $

#example[
$ Delta_1 &= x: unique, space y: unique, space x.f: unique, space y.f: shared \
Delta_2 &= x: shared \
Delta_1 triangle.filled.small.l Delta_2 &= x: unique, space x.f: unique \ $
]

=== Unify

#display-rules(
  Unify, ""
)

Finally, the unify function combines the two functions described above. This function will be fundamental for typing `if` statements: $unify(Delta, Delta_1, Delta_2)$ can be used to type an `if` statement where $Delta$ is the context at the beginning of the statement, while $Delta_1$ and $Delta_2$ are the resulting contexts of its two branches.

$ "unify" : Delta -> Delta -> Delta -> Delta $

#example[
Given the following contexts:
$ Delta &= x: unique \
Delta_1 &= x: shared, space x.f : shared, space y: unique \
Delta_2 &= x: unique, space x.f : top, space y: unique \ $
Unification has the following result:
$ unify(Delta, Delta_1, Delta_2) &= (Delta_1 lub Delta_2) triangle.filled.small.l Delta \
&= (x: shared, space x.f: top, space y: unique) triangle.filled.small.l Delta \
&= x: shared, space x.f: top $
]

== Normalization

#display-rules(
  N-Empty, "",
  N-Rec, ""
)

Normalize is a function that takes and returns a list of annotated paths. In the returned list, duplicate paths from the given list are replaced by a single path annotated with the LUB of the annotations of the duplicates. As already mentioned, the rules in @cap:passing are not sufficient to type a method call because the same path might be passed more than once to the same method. Normalization is the missing piece that enables the formalization of typing rules for method calls.

$ norm : overline(p : alpha beta) -> overline(p : alpha beta) $

#example[
$ norm(x\: top, space x\: shared, space y\: unique) &= x: Lub{shared, top}, space y: unique \
&= x: top, space y: unique $
]

== Statement Typing

Typing rules are structured as follows:
$ Delta tr s tl Delta' $
This judgment means that typing a statement $s$ in a context $Delta$ leads to a context $Delta'$. It is important to note that this refers only to the types involved and is not related to the operational semantics of the program.

A program $P$ is well-typed if and only if the following judgment is derivable:
$ forall m(overline(x\: af beta)): af {begin_m; s; ret_m e} in P . space dot tr begin_m; s; ret_m e tl dot $
This means that a program is well-typed if and only if, for every method in that program, typing the body of the method starting from an empty context leads to an empty context. Methods without a body are excluded from this judgment, as they can be safely assumed to be well-typed without further analysis.

=== Begin

#display-rules(Begin, "")

This rule initializes the context at the beginning of a method. The initial context contains only the method's parameters with their declared uniqueness annotations. The example below demonstrates how the rule works in practice. In this and subsequent examples, the resulting context after typing a statement is shown next to it, after the ⊣ symbol.
#figure( caption: "Typing example for Begin statement", ``` f(this: unique, x: unique ♭, y: shared ♭, z: shared): unique { begin_f; ⊣ Δ = this: unique, x: unique ♭, y: shared ♭, z: shared ... } ``` ) === Sequence #display-rules(Seq-New, "") This rule is straightforward, but necessary to define how to type a sequence of statements. In a sequence, statements are typed in the order that they appear. After a statement is typed, the resulting context is used to type the following one. === Variable Declaration #display-rules(Decl, "") After declaring a variable, it is inaccessible until its initialization and so the variable will be in the context with $top$ annotation. Note that this rule only allows to declare variables if they are not in the context while Kotlin allows to shadow variables declared in outer scopes. Kotlin code using shadowing is not currently supported by this system. #figure( caption: "Typing example for variable declaration", ``` f(): unique { begin_f; ⊣ Δ = ∅ var x; ⊣ Δ = x: T ... } ``` ) === Call #display-rules(Call, "") Typing a method call follows the logic presented in the rules of @cap:passing ($~>$) while taking care of what can happen with method accepting multiple parameters. - All the roots of the paths passed to a method must be in the context (also guaranteed by the language). - All the paths passed to a method must be in standard form of the expected annotation. - It is allowed to pass the same path twice to the same method, but only if it passed where a shared argument is expected. - It is allowed to pass two paths $p_i$ and $p_j$ such that $p_i subset.sq p_j$ when one of the following conditions is satisfied: - $p_j$ is shared. - The method that has been called expects shared (possibly borrowed) arguments in positions $i$ and $j$. - The resulting context is constructed in the following way: - Paths passed to the method and their super-paths are removed from the initial context. - A list of annotated paths (in which a the same path may appear twice) in constructed by mapping passed paths according to the "passing" ($~>$) rules. - The obtained list is normalized and added to the context. @call-arg-twice shows the cases where it is possible to pass the same reference more than once and how normalization is applied. In @call-sup-ok-1 it is possible to call `f` by passing `x` and `x.f` since $Delta(x.f) = shared$. In @call-sup-wrong is not possible to call `g` by passing `b` and `b.f`, this is because `g`, in its body, expects `x.f` to be unique, but it would not be the case by passing `b` and `b.f`. Finally @call-sup-ok-2 shows that it is possible to call `h` by passing `x` and `x.f` since the method expects both of the arguments to be shared. #figure( caption: "Typing example for method call with same reference", ``` f(x: unique, y: shared ♭): unique g(x: shared ♭, y: shared ♭): unique h(x: shared, y: shared ♭): unique use_f(x: unique) { begin_use_f; ⊣ Δ = x: unique f(x, x); // not derivable: 'x' is passed more than once but is also expected to be unique ... 
  }

  use_g_h(x: unique) {
    begin_use_g_h; ⊣ Δ = x: unique
    g(x, x); // ok, uniqueness is also preserved since both the args are borrowed
    ⊣ Δ = x: unique
    h(x, x); // ok, but uniqueness is lost after normalization
    ⊣ Δ = x: shared
  }
  ```
)<call-arg-twice>

#figure(
  caption: "Typing example for correct method call with super-paths",
  ```
  class A(f: shared)

  f(x: unique, y: shared): unique

  fun use_f(x: unique) {
    begin_use_f; ⊣ Δ = x: unique
    f(x, x.f); // ok
    ⊣ Δ = x: T, x.f: shared
    // Note that even if x.f is marked shared in the context, it is not accessible since Δ(x.f) = T
    ...
  }
  ```
)<call-sup-ok-1>

#figure(
  caption: "Typing example for incorrect method call with super-paths",
  ```
  class B(f: unique)

  g(x: unique, y: shared): unique

  use_g(b: unique) {
    begin_use_g; ⊣ Δ = b: unique
    g(b, b.f); // error: 'b.f' cannot be passed since 'b' is passed as unique and Δ(b.f) = unique
    // It is correct to raise an error since 'g' expects x.f to be unique
  }
  ```
)<call-sup-wrong>

#figure(
  caption: "Typing example for correct method call with super-paths",
  ```
  class B(f: unique)

  h(x: shared, y: shared) {}

  use_h(x: unique) {
    begin_use_h; ⊣ Δ = x: unique
    h(x, x.f); // ok
    ⊣ Δ = x: shared, x.f: shared
    ...
  }
  ```
)<call-sup-ok-2>

=== Assignments

All rules for typing assignments have a path $p$ on the left-hand side and vary based on the expression on the right-hand side. The common trait of these rules is that they require the root of $p$ to be contained in the initial context, via the premise "$Delta(p) = alpha beta$". Additionally, in the resulting context, the annotation of $p$ is always updated according to the expression on the right-hand side of the assignment.

==== Assign null

#display-rules(Assign-Null, "")

The definition of unique tells us that a reference is unique when it is `null` or when it is the sole accessible reference to the object it points to. Given that, we can safely consider a path $p$ unique after assigning `null` to it. Moreover, all the super-paths of $p$ are removed from the context after the assignment.

#figure(
  caption: "Typing example for assigning null",
  ```
  class C(t: unique)

  f() {
    begin_f; ⊣ Δ = ∅
    var b; ⊣ Δ = b: T
    ... ⊣ Δ = b: shared, b.t: T
    b = null ⊣ Δ = b: unique
    ...
  }
  ```
)

==== Assign Call

#display-rules(Assign-Call, "")

Having defined how to type a method call, it is easy to formalize the typing of a call assignment. Like in all the other assignment rules, the root of the path on the left side of the assignment must be in the context. First, the method call is typed, obtaining a new context $Delta_1$. Then, the annotation of the path on the left side of the assignment is replaced ($|->$) in $Delta_1$ with the annotation of the return value of the method.

#figure(
  caption: "Typing example for assigning a method call",
  ```
  get_unique(): unique
  get_shared(): shared

  f(): unique {
    begin_f; ⊣ Δ = ∅
    var x; ⊣ Δ = x: T
    var y; ⊣ Δ = x: T, y: T
    x = get_unique(); ⊣ Δ = x: unique, y: T
    y = get_shared(); ⊣ Δ = x: unique, y: shared
    ...
  }
  ```
)

==== Assign Unique

#display-rules(Assign-Unique, "")

In order to type an assignment $p = p'$ in which $p'$ is unique, the following conditions must hold:
- The root of $p$ must be in the context.
- $p'$ must be unique in the context.
- Assignments in which $p' subset.eq.sq p$, like $p.f = p$, are not allowed.

The resulting context is built in the following way:
- Starting from the initial context $Delta$, a context $Delta_1$ is obtained by replacing ($|->$) the annotation of $p'$ with $top$.
- The context $Delta_1$ is used to obtain a context $Delta'$ by replacing ($|->$) the annotation of $p$ with unique.
- Finally, to obtain the resulting context, all the paths that were originally rooted in $p'$ are re-rooted in $p$, with the same annotations, and added to $Delta'$.

#figure(
  caption: "Typing example for assigning a unique reference",
  ```
  class B(t: unique)
  class A(b: unique)

  f(x: unique, y: unique): unique {
    begin_f; ⊣ Δ = x: unique, y: unique
    y.t = x.b.t; ⊣ Δ = x: unique, y: unique, x.b.t: T, y.t: unique
    x.b = y; ⊣ Δ = x: unique, y: T, x.b: unique
    ...
  }
  ```
)

==== Assign Shared

#display-rules(Assign-Shared, "")

Typing an assignment $p = p'$ in which $p'$ is shared is similar to the case in which $p'$ is unique, but with some differences:
- $p$ cannot be borrowed. This is necessary to guarantee the soundness of the system when a unique variable is passed to a method expecting a shared borrowed argument.
- Naturally, $p'$ must be shared in the context.

The resulting context is also constructed in a similar way to the previous case; the only difference is that here it is not necessary to replace ($|->$) the annotation of $p'$.

#figure(
  caption: "Typing example for assigning a shared reference",
  ```
  class B(t: unique)

  f(x: unique, y: shared): unique {
    begin_f; ⊣ Δ = x: unique, y: shared
    x.t = y; ⊣ Δ = x: unique, y: shared, x.t: shared
    ...
  }
  ```
)

==== Assign Borrowed Field

#display-rules(Assign-Borrowed-Field, "")

Fields of a borrowed parameter must be treated with caution to avoid unsoundness. Borrowed fields can be passed as arguments to other methods, provided that the preconditions for typing the method call are respected. In addition, they can be used on the right-hand side of an assignment with certain limitations: after being read, a borrowed field becomes inaccessible, even if it is shared. Finally, borrowed fields can be used on the left-hand side of an assignment when a unique reference is on the right-hand side. Ensuring inaccessibility after reading borrowed fields and restricting their reassignment to unique references, together with respecting the preconditions for typing a return statement stated in @type-ret, is essential for maintaining soundness when unique references are passed to methods that accept a borrowed-shared parameter.

#figure(
  caption: "Typing example for assigning a borrowed field",
  ```
  class B(t: unique)

  f(x: shared ♭): unique {
    begin_f; ⊣ Δ = x: shared ♭
    var z; ⊣ Δ = x: shared ♭, z: T
    z = x.t; ⊣ Δ = x: shared ♭, z: shared, x.t: T
    ...
  }
  ```
)

=== If

#display-rules(If, "")

Once the unification function has been defined, typing an `if` statement is straightforward. First, it is necessary to ensure that the paths appearing in the guard are accessible in the initial context. The `then` and the `else` branches are then typed separately, and their resulting contexts are unified to obtain the resulting context of the whole statement. The system does not allow `null` or a method call to appear in the guard of an `if` statement, as these constructs can easily be desugared.

#example[
Desugaring for `if` statements containing expressions other than paths within the guard.
$ fi (p == null) ... equiv var "fresh" ; "fresh" = null ; fi(p == "fresh") ... $
$ fi (p == m(...)) ... equiv var "fresh" ; "fresh" = m(...) ; fi(p == "fresh") ...
$
]

#figure(
  caption: "Typing example for if statement",
  ```
  class A(c: unique)

  consume_unique(c: unique): shared
  consume_shared(a: shared): shared

  fun f(a: unique, c: shared ♭) {
    begin_f; ⊣ Δ = a: unique, c: shared ♭
    if (a.c == c) {
      consume_unique(a.c); ⊣ Δ1 = a: unique, a.c: T, c: shared ♭
    } else {
      consume_shared(a); ⊣ Δ2 = a: shared, c: shared ♭
    }; ⊣ Δ = a: shared, a.c: T, c: shared ♭
    // unify(Δ; Δ1; Δ2) = a: LUB{ unique, shared }, a.c: LUB{ T, shared }, c: shared ♭
    ...
  }
  ```
)

=== Return<type-ret>

#display-rules(Return-p, "")

By the construction of the grammar, a `return` statement is designed to be the final statement executed within a method. As such, there is no need to maintain a resulting context after the return statement has been typed. However, several important conditions must be satisfied when returning. First, the annotation of the path being returned must be lower than or equal to ($rel$) the annotation of the return value of the method. This ensures that a method cannot return a value with greater aliasing than what is specified in the method’s signature, effectively preventing borrowed values from being returned (@ret-bor). Second, the path being returned must be in the standard form of the return type (@ret-std). Finally, all parameters that are shared or borrowed (or both) must remain in the standard form of their original annotations by the time the method returns. These conditions are essential for maintaining modularity, allowing each method to be typed without knowing the implementation of the other methods.

The system does not allow returning `null` or a method call, since these cases can easily be desugared, as shown in @ret-desugar. Similarly, functions that do not return a value can be represented by having them return a call to the `Unit` constructor.

#example[
Given the following program:
$ class C(f: unique) \
m(x: unique borrowed): unique {begin_m ; ...; ret_m space x.f } $
The following judgment is not derivable:
$ x: unique borrowed = Delta tr ret_m space x.f tl dot $
This happens because the function returns a borrowed field, which is prohibited by the third precondition of the rule. Specifically:
$ mtype(m) = unique borrowed -> unique \
Delta(x.f) = unique borrowed $
Indeed, the third precondition is not derivable, since:
$ unique borrowed lt.eq.curly.not unique $
]<ret-bor>

#example[
Given the following program:
$ class C(f: unique) \
m(x: unique): unique {begin_m ; ...; ret_m space x } $
The following judgment is not derivable:
$ x: unique, space x.f: shared = Delta tr ret_m space x tl dot $
This occurs because the fourth precondition, $Delta tr std(x, unique)$, is not derivable.
]<ret-std>

#example[
Desugaring for return statements that do not return a path.
$ {...; ret null} equiv {...; var "fresh" ; "fresh" = null ; ret "fresh"} $
$ {...; ret m(...)} equiv {...; var "fresh" ; "fresh" = m(...) ; ret "fresh"} $
Where $"fresh"$ refers to a variable that does not exist in the context prior to its declaration.
]<ret-desugar>

== Stack Example

@stack-grammar illustrates how the context evolves in the example presented in @cap:kt-stack when it is encoded using the grammar described in this chapter.
#figure( caption: "Typing for a Stack implementation", ``` class Node(value: unique, next: unique) class Stack(root: unique) fun Node(value: unique, next: unique): unique fun push(this: unique ♭, value: unique): shared { begin_push; ⊣ Δ = this: unique ♭, value: unique var r; ⊣ Δ = this: unique ♭, value: unique, r: T r = this.root; ⊣ Δ = this: unique ♭, value: unique, r: unique, this.root: T this.root = Node(value, r); ⊣ Δ = this: unique ♭, value: T, r: T, this.root: unique return Unit(); } fun pop(this: unique ♭): unique { begin_pop; ⊣ Δ = this: unique ♭ var value; ⊣ Δ = this: unique ♭, value: T if (this.root == null) { value = null; ⊣ Δ = this: unique ♭, value: unique } else { value = this.root.value; ⊣ Δ = this: unique ♭, value: unique, this.root.value: T this.root = this.root.next; ⊣ Δ = this: unique ♭, value: unique, this.root: unique } // Unification... ⊣ Δ = this: unique ♭, value: unique, this.root: unique return value; } ``` )<stack-grammar>
https://github.com/Kasci/LiturgicalBooks
https://raw.githubusercontent.com/Kasci/LiturgicalBooks/master/CSL_old/oktoich/Hlas2/1_Pondelok.typ
typst
#let V = ( "HV": ( ("", "Jehdá ot dréva", "Imíjaj blahoutróbija istóčnik, istočájaj že bézdny mílostej, i potóki ščedrót, ótče preblahíj, i Sýne, Slóvo Ótčeje: i Dúše svjatýj, nesozdánnoje jestestvó, prijimí molénije i molítvu nášu, vsím v prehrišénijich súščym podajá proščénije, jáko Bóh ščédr i čelovikoľúbec."), ("", "", "Ímaši po jestestvú jáko Bóh, utróbu ščedrót, i mílosti i blahostýni: ťímže mólim ťá Christé Spáse náš, i pripádajušče zovém, prísno tí vopijúšče: dáruj rabóm tvojím prehrišénij mnóhich razrišénije i proščénije vsích, íchže sohrišíchom vsí, jáko Bóh ščédr i čelovikoľúbec."), ("", "", "Spastí choťá Spáse vsích, jáko Bóh, íchže rádi vočelovíčivsja, i javílsja jesí čelovík, spasí nás poklaňájuščichsja tvojím zápovidem. Ne prišél bo jesí čelovikoľúbče, právedniki spastí: no íže v prehrišénijich okovánnych nás mnóhimi hrichí, razrišíti blahodátiju kreščénija božéstvennaho, jáko Bóh ščédr i čelovikoľúbec."), ("", "Jehdá ot dréva", "Prestóli, Cheruvími i Serafími : Hospóďstvija že i síly, i vlásti čéstnýja: ánheli, archánheli i načála s ními: ťích netľínnoje jestestvó sostávľšemu: písň neprestánnuju sohlásno pojút, jedíno v Tróici suščestvó vsjá naučájušče čestí, srásleno, jedinočéstno i soprestóľno."), ("", "", "Pérviji neveščéstvennych lícy, Bohonačáľstvija zarí, nechodátajstvennymi sijáňmi podjémľušče, po čínu ích próčim prepodavájut božéstvennyja svétlosti: i prinósjat nám sijá ľubóvnym zakónom, po dostojániju tákožde, k serdéčňij komúždo čistoťí s priľižánijem."), ("", "", "Horí k vysoťí, duší horí serdéčnoje óko, i úmnaja stremlénija, ľubóviju božéstvennoju imúšče, v dušách svojích prostirájem vsehdá: jáko da íže ottúdu lučámi oblistájemi, ubíhnem ťmý strastéj, čájušče so ánhely predstáti strášnomu prestólu ziždíteľa, i preobrazítisja ot svíta vo svít."), ("Bohoródičen", "", "Mnóžestvo ánhel Sýna tvojehó prečístaja, vospivájut trisvjátými hlásy, jáko prestól ťá tohó súščuju ohnezráčen, i palátu oduševlénnuju, móst božéstvennyj, íže ot zemlí prevoďášči vsehdá k nemú: rádujsja obrádovannaja, vopijúšče tí sohlásno so archánhelom Havrijílom, jáko róždšej istóčnika rádosti."), ), "S": ( ("", "", "Sohriších tí Christé Spáse, jáko blúdnyj sýn: prijimí mja Ótče kájuščasja i pomíluj mjá Bóže."), ("", "", "Vopijú ti Christé Spáse mytarévym hlásom: očísti mjá jákože ónaho, i pomíluj mjá Bóže."), ("", "", "Íže zemnýja sládosti ne vozľúbľše strastotérpcy, nebésnym blahím spodóbišasja, i ánhelom sohráždane býša: Hóspodi, molítvami ích pomíluj, i spasí nás."), ("Bohoródičen", "", "Rádujsja Maríje Bohoródice, chráme nerazrušímyj, páče že svjatýj, jákože vopijét prorók: svját chrám tvój, díven v právďi."), ), ) #let P = ( "1": ( ("", "", "Vo hlubiňí postlá inohdá, faraonítskoje vsevójinstvo preoružénnaja síla, voplóščšejesja že Slóvo vsezlóbnyj hrích potrebílo jésť, preproslávlennyj Hospóď, slávno bo proslávisja."), ("", "", "Voobrážsja iz tebé po nám, Sólnce nevečérneje, ťá slovésnoje nébo, i svitľíjšeje, jávstvenňi pokazá. 
Ťímže mjá izbávi strástnaho smráda: jáko da vospojú ľubóviju ťá jedínu Máter Ďívu."), ("", "", "Kleščú mýslennyj úhľ nosívšuju, i čelovíčeskoje jestestvó očístivšuju, moľú ťa Vladýčice: mnóhich mojích prehrišénij skvérnu omýj, i ot oskverňájuščich strastéj izbávi mjá molítvami tvojími."), ("", "", "Íže istóčnik mílosti Christá prečístaja róždši, i Jedém čelovíkom otvérzši, dvéri otvérzi mí mílosti tvojejá Bohoródice, i dáruj sohrišénij proščénije, Vladýčice míru."), ("", "", "Mojích prehrišénij plenícy rastórhni Vladýčice tvojehó Sýna i Bóha moľášči, i tomjáščij mjá hrích nizloží prisnoďívo: jáko da vospojú ťa vsehdá preneporóčnaja spasájem."), ), "3": ( ("", "", "Na kámeni mjá víry utverdív, razširíl jesí ustá mojá na vrahí mojá, vozveselí bo sja dúch mój vnehdá píti: ňísť svját, jákože Bóh náš, i ňísť práveden páče tebé Hóspodi."), ("", "", "Mhlú strastéj mojích i omračénije otžení, zarjámi molítv tvojích, i prosvití mja prečístaja Vladýčice, róždšaja vozsijávšaho préžde dennícy Christá, ot sólnca sólnce nezachodímoje."), ("", "", "Vozstávila jesí prečístaja, tvojím roždestvóm, skíniju pádšuju i sokrušénnuju, Davída tvojehó práotca: ťímže vozstávi Bohoródice, i mené rabá tvojehó, sokrušénaho strasťmí i prehrišéniji."), ("", "", "Prohoňájuščeje orúžije hrichí i strásti duší mojejá búdi Ďívo: i pokaží mja jákože druhíj ráj, beznavíten i sobľudén tobóju i prinosjášč cvíty Dúcha."), ("", "", "Prostérši rúku blahoutróbija, prijimí mja Bohorodíteľnice: i vsjákaho ischití osuždénija k tebí pribihájuščaho, i tebí pripádajušča, i prizyvájuščaho tvojú pómošč."), ), "4": ( ("", "", "Pojú ťa, slúchom bo Hóspodi uslýšach i užasóchsja: do mené bo ídeši, mené iščjá zablúždšaho. Ťím mnóhoje tvojé snizchoždénije, jéže na mjá, proslavľáju mnohomílostive."), ("", "", "Kvás čelovíčeskaho jestestvá, neskvérnyj i svjatýj, ot nehóže prijém ťísto, sozdá jehó ziždíteľ: i mňí strastéj skvérnu omýj, i kál prehrišénij mojích očísti."), ("", "", "Ľičbóju molítv tvojích Vladýčice, i plástyrem i máslom, duší mojejá strúpy, i jázvy hrichóvnyja nýňi iscilí čístaja, jáko róždši Bohonačáľnuju ciľbú jestestvá."), ("", "", "Plenícu strastéj mojích razriší Vladýčice, i veríhi hrichóv mojích rastórhni orúžijem molítv tvojích, Christá róždšaja, podajúščaho izbavlénije svjázannym vo áďi."), ("", "", "Strásti duší mojejá iscilí, omračívšyja mjá preokajánňi: i pómysl prosvití, daléče othoňájušči čístaja, ťmý bisóv oskorbľájuščich mjá i po vsjá dní síti prostirájuščich mňí."), ), "5": ( ("", "", "Prosviščénije vo ťmí ležáščich, spasénije otčájannych Christé Spáse mój, k tebí útreňuju carjú míra, prosvití mja sijánijem tvojím: inóho bo rázvi tebé Bóha ne znáju."), ("", "", "Ot kľátvy drévnija mír jáko izbávila jesí Vladýčice, míru blahoslovénije Christá róždšaja: pomyšlénij bezmístnych izbávi mjá, i vsjákija pečáli, jedína bo jesí vírnych rádovanije."), ("", "", "Okružájet duší mojejá hrád bisóvskij pólk, i razpľiníti ziló pokušájetsja: preslávnaja Vladýčice, hráde vsích carjá, ohradí mja stólpnymi sťinámi, tvojími molítvami, i spasí."), ("", "", "Búrja ľutá razvraščénij, duchóv lukávstvija, mojú mýsľ smuščájet: predvarí skóro, i utíši sijú róždšaja vsjákija tvári jedínaho kórmčija Christá vsesíľnaho."), ("", "", "Vozsijáj mí svít pokajánija Bohorodíteľnice, vo ťmí pohíbeľňij zatvorénnomu: dáruj rúku pómošči ležáščemu, i tvojími molítvami vozdvíhni mjá čístaja, tvoríti Bóžija opravdánija."), ), "6": ( ("", "", "V bézdňi hrichóvňij vaľájasja, neizsľídnuju milosérdija tvojehó prizyváju bézdnu: ot tlí Bóže mjá vozvedí."), ("", "", 
"Jáko žízň víčnuju róždši prečístaja, ot ťíla duší mojéj ischoďáščej, smérť tvojími molítvami umertví."), ("", "", "Lóvit iskušája mjá zmíj čelovikonenavístnyj: sehó uprazdní Vladýčice, jáko róždši hlavý sokrúššaho zmijév."), ("", "", "Stropótnyja stezí šéstvovavšaho k právij stezí nastávi, jáže púť žízni róždšaja, Christá Bóha nášeho."), ("", "", "Duší mojejá strásti iscilí, i pečáli nedoumínije pobidí, jáže Bóha róždšaja na spasénije čelovíkom, prečístaja."), ), "S": ( ("", "Podóben", "Milosérdija súšči istóčnik, mílosti spodóbi nás Bohoródice, prízri na ľúdi sohrišívšyja, javí jáko prísno sílu tvojú: na ťá bo upovájušče, rádujsja, vopijém tí, jákože inohdá Havrijíl bezplótnych archistratíh."), ), "7": ( ("", "", "Ťílu zlatómu premúdryja ďíti ne poslužíša, i v plámeň sámi poidóša, i bóhi ích obruháša, sredí plámene vozopíša, i orosí já ánhel: uslýšasja užé úst vášich molítva."), ("", "", "Smuščájet mí úm, i koléblet dúšu volná strastéj: no róždšaja, móre volnújuščejesja poveľínijem božéstvennym obuzdavájuščaho, Ďívo, k tišiňí preloží bezstrástija, i plotskóje ustávi smuščénije."), ("", "", "Ťá dvér vídyj mýslennaho vostóka s nebesé jávľšujusja, vníti v nebesá tobóju Ďívo, ráb tvój moľúsja: ťímže prijimí mja Vladýčice, i svítom nastávi."), ("", "", "Na kresťí Christós Sýn tvój raspénsja vóleju, načála ťmý posramí, i smértnuju razruší tletvórnuju sílu: ímže Vladýčice prečístaja, strástnoje mojé plóti mudrovánije umertví."), ("", "", "Ne prestáj čístaja, moľášči ot tebé róždšahosja Bóha, o íže víroju pritekájuščich k tebí, i prosjáščich izbavlénija, jedína zastúpnice míru, otčájannym nadéžda i zastuplénije, oburevájemym v ľútych prísno pómošč."), ), "8": ( ("", "", "O podóbiji zláťi nebréhše treblažéniji júnoši, neizmínnyj i živýj Bóžij óbraz víďivše, sredí ohňá vospiváchu: osuščestvovánnaja da pojét Hóspoda vsjá tvár, i prevoznósit vo vsjá víki."), ("", "", "Bohoďílannaja skínije, súščaja svjatáho svjatých, pokazávšaho drévnija skíniji óbraz, archijeréja velíka vnútr imúšči, mojé sérdce Ďívo, javí výšňaho, i vsích carjá Christá skíniju svjátu."), ("", "", "Božéstvennyj kovčéh nosívši i skrižáli zavíta, neopísannaho Sýna tvojehó Christá Ďívo, Slóvo Bóžije prečístoje, semú napisátisja, jákože na dscí duší mojejá sotvorí pérstom Bóžijim, tvojími molítvami."), ("", "", "Usičésja ot tvojích ložésn, preneporóčnaja Vladýčice, jáko ot horý čestnýj kámeň, i stolpý vsjákija prélesti, jáko vsederžíteľ jedín vsesíľnyj sokruší: ímže nýňi óbrazy strastém potrebí duší mojejá, mýslennych vráh sokrušívši čéľusti."), ("", "", "Vodámi živótnymi, izsóchšeje mojé sérdce hrichóvnym plámenem, tý napój Ďívo Bohoródice, vo umiléniji vsehdá sobľudájušči mjá, so stráchom pojúšča: da pojét Hóspoda vsjá tvár, i prevoznósit jehó vo víki."), ), "9": ( ("", "", "Jáže préžde sólnca svitíľnika Bóha vozsijávšaho, plótski k nám prišédšaho, iz bokú Ďivíču neizrečénno voplotívšaja, blahoslovénnaja vsečístaja, ťá Bohoródice veličájem."), ("", "", "Právdy sólnce čístaja, Christá vozsijála jesí, i nébo pokazásja presvítlo, ímže nóšč strastéj mojích, tvojími molítvami razorí, i dúšu mojú prosvití, i sérdce mojé ozarí i ujasní."), ("", "", "Sudí Bohoródice, i preprí ľútaho mojehó sopostáta, i ot tohó mja oskorblénija izbávi: sudijú bo blaháho i právednaho rodilá jesí, íže v ňídrich Božestvá osudívšaho smérť."), ("", "", "Íže bezplótnych suščestvá jáko Bóh sostavléj, iz tebé Ďívo otrokovíce voplotísja. 
Ťímže izbávi mjá plotskáho strastéj obstojánija i plotskáho mudrovánija, tvojehó rabá tvojími molítvami."), ("", "", "Razrušíla jesí Jévinu pečáľ, Adáma nóvaho róždši, préžde Adámovo jestestvó sozdávšaho. Ťímže razriší i mojích prehrišénij rukopisánije, napástej i skorbéj i strastéj mjá svoboždájušči."), ), ) #let U = ( "S1": ( ("", "", "Jáko vólny morskíja vostáša na mjá bezzakónija mojá, i jáko korábľ v pučíňi, áz jedín pohružájusja sohrišéňmi mnóhimi: no v tíchoje pristánišče pokajánija nastávi mjá Hóspodi, i spasí mja."), ("", "", "Áz jésm drévo neplódnoje, Hóspodi, pokajánija plodá ne tvorjá otňúd, i posičénija bojúsja, i ohňá ónaho ustrašájusja nehasímaho. Ťímže ťá moľú: préžde ónyja bidý obratí i spasí mja."), ("Bohoródičen", "", "Milosérdija súšči istóčnik, mílosti spodóbi nás Bohoródice, prízri na ľúdi sohrišívšyja, javí jáko prísno sílu tvojú: na ťá bo upovájušče, rádujsja, vopijém tí, jákože inohdá Havriíl, bezplótnych archistratíh."), ), "S2": ( ("", "", "Pomíluj mjá, rečé Davíd: i áz tebí zovú: sohriších, Spáse, mojá hrichí pokajánijem očísti, i pomíluj mjá."), ("", "", "Pomíluj mjá Bóže, pomíluj mjá, o dvojú hrichú Davíd plákašesja, áz že o ťmách prehrišénij vopijú ti: ón postéľu slezámi omakáše, áz že kápli nijedínyja priťažách. Otčajavájusja i moľú: pomíluj mjá Bóže, po velícij mílosti tvojéj."), ("", "", "Tebé oďivájuščaho nébo óblaki, imúšče svjatíji oďijánije, v míri múki ot bezzakónnik preterpíša, i lésť ídoľskuju uprazdníša. Ťích molítvami i nás svobodí ot nevídimaho vrahá, Spáse, i spasí nás."), ("Bohoródičen", "", "Tebé veličájem Bohoródice, vopijúšče: rádujsja, nezachodímaho svíta óblače, sehó nosívši v ňídrich tvojích, slávy Hóspoda."), ), "S3": ( ("", "Milosérdija súšči", "V nevíďiniji i rázumi sohrišája, v noščí i vo dní, tebé Christé prohňívaju, jedín na zemlí bezzakóniji moími: jedíne blahíj bezhríšne, obratí i spasí mja moľbámi ánhel tvojích, prišédyj milosérdijem hríšnyja prizváti na pokajánije."), ("", "", "Bezplótnych presvjatíji lícy, móľat ťá Bóha blaháho i Vladýku, poščadíti nás v čás osuždénija, i izbáviti nás hórkaho mučénija, bisóvskija zlóby, i strastéj omračénija, i vsjákaho preščénija, pribihájuščich ľubóviju k pokróvu vášemu."), ("Bohoródičen", "", "Bohoródice beznevístnaja čístaja, jáže bez símene róždšaja vsích Vladýku, tohó so ánhely molí, izbávitisja nám ot vsjákaho nedoumínija, i dáti umilénije i svít dušám nášym, i sohrišénij očiščénije, jáže jedína vskóri zastupájušči."), ), "K": ( "P1": ( "1": ( ("", "", "Hrjadíte ľúdije, pojím písň Christú Bóhu, razďíľšemu móre, i nastávľšemu ľúdi, jáže izvedé iz rabóty jehípetskija, jáko proslávisja."), ("", "", "Voplóščsja Slóve, i ne prišéd právedniki, jákože rékl jesí, no hríšniki prizváti na pokajánije, mnóho sohrišívša prijimí i spasí mja."), ("", "", "Jedín áz hrichóm poraboščén jésm, jedín áz strastém dvéri otverzóch: blahopreminíteľu jedíne, obratív mjá, spasí milosérdijem tvojím."), ("Múčeničen", "", "Na sudíšči tvojém Hóspodi, vincý nosjášče predstoját strastonóscy, lukávaho pobidívše dérzosť, i bezsmértijem obohatívšesja."), ("Múčeničen", "", "Strují nám iscilénij istočájušče strastotérpcy, vsehdá tečénija plóti nášeja strásti izsušájut, síloju božéstvennaho Dúcha."), ("Bohoródičen", "", "Jáže istóčnik bezstrástija róždšaja, ujázvenaho mjá strasťmí, Otrokovíce iscilí, i ohňá víčnaho ischití mja, jedína Bohoblahodátnaja."), ), "2": ( ("", "", "Netrénu, neobýčnu, nemókrenno morskúju šéstvovav stezjú, izbránnyj vopijáše Izráiľ: Hóspodevi pojím, jáko proslávisja."), ("", "", "Úhlije Bohonósnoje, 
zaréju razžéhšejesja tvojehó suščestvá, bezplótnyja javíl jesí líki, tebé Christé slávjaščyja, jáko vsesíľna."), ("", "", "Sílu netľínija, i slávu sťažávše bezsmértija darovánnuju ánheli Christé, jéže k tebí približénijem prosviščájutsja."), ("", "", "Javíšasja svitonóscy, íže neveščéstvennuju pokazújušče, i po jestestvú obrazmí ánheli Christé, čúvstvenňi voobrazújuščiji čistotú."), ("Bohoródičen", "", "Číni ánheľstiji roždestvú tvojemú Ďívo, jéže páče jestestvá, slúžat rádujuščesja čístaja: sích bo rodilá jesí Bóha i Hóspoda."), ), ), "P3": ( "1": ( ("", "", "Utverdí nás v tebí Hóspodi, drévom umerščvéj hrích, i strách tvój vsadí v serdcá nás pojúščich ťá."), ("", "", "Íže vo črévo Christé všédyj netľínnoje, istľívšuju strasťmí dúšu mojú pokajánijem obnoví, i svíta prisnosúščnaho javí ispólnenu."), ("", "", "Poslúšach vrahá prohňívajuščaho, i vsják hrích soverších, i bezúmno prohňívach tebé čelovikoľúbče, jedínaho dolhoterpilívaho."), ("Múčeničen", "", "Krestóm ohraždéni nepobidímiji orúžnicy, i múčenicy Spásovy, jákože sťíny razoríša o Bózi dóblestvenno, léstnaja utveržénija."), ("Múčeničen", "", "Síloju božéstvennoju, nemoščnóe váše ukrípľše, dóbliji strástotérpcy, kríposť borcá vsekonéčňij páhubi predáste."), ("Bohoródičen", "", "Maríje, zlatája kadíľnice, strastéj mojích voňú otžení, i utverdí mja koléblemaho prilóhmi ľstívaho borcá."), ), "2": ( ("", "", "Lúk sokrušísja síľnych deržávoju tvojéju Christé, i síloju nemoščstvújuščiji prepojásašasja."), ("", "", "Íže jestestvóm bezsmérten Bóh, premúdre soďivája blahodátiju, vóinstva bezsmértnaja javľájet."), ("", "", "Nýňi predstojášče želánijem krájnim Christú spastísja vsím nám, ánheli molítesja."), ("Bohoródičen", "", "Načálo podľítno prijém izvístno iz tebé, íže ľít soďíteľ Máti prisnoďíívo."), ), ), "P4": ( "1": ( ("", "", "Pojú ťa, slúchom bo Hóspodi uslýšach, i užasóchsja, do mené bo ídeši, mené iščjá zablúždšaho. Ťím mnóhoje tvojé snizchoždénije, jéže na mjá, proslavľáju mnohomílostive."), ("", "", "V kál hrichóvnyj vpád, i výšneje blahoobrázije pohubích, Hóspodi, i bojúsja mučénija. 
Ťímže pokajánija krasotóju, smirénnuju dúšu mojú prosvití."), ("", "", "Ľstívymi hlahóly, ľstívyj ot tebé mja ukrád, sňíď sebí soďíla: Bóže vsích, sehó zlóby ischití mja, i pokajánija óbrazy prizoví mja."), ("Múčeničen", "", "Predavájemi na sňíď zvirém, i ohném žhómi múčenicy, i odirájemi, i na údy razdrobľájemi, Christá pomóščnika imúšče ne otverhóstesja: tohó priľížno molíte, za mjá okajánnaho."), ("Múčeničen", "", "Jedínicu čtúšče suščestvóm, Tróicu že lícy, Božestvó nesozdánno, sozdánije nikákože počétše vý Christóvy múčenicy poznástesja: sehó rádi múki vsjákija preterpíste."), ("Bohoródičen", "", "Voploščájetsja iz čístych krovéj tvojích, i čelovíkom sojediňájetsja vóleju Hospóď prečístaja, i prijémlet kájuščyjasja, íže drévle pohružénnyja hrichmí, tvojími moľbámi čístaja prísno preklaňájem."), ), "2": ( ("", "", "Uslýšach Hóspodi, slávnoje tvojé smotrénije, i proslávich čelovikoľúbče nepostižímuju tvojú sílu."), ("", "", "Molítvenniki privoždú ti bezplótnyja: íchže Ščédre priím jáko blahoutróben, ot hrichóv mjá izbávi."), ("", "", "Umú približájuščesja vsevinóvnomu, úmove božéstvenniji očiščájemi, prosviščájutsja prevýšnimi rázumy."), ("", "", "Božéstvennaja ukrašénija nebésnaho činonačálija, božéstvennym Dúchom ukrašájemi, sochraňájemi súť neprelóžňi."), ("Bohoródičen", "", "Razumív ťá izdaléča Ďívo, Bóha nosjáščuju voploščénna na rukú tvojéju, Isáia predvozvistí."), ), ), "P5": ( "1": ( ("", "", "Svíta podáteľu, i vikóv tvórče Hóspodi, vo svíťi tvojích poveľínij nastávi nás: rázvi bo tebé inóho Bóha ne znájem."), ("", "", "Íže prosvitívyj óči sľipáho, osľiplénnuju dúšu mojú prosvití, i bďíti sijú ukripí v ďijánija blahája, són že ľínostnyj do koncá voznenavíďiti."), ("", "", "Iscilí jedíne Spáse, ujázvlennuju mojú dúšu voístinnu, jázvoju ľútoju hrichá drévle v razbójniki vpádšemu jázvy iscilívyj."), ("Múčeničen", "", "Sobór čestných stradálec, sobór bezzakónnych honítelej posramí mnohovídnyja boľízni preterpí: i nýňi so svjaščénnymi sobóry ánheľskimi prísno rádujetsja."), ("Múčeničen", "", "Zvízdy mnohosvítlyja, zarí terpínija ispuščájušče, i vírnych dúšy Dúchom božéstvennym prosviščájušče, presvítliji javíšasja múčenicy."), ("Bohoródičen", "", "Nóvo jáko mlaďá nám predvíčnaho Sýna rodilá jesí, vo dvojú choťíniju i ďijániju súšča suhúba, jáko čelovíka vkúpi že i Bóha, vsepítaja."), ), "2": ( ("", "", "Úhľ Isáiji projavléjsja, sólnce iz ďívstvennyja utróby vozsijá, vo ťmí zablúždšym, Bohorazúmija prosviščénije dáruja."), ("", "", "Božéstvennaho Bohonačálija blíz sijájušče, Cheruvímy i Serafímy, svítlostiju, so prestóly prevzjátymi, Bohopodóbno próčaja prosviščájut."), ("", "", "Božéstvennaho Bohonačálija blíz sijájušče, Cheruvímy i Serafímy, svítlostiju, so prestóly prevzjátymi, Bohopodóbno próčaja prosviščájut."), ("", "", "Jáko vsehó svitolítija podáteľu, zercála svitovídnaja Slóve Bóžij, privél jesí tvojú zarjú s vesélijem i rázumom neuklónnym prijémľuščaja."), ("Bohoródičen", "", "Rádosť tebí, íže práotcem sítovanije razrišájuščuju, Havrijíl svjaščénnyj archánhel, s nebesé sleťív blahovistvováše čístaja Bohonevísto."), ), ), "P6": ( "1": ( ("", "", "V bézdňi hrichóvňij vaľájasja, neizsľídnuju milosérdija tvojehó prizyváju bézdnu: ot tlí Bóže mjá vozvedí."), ("", "", "Nizpadóchsja zlóboju zmijínoju, i na odrí otčájanija ležú: íže razsláblennyja hlahólom vozdvíhnuvyj Christé, i mené vozdvíhni."), ("", "", "Víjema vítry zmiínymi, i potopľájema vsehdá volnámi hrichóvnymi, spasí mja, jákože Petrá, čelovikoľúbče Hóspodi."), ("Múčeničen", "", "Preminúvše preďíly jestestvá, jáže páče 
jestestvá múki preterpíste: sehó rádi páče umá blahích spodóbistesja, múčenicy."), ("Múčeničen", "", "Jáko dóbriji, jáko krásniji, jáko čestníji, dobrotvórnomu i krásnomu sočetástesja, o nás vsehdá moľáščesja, preslávniji múčenicy."), ("Bohoródičen", "", "Ot vsích rodóv tvoréc izbrá tebé vseneporóčnaja, dobrótu Jákovľu, júže vozľubí, i iz tebé vozsijáv javísja."), ), "2": ( ("", "", "Hlás hlahól molébnych ot boľíznennyja Vladýko, duší uslýšav, ot ľútych mjá izbávi: jedín bo jesí nášeho spasénija vinóven."), ("", "", "Božéstvennaja mistá slávy tvojejá, načáľnaho svíta Vladýko Christé, svitoblistánnymi razžéhšesja sijáňmi, zaréju svitovídňi víčno prebyvájut."), ("", "", "Božéstvennaja mistá slávy tvojejá, načáľnaho svíta Vladýko Christé, svitoblistánnymi razžéhšesja sijáňmi, zaréju svitovídňi víčno prebyvájut."), ("", "", "Síloju božéstvennoju ukripľájemi, Serafími zovúšče hlásy nemólčnymi, pínije trisvjatóje, počitáti vozvóďat nás jestestvó trijipostásnoje."), ("Bohoródičen", "", "Kľátsja, jákože rečé Hospóď, ístinoju drévle Davídu, iz čréva tvojehó prošéd ispólni: rodilá bo jesí otrokovíce, nébom i zemléju cárstvujuščaho."), ), ), "P7": ( "1": ( ("", "", "Óbrazu zlatómu na póli deíri služímu, trijé tvojí ótrocy nebrehóša bezbóžnaho veľínija, posreďí že ohňá vvérženi, orošájemi pojáchu: blahoslovén jesí Bóže otéc nášich."), ("", "", "Úmnym razvraščénijem i bisóvskimi prilóhi vsjákaho studá ispólnichsja, daléče bých zápovidej tvojích, jákože blúdnyj. Nýňi že obraščájasja zovú: sohriších jákože ón, ne vozhnušájsja mené Iisúse, voploščéjsja mené rádi."), ("", "", "Bóže vsjáčeskich, Ninevíťany zapreščénija raždájuščaho smérť, pokájavšyjasja drévle spásl jesí: tákožde jedíne čelovikoľúbče, premnóhim bludóm oskvernénnoje sérdce mojé, nýňi obraščájuščejesja, ľútych izbávi múk."), ("Múčeničen", "", "Ujazvísja ľúťi, ujazvíti voschoťívyj vás zloďíj, Bohoblažénniji božéstvenniji múčenicy, i neiscílen prebýsť, vsím že vírnym iscilénije jázvy váša javíšasja, ujázvlenym prilóhom vétchaho zapináteľa."), ("Múčeničen", "", "Ni zviréj ľútych, ni mučítelej preščénija, ni ohňá, ni rán, ni mečá sekúščaho, ni razžžénnych ubojástesja sosúd, no jákože v čuždích ťilesích vsjá preterpíste velikomúčency: sehó rádi vinčáni býste."), ("Bohoródičen", "", "Neveščéstvennaho svíta žilíšče črévo tvojé býsť, prečístaja Bohonevísto otrokovíce, svítlosťmi božéstvennaho rázuma bezbóžije othnávšaho, jehóže pojúšče zovém: blahoslovén Bóh otéc nášich."), ), "2": ( ("", "", "Vitíji javíšasja ótrocy, ľubomúdrijšiji drévle, ot Bohoprijátnyja bo duší, Bohoslóvjašče ustnámi pojáchu: prebožéstvennyj otcév i náš Bóže blahoslovén jesí."), ("", "", "Ókrest prestóla neizrečénnyja tvojejá slávy, likújušče prísno nebésniji úmove, nemólčnymi ustnámi vospivájut: prebožéstvennyj otcév i náš Bóže blahoslovén jesí."), ("", "", "Ókrest prestóla neizrečénnyja tvojejá slávy, likújušče prísno nebésniji úmove, nemólčnymi ustnámi vospivájut: prebožéstvennyj otcév i náš Bóže blahoslovén jesí."), ("", "", "Jehdá víďiša ťá ánheľstiji číni na nebesá s plótiju voznosíma, vratá nebésnaja otverzóša tí, pojúšče: prebožéstvennyj otcév i náš Bóže blahoslovén jesí."), ("Bohoródičen", "", "Zakóna ťá načátok i prorókov Otrokovíce Havrijíl pokazúja vopijáše: sé tý raždáješi jedína vsepítaja, prebožéstvennaho otcém i nám Bóha blahoslovénnaho."), ), ), "P8": ( "1": ( ("", "", "Péšč inohdá óhnennaja vo Vavilóňi ďíjstva razďiľáše, Bóžijim veľínijem chaldéji opaľájuščaja, vírnyja že orošájuščaja, pojúščyja: blahoslovíte vsjá ďilá Hospódňa Hóspoda."), ("", "", "Podpadóch 
lukávomu, i kozňmí jehó poraboščén bých, i zrjá mja ľstívyj v nebrežéniji mnózi, chválitsja: sehó mjá ischití, íže zablúždšym obraščénije ščédre."), ("", "", "Razriší mja nerišímymi úzami strastéj soderžíma ťilésnymi, íže nerišímyja razrišívyj ot víčnych úz, i teščí k stezjám naprávi spasíteľnym Christé Spáse míra."), ("Múčeničen", "", "Íže strastém soďíteľije, na mňí smirénňim vsjú zlóbu sozidáša voístinnu: no íže strastém Christóvym jáko voístinnu podóbnicy blažénniji múčenicy, sehó izbáviti mjá vreždénija."), ("Múčeničen", "", "Koľína ne preklónše istukánnym, v péšč rán jákože ótrocy vvérženi býste drévle, i rosóju božéstvennoju jávľšesja neopálni prebýste, prechváľniji múčenicy, pojúšče Christá vo víki."), ("Bohoródičen", "", "Iznemóhšuju mnóhimi prilóhmi lukávaho, okajánnuju mojú dúšu, Bohorodíteľnice, cilébnymi tvojími moľbámi Ďívo, zdrávu soďílaj, da ťá slávľu vo vsjá víki."), ), "2": ( ("", "", "O podóbiji zláťi nebréhše treblažénniji júnoši, neizmínnyj i živýj Bóžij óbraz víďivše, sredí ohňá vospiváchu: osuščestvovánnaja da pojét Hóspoda vsjá tvár, i prevoznósit vo vsjá víki."), ("", "", "Podóbija tvojejá bláhosti sostávil jesí ánhely, zápovidi tvojá krípostiju soveršájuščyja Slóve, i spospišestvújuščyja vsím vírnym zváti: osuščestvovánnaja da pojét Hóspoda vsjá tvár, i prevoznósit vo vsjá víki."), ("", "", "Božéstvennymi dobroďítelmi nebésnoje ukrasíl jesí žitijé, svjatých ánhel svjaščennonačálije, prosviščája sích tebé Christé vopijúščich: osuščestvovánnaja da pojét Hóspoda vsjá tvár, i prevoznósit vo vsjá víki."), ("", "", "Rádujuščesja svítlo, vospojím božéstvennoje pínije bezplótnych neoskúdňi, i Vladýku Bohoslóvjašče, písň s ními vozopiím: osuščestvovánnaja da pojét Hóspoda vsjá tvár, i prevoznósit vo vsjá víki."), ("Bohoródičen", "", "Právosť, jáže múdri vsjá právjaščaja, vozľubí dostójno ťá preneporóčnuju i prečístuju Ďívu neskazánno presvjatája Bohoródice v ťá vséľšajasja, júže slavoslóvim, da pojét Hóspoda vsjá tvár, i prevoznósit vo vsjá víki."), ), ), "P9": ( "1": ( ("", "", "Ot Bóha Bóha Slóva, neizrečénnoju múdrostiju, prišédšaho obnovíti Adáma, jádiju v tľínije pádšaho ľúťi, ot svjatýja Ďívy neizrečénno voplotívšahosja nás rádi, vírniji jedinomúdrenno písňmi veličájem."), ("", "", "Sé vrémja ďílanija, čtó spíši bez umá dušé, vo unýniji hlubóci? Vostáni, slezámi prosvití sviščú tvojú. Potščísja, približájetsja ženích dušám: ne ukosní, da ne ostánešisja božéstvennych dveréj vňijúdu."), ("", "", "O kóľ strášnoje tvojé sudíšče, javľájuščeje vsjáko ďijánije náho pred ánhely i čelovíki! O kóľ ľútoje izrečénije, jéže sohréššym chóščeši dáti! Ot nehóže izbávi mjá Christé préžde koncá, podajá mi obraščénija slézy."), ("Múčeničen", "", "Známenavšesja božéstvennoju Áhnca i Pástyrja króviju, slávniji božéstvenniji stradáľcy, zakalájemi rádovachusja, jákože nezlóbiviji áhncy, i nýňi vsjú pervoródnych cérkov svjatúju, voístinnu na nebesích prosviščájut."), ("Múčeničen", "", "Sólnečnych sijánij, jáko svitíľnicy jávľšesja, lučámi stradánij vášich vsjáku dúšu ozarjájete, i vsjáku ťmú prélesti othoňájete dóbliji stradáľcy: sehó rádi dostójno víroju blažími jesté."), ("Bohoródičen", "", "Poščadí mja Hóspodi, poščadí, jehdá chóščeši sudíti, i ne osudí mené vo óhň, ni járostiju tvojéju obličí mené: mólit ťá Ďíva, jáže tebé Christé, róždšaja, i ánhel mnóžestvo, i múčenik soslóvije."), ), "2": ( ("", "", "Vés jesí želánije, vés sládosť, slóve Bóžij, Ďívy Sýne, Bóže bohóv Hóspodi, svjatých presvjatýj. 
Ťím ťá vsí s róždšeju veličájem."), ("", "", "Biloobrazújušče javíšasja ánheli svitoobrázno, božéstvennym učenikóm tvojím povídajušče tvojé vtoróje prišéstvije Christé: s nímiže ťá vsí Bohoslóvjašče veličájem."), ("", "", "Biloobrazújušče javíšasja ánheli svitoobrázno, božéstvennym učenikóm tvojím povídajušče tvojé vtoróje prišéstvije Christé: s nímiže ťá vsí Bohoslóvjašče veličájem."), ("", "", "Jáko blahoďíteľ vsjákaho jestestvá slovésnaho, tvoríši pérvij sí zarí vtorúju svítlost, krájnija rádi bláhosti: ťímže ťá vsí blahodarjášče veličájem."), ("Bohoródičen", "", "Razumív tvojé prečístaja, svitoobráznoje ďívstvo, i udívlsja, rádosť tebí prinesé božéstvennyj archistratíh: ťímže ťá vsí jáko Bohoródicu veličájem."), ), ), ), "ST": ( ("", "", "Soďíjannych mnóju ľútych pomyšľája bezmístnaja, k tvojím pribiháju ščedrótam, mytarjá podražája, i bludnícu slezívšuju i blúdnaho sýna: ťímže i pripádaju tí mílostive, préžde dáže ne osúdiši mjá, poščadí mja Bóže, i pomíluj mjá."), ("", "", "Bezzakónija mojá prézri Hóspodi, ot Ďívy roždéjsja, i sérdce mojé očísti, chrám sijé tvorjá svjatómu tvojemú Dúchu. Ne otríni mené ot tvojehó licá, bezmírnuju imíjaj véliju mílosť."), ("", "", "Po Chrisťí postradávše dáže do smérti, o strastotérpcy múčenicy, dúšy bo ímate na nebesích v rucí Bóžijej, i po míru vsemú počitájemi súť váša móšči. Svjaščénnicy poklaňájutsja i ľúdije vsí, rádujuščesja, sohlásno vopijém: són čésten pred Hóspodem smérť prepodóbnych jehó."), ("Bohoródičen", "", "Na ťá upovánije Bohoródice vozložíchom, čájanija da ne otpadém, spasí nás ot bíd, pomóščnice nedoumíjemym, i soprotívnych sovíty razorí: tý bo jesí náše spasénije, blahoslovénnaja."), ) ) #let L = ( "B": ( ("", "", "Hlás tí prinósim razbójnič, i mólimsja: pomjaní nás Spáse vo cárstviji tvojém."), ("", "", "Spasí mja Hóspodi Bóže, i óbščnika sotvorí části ot duší vozľúbľšym ťá."), ("", "", "Vsjá nebésnaja vóinstva so stráchom slúžat tí jáko Bóhu: íchže molítvami spasí nás."), ("", "", "Orúžijem víry stradáľcy, pobidíste polkí vrážija, i prineséni býste k Bóhu."), ("", "", "Počitájut číni úmniji, s čelovíčeskim ródom, Tróicu próstu, jedínaho že suščestvá Božestvó."), ("", "", "Prečístaja Vladýčice, jáže Bóha bez símene róždšaja, tohó molí spastísja nám."), ) )
https://github.com/timetraveler314/Note
https://raw.githubusercontent.com/timetraveler314/Note/main/24Fall/IM/im.typ
typst
#import "@local/MetaNote:0.0.1" : * #let detm = math.mat.with(delim: "|") #show: doc => MetaNote( title: [ Mathematical Foundations for the Information Age ], authors: ( ( name: "timetraveler314", affiliation: "University of Genshin", email: "<EMAIL>", ), ), doc, ) = High Dimensional Space == Properties of the Unit Ball (Counterintuitive) We focus on high-dimensional (unit) balls. Overall, we want to solve for the volume of a them first. An elegant way begins with hyper-spherical coordinates, which extends the polar coordinates to higher dimensions in a natural way. Like $r dif r dif theta$, $r^2 sin theta dif r dif theta dif phi$ in 3D, we merge all $d-1$ angles into a single differential term $d Omega$. Then integrating over the unit ball looks like: $ integral r^(d-1) integral_(SS^(d-1)) d Omega dif r. $ This gives us the intuition of accumulating layer-by-layer from the center to the surface, like peeling an onion. #theorem("Volume of a Unit Ball")[ $ V(d) = pi^(d/2) / Gamma(d/2 + 1). $ ] #proof[ Using the polar coordinates, we have: $ V(d) = integral_0^1 r^(d-1) dif r integral_(SS^(d-1)) d Omega = A(d)/d, $ where $A(d) = integral_(SS^(d-1)) d Omega$ denotes the surface area. We extract the surface area because it recurringly appears when integrating in the hyper-spherical coordinates. Now we extend the Gaussian integral to higher dimensions, which is key to finding out the factor $A(d)$. Consider $ I(d) = integral_RR ... integral_RR e^(-(x_1^2 + ... + x_d^2)) dif x_1 ... dif x_d. $ In Cartesian coordinates, we have $ I(d) = [integral_RR e^(-x^2) dif x]^d = pi^(d/2). $ In polar coordinates, we have $ I(d) = integral_0^(+oo) e^(-r^2) r^(d-1) dif r integral_(SS^(d-1)) d Omega = integral_0^(+oo) t^(d/2-1) e^(-t) dif t A(d) = 1/2 Gamma(d/2) A(d). $ Hence $ A(d) = (2 pi^(d/2)) / Gamma(d/2), V(d) = (2 pi^(d/2)) / (d Gamma(d/2)) = pi^(d/2) / Gamma(d/2 + 1). $ ] Balls in high dimensions exhibit some counterintuitive properties. We will show some of them. - *Observation 1*: The volume of a unit ball in high dimensions concentrates near the surface. $ Pr["Point is in the annulus"] = (V(d)-V(d)(1-epsilon)^d)/(V(d)) = 1 - (1-epsilon)^d >= 1 - e^(-epsilon d). $ From now on, analysis on the asymptotic behavior will prevail. Here we see that, most of the volume of the $d$-dimensional unit ball is contained in an annulus of width $O(1/d)$ near the boundary. That is to say, at least a constant fraction of the volume is within $O(1/d)$ of the boundary. Wow! - *Observation 2*: The volume of a unit ball in high dimensions concentrates near the equator. To formalize this, we need to solve for bounds of the volume of a spherical cap. Take a thin slab of height $h$ of the hemisphere. A coarse upper bound, but enough for our purpose: $ V_1 &= V(d-1) integral_h^1 (1 - r^2)^((d-1)/2) dif r \ "where" integral_h^1 (1 - r^2)^((d-1)/2) dif r &<= integral_h^1 e^(-r^2(d-1)/2) dif r <= integral_h^(+oo) r/h e^(-r^2 (d-1)/2) dif r\ &= e^(-(d-1) h^2/2)/(h(d-1)). $ Then it suffices to obtain a lower bound of the "slab". We approximate by the cylinder with the same height and base area as a lower bound. Specifically, the height is $h$ and the base radius is $sqrt(1-h^2)$. $ V_2 &>= V(d-1) h (1-h^2)^((d-1)/2) \ &>= V(d-1) h (1-(h^2(d-1))/2) ("Bernoulli", (1-x)^alpha >= 1-alpha x, alpha >= 1) $ Thus $ "Ratio" <= (e^(-((d-1)h^2)/2))/((d-1)h^2 (1-((d-1) h^2)/2)). $ Notice the frequent occurance of $(d-1)h^2$, to make the upper bound asymptotically a constant, we choose $h = c/sqrt(d-1)$, then $ "Ratio" <= 2/c e^(-c^2/2). 
$ #note("Why did we use a lower bound for the slab part?")[ In our approach, $V(d-1)$ remains hard to calculate. By virtue of the cylinder, $V(d-1)$ can be cancelled out, simplifying our discussion. ] The above discussion boils down to the theorem: #theorem("Concentrated Volume Near the Equator, Quantized")[ For $c>=1$ and $d>=3$, at least a $1-2/c e^(-c^2/2)$ fraction of the volume of the $d$-dimensional unit ball has $abs(x_1) <= c/sqrt(d-1)$. ] Now let's extend the single restriction by considering restriction on more dimensions or more points. For this, union bound is needed to bound the probability of multiple events happening together. #lemma("Union Bound Abbreviated")[ $ Pr[A union B] <= Pr[A] + Pr[B]. $ ] #proof[ Trivial. ] We first consider a small box centered at the origin. By the above theorem, $ Pr[exists i, abs(x_i) >= c/sqrt(d-1)] <= d dot 2/c e^(-c^2/2). $ This is the probability that a point falls out of the box. Let $c = 2 sqrt(ln d)$, the upper bound becomes $1/(d sqrt(ln d)) <= 1/2$. Then we can say the box takes up at least $1/2$ proportion of the sphere. Wait - is there anything wrong? One might wonder how it can be that nearly all the points in the sphere are very close to the surface and yet at the same time in a box of side-length $O(sqrt((ln d)/(d-1)))$. To answer this, we notice for each coordinate, a typical vlaue for $x_i$ will be $O(1/sqrt(d))$. Then the relation becomes clear. With the above observations, an immediate consequence arises when we draw more points: they are likely to have large norms, and are likely to be mutually orthogonal. To be precise, when we draw $n$ points at random, we expect at a high probability $1-O(1/n)$ that they all follow some properties. Here, union bound occurs repeatedly. #theorem("Near Orthogonality")[ Consider drawing $n$ points $p_1, ... ,p_n$ at random from the unit ball. With probability $1-O(1/n)$, we have: + $forall i, norm(p_i) >= 1 - (2 ln n)/d$; + $forall i != j, abs(p_i dot p_j) <= (sqrt(6 ln n))/(sqrt(d-1))$. ] #proof[ The first part follows from the observation 1, $ Pr[norm(p_i) < 1 - epsilon] <= e^(-epsilon d). $ By the union bound, we have $ Pr[exists i, norm(p_i) < 1 - epsilon] <= n e^(-epsilon d). $ We wish the upper bound to be $O(1/n)$, so we set $epsilon = (2 ln n)/d$, and the first part is proved. The second part follows from the observation 2. Consider pairwise dot products of $p_i, p_j$. Fix $p_i$ as the north pole, then the dot product is no more than the projection $x_1$. By the theorem, we have $ Pr[abs(p_i dot p_j) >= c/sqrt(d-1)] <= 2/c e^(-c^2/2). $ There are $binom(n,2) = O(n^2)$ pairs in total, again by the union bound, we have $ Pr[exists i != j, abs(p_i dot p_j) >= c/sqrt(d-1)] <= O(n^2) 2/c e^(-c^2/2) <= O(n^2) e^(-c^2/2). $ Letting $c = sqrt(6 ln n)$, the upper bound is $O(1/n)$. ] = Appendix == Useful Inequalities #theorem("Bernoulli")[]
https://github.com/HPDell/typst-cineca
https://raw.githubusercontent.com/HPDell/typst-cineca/main/test/month-summary.typ
typst
MIT License
#import "@preview/cineca:0.2.0": * #set page(paper: "a5", flipped: true) #let events = ( (datetime(year: 2024, month: 02, day: 21), (circle, (stroke: color.green, inset: 2pt))), (datetime(year: 2024, month: 02, day: 22), (circle, (stroke: color.green, inset: 2pt))), (datetime(year: 2024, month: 05, day: 27), (circle, (stroke: color.green, inset: 2pt))), (datetime(year: 2024, month: 05, day: 28), (circle, (stroke: color.blue, inset: 2pt))), (datetime(year: 2024, month: 05, day: 29), (circle, (stroke: color.blue, inset: 2pt))), (datetime(year: 2024, month: 06, day: 03), (circle, (stroke: color.blue, inset: 2pt))), (datetime(year: 2024, month: 06, day: 04), (circle, (stroke: color.yellow, inset: 2pt))), (datetime(year: 2024, month: 06, day: 05), (circle, (stroke: color.yellow, inset: 2pt))), (datetime(year: 2024, month: 06, day: 10), (circle, (stroke: color.red, inset: 2pt))), ) #calendar-month-summary( events: events ) #calendar-month-summary( events: events, sunday-first: true ) // An empty calendar #calendar-month-summary( events: ( (datetime(year: 2024, month: 05, day: 21), (none,)), ), stroke: 1pt, )
https://github.com/TypstApp-team/typst
https://raw.githubusercontent.com/TypstApp-team/typst/master/tests/typ/text/shaping.typ
typst
Apache License 2.0
// Test shaping quirks. --- // Test separation by script. ABCअपार्टमेंट // This is how it should look. अपार्टमेंट // This (without the spaces) is how it would look // if we didn't separate by script. अ पा र् ट में ट --- // A forced `latn` script inhibits Devanagari font features. #set text(script: "latn") ABCअपार्टमेंट --- // A forced `deva` script enables Devanagari font features. #set text(script: "deva") ABCअपार्टमेंट --- // Test that RTL safe-to-break doesn't panic even though newline // doesn't exist in shaping output. #set text(dir: rtl, font: "Noto Serif Hebrew") \ ט
https://github.com/0xPARC/0xparc-intro-book
https://raw.githubusercontent.com/0xPARC/0xparc-intro-book/main/old-stuff/r1cs.typ
typst
#import "preamble.typ":* = Implementing R1CS using the IPA #todo[Clean this up and merge some stuff together] We will implement the R1CS system using IPA. Recall that in R1CS, we wish to do the following: We have 3 $n times n$ matrices $A$, $B$, $C$ with entries in $FF_p$. We are interested in solving the quadratic equation $(A x) dot.circle (B x) = (C x)$ over $x in FF_p^n$ (here, $dot.circle$ denotes the Hadamard, i.e. element-wise, product). The prover wishes to show that they know a solution to this equation. In addition, they need to be able to reveal information about $x$ (specifically, they need to be able to open the $i$th entry for arbitrary $i$). == Pedersen Commits and the IPA API We will fix a large prime $p$ and some (Elliptic curve) group $E$ of order $p$. We first use the following: #definition("Basis")[ A set of points $P_1, P_2, dots, P_n$ is a #emph("basis") if they have no "known" linear combinations. ] Generically, we will let the verifier pick some large basis by just randomly sampling points on the curve. This is useful for the following reason. #definition("Pedersen Commitment")[ For a vector $x = (x_1, x_2, dots, x_n)$, we let the #emph("Pedersen commitment") of $x$ with respect to some basis $(G_1, G_2, dots, G_n)$ be the group element $x_1G_1 + x_2G_2 + dots.c + x_n G_n in E$. ] Note that the commitment depends on the choice of basis. We will later show that it is possible to perform a "change of basis". #theorem("Inner Product Argument")[ Suppose that $G_1, G_2, dots, G_n, H_1, H_2, dots, H_n, Q$ is a basis. There exists an interactive protocol with $O(log n)$ message complexity and $O(n)$ verifier computational complexity that allows a prover to prove the following about some commitment $C$: The prover knows $a_1, a_2, dots, a_n, b_1, b_2, dots, b_n, z$ such that $ C = a_1G_1 + a_2G_2 + dots.c + a_n G_n + b_1 H_1 + b_2 H_2 + dots.c + b_n H_n + z Q. $ Furthermore, the prover can show that $z = a_1 b_1 + a_2 b_2 + dots.c + a_n b_n$. ] One corollary is that this lets us query arbitrary inner products of some commit, as follows: 1. Prover commits to some $(a_1, a_2, dots, a_n)$ with respect to some basis $(G_1, G_2, dots, G_n)$, and sends the commitment $C = a_1 G_1 + a_2 G_2 + dots.c + a_n G_n$ to the verifier. 2. Verifier extends the basis to $(G_1, G_2, dots, G_n, H_1, H_2, dots, H_n, Q)$ and sends the rest of the basis to the prover, along with some query $(b_1, b_2, dots, b_n)$ to the prover. 3. Prover computes $z = a_1 b_1 + a_2 b_2 + dots.c + a_n b_n$ (and sends it to the verifier), and shows that $C' = C + b_1 H_1 + b_2 H_2 + dots.c + b_n H_n + z Q$ is in the desired format from IPA. In particular, note that in step 3, the verifier can compute $C'$ themselves and make sure it is the correct $C'$ being argued in the IPA. We thus have: #proposition[ There exists a protocol that runs in $O(n)$ verifier time, which lets the verifier get the value of $a dot b$ for some committed $a$. The proof size can be made to take $O(log n)$ space with Fiat-Shamir. ] <dot_product> By doing the following with $b = e_i$ for some standard basis vector $e_i$, we have the following: #corollary[ There exists a protocol that runs in $O(n)$ verifier time (and $O(log n)$ space with Fiat-Shamir) that reveals ] == Basic Primitives We also have the following: #theorem[ Suppose $C$ is the Pedersen commitment of $(a_1, a_2, dots, a_n)$ with respect to some basis $(G_1, G_2, dots, G_n)$. 
Then, there exists a protocol that lets the prover show that some public $C'$ can be written in the form $C' = a_1 H_1 + dots.c + a_n H_n$, where $(H_1, H_2, dots, H_n)$ are #emph("any") publicly known elements of $E$ (in particular, they may have known linear combinations). This protocol requires $O(n)$ verifier work. ] <check_vector> Before describing the protocol, we first need the following gadget: #lemma[ Suppose $C$, $C'$ are publicly known commitments with respect to $(G_1, G_2, dots, G_n)$ and $(H_1, H_2, dots, H_n)$, respectively, where $(G_i)$ and $(H_i)$ are bases but can overlap (in particular, they are not required to be linearly independent of each other). Then, it is possible to check, in $O(n)$ verifier time complexity, that $C$ and $C'$ are commitments of the same vector. ] <change_basis> #proof[ Suppose that $C$ is the commitment of $a = (a_1, a_2, dots, a_n)$, and $C'$ is the commitment of $b = (b_1, b_2, dots, b_n)$. The verifier picks a random challenge $lambda = (lambda_1, lambda_2, dots, lambda_n) in FF_p^n$, and sends it to the prover. The prover then computes $a dot lambda$ and $b dot lambda$ with IPA (see @dot_product). If $a$ and $b$ are not the same, this passes with probability $p^(-1)$. ] Now we are ready to prove @check_vector. #proof([of @check_vector])[ The verifier picks some random challenge $mu in FF_p$, and computes $C'' = C + mu C'$. If $C = a_1 G_1 + a_2 G_2 + dots + a_n G_n$, and $C' = a_1 H_1 + a_2 H_2 + dots + a_n H_n$, then $ C'' = a_1(G_1 + mu H_1) + a_2 (G_2 + mu H_2) + dots.c + a_n (G_n + mu H_n). $ Now, for randomly chosen $mu$, we also have that $(G_1 + mu H_1, G_2 + mu H_2, dots, G_n + mu H_n)$ have no known linear dependencies, so we can use @change_basis on $C$ and $C''$. ] We also have the following extension of @change_basis: #lemma[ Suppose $C$ and $C'$ are publicly known commitments of $a = (a_1, a_2, dots, a_n)$ and $b = (b_1, b_2, dots, b_n)$ with respect to the bases $(G_1, G_2, dots, G_n)$ and $(H_1, H_2, dots, H_n)$ (again, the bases $(G_i)$ and $(H_i)$ can have dependencies). Then, there exists a protocol with $O(n)$ verifier time complexity that checks that $a = b dot.circle c$, where $c$ is some publicly known vector. ] <check_public_hadamard> #proof[ The idea is for the verifier to pick a random challenge $lambda in FF_p^n$ and then to check that $ a dot lambda = (b dot.circle c) dot lambda <=> a dot lambda = b dot (c dot.circle lambda). $ Since $c$ is publicly known, we can run @dot_product to reveal the values of both $a dot lambda$ and $b dot (c dot.circle lambda)$. ] == Towards R1CS In this section, we will describe the following protocols: #proposition[ There exists a protocol that, for some commitments $C$ and $C'$ with respect to publicly known bases $(G_1, G_2, dots, G_n)$ and $(H_1, H_2, dots, H_n)$, can show that $C = x_1 G_1 + x_2 G_2 + dots.c + x_n G_n$ and $C' = y_1 H_1 + y_2 H_2 + dots.c + y_n H_n$, and that $y = M x$, where $M$ is a publicly known square matrix. This computation takes $O(n^2)$ verifier time, though it can be made to take $O(n^2)$ preprocessing and $O(n)$ per proof. ] <matrix_by_vector> #proposition[ There exists a protocol that, for some commitments $C_a, C_b, C_c$ with respect to publicly known bases, can show that the commitments are with respect to $a$, $b$, $c$, respectively, satisfying $c = a dot.circle b$.
] <hadamard_commit> To see how we can turn this into an R1CS system, suppose we have some problem $(A x) dot.circle (B x) = C x$, and we wish to show that the prover has a solution $x$ to this system. We can do the following: 1. Prover commits to $C_x$ with respect to some basis. 2. Prover provides commitments $C_(A x), C_(B x)$, and $C_(C x)$, and uses @matrix_by_vector to show that $C_(A x)$ is indeed the multiplication of $x$ by $A$ (and similarly for $C_(B x)$ and $C_(C x)$). 3. Prover shows that $C_(A x)$, $C_(B x)$ are commitments of vectors whose Hadamard product is the preimage of $C_(C x)$, using @hadamard_commit. This implies the following: #theorem[ There exists a proof system (implementing R1CS) using IPA, such that each proof takes linear time for the verifier (though with $O(n^2)$ preprocessing time). ] Now, we describe how to prove the propositions. #proof([of @matrix_by_vector])[ Note that we have $ y_i = M_(i, 1) x_1 + M_(i, 2) x_2 + dots.c + M_(i, n) x_n. $ So, if $C' = y_1 H_1 + y_2 H_2 + dots + y_n H_n$, we must have $ C' = sum_(i=1)^n sum_(j=1)^n M_(i, j) x_j H_i = sum_(j=1)^n x_j sum_(i=1)^n M_(i, j) H_i. $ So, letting $T_j = sum_(i=1)^n M_(i, j) H_i$, we see that we require $C' = x_1 T_1 + x_2 T_2 + dots.c + x_n T_n$. This can be done with @check_vector. ] #proof([of @hadamard_commit])[ As usual, our general proof strategy will be for the verifier to generate random $lambda$, and then the prover will show that $(a dot.circle b) dot lambda = c dot lambda$. The prover can reveal $t = c dot lambda$ (this is just @dot_product), so it remains to show that the prover can show that $(a dot.circle b) dot lambda = t$. Note that $(a dot.circle b) dot lambda = a dot (b dot.circle lambda)$. So, we can do the following: For some basis $(G_1, G_2, dots, G_n, H_1, H_2, dots, H_n, Q)$, the verifier performs the following proofs: 1. $C = a_1 G_1 + a_2 G_2 + dots + a_n G_n$ is the commitment of the same vector as $C_a$. 2. $C' = v_1 H_1 + v_2 H_2 + dots + v_n H_n$ is the commitment of some vector $v$ satisfying $v = b dot.circle lambda$ (using @check_public_hadamard). 3. $C + C' + t Q$ satisfies the IPA condition. This shows that $t = a dot v = (a dot.circle b) dot lambda$. ]
https://github.com/RandomcodeDev/FalseKing-Design
https://raw.githubusercontent.com/RandomcodeDev/FalseKing-Design/main/engine/renderer.typ
typst
= Renderer architecture The renderer will be implemented in multiple layers, flexible enough to support drawing and post-processing fairly complex scenes, extensible with more techniques and passes, and simple to use. == Hardware interface The hardware interface is an abstraction of Vulkan/Direct3D/GNM/whatever other ungodly API I have to deal with. It's low level, and implements render targets, materials, and geometry primitives, as well as special render targets just for going to the screen (they wrap the swap chain images). #list( [Handles VkInstance/IDXGIFactory, VkDevice/ID3D12Device, VkCommandBuffer/ID3D12GraphicsCommandList, VkSwapChainKHR/IDXGISwapChain], [Creates and manages geometry (VB+IB), textures, render targets, shaders, materials (texture + shader)], [Handles drawing given geometry + material], ) == Rendering pipeline Handles the process of taking data (model, position, etc of objects in scene, and general properties of the world) and using the hardware interface to render and post-process all of it. #list( [Calls for drawing objects and adding lights], [Uses multiple render passes to light and post-process the scene], [Rasterization-based deferred lighting passes], [Ray-tracing-based lighting passes], [Common post-processing passes], ) == Render system Calls into the rendering pipeline to draw scenes from different cameras, such as the player's eyes/over the shoulder, cinematic cameras, mirrors and other reflective surfaces, and literal cameras. #list( [ECS system that iterates over objects in the scene], [Sets parameters based on scene, such as sky details (even that could be an entity)], )
https://github.com/SkiFire13/typst-prooftree
https://raw.githubusercontent.com/SkiFire13/typst-prooftree/master/examples/example.typ
typst
MIT License
#import "../prooftree.typ": * #show: content => style(styles => { let content = box(inset: 2em, content) let sizes = measure(content, styles) page(width: sizes.width, height: sizes.height, margin: 0pt, content) }) #set text(font: "New Computer Modern", size: 12pt) #prooftree( axiom("A"), rule("B"), axiom("C"), rule(n: 2, "D"), axiom("E"), axiom("F"), rule(n: 2, "G"), rule("H"), rule(n: 2, "I") )
https://github.com/DJmouton/Enseignement
https://raw.githubusercontent.com/DJmouton/Enseignement/main/Templates/utils.typ
typst
// Allows controlling all the corrections at the same time #let CORRIGÉ_GLOBAL = false #set text( lang: "FR" ) // Frame for content with a title at the top left #let cadre(titre: [Joli cadre], couleur: black, body) = [ #v(1em) #block( radius: 1em, stroke: couleur, inset: 1em, width: 100%, breakable: true )[ // The title #move(dx: 0pt, dy: -20pt, rect( fill: white, // White fill to break the frame's outline radius: 7pt, [*#titre*]) ) #v(-2.8em) #body ] #v(0.5em) ] // Examples #cadre(titre: "Définition", couleur: red)[#lorem(30)] #cadre(titre: [Remarque])[#lorem(30)] // Allows stacking pieces of content on top of each other #let z-stack(..items) = { grid( columns: items.pos().len() * (1fr,), column-gutter: -100%, rows: 1, ..items ) } // Allows keeping two versions of a piece of content in the same place, toggled by a bool; ideal for keeping the same layout between a student version and a corrected version #let correction(correction_visible: CORRIGÉ_GLOBAL, énoncé: [], corrigé) = { block( breakable: true, stroke: 0pt, inset: 0pt, )[ #if correction_visible {z-stack(corrigé, hide(énoncé))} else {z-stack(stroke: (luma(250)), hide(corrigé), énoncé)} ] } // You'll never guess what this does #let pointillets = [#v(1em) #move(dy: 0em ,line(length: 100%, stroke: (dash: "dotted")))] // Examples #correction(énoncé: smallcaps(lorem(30)))[#lorem(40)] #correction(énoncé: lorem(40))[#for i in range(3) [#pointillets]]
https://github.com/takotori/PhAI-Spick
https://raw.githubusercontent.com/takotori/PhAI-Spick/main/sections/fadenpendel.typ
typst
= String pendulum #grid( columns: (32%, auto), gutter: 5pt, image("../figures/fadenpendel.png"),[ #set math.cases(reverse: true) $ cases(F = "string tension", F_G = "weight") F_"res" &= F - F_G \ &= ((m v^2)/l) $ Energy conservation tells us that the potential energy equals the kinetic energy. From this it follows: $ (m v^2)/2 &=m g h \ &= m g(l-l dot cos(phi) ) \ &= m g l(1-cos(φ) ) $ *Period of oscillation:* #grid( columns: (auto, auto), gutter: 5pt, [Spring], [$T=2 pi sqrt(m/D) $], [Mathematical pendulum], [$T approx 2 pi sqrt(l/g)$] ) ] )
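A quick numeric check of the small-angle period formula, as a short Python sketch (the length is an illustrative value, not from the original notes):

```python
import math

l = 1.0    # pendulum length in m (illustrative value)
g = 9.81   # gravitational acceleration in m/s^2

# Small-angle period of the mathematical pendulum: T = 2*pi*sqrt(l/g)
T = 2 * math.pi * math.sqrt(l / g)
print(f"T = {T:.3f} s")  # about 2.006 s for a 1 m pendulum
```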
https://github.com/ysthakur/PHYS121-Notes
https://raw.githubusercontent.com/ysthakur/PHYS121-Notes/main/Notes/Ch09.typ
typst
MIT License
#import "@preview/gentle-clues:0.3.0": important, note = Chapter 9: Momentum == Impulse and Momentum / Collision: Short-duration interaction between two objects. \ During a collision, it takes time to compress the object, and it takes time for the object to re-expand. The duration of a collision depends on the materials. / Impulse force: A large force exerted during a short interval of time *The effect of an impulsive force is proportional to the area under the force vs time curve* #align(center)[ #image("../Images/9-Impulse.png", width: 50%) ] / Impulse: The area under a force vs time curve (integral of force with respect to time?) It's a vector quantity pointing in the same direction as the average force (units of $upright(N dot s)$): $ arrow(J) = arrow(F)_"avg" Delta t $ / Momentum: Product of mass and velocity: $arrow(p) = m arrow(v)$ #important(title: "Impulse-momentum theorem")[ Impulse is change in momentum: $ arrow(J) = Delta arrow(p) $ ] / Total momentum ($arrow(P)$): Sum of momenta of all particles in system The impulse approximation states that we can ignore the small forces that act during the brief time of the impulsive force (only consider momenta and velocities immediately before and immediately after collision). == Conservation of Momentum #note(title: "Law of conservation of momentum")[ The total momentum of the system is conserved _as long as_ there are no external forces. ] $arrow(F)_"net"$ is the net force due to external forces. If $arrow(F)_"net" = arrow(0)$, the total momentum does not change. / Isolated system: System with no net external force acting on it, leaving the momentum unchanged. == Explosions / Explosion: When the particles of a system move apart after a brief, intense interaction (opposite of collision) The forces in an explosion are *internal* forces, so if the system is isolated, the total momentum is 0 == Inelastic Collisions / Perfectly inelastic collision: Two objects stick together and move with common final velocity (e.g. clay hitting the floor) / Perfectly elastic collision: Mechanical energy is conserved #note[ Although momentum is conserved in all collisions, mechanical energy is only conserved in a perfectly elastic collision. ] In an inelastic collision, some mechanical energy is converted to thermal energy. == Angular Momentum / Angular momentum ($L$): Analogue of linear momentum for circular motion, since linear momentum is not conserved for spinning objects ($upright("kg" dot m^2 \/ s)$) $ L = I omega $ Can be written like the linear impulse-momentum equation: $ tau_"net" Delta t = Delta L $ === Varying Moment of Inertia Unlike linear momentum, *an isolated, rotating object can change its angular velocity* Moment of inertia can change because the distribution of mass can change
https://github.com/Godalin/Typst-Notations
https://raw.githubusercontent.com/Godalin/Typst-Notations/main/lib.typ
typst
#import "CatT.typ" #import "Proof.typ" #import "Short.typ"
https://github.com/noahjutz/AD
https://raw.githubusercontent.com/noahjutz/AD/main/notizen/komplexitaet/factor.typ
typst
#import "@preview/cetz:0.2.2" #import "/config.typ": theme #cetz.canvas(length: 100%, { import cetz.draw: * import cetz.plot rect((0, 0), (1, 1), stroke: none) plot.plot( axis-style: none, y-max: 25, y-min: 0, { plot.add( domain: (0, 15), style: (stroke: black), a => a + 2 * calc.sin(a) ) plot.add( domain: (0, 15), style: (stroke: gray), a => a ) plot.add( domain: (0, 15), style: (stroke: theme.primary), a => 1.3 * a ) plot.annotate({ line((14, 14), (14, 18.2), mark: (symbol: "straight"), name: "diff") content("diff.start", anchor: "north")[$c$] }) }) })
https://github.com/jassielof/typst-templates
https://raw.githubusercontent.com/jassielof/typst-templates/main/latex-standard/font-sizes.typ
typst
MIT License
#let font-sizes = ( vpt: 5pt, vipt: 6pt, viipt: 7pt, viiipt: 8pt, ixpt: 9pt, xpt: 10pt, xipt: 10.95pt, xiipt: 12pt, xivpt: 14.4pt, xviipt: 17.28pt, xxpt: 20.74pt, xxvpt: 24.88pt ) #let font-10pt = ( margin: ( top: 1.725in, bottom: 1.65in, left: 1.85in, right: 1.865in, ), tiny: font-sizes.vpt, scriptsize: font-sizes.viipt, footnotesize: font-sizes.viiipt, small: font-sizes.ixpt, normalsize: font-sizes.xpt, large: font-sizes.xiipt, Large: font-sizes.xivpt, LARGE: font-sizes.xviipt, huge: font-sizes.xxpt, Huge: font-sizes.xxvpt ) #let font-11pt = ( margin: 1.75in, tiny: font-sizes.vipt, scriptsize: font-sizes.viiipt, footnotesize: font-sizes.ixpt, small: font-sizes.xpt, normalsize: font-sizes.xipt, large: font-sizes.xiipt, Large: font-sizes.xivpt, LARGE: font-sizes.xviipt, huge: font-sizes.xxpt, Huge: font-sizes.xxvpt ) #let font-12pt = ( margin: 1.5in, tiny: font-sizes.vipt, scriptsize: font-sizes.viiipt, footnotesize: font-sizes.xpt, small: font-sizes.xipt, normalsize: font-sizes.xiipt, large: font-sizes.xivpt, Large: font-sizes.xviipt, LARGE: font-sizes.xxpt, huge: font-sizes.xxvpt, Huge: font-sizes.xxvpt )
https://github.com/MyPedagogicalRessources/Demo-git-typst
https://raw.githubusercontent.com/MyPedagogicalRessources/Demo-git-typst/main/MonPremierDocument.typ
typst
#import "@preview/ilm:1.1.2": * #import "TP_Commons.typ": * #show: ilm.with( title: [R1-01 - Initiation Git et GitHub], author: "Les contributeurs du projet..", date: datetime(year: 2024, month: 10, day: 01), abstract: [Travaux dirigés], preface: [], // bibliography: bibliography("refs.bib"), figure-index: (enabled: false), table-index: (enabled: false), listing-index: (enabled: false) ) #set heading(numbering: "1.1.") #include "TP0-Introduction-git.typ" = Vos notes sur les commandes Git et GitHub == Git 1. Initialisation d'un dépôt Git _Votre réponse ici..._ #fix[\#1 - Initialisation d'un dépôt Git] 2. Commit de fichiers _Votre réponse ici..._ #fix[\#2 - Initialisation d'un dépôt Git] 3. Ignorer des fichiers _Votre réponse ici..._ #fix[\#3 - Initialisation d'un dépôt Git] == GitHub Syncronisation avec un dépôt distant _Votre réponse ici..._ #fix[\#4 - Syncronisation avec un dépôt distant] #include "TP1-Gh-Classroom.typ"
https://github.com/NycRat/arduino-workshop
https://raw.githubusercontent.com/NycRat/arduino-workshop/main/slides_arduino_code/main.typ
typst
#set page(width: 1920pt, height: 1080pt, fill: rgb("#fffbfc")) #set align(center + horizon) #set page(footer: context [ #set text(30pt) #set align(center) #counter(page).display( "1/1", both: true, ) ]) #let example(body, size: 30pt) = { set text(size) block(stroke: gray, fill: rgb("#f0f0f0"), inset: 20pt, radius: 5pt, body) } /* #let x = 2.2 #set page(width: 11in * x, height: 8.5in * x, fill: rgb("#fffbfc")) */ #set text(40pt, font: "IBM Plex Mono") #[ #set align(center + horizon) = Programming Small Little Computers #emoji.computer #text(rgb("#888"))[Basics of Arduino Language with _Eric, <NAME>_ ] ] #pagebreak() = What is Arduino? #pagebreak() it's a company that creates microcontrollers and software for them #pagebreak() what do arduinos mean to us, for this lesson? #pagebreak() microcontrollers that are Arduino uno compatible #pagebreak() = What is a microcontroller? #pagebreak() small device that has many components of a regular computer, can execute code, can send input and output signals #pagebreak() #columns(3)[ #image("washer.png") #image("microwave.png") #image("camera.png") ] #pagebreak() what does this mean for arduino programming? #pagebreak() we can program logic that takes input, and sends output. that is it. #pagebreak() #image("microwave.png") #pagebreak() if (pressed one button) { set time to one minute } if (pressed two button) { set time to two minutes } if (pressed start button) { start microwave } #pagebreak() if (pressed_button == "1") { time = 1; } if (pressed_button == "2") { time = 2; } if (pressed_button == "start") { start("microwave"); } #pagebreak() = How do we program an arduino? #pagebreak() Arduino Programming Language. an extended version of C++, for arduino. #pagebreak() == How do we program with arduino language? = And how do we upload the code onto the arduino? #pagebreak() #image("arduino_ide.png") #pagebreak() #image("arduino_ide_labelled.png") #pagebreak() = How do I write Arduino Programming Language? 
#pagebreak() #example[ ```c // The setup() function runs once when you power the Arduino or press reset // Lines starting with "//" are comments, only here to explain the code void setup() { // this runs a function to set the Pin 13 to be for output pinMode(13, OUTPUT); // this runs a function to send a HIGH voltage (5V) to Pin 13 (turns on LED) digitalWrite(13, HIGH); } // The loop() function runs over and over again after setup() is done void loop() { // nothing is done here just yet } ``` ] basics #pagebreak() #example[ ```c void setup() { pinMode(13, OUTPUT); digitalWrite(13, HIGH); } // The loop() function runs over and over again after setup() is done void loop() { // send HIGH voltage to Pin 13 (turns on LED) digitalWrite(13, HIGH); delay(100); // waits for 100ms // send LOW voltage to Pin 13 (turns off LED) digitalWrite(13, LOW); delay(100); // waits for 100ms } ``` ] loop function #pagebreak() #example[ ```c // set variables that are often repeated int LED_PIN = 13; int DELAY_TIME = 100; void setup() { pinMode(LED_PIN, OUTPUT); } void loop() { digitalWrite(LED_PIN, HIGH); delay(DELAY_TIME); digitalWrite(LED_PIN, LOW); delay(DELAY_TIME); } ``` ] variables #pagebreak() #example[ ```c int LED_PIN = 13; int BUTTON_PIN = 2; int DELAY_TIME = 100; void setup() { // set BUTTON_PIN to be for input pinMode(BUTTON_PIN, INPUT); pinMode(LED_PIN, OUTPUT); } void loop() { // stores the voltage of BUTTON_PIN int button_voltage = digitalRead(BUTTON_PIN); // writes the same voltage to the LED_PIN digitalWrite(LED_PIN, button_voltage); } ``` ] input #pagebreak() #example[ ```c int LED_PIN = 13; int BUTTON_PIN = 2; int DELAY_TIME = 100; void setup() { pinMode(BUTTON_PIN, INPUT); pinMode(LED_PIN, OUTPUT); } void loop() { // check if BUTTON_PIN is being provided a HIGH voltage if (digitalRead(BUTTON_PIN) == HIGH) { // if it is being provided HIGH voltage, run this code digitalWrite(LED_PIN, HIGH); delay(DELAY_TIME); digitalWrite(LED_PIN, LOW); delay(DELAY_TIME); } // does nothing if our if statement is false } ``` ] if statements #pagebreak() #example[ ```c void setup() { // begin communication with computer at a reliable speed Serial.begin(9600); // prints (displays text) on computer Serial.print("Hello World"); } void loop() { // prints out milliseconds since the arduino started Serial.print(millis()); } ``` ] #pagebreak() those are the basics. now you have to piece everything together
https://github.com/astrale-sharp/typstfmt
https://raw.githubusercontent.com/astrale-sharp/typstfmt/main/.github/ISSUE_TEMPLATE/bug_report.md
markdown
Apache License 2.0
--- name: Bug report about: Report a bug title: '' labels: '' assignees: '' --- - [ ] I have tested with the master version of typstfmt and not just typst-lsp **Describe the bug** A clear and concise description of what the bug is. **To Reproduce** Steps to reproduce the behavior:
https://github.com/fufexan/cv
https://raw.githubusercontent.com/fufexan/cv/typst/modules/projects.typ
typst
#import "../src/template.typ": * #cvSection("Projects & Associations") #cvEntry( title: [#link("https://github.com/fufexan/dotfiles")[dotfiles]], society: [], date: [2021 - Present], location: [], description: list( [Managing my desktop machines running NixOS using Git and Flakes], [Every machine is accessible everywhere through Tailscale], [Useful configurations accumulated over the years], ) ) #cvEntry( title: [#link("https://github.com/fufexan/infra")[Infrastructure]], society: [], date: [2022 - Present], location: [], description: list( [Server management provisioned through Terraform, running NixOS, connected over Tailscale], [*alpha*: Oracle ARM VPS running multiple services], [*home server*: Older machine running various services at home], ) ) #cvEntry( title: [#link("https://github.com/fufexan/nix-gaming")[nix-gaming]], society: [], date: [2021 - Present], location: [], description: list( [Repo hosting an ever-growing collection of games and related software packaged for Nix], ) ) #cvEntry( title: [Contributions], society: [], date: [], location: [], description: list( [Maintainer of #link("https://github.com/hyprwm")[Hyprland and HyprWM]], [Maintainer for #link("https://github.com/NixOS/nixpkgs")[Nixpkgs]], [Contributor to #link("https://github.com/nix-community")[Nix Community projects]], ) )
https://github.com/typst/packages
https://raw.githubusercontent.com/typst/packages/main/packages/preview/super-suboptimal/0.1.0/README.md
markdown
Apache License 2.0
# Super-Suboptimal A Typst package enabling support for Unicode super- and subscript characters in equations. ## Usage The package exposes the template-function `super-subscripts`. It affects all `math.equation`s by attaching every superscript- and subscript-character to the first non-space-element on its left. ```typ #import "@preview/super-suboptimal:0.1.0": * #show: super-subscripts For all $(x,y)∈ℝ²$: $ q := norm((x,y))₂ < 1 ==> ∑ᵢ₌₁ⁿ q ⁱ < ∞ $ ``` ![Rendering of above code](./assets/example0.svg) Because code like `$x+yᶻ$` throws an "unknown variable" error, the package also exposes the function `eq`, which inserts spaces before every superscript- and subscript-character and passes the result on to `math.equation`. This comes at the cost of missing syntax-highlighting and code-suggestions in your IDE. `eq` accepts a `raw` string as a positional parameter, and an argument-sink that's passed onto `math.equation`. Unless specified otherwise in the argument-sink, the resulting equation is typeset with `block: true` if and only if the `raw` also satisfies `block: true`. `eq` is automatically applied to every `raw` with `lang: "eq"`. ```` #eq(`0 = aᵇ`) #eq(``` 1 = x+yᶻ ```) #eq(`2 = aᵇ`, block: true, numbering: "(1)") ```eq 3 = aᵇᶜ⁺ᵈ₃ₑ⁽ᶠ⁻ᵍ⁾ₕᵢ ``` ```` ![Rendering of above code](./assets/example1.svg) Sometimes in mathematical writing, variables are decorated with an asterisk, e.g. `$x^*$`. The character `꙳` can now be used as well: `$x꙳ = x^*$`. ## Known issues - As mentioned above, `$aᵇ$` leads to an "unknown variable" error. As a workaround, `$a ᵇ$` produces the same output, or you can use the `eq` function described above. - The first workaround also means I can't reasonably implement top-left and bottom-left attachments. For example, `$a ³b$` is rendered like `$attach(a, t: 3) b$`, rather than `$a attach(b, tl: 3)$`. - Multiple attachments are concatenated into one content without another pass of `equation`. For example, ```#eq(`0ˢⁱⁿ⁽ᵏ⁾`)``` is equivalent to `$0^(s i n "(" k ")")$`, rather than `$0^sin(k)$`. I won't fix this, because: - Another pass of `equation` would cause performance issues at best, and infinite loops at worst. - If this were fixed, code such as `$e ˣ ʸ$` would undesirably produce an "unknown variable `xy`" error. - Let's call a piece of content "small" if it consists of only a single non-separated sequence of characters in Typst (internally, this is the distinction between the content-functions `sequence` and `text`). For instance, `$1234$` and `$a$` constitute "small" content, but `$a b$` and `$3a$` and `$1+2+3+4+5$` do not. This package only runs on non-"small" pieces of content. For example, `$sqrt(35²)$` still renders with the default-Unicode-character and will look different from `$sqrt(35^2)$`. On the other hand, `$sqrt(a⁶)$` *is* rendered correctly. This is because `35²` constitutes "small" content, but `a⁶` does not. A workaround is implemented for "small" content immediately within an equation, i.e. not nested within another content-function. For example, `$7²$` renders the same as `$7^2$`, even though it's "small" content. - Equations within other content-elements might trigger multiple show-rule-passes, possibly causing performance-issues.
https://github.com/typst/packages
https://raw.githubusercontent.com/typst/packages/main/packages/preview/great-theorems/0.1.0/example.typ
typst
Apache License 2.0
#import "@preview/great-theorems:0.1.0": * #import "@preview/headcount:0.1.0": * #set heading(numbering: "1.1") #show: great-theorems-init #show link: text.with(fill: blue) #let mathcounter = counter("mathblocks") #show heading: reset-counter(mathcounter) #let theorem = mathblock( blocktitle: "Theorem", counter: mathcounter, numbering: dependent-numbering("1.1"), ) #let lemma = mathblock( blocktitle: "Lemma", counter: mathcounter, numbering: dependent-numbering("1.1") ) #let remark = mathblock( blocktitle: "Remark", prefix: [_Remark._], inset: 5pt, fill: lime, radius: 5pt, ) #let proof = proofblock() = Some Heading #theorem[ This is some theorem. ] <mythm> #lemma[ This is a lemma. Maybe it's used to prove @mythm. ] #proof[ This is a proof. ] = Another Heading #theorem(title: "some title")[ This is a theorem with a title. ] <thm2> #proof(of: <thm2>)[ This is a proof of the theorem which has a title. ] #remark[ This is a remark. The remark box has some custom styling applied. ]
https://github.com/jonaspleyer/peace-of-posters
https://raw.githubusercontent.com/jonaspleyer/peace-of-posters/main/docs/content/showcase/2024-10-22-pop-template/main.typ
typst
MIT License
#import "@preview/peace-of-posters:0.4.3" as pop #set page("a0", margin: 1cm) #pop.set-poster-layout(pop.layout-a0) #pop.set-theme(pop.uni-fr) #set text(size: pop.layout-a0.at("body-size")) #let box-spacing = 1.2em #set columns(gutter: box-spacing) #set block(spacing: box-spacing) #pop.update-poster-layout(spacing: box-spacing) #pop.title-box( "Peace of Posters Template", authors: "<NAME>¹", institutes: "¹Freiburg Center for Data-Analysis and Modelling", keywords: "Peace, Dove, Poster, Science", image: image("peace-dove-thumbnail.png"), ) #columns(2,[ #pop.column-box(heading: "Columbidae")[ 'Columbidae is a bird family consisting of doves and pigeons. It is the only family in the order Columbiformes.' #cite(<wiki:Columbidae>) #figure(caption: [ Pink-necked green pigeon #cite(<wiki:File:Treron_vernans_male_-_Kent_Ridge_Park.jpg>). ])[ #image("Treron_vernans_male_-_Kent_Ridge_Park.jpg") ] ] #let hba = pop.uni-fr.heading-box-args #hba.insert("width", 60%) #let bba = pop.uni-fr.body-box-args #bba.insert("width", 60%) #bba.insert("inset", 0pt) #pop.column-box( heading: "Biological Information", body-box-args: bba, heading-box-args: hba )[ #table( columns: (auto, 1fr), inset: 0.5cm, [Domain],[Eukaryota], [Kingdom],[Animalia], [Phylum],[Chordata], [Class],[Aves], [Clade],[Columbimorphae], [Order],[Columbiformes], [Family],[Columbidae], [Type genus],[Columba], ) ] #pop.column-box()[ #figure(caption: [Geographic range of the family #cite(<wiki:Columbidae>)])[ #image("Pigeon_range.png") ] ] #colbreak() #pop.column-box(heading: "General Relativity")[ Einstein's brilliant theory of general relativity starts with the field equations #cite(<Einstein1916>). $ G_(mu nu) + Lambda g_(mu nu) = kappa T_(mu nu) $ However, they have nothing to do with doves. ] #pop.column-box(heading: "Peace be with you")[ #figure(caption: [ 'Doves [...] are used in many settings as symbols of peace, freedom or love. Doves appear in the symbolism of Judaism, Christianity, Islam and paganism, and of both military and pacifist groups.' #cite(<wiki:Doves_as_symbols>). ])[ #image("peace-dove.png") ] ] #pop.column-box(heading: "Etymology")[ Pigeon is a French word that derives from the Latin pīpiō, for a 'peeping' chick, while dove is an ultimately Germanic word, possibly referring to the bird's diving flight. The English dialectal word culver appears to derive from Latin columba #cite(<wiki:Online_Etymology_Dictionary>). A group of doves is called a "dule", taken from the French word deuil ('mourning') #cite(<Lipton1991-qa>). ] #pop.column-box(heading: "Fill space with a box", stretch-to-next: true)[ Notice that this box would not fill the entire space up to the bottom of the page but we can stretch it such that it does so anyway. #bibliography("bibliography.bib") ] ]) #pop.bottom-box()[ What does the Dove say? $-->$ Ouuuhhhh - Oh ... Ouuuhhhhh - Oh #linebreak() #link("https://www.youtube.com/watch?v=dQw4w9WgXcQ") ]
https://github.com/mrcinv/nummat-typst
https://raw.githubusercontent.com/mrcinv/nummat-typst/master/osnove_julie.typ
typst
= Basic commands in the Julia language We run Julia programs by running the `julia` command in a terminal #rect( [ - *julia* starts the Julia REPL (Read Eval Print Loop). - *julia program.jl* Runs the code in the file `program.jl`. - *julia --project="."* activates the project in the current directory. ] ) == Julia REPL (Read Eval Print Loop) Entering the following characters switches the REPL mode #let ukaz(ukaz, opis) = [ - #h(0.2em) *#raw(ukaz)* #h(1em) #opis ] #rect( [ #ukaz("?", [Documentation mode (*`help>`*). Entering *`?ime`* shows the documentation for the function `ime`.]) #ukaz( ";", [External shell mode (*`shell>`*). Commands of the system shell can be entered.] ) #ukaz( "]", [Package mode (*`pkg>`*). Commands from the `Pkg` module can be entered.] ) #ukaz( "include(\"program.jl\")", [Runs the code from the file `program.jl`.] ) #ukaz( "using MojPaket", [Loads the package `MojPaket`.] ) ] ) == Packages #ukaz( "import Pkg", [loads the `Pkg` module] ) REPL package mode (`pkg>`): #rect( [ #ukaz( "activate Direktorij", [Activate the project in the folder `Direktorij`.] ) #ukaz( "add ImePaketa", [Install a package into the currently activated project.] ) #ukaz( "test", [Run the tests defined in `test/runtests.jl`.] ) ] ) == Control structures #rect([ ```jl if 1 == 2 print("One equals two.") end ``` ]) #rect([ ```jl for i=1:10 println("The loop index is $i") end ``` ]) #rect([ ```jl throw("Something expectedly unexpected happened.") ``` ]) == Functions #rect( [ ```jl f(x,y) = x*y + x # one-line definition ``` ] ) #rect( [ ```jl function mojfun(x, y) # definition in a block return x*y + y end ``` ] ) #rect( [ ```jl fun = (x, y) -> x*y + x # anonymous function ``` ] )
https://github.com/goshakowska/Typstdiff
https://raw.githubusercontent.com/goshakowska/Typstdiff/main/tests/test_working_types/header/header_inserted.typ
typst
= Heading = Second heading = Inserted heading
https://github.com/danbalarin/vse-typst-template
https://raw.githubusercontent.com/danbalarin/vse-typst-template/main/lib/lib.typ
typst
#import "title-page.typ": title-page #import "abstract-keywords.typ": abstract-keywords #import "macros.typ": heading-like, revisit, custom-lorem #import "outline.typ": custom-outline #import "headings.typ": heading-blocks #let template( title: "Thesis Title", date: "January 2025", university: "Prague University of Economics and Business", faculty: "Faculty of Informatics and Statistics", study-program: "Study Program", specialization: "", author: "<NAME>", supervisor: "Ing. <NAME>", consultant: "", city: "Prague", lang: "en", acknowledgements: none, abstract-cs: none, keywords-cs: none, abstract-en: none, keywords-en: none, separated-abstracts: false, bibliography-file: none, body, ) = { set document(title: title, author: author) set text(size: 11pt, lang: lang, font: "DejaVu Sans", region: "GB") set page( paper: "a4", // margin: (right: 25mm, left: 3cm, top: 25mm, bottom: 25mm), margin: (right: 20mm, left: 2cm, top: 25mm, bottom: 25mm) ) set heading(numbering: "1.1.1") show heading.where(level: 1): it => { heading-blocks.at(1)(it) } show heading.where(level: 2): it => { heading-blocks.at(2)(it) } show heading.where(level: 3): it => { heading-blocks.at(3)(it) } title-page( title: title, date: date, university: university, faculty: faculty, study-program: study-program, specialization: specialization, author: author, supervisor: supervisor, consultant: consultant, city: city, ) set page( margin: (right: 25mm, left: 3cm, top: 25mm, bottom: 25mm), footer: context [ #line(length: 100%) #h(1fr) #counter(page).display( "1", ) ]) set align(left) if acknowledgements != none [ #align(bottom, [ #heading-like([Acknowledgements], level:2) #acknowledgements #pagebreak() ]) ] abstract-keywords( abstract-cs: abstract-cs, keywords-cs: keywords-cs, abstract-en: abstract-en, keywords-en: keywords-en, separated-abstracts: separated-abstracts, ) custom-outline() set par(leading: 1.2em) set block(spacing: 1.2em) body if bibliography != none [ #bibliography(bibliography-file, style: "american-psychological-association") ] }
https://github.com/noahjutz/AD
https://raw.githubusercontent.com/noahjutz/AD/main/appendix/code/main.typ
typst
= Code == MaxTeilSum-2d <code-mts2d> ```python import math import numpy as np def kadane(array): s = 0 m = -math.inf for x in array: s = max(x, s + x) m = max(m, s) return m def combinations(matrix): (n, _) = matrix.shape for i2 in range(n+1): for i1 in range(i2): yield np.sum(matrix[i1:i2], axis=0) def max_subarray_2d(matrix): return max(map(kadane, combinations(matrix))) matrix = np.loadtxt("data.txt", dtype=int) m = max_subarray_2d(matrix) print(m) ```
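The inner loop must iterate over `range(i2)` so that every non-empty row range `[i1, i2)` is reduced to its column sums before Kadane runs on it. A small brute-force cross-check of that fix (a hypothetical test harness, reusing `max_subarray_2d` from the listing above):

```python
import itertools
import numpy as np

def brute_force_2d(matrix):
    # Enumerate every axis-aligned submatrix and take the maximum sum.
    (n, m) = matrix.shape
    return max(
        matrix[i1:i2, j1:j2].sum()
        for i1, i2 in itertools.combinations(range(n + 1), 2)
        for j1, j2 in itertools.combinations(range(m + 1), 2)
    )

rng = np.random.default_rng(0)
test = rng.integers(-9, 10, size=(6, 7))
assert max_subarray_2d(test) == brute_force_2d(test)
```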
https://github.com/typst/packages
https://raw.githubusercontent.com/typst/packages/main/packages/preview/unichar/0.1.0/ucd/block-3100.typ
typst
Apache License 2.0
#let data = ( (), (), (), (), (), ("BOPOMOFO LETTER B", "Lo", 0), ("BOPOMOFO LETTER P", "Lo", 0), ("BOPOMOFO LETTER M", "Lo", 0), ("BOPOMOFO LETTER F", "Lo", 0), ("BOPOMOFO LETTER D", "Lo", 0), ("BOPOMOFO LETTER T", "Lo", 0), ("BOPOMOFO LETTER N", "Lo", 0), ("BOPOMOFO LETTER L", "Lo", 0), ("BOPOMOFO LETTER G", "Lo", 0), ("BOPOMOFO LETTER K", "Lo", 0), ("BOPOMOFO LETTER H", "Lo", 0), ("BOPOMOFO LETTER J", "Lo", 0), ("BOPOMOFO LETTER Q", "Lo", 0), ("BOPOMOFO LETTER X", "Lo", 0), ("BOPOMOFO LETTER ZH", "Lo", 0), ("BOPOMOFO LETTER CH", "Lo", 0), ("BOPOMOFO LETTER SH", "Lo", 0), ("BOPOMOFO LETTER R", "Lo", 0), ("BOPOMOFO LETTER Z", "Lo", 0), ("BOPOMOFO LETTER C", "Lo", 0), ("BOPOMOFO LETTER S", "Lo", 0), ("BOPOMOFO LETTER A", "Lo", 0), ("BOPOMOFO LETTER O", "Lo", 0), ("BOPOMOFO LETTER E", "Lo", 0), ("BOPOMOFO LETTER EH", "Lo", 0), ("BOPOMOFO LETTER AI", "Lo", 0), ("BOPOMOFO LETTER EI", "Lo", 0), ("BOPOMOFO LETTER AU", "Lo", 0), ("BOPOMOFO LETTER OU", "Lo", 0), ("BOPOMOFO LETTER AN", "Lo", 0), ("BOPOMOFO LETTER EN", "Lo", 0), ("BOPOMOFO LETTER ANG", "Lo", 0), ("BOPOMOFO LETTER ENG", "Lo", 0), ("BOPOMOFO LETTER ER", "Lo", 0), ("BOPOMOFO LETTER I", "Lo", 0), ("BOPOMOFO LETTER U", "Lo", 0), ("BOPOMOFO LETTER IU", "Lo", 0), ("BOPOMOFO LETTER V", "Lo", 0), ("BOPOMOFO LETTER NG", "Lo", 0), ("BOPOMOFO LETTER GN", "Lo", 0), ("BOPOMOFO LETTER IH", "Lo", 0), ("BOPOMOFO LETTER O WITH DOT ABOVE", "Lo", 0), ("BOPOMOFO LETTER NN", "Lo", 0), )
https://github.com/PorterLu/Typst
https://raw.githubusercontent.com/PorterLu/Typst/main/make_a_template_II/make_a_template.typ
typst
``` #let conf(title, doc) = { set page( paper: "us-letter", header: align( right + horizon, title ) ) set par(justify: true) set text( font: "Linux Libertine", size: 11pt, ) columns(2, doc) } #show: doc => conf( [Paper title], doc ) ``` #set par(justify: true) #let conf( title: none, authors: (), abstract: [], doc, ) = { set align(center) text(17pt, title) let count = authors.len() let ncols = calc.min(count, 3) grid( columns: (1fr, ) * ncols, row-gutter: 24pt, ..authors.map(author => [ #author.name \ #author.affiliation \ #link("mailto:" + author.email) ]), ) par(justify: false)[ *Abstract* \ #abstract ] set align(left) columns(2, doc) } #show: doc => conf( title: [Towards Improved Modeling], authors: ( ( name: "<NAME>", affiliation: "Artos Institute", email: "<EMAIL>", ), ( name: "<NAME>", affiliation: "Honduras State", email: "<EMAIL>", ), ), abstract: lorem(80), doc, ) = Introduction #lorem(300) == Motivation #lorem(200) == Problem Statement #lorem(200) = Related Work #lorem(400)
https://github.com/ludwig-austermann/modpattern
https://raw.githubusercontent.com/ludwig-austermann/modpattern/main/examples/comparison.typ
typst
MIT License
#import "../main.typ": * #set page(height: auto, width: auto, margin: 10pt) #let patterncontents = ( "diagonal line": line(start: (0%,0%), end: (100%, 100%)), "weird lines": { place(line(start: (0%,0%), end: (100%, 100%))) place(line(start: (0%,0%), end: (50%, 100%))) place(line(start: (50%,0%), end: (100%, 100%))) }, "circles": circle(), "simple math": $x$, "explained": box(height: 100%, width: 100%, stroke: red + 0.2pt, $x$), "math": $x^n$, "fun": $()$, "more fun": $sum$, ) #set text(8pt) #table( columns: 3, [], [pattern], [modpattern], ..patterncontents.pairs().map( ((k, v)) => (k, rect(fill: pattern(size: (10pt, 5pt), v)), rect(fill: modpattern((10pt, 5pt), v))) ).flatten() )
https://github.com/CHHC-L/ciapo
https://raw.githubusercontent.com/CHHC-L/ciapo/master/examples/long-example-0/template.typ
typst
MIT License
#let scriptsize = 8pt #let margin-x = 1.2cm #let transition( // The slide accent color. Default is a dark navy blue. accent-color: rgb("1f1e33"), // The text color. Default is white. text-color: white, // The slide content. body, ) = { page( width: 16cm, height: 12cm, margin:(top: 2cm, bottom: 2cm, left: margin-x, right: margin-x), background: rect(width: 100%, height: 100%, fill: accent-color), header: none, footer: none, )[ #set align(center+horizon) #set text(28pt, fill: text-color, weight: "bold") #body ] } #let refpage( body, ) = { page( header: none, )[ #text(18pt, weight: "bold")[Reference] #v(1em) #set par(justify: true, leading: 2em, ) #body ] } #let longpage( // How many times its height is, with respect to normal pages hpages, // margin-x value, default is 1.2cm marginxn: margin-x, // The slide content. body, ) = { page( width: 16cm, height: hpages*12cm, margin:(left: marginxn, right: marginxn), )[ #body ] } #let diapo( // The presentation's title, which is displayed on the title slide. title: [Title], // The presentation's author, which is displayed on the title slide. author: none, // The date, displayed on the title slide. date: none, // The email, displayed on the title slide. email: none, // If true, display the total number of slides of the presentation. display-lastpage: true, // If set, this will be displayed on top of each slide. short-title: none, // The presentation's content. body ) = { // Ensure that the type of `author` is an array author = if type(author) == "string" { ((name: author, email: email), ) } else if type(author) == "array" { author } else { panic("expected string or array, found " + type(author)) } // Set the metadata. set document(title: title, author: author.map(author => author.name)) // Configure page and text properties. set text(font: "Lucida Sans", weight: "extralight") set page( width: 16cm, height: 12cm, margin:(top: 2cm, bottom: 1.5cm, left: margin-x, right: margin-x), header-ascent: 40%, footer-descent: 40%, header: [ #let headertext = locate(loc => { let elemsbef = query(selector(heading).before(loc), loc,) let elemsaft = query(selector(heading).after(loc), loc,) let elem = if (elemsaft.len() > 0 and elemsaft.first().level == 1) or elemsbef.filter(e => e.level == 1).len() == 0 { () } else { elemsbef.filter(e => e.level == 1).last().body } let subelem = if (elemsaft.len() > 0 and elemsaft.first().level <= 2) or elemsbef.filter(e => e.level == 2).len() == 0 { () } else { elemsbef.filter(e => e.level == 2).last().body } if subelem != () { subelem + " | " } if elem != () { elem + " | " } short-title }) #set align(right) #text()[#headertext] ], footer: [ #let lastpage-number = locate(pos => counter(page).final(pos).at(0)) #set align(right) #text(size: scriptsize)[ #counter(page).display("1") #if (display-lastpage) [\/ #lastpage-number] ] ], ) // Display the title page. page(background: none, header: none, footer: none)[ #set align(center+horizon) #set text(24pt, weight: "semibold") #title #set text(14pt, weight: "light") #let count = author.len() #let ncols = calc.min(count, 3) #grid( columns: (auto,) * ncols, column-gutter: 16pt, row-gutter: 24pt, ..author.map(author => { author.name if (author.keys().contains("email") and author.email != none) { " " + link("mailto:" + author.email) } if (author.keys().contains("affiliation")) { linebreak() author.affiliation } }), ) #text(features: ("case",))[#date] ] // Customize headings to show new slides. 
show heading: set text(weight: "bold") show heading.where(level: 1): it => { pagebreak() align(top, it) v(1em) } show heading.where(level: 2): it => { align(top, it) v(1em) } show heading.where(level: 3): it => { align(top, it) v(1em) } // Customize raw set raw(tab-size: 4) show raw.where(block: true): box.with( fill: luma(240), inset: (x: 7pt, y: 7pt), radius: 2pt, ) show raw.where(block: false): box.with( fill: luma(240), inset: (x: 3pt, y: 0pt), outset: (y: 3pt), radius: 2pt, ) set heading(numbering: "1.") show outline.entry.where(level: 1): it => { v(3pt) strong(it) } show link: underline // Add the body. body } #let mcolor = ( miku: color.rgb(57,197,187), violet: color.rgb(49,0,128), lblue: color.rgb(85,204,250), lpink: color.rgb(246,165,181), ) #let bf(content) = text(weight: "bold")[#content]
https://github.com/Ciolv/typst-template-bachelor-thesis
https://raw.githubusercontent.com/Ciolv/typst-template-bachelor-thesis/main/README.md
markdown
# Bachelor Thesis Typst Template - DHBW Mannheim **This template is not affiliated with DHBW Mannheim.** It is intended to be used as a template for the bachelor thesis at the computing faculty in accordance with the 2019 guidelines. If you intend to use the template, please double-check its accordance with the guidelines; there might be a newer version! The template is created for a thesis in German, but provides most of its content in English as well when `language: "en"` is set. Adapting to another language should be straightforward, if you have some experience with Typst or programming in general. ## Usage Check out the [Typst Repository](https://github.com/typst/typst) and the [Typst Documentation](https://typst.app/docs/) to get started. I'm using a `cargo` install on a local machine, writing in [VS Code](https://code.visualstudio.com/). The [Typst App](https://typst.app/) works as well, but I was facing some stability issues. For a local install, clone the repo: ```shell git clone https://github.com/Ciolv/typst-template-bachelor-thesis ``` and open it: ```shell code ./typst-template-bachelor-thesis ``` ## Structure The template itself is located in `config.typ`. `main.typ` uses the template for a sample document. For a project in German or English, the config parameters in `main.typ` should be sufficient. Other languages might require adjustment in `config.typ`. `helper.typ` contains small helper functions. `acronyms.typ` attempts to provide some acronym / abbreviation handling, inspired by the LaTeX `acro` package. ## Configuration `title`: Provide the title string, to print it to the title page. `authors`: Only one author is supported by this template. Every value set to `none` will not be displayed on the title page. ```typ authors: ( ( name: "<NAME>", company: "Example Company", matriculation_number: "1234567", department: "Example Department", course: "Example course", course_administrator: "Course Administrator", scientific_advisor: "Scientific Advisor", company_advisor: "Company Advisor", period: "Editing period", submission_date: "Submission date", ), ), ``` `university`: Your university name, to print it on the title page. `abstract`: As a content block. `none` will suppress the abstract page from being printed at all. `language`: No matter what language you are writing in, set the [ISO 639](https://en.wikipedia.org/wiki/ISO_639) language code to enable [smart quotes](https://typst.app/docs/reference/text/smartquote/). `font`: Set your favourite font. I've got some issues with numbers with multiple fonts, so I use `Manrope`. I don't know whether it's a Typst bug or a template bug; it currently requires more investigation. `outlines`: Provide pairs of type `("Outline name", kind)` to auto-create an outline for `figures`, see [kind](https://typst.app/docs/reference/meta/figure/#parameters--kind) and [outline](https://typst.app/docs/reference/meta/outline/) for more information. The outline will only be printed if at least one `figure` of its `kind` is in the text. Commonly, the following parameters should be sufficient: ```typ outlines: ( ("List of figures", image), ("List of tables", table), ("List of listings", raw) ), ``` `acronym_outline_name`: The outline title for acronyms. Configured outside the `outlines` parameter, since it is not based on `figure`. `thesis_type`: e.g. Bachelor Thesis or Project Thesis printed on the title page. `none` suppresses printing. `course_of_studies`: Your course of studies printed on the title page, e.g. Computer Science. 
`none` suppresses printing. `field_of_studies`: Your field of studies printed on the title page, e.g. Cyber Security. `none` suppresses printing. `company_logo`: Dictionary with `path` to an image file and `alternative_text` for the [`alt`](https://typst.app/docs/reference/visualize/image/#parameters--alt) parameter, printed on the title page. `none` suppresses printing. `university_logo`: Dictionary with `path` to an image file and `alternative_text` for the [`alt`](https://typst.app/docs/reference/visualize/image/#parameters--alt) parameter, printed on the title page. `none` suppresses printing. `declaration_on_honour`: [`Boolean`](https://typst.app/docs/reference/types/boolean/) value, to print or suppress printing of a declaration on honour. German and English text is provided and toggled via the `language` parameter. `bibliography-file`: File path to your bibliography file. See [bibliography documentation](https://typst.app/docs/reference/meta/bibliography/) for more information. `bibliography-style`: Bibliography / citation style for your thesis. See [bibliography documentation](https://typst.app/docs/reference/meta/bibliography/#parameters--style) for supported styles. **Don't forget to add `doc` at the end of the parameter list, as seen in `main.typ`.** ## Credits The layout is inspired by [DHBW LaTeX Template](https://github.com/pfisterer/DHBW_LaTeX_Template). The `acronyms.typ` functions are inspired by [jnlmmln](https://github.com/jnlmmln)'s [issue comment](https://github.com/typst/typst/issues/659#issuecomment-1517902585).
https://github.com/typst/packages
https://raw.githubusercontent.com/typst/packages/main/packages/preview/g-exam/0.2.0/g-exam.typ
typst
Apache License 2.0
#import "@preview/oxifmt:0.2.0": strfmt #let __g-question-number = counter("g-question-number") #let __g-question-point = state("g-question-point", 0) #let __g-question-point-position-state = state("g-question-point-position", left) #let __g-localization = state("localization") #let __g-show-solution = state("g-show-solution", false) #let __g-default-localization = ( grade-table-queston: "Question", grade-table-total: "Total", grade-table-points: "Points", grade-table-calification: "Calification", point: "point", points: "points", page: "Page", page-counter-display: "1 of 1", family-name: "Surname", personal-name: "Name", group: "Group", date: "Date" ) #let __g-student-data(show-line-two: true) = { locate(loc => { [#__g-localization.final(loc).family-name: #box(width: 2fr, repeat[.]) #__g-localization.final(loc).personal-name: #box(width:1fr, repeat[.])] if show-line-two { v(1pt) align(right, [#__g-localization.final(loc).group: #box(width:2.5cm, repeat[.]) #__g-localization.final(loc).date: #box(width:3cm, repeat[.])]) } } ) } #let __g-grade-table-header(decimal-separator: ".") = { locate(loc => { let end-g-question-locations = query(<end-g-question-localization>, loc) let columns-number = range(0, end-g-question-locations.len() + 1) let question-row = columns-number.map(n => { if n == 0 {align(left + horizon)[#text(hyphenate: false,__g-localization.final(loc).grade-table-queston)]} else if n == end-g-question-locations.len() {align(left + horizon)[#text(hyphenate: false,__g-localization.final(loc).grade-table-total)]} else [ #n ] } ) let total-point = 0 if end-g-question-locations.len() > 0 { total-point = end-g-question-locations.map(ql => __g-question-point.at(ql.location())).sum() } let points = () if end-g-question-locations.len() > 0 { points = end-g-question-locations.map(ql => __g-question-point.at(ql.location())) } let point-row = columns-number.map(n => { if n == 0 {align(left + horizon)[#text(hyphenate: false,__g-localization.final(loc).grade-table-points)]} else if n == end-g-question-locations.len() [ #strfmt("{0:}", calc.round(total-point, digits:2), fmt-decimal-separator: decimal-separator) ] else { let point = points.at(n) [ #strfmt("{0}", calc.round(point, digits: 2), fmt-decimal-separator: decimal-separator) ] } } ) let calification-row = columns-number.map(n => { if n == 0 { align(left + horizon)[#text(hyphenate: false, __g-localization.final(loc).grade-table-calification)] } } ) align(center, table( stroke: 0.8pt + luma(80), columns: columns-number.map( n => { if n == 0 {auto} else if n == end-g-question-locations.len() {auto} else {30pt} }), rows: (auto, auto, 30pt), ..question-row.map(n => n), ..point-row.map(n => n), ..calification-row.map(n => n), ) ) } ) } #let __g-question-numbering(..args) = { let nums = args.pos() if nums.len() == 1 { numbering("1. 
", nums.last()) } else if nums.len() == 2 { numbering("(a) ", nums.last()) } else if nums.len() == 3 { numbering("(i) ", nums.last()) } } #let __g-paint-tab(point: none, loc: none) = { if point != none { let label-point = __g-localization.final(loc).points if point == 1 { label-point = __g-localization.final(loc).point } [(#emph[#strfmt("{0}", calc.round(point, digits: 2), fmt-decimal-separator: ",") #label-point])] } } #let g-question( point: none, point-position: none, body) = { assert(point-position in (none, left, right), message: "Invalid point position") __g-question-number.step(level: 1) [#hide[]<end-g-question-localization>] __g-question-point.update(p => { if point == none { 0 } else { point } }) locate(loc => { let __g-question-point-position = point-position if __g-question-point-position == none { __g-question-point-position = __g-question-point-position-state.final(loc) } if __g-question-point-position == left { v(0.1em) { __g-question-number.display(__g-question-numbering) if(point != none) { __g-paint-tab(point:point, loc: loc) h(0.2em) } } body } else if __g-question-point-position == right { v(0.1em) if(point != none) { place(right, dx: 15%, float: false, __g-paint-tab(point: point, loc: loc)) } __g-question-number.display(__g-question-numbering) body } else { v(0.1em) __g-question-number.display(__g-question-numbering) body } }) } #let g-subquestion( point: none, point-position: none, body) = { assert(point-position in (none, left, right), message: "Invalid point position") __g-question-number.step(level: 2) let subg-question-point = 0 if point != none { subg-question-point = point } __g-question-point.update(p => p + subg-question-point ) locate(loc => { let __g-question-point-position = point-position if __g-question-point-position == none { __g-question-point-position = __g-question-point-position-state.final(loc) } if __g-question-point-position == left { v(0.1em) { h(0.7em) __g-question-number.display(__g-question-numbering) if(point != none) { __g-paint-tab(point: point, loc:loc) h(0.2em) } } body } else if __g-question-point-position == right { v(0.1em) if(point != none) { place(right, dx: 15%, float: false, __g-paint-tab(point: point, loc:loc)) } { h(0.7em) __g-question-number.display(__g-question-numbering) } body } else { v(0.1em) { h(0.7em) __g-question-number.display(__g-question-numbering) } body } } ) } #let g-solution( alternative-content: none, body) = { assert(alternative-content == none or type(alternative-content) == "content", message: "Invalid alternative-content value") locate(loc => { let show-solution = __g-show-solution.final(loc) if show-solution == true { body } else { hide[#body] // alternative-content } } ) } #let __g-show_clarifications = (clarifications: none) => { if clarifications != none { let clarifications-content = [] if type(clarifications) == "content" { clarifications-content = clarifications } else if type(clarifications) == "string" { clarifications-content = clarifications } else if type(clarifications) == "array" { clarifications-content = [ #for clarification in clarifications [ - #clarification ] ] } else { panic("Not implementation clarificationso of type: '" + type(clarifications) + "'") } rect( width: 100%, stroke: luma(120), inset:8pt, radius: 4pt, clarifications-content ) v(5pt) } } #let g-exam( author: ( name: "", email: none, watermark: none ), school: ( name: none, logo: none, ), exam-info: ( academic-period: none, academic-level: none, academic-subject: none, number: none, content: none, model: none ), localization: ( 
grade-table-queston: none, grade-table-total: none, grade-table-points: none, grade-table-calification: none, point: none, points: none, page: none, page-counter-display: none, family-name: none, personal-name: none, group: none, date: none ), // date: none auto datetime, date: none, keywords: none, languaje: "en", clarifications: none, show-studen-data: "first-page", show-grade-table: true, decimal-separator: ".", question-point-position: left, show-solution: true, body, ) = { assert(show-studen-data in (none, true, false, "first-page", "odd-pages"), message: "Invalid show studen data") assert(question-point-position in (left, right), message: "Invalid question point position") assert(decimal-separator in (".", ","), message: "Invalid decimal separator") assert(show-solution in (true, false), message: "Invalid show solution value") let __show-watermark = ( author: ( name: "", email: none, watermark: none ), school: ( name: none, logo: none, ), exam-info: ( academic-period: none, academic-level: none, academic-subject: none, number: none, content: none, model: none ), ) => { place( top + right, float: true, clearance: 0pt, dx:72pt, dy:-115pt, rotate(270deg, origin: top + right, { if author.at("watermark", default: none) != none { text(size:7pt, fill:luma(90))[#author.watermark] h(35pt) } if exam-info.at("model", default: none) != none { text(size:8pt, luma(40))[#exam-info.model] } } ) ) } let __document-name = ( exam-info: ( academic-period: none, academic-level: none, academic-subject: none, number: none, content: none, model: none )) => { let document-name = "" if exam-info.at("name", default: none) != none { document-name += " " + exam-info.name } if exam-info.at("content", default: none) != none { document-name += " " + exam-info.content } if exam-info.at("number", default: none) != none { document-name += " " + exam-info.number } if exam-info.at("model", default: none) != none { document-name += " " + exam-info.model } return document-name } let __read-localization = ( languaje: "en", localization: ( grade-table-queston: none, grade-table-total: none, grade-table-points: none, grade-table-calification: none, point: none, points: none, page: none, page-counter-display: none, family-name: none, personal-name: none, group: none, date: none )) => { let __lang_data = toml("lang.toml") if(__lang_data != none) { let __read_lang_data = __lang_data.at(languaje, default: localization) if(__read_lang_data != none) { let __read-localization_value = (read_lang_data: none, field: "", localization: none) => { let __parameter_value = localization.at(field) if(__parameter_value != none) { return __parameter_value } let value = read_lang_data.at(field, default: __g-default-localization.at(field)) if(value == none) { value = __g-default-localization.at(field)} return value } let __grade_table_queston = __read-localization_value(read_lang_data: __read_lang_data, field: "grade-table-queston", localization: localization) let __grade_table_total = __read-localization_value(read_lang_data: __read_lang_data, field: "grade-table-total", localization: localization) let __grade_table_points = __read-localization_value(read_lang_data: __read_lang_data, field: "grade-table-points", localization: localization) let __grade_table_calification = __read-localization_value(read_lang_data: __read_lang_data, field: "grade-table-calification", localization: localization) let __point = __read-localization_value(read_lang_data: __read_lang_data, field:"point", localization: localization) let __points = 
__read-localization_value(read_lang_data: __read_lang_data, field: "points", localization: localization) let __page = __read-localization_value(read_lang_data: __read_lang_data, field: "page", localization: localization) let __page-counter-display = __read-localization_value(read_lang_data: __read_lang_data, field: "page-counter-display", localization: localization) let __family_name = __read-localization_value(read_lang_data: __read_lang_data, field: "family-name", localization: localization) let __personal_name = __read-localization_value(read_lang_data: __read_lang_data, field: "personal-name", localization: localization) let __group = __read-localization_value(read_lang_data: __read_lang_data, field: "group", localization: localization) let __date = __read-localization_value(read_lang_data: __read_lang_data, field: "date", localization: localization) let __g-localization_lang_data = ( grade-table-queston: __grade_table_queston, grade-table-total: __grade_table_total, grade-table-points: __grade_table_points, grade-table-calification: __grade_table_calification, point: __point, points: __points, page: __page, page-counter-display: __page-counter-display, family-name: __family_name, personal-name: __personal_name, group: __group, date: __date, ) __g-localization.update(__g-localization_lang_data) } } } set document( title: __document-name(exam-info: exam-info).trim(" "), author: author.name ) let margin-right = 2.5cm if (question-point-position == right) { margin-right = 3cm } set page( paper: "a4", margin: (top: 5cm, right:margin-right), numbering: "1 / 1", number-align: right, header-ascent: 20%, header:locate(loc => { let page-number = counter(page).at(loc).first() if (page-number==1) { align(right)[#box( width:108%, grid( columns: (auto, auto), gutter:0.7em, align(left + top)[ #if(school.at("logo", default : none) != none) { set image(height:2.5cm, width: 2.7cm, fit:"contain") if(type(school.logo) == "content") { school.logo } else if(type(school.logo) == "bytes") { image.decode(school.logo, height:2.5cm, fit:"contain") } else { assert(type(school.logo) in (none, "content", "bytes") , message: "school.logo be of type content or bytes.") } } ], grid( rows: (auto, auto, auto), gutter:1em, grid( columns: (auto, 1fr, auto), align(left + top)[ #school.name \ #exam-info.academic-period \ #exam-info.academic-level ], align(center + top)[ // #exam-info.number #exam-info.content \ ], align(right + top)[ #exam-info.at("academic-subject", default: none) \ #exam-info.number \ #exam-info.content ], ), line(length: 100%, stroke: 1pt + gray), if show-studen-data in (true, "first-page", "odd-pages") { __g-student-data() } ) ) )] } else if calc.rem-euclid(page-number, 2) == 1 { grid( columns: (auto, 1fr, auto), gutter:0.3em, align(left + top)[ #school.name \ #exam-info.academic-period \ #exam-info.academic-level ], align(center + top)[ ], align(right + top)[ #exam-info.at("academic-subject", default: none) \ #exam-info.number \ #exam-info.content ] ) line(length: 100%, stroke: 1pt + gray) if show-studen-data == "odd-pages" { __g-student-data(show-line-two: false) } } else { grid( columns: (auto, 1fr, auto), gutter:0.3em, align(left + top)[ #school.name \ #exam-info.academic-period \ #exam-info.academic-level ], align(center + top)[ // #exam-info.number #exam-info.content \ ], align(right + top)[ #exam-info.at("academic-subject", default: none) \ #exam-info.number \ #exam-info.content \ ] ) line(length: 100%, stroke: 1pt + gray) } } ), footer: locate(loc => { line(length: 100%, stroke: 1pt + gray) 
align(right)[ #__g-localization.final(loc).page #counter(page).display(__g-localization.final(loc).page-counter-display, both: true, ) ] // grid( // columns: (1fr, 1fr, 1fr), // align(left)[#school.name], // align(center)[#exam-info.academic-period], // align(right)[ // Página // #counter(page).display({ // "1 de 1"}, // both: true, // ) // ] // ) __show-watermark(author: author, school: school, exam-info: exam-info) } ) ) set par(justify: true) set text(font: "New Computer Modern") __read-localization(languaje: languaje, localization: localization) __g-question-point-position-state.update(u => question-point-position) set text(lang:languaje) if show-grade-table == true { __g-grade-table-header( decimal-separator: decimal-separator, ) v(10pt) } __g-show-solution.update(show-solution) set par(justify: true) if clarifications != none { __g-show_clarifications(clarifications: clarifications) } body [#hide[]<end-g-question-localization>] [#hide[]<end-g-exam>] } #let g-clarification(size:8pt, body) = { text(size:size)[$(*)$ #body] }
https://github.com/typst/packages
https://raw.githubusercontent.com/typst/packages/main/packages/preview/classy-german-invoice/0.2.0/template/main.typ
typst
Apache License 2.0
#import "@preview/classy-german-invoice:0.2.0": invoice #show: invoice( // Invoice number "2023-001", // Invoice date datetime(year: 2024, month: 09, day: 03), // Items ( ( description: "The first service provided. The first service provided. The first service provided", price: 200, ), (description: "The second service provided", price: 150.2), ), // Author ( name: "<NAME>", street: "Straße der Privatsphäre und Stille 1", zip: "54321", city: "Potsdam", tax_nr: "12345/67890", ), // Recipient ( name: "<NAME>", street: "Musterallee", zip: "12345", city: "Musterstadt", ), // Bank account ( name: "<NAME>", bank: "Deutsche Postbank AG", iban: "DE89370400440532013000", bic: "PBNKDEFF", ), // Umsatzsteuersatz (VAT) vat: 0.19, kleinunternehmer: true, )
https://github.com/mrknorman/evolving_attention_thesis
https://raw.githubusercontent.com/mrknorman/evolving_attention_thesis/main/07_crosswave/07_crosswave.typ
typst
#set page(numbering: "1", number-align: center)
#set math.equation(numbering: it => {[7.#it]})
#counter(math.equation).update(0)

= CrossWave: Cross-detector Attention for the Detection and Parameterisation of Overlapping Gravitational-Wave Compact Binary Coalescences <crosswave-sec>

Thus far, we have focused our attention on perhaps one of the simpler problems in gravitational-wave data analysis: transient detection. The fact remains that many more complex tasks have yet to be satisfactorily solved. One of the largest and most intriguing of these is Parameter Estimation (PE) @parameter_estimation_review. Whilst detection merely identifies the presence of a signal, and, in a modelled search, tells us the type of signal we have detected, there is invariably other scientifically valuable information that can be extracted from a signal. During PE, we attempt to predict, with error margins, several parameters of a gravitational-wave-producing system. Typically this is a CBC system, although PE could also be performed on burst events if they were to be detected @burst_pe. Fortunately, CBCs can be described quite well by as few as 14 parameters, which contain information both about the internal state of a CBC system, known as intrinsic parameters, and about its relation to us as observers, known as extrinsic parameters @intrinstic_params. Care should be taken to distinguish between the parameters extracted by PE and the parameters of a neural network model, as they are unrelated.

Without further analysis, detection alone is useful for little more than rudimentary population analysis; PE, therefore, is a crucial part of gravitational-wave data science @gravitational_wave_science_overview. Extrinsic parameters, like the source distance and sky location, aid in population studies and multimessenger analysis @multimessenger_ref, and intrinsic parameters such as the companion masses and spin properties can help unveil information about the underlying physics of the sources themselves @starquake_detection @neutron_star_equation_of_state_1 @neutron_star_equation_of_state_2, as well as their formation channels @gravitational_wave_population.

This section does not focus on a general PE method for either CBC or burst signals. Both have been well investigated, and although there is arguably greater room for improvement and a larger need for innovation on these fronts than in detection alone, a general method was not within the scope of this work. In this section, we present an analysis of a much smaller subproblem within PE: the detection and isolation of overlapping signals contained within a single stretch of interferometer data. Because of the somewhat limited nature of the problem, it has not been studied as thoroughly as the other problems we have examined, which, in some ways, gives us more space for exploration and an increased potential for novel scientific output.

== Frequency of Overlapping Compact Binary Coalescences (CBCs)

Significant improvements to our gravitational-wave detection capability are anticipated within the next few decades, with improvements to existing detectors such as LIGO-Voyager @LIGO_Voyager, as well as future 3#super("rd") and 4#super("th") generation space- and ground-based detectors such as the Einstein Telescope (ET) @einstein_telescope, Cosmic Explorer (CE) @cosmic_explorer, and the space-based Laser Interferometer Space Antenna (LISA) @LISA.
Whilst the current detection rate ($1~2 space "week"^(-1)$ [BBHs]) and average detectable duration ($~7 s$ [BBHs]) of Compact Binary Coalescences (CBCs) are too low for any real concern about the possibility of overlapping detections @bias_study_one, estimated detection rates ($50~300 space "week"^(-1)$ [BBHs]) and durations ($40~20000 s$ [BBHs]) for future networks will render such events a significant percentage of detections @bias_study_one. See @overlaping-event-rate for a more detailed breakdown of overlapping event estimates. Contemporary detection and PE pipelines do not currently have any capability to deal with overlapping signals --- and although, in many cases, detection would still be achieved @bias_study_one @bias_study_two, PE would likely be at least somewhat compromised by the presence of the overlap, especially if more detailed information about higher modes and spins @bias_study_one are science goals.

#figure(
  table(
    columns: (auto, auto, auto, auto, 80pt, 70pt, 70pt),
    inset: 10pt,
    align: horizon,
    [*Configuration*], [*Range (MPc)*], [*Cut Off (Hz)*], [*Mean Visible Duration (s)*], [*P(Overlap) ($"year"^(-1)$)*], [*$N_"events"$ ($"year"^(-1)$)*], [*$N_"overlaps"$ \ ($"year"^(-1)$)*],
    [aLIGO: O3], [611.0], [20], [6.735], [$3.9_(-1.3)^(+1.9) times 10^(-6)$], [$42.0_(-13.0)^(+21.0)$], [$0.0_(-0.0)^(+0.0)$],
    [aLIGO: O4], [842.5], [20], [6.735], [$1.0_(-0.3)^(+0.5) times 10^(-5)$], [$100.0_(-29.0)^(+56.0)$], [$0.0_(-0.0)^(+0.0)$],
    [aLIGO: Design], [882.9], [20], [6.735], [$1.2_(-0.4)^(+0.6) times 10^(-5)$], [$120.0_(-38.0)^(+60.0)$], [$0.0_(-0.0)^(+0.0)$],
    [LIGO-Voyager], [2684.0], [10], [43.11], [$2.3_(-0.8)^(+1.2) times 10^(-3)$], [$2700.0_(-38.0)^(+60.0)$], [$6.3_(-3.4)^(+7.7)$],
    [Einstein Telescope], [4961.0], [1], [19830.0], [$1.0_(-0.0)^(+0.0)$], [$15000.0_(-5000.0)^(+7100.0)$], [$15000.0_(-5000.0)^(+7100.0)$],
  ),
  caption: [Estimated overlap rates of BBH signals in current and future detectors, sourced from Relton @phil_thesis and Relton and Raymond @bias_study_two. Presented error values are 90% credible intervals. Note that these results, including those for past observing runs, are estimates rather than measured values, and are meant only as an illustration of the probable difference in overlap rates between current and future detector configurations. The number of overlapping signals, $N_"overlaps"$, anticipated within one year is determined by the number of detections, $N_"events"$, and the visible duration of those detections, which are, in turn, affected by the detection range and lower frequency cut-off of the detector configuration in question. We can see that although an overlapping event is extremely unlikely with the current and previous detector configurations, the rate will increase with LIGO-Voyager to the point where we would expect $6.3_(-3.4)^(+7.7)$ overlapping signals per year of observing time, and further increase with the Einstein Telescope to the point where we would not expect any event to be detected without components of other signals also present in the detector. Similar overlap rates are expected for LISA and Cosmic Explorer.]
) <overlaping-event-rate>
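The scaling of $N_"overlaps"$ with event rate and visible duration in @overlaping-event-rate can be reproduced, to order of magnitude, with a simple Poisson-style estimate. The sketch below is illustrative only: the exact coincidence window convention used in the cited studies may differ, so the numbers should not be expected to match the table precisely.

```python
import math

YEAR_S = 365.25 * 24 * 3600  # seconds per year

def expected_overlaps(n_events: float, visible_duration_s: float):
    """Poisson-style overlap estimate (illustrative assumption).

    Treats mergers as a Poisson process with rate n_events / YEAR_S,
    each signal being visible for visible_duration_s seconds.
    """
    rate = n_events / YEAR_S                        # events per second
    p_overlap = 1.0 - math.exp(-rate * visible_duration_s)
    return p_overlap, n_events * p_overlap          # P(overlap), N_overlaps

# Order-of-magnitude checks against the table above:
print(expected_overlaps(100.0, 6.735))       # aLIGO O4: effectively zero overlaps
print(expected_overlaps(2700.0, 43.11))      # LIGO-Voyager: a handful per year
print(expected_overlaps(15000.0, 19830.0))   # Einstein Telescope: P(overlap) saturates near one
```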
== Detection and Parameter Estimation (PE) of Overlapping Compact Binary Coalescences (CBCs)

Two studies have examined the rate at which overlaps are likely to occur in different detector configurations, along with the effect of overlapping signals on PE.

Samajdar _et al._ @bias_study_one determined that during an observing period of the future Einstein Telescope, the typical BNS signal will have tens of overlapping BBH signals, and that there will be tens of thousands of signals per year that have merger times within a few seconds of each other. They found that, for the most part, this had little effect on parameter recovery, except in cases where a short BBH or quiet BNS overlapped with a louder BNS signal.

Relton and Raymond @bias_study_two performed a similar study and produced the overlap estimates seen in @overlaping-event-rate. They found that PE bias was minimal for the larger of the two signals when the merger time separation was greater than #box($0.1$ + h(1.5pt) + "s") and when the SNR of the louder signal was more than three times that of the quieter signal. This bias was also smaller when the two signals occupied different frequency regions, and when the louder of the two signals appeared first in the detector stream. Despite this, they found evidence of PE bias even when the smaller signal was below the detectable SNR threshold. They found that overlapping signals can mimic the effects of precession; it will be important to be able to distinguish the two when detailed precession analysis becomes possible.

Much of the work in this area focuses on performing PE with overlapping signals, and there has not been as much attention paid to simply distinguishing pairs of mergers from single mergers. Relton _et al._ @overlapping_search measured the detection performance of both a modelled (PyCBC) @pycbc and unmodelled (coherent WaveBurst [cWB]) @cWB search pipeline when searching for overlapping signals. They determined that both pipelines were able to recover signals with minimal efficiency losses ($<1%$), although they noted that the clustering algorithm used in both pipelines was inadequate to separate the two events. They concluded that adjustments to clustering could be made to both pipelines in order to return both events, given a sufficient merger time separation. Using these altered pipelines it would then be possible to separate the data into two regions, which could be used for independent PE.

Once an overlapping signal has been identified, the next step is to deal with PE. Although in many cases existing PE techniques may provide results with little bias @bias_study_one @bias_study_two, there are some situations in which this may not be the case. An improved PE method that reduces this bias is useful so long as it does not itself reduce PE accuracy by more than the bias it removes. There are four types of methods we can apply to alleviate the issues that overlaps cause for PE @phil_thesis.

+ *Global-fit* methods attempt to fit both signals simultaneously. Several studies have investigated this method: Antonelli _et al._ @global_fit attempt to apply it to both Einstein Telescope and LISA data, @hieherachical_overlapping_pe_2 compares this method to hierarchical subtraction, and several studies focus solely on LISA data @lisa_global_1 @lisa_global_2 @lisa_global_3. This approach has the advantage of being a somewhat natural extension of existing methods, with no special implementation other than an increased parameter count, but that can also be its greatest disadvantage: the total number of parameters can quickly become large when an overlap is considered, especially if multiple overlaps are present, which is expected to occur in ET and LISA data.
+ *Local-fit* methods attempt to fit each signal independently and correct for the differences. The original proposal by Antonelli _et al._ @global_fit suggests using local fits to supplement a global-fit approach. This reduces the number of parameters the method must fit simultaneously, but its efficacy is highly dependent on the proficiency of the correction method.
+ *Hierarchical subtraction* methods first fit the most evident signal, then subtract the signal inferred from the original fit, repeating this process for all anticipated signals @hiherachical_subtration_overlapping_pe @hieherachical_overlapping_pe_2. This method would be effective at extracting multiple sets of parameters for overlapping signals, assuming that the overlap does not cause bias in the initial fit, which the previously mentioned studies have shown is not always a correct assumption @bias_study_one @bias_study_two.
+ Finally, and most relevantly, *machine learning* methods can be employed as a global-fit technique to try and extract parameters from overlapping signals. They come with all the usual advantages (inference speed, flexibility, computational backloading) and disadvantages (lack of interpretability, unpredictable failure modes). Langendorff _et al._ @machine_learning_overlapping_pe attempt to use normalizing flows to output parameter estimates.

Most of the aforementioned methods benefit from having prior knowledge about each of the pairs of signals, especially the merger times of each signal. As well as acting as a method to distinguish between overlapping and lone signals, CrossWave was envisioned as a method to extract the merger times of each of the binaries in order to assist further PE techniques. CrossWave was able to achieve this, and also demonstrated some more general, though limited, PE abilities.

== CrossWave Method

We introduce Overlapnet and CrossWave, two complementary neural network models for the identification and PE of overlapping CBC signals: Overlapnet separates the overlapping case from the non-overlapping case, and CrossWave acts as a PE follow-up, extracting the merger times of the overlapping signals in order to allow other PE methods to be performed. Overlapnet can differentiate between overlapping signals and lone signals with efficiencies matching those of more conventional matched filtering techniques, but with considerably lower inference times and computational costs. CrossWave can extract the merger times of the two overlapping CBCs with an average error of less than 0.1 s. We suggest that these two models, or similar architectures, may be used to augment existing CBC detection and PE infrastructure, either as a complementary confirmation of the presence of an overlap or to extract the merger times of each signal in order to use other PE techniques on the separated parts of the signals.

Since the CrossWave project was an exploratory investigation rather than an attempt to improve the results of a preexisting machine learning method, it has a different structure to the Skywarp project. Initially, we applied an architecture from the literature, again taking Gabbard _et al._ @gabbard_messenger_cnn, with the architecture illustrated in @gabbard_diagram. This worked effectively for the differentiation of overlapping and lone signals. We named this simpler model Overlapnet.
However, when attempting to extract the signal merger times from the data, we found this model to be inadequate; therefore, we utilized the attention methods described in @skywarp-sec, along with insights gained throughout other projects, to construct a more complex deep network for the task, seen in @crosswave-large-diagram. We name this network CrossWave, as it utilises cross-attention between a pair of detectors. It is hoped that this architecture can go on to be used in other problems, as nothing in its architecture, other than its output features, has been purpose-designed for the overlapping waveform case.

=== CrossWave Training, Testing, and Validation Data <crosswave-data>

The dataset utilized in this section differs from those of previous sections in that it was not generated using the GravyFlow data pipeline. Since this was part of a PE investigation, the exact morphology of the waveforms injected into the data is crucial to validating performance. The cuPhenom IMRPhenomD waveform generator that was developed for rapid waveform generation on the GPU has a relatively high degree of mismatch (~5%) with IMRPhenomD signals @imrphenom_d generated with LALSimulation @LALSimulation in some areas of parameter space. This is primarily thought to be caused by cuPhenom's @cuphenom_ref reduced precision (32-bit in most areas rather than 64-bit) and the lack of implementation of several post-Fourier conditioning steps. Whilst this mismatch was deemed mostly adequate for detection searches, especially for comparisons between methods, we considered it inadequate for PE tasks. IMRPhenomD is also an older approximant, which does not incorporate the latest improvements to waveform approximation, omitting several physical phenomena such as higher modes. Whilst there is currently no single approximant that can generate waveforms including all physical effects, we opted to use IMRPhenomTPHM @imrphenom_future, a time-domain approximant that includes the physics of precession and allows for studies of higher modes.

A static dataset was created using BBH waveforms generated with LALSimulation @LALSimulation and injected into Gaussian noise coloured by the LIGO Hanford and LIGO Livingston aLIGO design specifications @aLIGO_design_specificaton, using the technique described in @noise_acquisition_sec but not the GravyFlow @gwflow_ref pipeline. No BNS signals were considered. We used a #box("16" + h(1.5pt) + "s") on-source duration to allow more space for different signal start times and to examine the effects of distant signal overlap on PE. We used a sample rate of #box($1024$ + h(1.5pt) + "Hz"), as this was considered adequate to contain the vast majority of relevant frequency content for the CBCs examined.

Unlike in the detection case, wherein our training distribution consisted of some examples with obfuscated signals and some consisting of pure noise, we assume that a detection has already been made by a detection pipeline, so our examples always contain signal content of some kind. This assumption was made to simplify the task to its minimal possible case. Our generated waveform bank consisted of $2 times 10^5$ IMRPhenomTPHM approximants. From that template bank, we constructed $2 times 10^5$ examples of lone signals injected into obfuscated noise and $2 times 10^5$ examples of pairs of signals injected into obfuscated noise, totaling $4 times 10^5$ training examples.
In the latter case, each waveform was unique to a single pair, generating $10^5$ pairs, but each pair was injected into two different noise realizations in order to generate identical numbers of lone and paired templates. The use of the same waveforms in both the single case and the pairs was a conscious decision, made in order to reduce the chance of the network overfitting to any particular signal morphologies that it might learn to associate specifically with lone signals or pairs of signals.

The waveform parameters were drawn uniformly from a wide range across parameter space. The primary component of each waveform was generated with masses between #box("10.0" + h(1.5pt) + $M_dot.circle$) and #box("70.0" + h(1.5pt) + $M_dot.circle$). This is notably inconsistent with our previous studies, but the range was narrowed to reduce task complexity, and it still covers most of the range that is of interest to PE studies. This also ensured that the visible duration of each waveform (starting at #box("20.0" + h(1.5pt) + "Hz"), which is both the whitening high-pass cut-off and around the limit at which the detector design curve starts to make detection impossible) remained well contained within the #box("16" + h(1.5pt) + "s") on-source duration. Also unlike in our previous detection studies, the mass ratio was constrained between 0.1 and 1. Since the approximants were scaled using luminosity distance rather than SNR, the SNRs are not uniformly distributed; however, the network SNR of any signal is never less than 5 or greater than 100. For each injection, the luminosity distance in MPc was drawn from a power-law distribution with base two, scaled by 145, with a minimum distance of #box("5.0" + h(1.5pt) + "MPc"); this luminosity distance range was arrived at by trial and error to achieve the desired SNR distribution. An overview of the parameters used to train both the CrossWave and Overlapnet models is shown in @crosswave-training-parameters.

A validation dataset was also generated with independent signals and background noise, with $2 times 10^4$ singles and $2 times 10^4$ pairs generated similarly to the training data but with different random seeds, totalling $4 times 10^4$ validation examples.

#figure(
  table(
    columns: (auto, auto),
    inset: 10pt,
    align: horizon,
    [*Hyperparameter*], [*Value*],
    [Batch Size], [32],
    [Learning Rate], [10#super("-4")],
    [Optimiser], [Adam],
    [Scaling Method], [Luminosity Distance],
    [Min Luminosity Distance], [5.0],
    [Max Luminosity Distance], [N/A],
    [Luminosity Distance Distribution], [$("Power-Law (base 2)" times 145) + 5 "MPc"$],
    [Data Acquisition Batch Duration], [N/A],
    [Sample Rate], [#box("1024.0" + h(1.5pt) + "Hz")],
    [On-source Duration], [#box("16.0" + h(1.5pt) + "s")],
    [Off-source Duration], [N/A],
    [Scale Factor], [10#super("21")],
  ),
  caption: [The training and dataset hyperparameters used in the CrossWave and Overlapnet experiments.]
) <crosswave-training-parameters>

In the case of the pairs of injections, the two waveforms are injected so that their merger times never have a separation exceeding #box("2" + h(1.5pt) + "s"). "Signal A" is defined as the signal whose merger arrives second at the Earth's centre, whereas "Signal B" is always defined as the signal whose merger arrives first. This allows the model to differentiate between the two signals for the PE tasks. When only one waveform is present, that waveform is labelled "Signal A".
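For concreteness, the assembly of a single pair example can be sketched as below. This is a schematic reconstruction, not the code used to generate the dataset: the power-law implementation, the merger placement range, and the array conventions are all assumptions.

```python
import numpy as np

rng = np.random.default_rng(0)
SAMPLE_RATE = 1024               # Hz, as in the hyperparameter table
NUM_SAMPLES = 16 * SAMPLE_RATE   # 16 s on-source duration

def draw_luminosity_distance() -> float:
    # One plausible reading of "(power-law (base 2) x 145) + 5 MPc":
    # rng.power(2) draws from p(x) = 2x on [0, 1].
    return 145.0 * rng.power(2.0) + 5.0

def make_pair_example(wave_a: np.ndarray, wave_b: np.ndarray,
                      noise: np.ndarray):
    """Assemble one two-signal training example (schematic).

    wave_a and wave_b are (2, n) detector-channel arrays; signal B's
    merger arrives first and signal A's second, separated by at most
    2 s, matching the labelling convention described above. For
    simplicity each waveform is placed by its start index here; the
    actual pipeline aligns merger times.
    """
    t_b = rng.uniform(2.0, 12.0)          # seconds; hypothetical placement range
    t_a = t_b + rng.uniform(0.0, 2.0)     # A merges after B, within 2 s
    example = noise.copy()
    for wave, t in ((wave_b, t_b), (wave_a, t_a)):
        start = int(t * SAMPLE_RATE)
        segment = example[:, start:start + wave.shape[1]]
        segment += wave[:, :segment.shape[1]]
    return example, {"num_signals": 2, "merger_time_a": t_a, "merger_time_b": t_b}
```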
It is possible, if the merger arrival time separation between signal A and signal B is small enough, that the order of arrival of A and B at the Hanford and Livingston detectors may be switched. This would only happen in very few cases, since the vast majority of training and validation examples with pairs of signals have merger time separations larger than the light travel time between the detectors (~#box($0.01$ + h(1.5pt) + "s")).

#figure(
  grid(
    columns: 1,
    rows: 2,
    gutter: 1em,
    [ #image("single_example.png", width: 80%) ],
    [ #image("overlap_example.png", width: 80%) ]
  ),
  caption: [Two illustrative examples of the kind used to train CrossWave; the upper demonstrates the single-signal case, the lower the multiple-signal case. Since the actual data used to train CrossWave was unwhitened, it is not easy to parse by eye. Thus, as an illustration, these two examples are shown in whitened data generated using cuPhenom and GravyFlow. The example duration has also been cropped from #box("16" + h(1.5pt) + "s") to #box("5" + h(1.5pt) + "s"); since the merger times never have a separation greater than #box("2" + h(1.5pt) + "s"), this is ample for an example. Both examples show time series from both detectors, simulating LIGO Livingston and LIGO Hanford. _Upper:_ A single waveform injected into noise drawn from the two LIGO detectors. _Lower:_ A pair of waveforms injected into noise drawn from the two LIGO detectors. The waveforms are always injected with merger times less than #box("2" + h(1.5pt) + "s") apart.]
) <overlap_injection_examples>

==== A note on Whitening

Interestingly, since the data was generated independently of GravyFlow, it was not whitened before being ingested by the model. Since this is not a comparison against other machine learning methods that use whitening, this is not particularly an issue, but it also cannot tell us about the efficacy lost or gained due to the lack of whitening. Since this investigation does have positive results, this could potentially be an area for future experimentation: forgoing the whitening step before ingestion by a model would remove one of the complications faced by low-latency machine learning pipelines. It should be remembered, however, that the training and validation data was generated using the unchanging PSDs of the aLIGO design specification @aLIGO_design_specificaton for each given detector. Attempting to train models with real or pseudo-real noise, which is non-stationary and, in the former case, contains non-linear glitches, may not be as viable.

The use of unwhitened noise, if possible, may have some benefits in the case of overlapping signal detection and PE. Because this work is only expected to become relevant in the regime of very long-lived signals, it may be difficult to get clean off-source data at a close enough time separation from the on-source data that is not also contaminated with other signals.

== Overlapnet Results

=== Classification

The first attempt to classify input examples generated with the method described in @crosswave-data utilized an architecture adapted from Gabbard _et al._ @gabbard_messenger_cnn; the architecture can be seen in @gabbard_diagram. To distinguish it from later models, this model was named Overlapnet. We trained a binary classifier to output a score near or equal to one if there were two signals present in the input data, and a score near or equal to zero if there was only one signal in the data.
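The classification setup can be summarised with a short schematic. This is not the Gabbard _et al._ architecture itself (see @gabbard_diagram for that), and where the thesis model used a softmax output layer, the sketch below uses a single sigmoid output; the layer sizes are illustrative assumptions, while the optimiser and learning rate follow @crosswave-training-parameters.

```python
import tensorflow as tf

SAMPLE_RATE, DURATION, NUM_DETECTORS = 1024, 16, 2

# Schematic CNN classifier in the spirit of the Gabbard et al. design;
# the layer sizes here are illustrative, not the published architecture.
model = tf.keras.Sequential([
    tf.keras.Input(shape=(SAMPLE_RATE * DURATION, NUM_DETECTORS)),
    tf.keras.layers.Conv1D(8, 64, activation="relu"),
    tf.keras.layers.MaxPooling1D(8),
    tf.keras.layers.Conv1D(16, 32, activation="relu"),
    tf.keras.layers.MaxPooling1D(8),
    tf.keras.layers.Conv1D(32, 16, activation="relu"),
    tf.keras.layers.MaxPooling1D(4),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(64, activation="relu"),
    # Score near 1: two signals present; near 0: one signal present.
    tf.keras.layers.Dense(1, activation="sigmoid"),
])
model.compile(optimizer=tf.keras.optimizers.Adam(1e-4),
              loss="binary_crossentropy", metrics=["accuracy"])
```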
Since the data for this experiment was generated independently, validation was also performed differently. Since we assume the presence of at least one signal in either case, the problem is not as hugely asymmetric as it is in the CBC detection case. The penalty for incorrectly classifying a single signal as a double is much less than for classifying noise as a signal, because a detection model must examine many more noise examples than an overlap-detection model would examine signals, given that we assume at least one signal has already been detected by another method. If we assume that misidentifying a single signal as a double is as undesirable as misidentifying a pair of signals as a single, we can set a classification threshold of 0.5 so that neither class is favoured (unless the model intrinsically favours one class over another, which is also possible), as an equal ratio between classes is usually recommended to maximize overall model performance @inbalanced_datasets. This means we can focus on optimizing our model to achieve as high an accuracy as possible, without needing performance in extremely low FAR regimes; therefore, FAR plots are not particularly useful here.

The trained model was run over the validation dataset, consisting of $4 times 10^4$ examples generated independently but with the same method as the training data. The parameters of each waveform were recorded and compared with the classification results. This initial attempt at applying a preexisting model from the literature to the problem proved sufficient, even in unwhitened noise. The model was able to correctly classify most pair examples where both of the optimal network SNRs are above 10, and correctly identify almost all single signals; see @overlapnet_classification_scores.

For the single-signal validation examples, the model can correctly identify almost all cases (assuming a detection score threshold of 0.5). We note that although the classification error very rarely exceeds 0.5, there is still some notable error. It is thought that this may be because of deficiencies in the construction of the dataset. There is very little to differentiate between a pair of signals in which one signal is rendered almost undetectable by its small SNR and a single signal, or between a pair of signals in which both signals have a low SNR and a single signal with a low SNR. This adds significant confusion to the training process, which encourages the model to show less confidence when classifying signals as singles. It could be ameliorated by increasing the minimum SNR threshold of the signals to the point where no (or fewer) training examples have one undetectable signal, although this change might come to the detriment of other classification abilities.

In the pair validation examples, the model has a much wider range of detection ability, determined by the optimal network SNRs of each example's two signals. The model shows good performance when both signals have an optimal network SNR of at least ten, with a rapid decline below these values, which is roughly consistent with the efficiency curves we see in detection cases. This is anticipated. When one of the signals has a low SNR, the example becomes very similar to a single signal; when both of the signals have a low SNR, the example becomes indistinguishable from a single signal with a low SNR. In both of these cases, the model prefers to classify examples as single signals rather than doubles.
This makes sense: the model will try to minimize the difference between its output and the ground truth values, and half the examples in the training dataset are single signals, whereas considerably less than half are pairs of signals with one low SNR --- if the model has to guess between the two, it is more likely that the example will be in the former category than the latter. This is also probably true for real signals, so this is possibly not a bad feature of the classifier. For the case when both signals in a pair have low SNR, it also makes sense that the classifier would want to classify these as single signals, as there are many more examples of a single signal with a low SNR in the training dataset than there are of a pair of signals both with a low SNR.

It is also speculated that the model may have learned to associate low overall excess power with single signals. Since the two classes were not normalized to contain roughly equal excess power, the average excess power found in pair examples will be double that found in single examples. This is certainly a feature that the classifier could have learned to recognize. It could be alleviated by normalizing the excess power between the classes, which would force the detector to rely on the signal morphologies alone rather than the excess power contained within the example. It is not clear whether this would be a useful feature or not. Certainly, in nature, overlapping signals would, in general, mean greater excess power, but this may have detrimental effects in model training.

#figure(
  grid(
    columns: 2,
    rows: 1,
    gutter: 1em,
    [ #image("crosswave_classification_corrected.png", width: 100%) ],
    [ #image("overlapnet_zoomed_classification.png", width: 100%) ],
  ),
  caption: [Classification error of Overlapnet output when fed validation examples, plotted against signal A optimal network SNR and signal B optimal network SNR. A total of $4 times 10^4$ validation examples were used to produce this plot. All examples consist of two-channel synthetic detector noise generated by colouring Gaussian white noise with the LIGO Hanford and LIGO Livingston aLIGO design specifications. Half the validation examples were injected with one each of $2 times 10^4$ IMRPhenomTPHM waveforms with no repetitions; these are the single-injection examples, which only contain Signal A. In these cases the SNR of signal B is always zero, so these examples are arranged along the bottom of the plot. The other half of the examples consist of pairs drawn from the same $2 times 10^4$ IMRPhenomTPHM waveforms, with each pair injected into two different noise realizations. A model score near one indicates the model has determined that the example contains two hidden signals, and a score near zero indicates that the model thinks the example contains only one hidden signal. The classification score error shows the difference between the ground truth value and the predicted model output; therefore, an error nearer zero indicates good model performance, and an error nearer one indicates poor model performance. Assuming a classification threshold of 0.5, we can see that the model can successfully classify almost all single examples, and can successfully classify most pairs of signals when both signals have an optimal network SNR above ten. We note that although classification is achieved in most cases, there is still substantial error in many cases, though mostly below the threshold required for an inaccurate classification, 0.5.
It is theorised that this is because the model is trained with many examples of pairs of signals with one low SNR that are hard to distinguish from examples containing a single signal. This confusion could add considerable uncertainty to the model predictions, and it is recommended that if this project were to be repeated, the minimum SNR threshold for both of the signals should be increased. When the optimal network SNR of either of the signals falls below 10, the rate of classification error increases in a curve whose shape is consistent with the detection efficiency curves discussed in previous sections. This is anticipated --- in the case that one of the SNRs becomes low, the example will appear to be a single signal, as the other signal becomes hard to distinguish. In the case where both signals have a low SNR, both signals are hard to distinguish, and it becomes difficult to differentiate between a single hard-to-identify signal and multiple hard-to-identify signals. In this latter case, where both signals have a low SNR, the model appears to favour classification as a single signal rather than a double. It is hypothesized that this may be because the pair and single examples were not normalized to have consistent excess power, meaning that the total excess power contained in the set of all two-signal examples will be double the total excess power in all single-signal examples. This might bias the network to associate low excess power with single-signal examples. _Left:_ Full validation results. _Right:_ Zoomed result for increased detail below optimal network SNRs of 50.]
) <overlapnet_classification_scores>

We also plot several pseudo-efficiency curves for easier comparison to other results; see @overlapnet_efficiency_plots. Since there are two network SNR values for each pair example and one network SNR value for each single example, we present the results as five distinct efficiency curves: four curves for the pair examples and one curve for the single examples. The four curves for the pair examples are generated by sorting the dataset by the maximum network SNR of the pairs, the minimum network SNR of the pairs, and the SNRs of signals A and B respectively, then generating a rolling average of the model scores. The single-signal curve is generated by sorting by the lone SNR value and calculating a rolling average. Note that unlike previous efficiency curves, which displayed the percentage of results that were above a certain FAR-calibrated model threshold, these curves simply plot the rolling-average model predictions, which are correct at a score of one for the four pair curves, and correct at zero for the one single-signal curve.

The minimum-SNR curve reaches an average model score near one at a minimum network SNR of around 37, which is quite high. However, it still achieves relatively good scores above an optimal SNR of 16. The reason this curve has a different shape to the detection efficiency curves is presumably that there are factors other than the measured SNR that are relevant to the model performance. In all the pair examples, the other, non-ranking SNR value will have a large effect on the model's ability to distinguish between the two cases, along with the merger time separation and the parameter-space difference between the two injections. Since the minimum-network-SNR curve is the only one of the four pair curves that reaches scores near one, we can infer that this is the bottleneck for detection ability.
The other curves never reach one no matter how high the network SNR, suggesting that a percentage of the other SNRs in the pairs are low and decrease detection ability. The maximum-SNR curve, as expected, shows considerably lower performance at lower SNRs. In each of the examples on this curve, the lower-SNR signal, which we know is the limiting factor, is equal to or lower than the SNR metric. This also explains why the curve starts at a higher SNR --- because it is a rolling average of the examples sorted by the SNR metric, the first average SNR value for the maximum-SNR ranking will naturally be higher than the first average for the minimum-SNR ranking. Interestingly, the maximum-SNR curve reaches approximate parity with the curves plotting efficiencies sorted by the SNRs of signals A and B. This is presumably because there is a large range of SNRs less than the maximum, so the lower SNR is likely to still be detectable, with the difference between this maximum efficiency and one due to the undetectable percentage of SNRs under this maximum value.

Curves were also plotted by sorting the pair validation results by signal A SNR and by signal B SNR. These were plotted to see if the model had any bias between the signals. Signal B is defined as the signal that arrives first in the detector, so it might in theory be possible for the classifier to develop a bias toward one signal or the other. However, the results show very little difference between the two curves, suggesting that the model does not have a preference for which signal has the higher SNR. Again, the model does not reach an average model score of one, but this is because the other SNR in the pair is unconstrained, so a certain percentage of examples at each average calculation will have a pairing with an SNR that is undetectable.

Finally, an efficiency curve was plotted for the single-signal examples. Only one curve was plotted in this case, as a single network SNR value can capture the entire SNR information content of the example. Note that in this case a model prediction of zero rather than one is correct, so lower scores indicate a more accurate prediction. We see that the model performs best at SNR values less than 20, before plateauing and slowly increasing beyond that. This shape arises because the training data contained many pair examples with one low SNR value, which look very similar to single-signal examples, creating confusion in the training process and leading the model to predict single signals with some uncertainty. The higher performance at low SNRs is presumably due to the excess power bias: because there are considerably more single-signal examples at the lower excess power end, the model can more confidently predict a single signal if the total excess power is low. For the same reason, model performance degrades at higher single-signal SNRs, as it is more likely that there is higher excess power in double-signal examples, although in double-signal examples with high excess power the double morphologies are more likely to be visible, so this bias is considerably less than the low-power bias demonstrated.

#figure(
  image("overlapnet_efficiencies_2.png", width: 80%),
  caption: [Overlapnet pair efficiency plots created from the combined Overlapnet validation data pool, using rolling averages with a window of 1000 and plotting every 100#super("th") average value.
This plot gives an approximate comparison to the efficiency plots generated in the detection cases, although generation was done with a rolling average over a pool of logarithmically distributed SNRs rather than with pools of discrete SNRs at specific test SNR values, as was used previously in the detection cases. Also note that this plots the model output score, rather than the percentage of cases which fall above a calibrated SNR threshold. These efficiency plots show the relationship between the SNR values of one of the signals and the model prediction. One of the five curves gives the rolling-average model score when the validation results pool is sorted by the minimum SNR value of each pair. This is perhaps the most useful of the four pair curves, as the minimum SNR is the bottleneck in classification ability. It reaches a classification score of 1.0 at a minimum optimal network SNR of around 37. It remains above 0.9 for SNRs above 19 and increases slowly until 37. This separates it from the detection case, presumably because there are extra factors not accounted for on this plot: primarily the SNR of the second signal, but also the parameter-space difference of the two waveforms and their merger time separation, which could add difficulty without being visible on the efficiency plot. Two of the curves plot the rolling-average model score against the SNR of one of the two signals, signal A and signal B. Signal B always arrives before signal A. The similarity between these curves shows that it is unlikely there is any bias depending on whether signal A or signal B has the lower SNR. The maximum scores achieved by these curves are less than those of the minimum-SNR curve, as there are always low-SNR signals included in the averages used to calculate them. The last of the four pair-example curves shows the moving average when the validation pool is sorted by the maximum SNR of the two injected signals. This is lower still, as it is more likely that the unaccounted-for signals have low SNR. Lastly, we show the single-signal SNR scores. Unlike the other curves, a lower score is better in this case, as a model prediction of zero indicates the lack of a second signal. We see that at low SNRs this score is lowest; this is expected, as there are considerably more low-SNR single signals in the dataset than pairs of signals, and it supports our hypothesis that the network is using excess power as a differentiation method. Above an optimal network SNR of 18 the classification score plateaus at an average of 0.2; as stated previously, it is believed this is induced by confusing examples in the training dataset, where it is almost impossible for the network to distinguish between a pair of signals in which one signal has a low SNR and a single signal, teaching the network to guess with some uncertainty in all apparent single-signal cases. We also see a slight decrease in prediction accuracy as SNR increases; again, this probably results from the excess power bias. From this plot we can conclude that, as expected, the lowest-SNR signal in the pair is the largest factor in model efficiency, but that other factors are probably also relevant.]
) <overlapnet_efficiency_plots>

We create additional plots to explore how classification performance varies with other areas of parameter space. First, we examine how the time difference between the merger arrivals of signal B and signal A at the Earth's centre (assumed to be very close to the arrival time at any of the detectors; only in a very small number of cases, given this range of time separations, will the order of arrival at the Earth's centre differ from the order of arrival at a given detector) affects the classification performance.

@overlapnet_classification_separation shows that there is little correlation between model performance and the arrival time difference, except when the time separation is very small. There appears to be some degradation of performance below #box("0.8" + h(1.5pt) + "s"), but this only becomes very significant below #box("0.2" + h(1.5pt) + "s"). Since the average model performance values are calculated using a rolling average, we also examined the distribution of individual example performance by eye; this first bin seems to be heavily weighted by examples whose separation is very close to zero. As the time separation moves toward zero, the model has less opportunity to use distinct merger peaks to aid its classification and must begin to rely on morphology alone. Since the model maintains performance, though at a reduced efficiency, at separations down to zero seconds, we can determine that the model can use morphologies as well as distinct peaks to distinguish between the two cases. Further analysis of a validation set consisting only of zero-separation signals would be useful to examine this further. However, since signals arriving with such small separations are very unlikely even when detection rates are massively increased, this is not considered a priority.

#figure(
  image("overlapnet_classification_separation.png", width: 70%),
  caption: [Overlapnet classification results plotted against the time elapsed in seconds between the arrival of the mergers of signal B and signal A. The coloured circles represent individual validation classification results, colour-coded for visual clarity. The red line is the moving-average model prediction error at the stated time separation, with a window of 500 validation examples. Only pairs are plotted, as single examples have no time separation. We see that for time differences above #box("0.8" + h(1.5pt) + "s") the separation has little effect on the average prediction error. Between #box("0.2" + h(1.5pt) + "s") and #box("0.8" + h(1.5pt) + "s") there is a slight but notable increase in error, and below a merger time difference of #box("0.2" + h(1.5pt) + "s") there is a more notable uptick in error. It appears that this uptick at lower time separations is mostly caused by signals that have very low separation ($<$ #box("0.1" + h(1.5pt) + "s")). This seems to be the only significant predictor of model performance; other than this, and the small decrease in performance below #box("0.8" + h(1.5pt) + "s"), the classifier seems to work with equal efficiency across time separations. This is perhaps less of a correlation than might be expected, but it demonstrates that only very closely separated signals are problematic, if at detectable SNRs. This is a good sign for the chances of developing a useful classifier.]
) <overlapnet_classification_separation>

For our final analysis of the classification results, we explore the parameter space of the waveforms by examining model performance at different values of chirp mass and mass ratio; see @overlapnet_classification_mass_parameters. These plots are less illuminating; the only visible correlation exists between a lower chirp mass in one or both signals and poor performance.
This is likely because, at a given luminosity distance, a source with a lower chirp mass will have a lower $h_"rss"$ and therefore a lower SNR (assuming identical detector noise conditions, sky localization, and polarization) than a source with a higher chirp mass. This is also corroborated by the decrease in single-signal classification performance at higher chirp masses, which is seen at higher SNRs. However, since the luminosity distance is variable in the dataset, chirp mass alone does not correspond directly to SNR; thus we do not see as strong a correlation as we see in @overlapnet_classification_scores. Since there is no visible correlation along the line where the two parameters are equal to each other, we can conclude that both waveforms having similar mass parameters has relatively little effect on the ability of the model to correctly classify the signal. This is slightly surprising, as we would expect signals with similar frequency content to be more difficult for the model to separate. However, in most cases within the validation dataset, the arrival time separation was large enough that the model could use the distinct merger peaks as evidence for mergers rather than relying on the morphologies alone, which may explain this seeming lack of dependence on waveform parameters.

#figure(
  grid(
    columns: 2,
    rows: 1,
    gutter: 1em,
    [ #image("overlapnet_classification_chirp_mass.png", width: 100%) ],
    [ #image("overlapnet_classification_mass_ratio.png", width: 100%) ]
  ),
  caption: [Overlapnet classification results compared with the mass parameters of the constituent waveforms. _Left:_ Overlapnet classification scores plotted against the source chirp masses of signal A and signal B. There appears to be some correlation between lower chirp masses and poor model performance; however, because there are highly performing examples even in cases where both chirp masses are low, we can conclude that this does not explain the entire picture. It is hypothesized that this correlation is primarily caused by the fact that lower chirp masses are more likely to produce a low-SNR signal. If two sources were at the same luminosity distance but one had a higher chirp mass, the higher-chirp-mass source would have a higher SNR (assuming identical detector noise conditions, sky localization, and signal polarisation). This hypothesis is supported by the lower model performance on single signals at higher chirp masses, as we have seen that single-signal classification is slightly worse at higher SNRs. _Right:_ Overlapnet classification scores plotted against the source mass ratios of signal A and signal B. This plot shows that there is very little, if any, correlation between the mass ratios of the two signals and model performance. This continues to show that signal morphology does not make a decisive difference to classification ability, which is primarily determined by the minimum SNR of the signals in the pair, and secondarily weakened if the signals have a very small time separation.]
) <overlapnet_classification_mass_parameters>

We conclude that Overlapnet is capable of differentiating between single and overlapping signals in the majority of cases, and that with some adjustment to the training dataset, performance could probably be improved by removing the ambiguity generated by impossible-to-distinguish examples.
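For reference, the pseudo-efficiency curves of @overlapnet_efficiency_plots can be reproduced from stored validation outputs with a short rolling-average routine, following the construction described above (a window of 1000, keeping every 100#super("th") value); the array names in this sketch are hypothetical.

```python
import numpy as np

def pseudo_efficiency_curve(snr: np.ndarray, scores: np.ndarray,
                            window: int = 1000, stride: int = 100):
    """Rolling-average model score along SNR-sorted validation results.

    snr is the chosen ranking statistic for each validation example
    (e.g. the minimum optimal network SNR of a pair), and scores are
    the corresponding model outputs.
    """
    order = np.argsort(snr)
    snr_sorted, scores_sorted = snr[order], scores[order]
    kernel = np.ones(window) / window
    rolling_scores = np.convolve(scores_sorted, kernel, mode="valid")
    rolling_snr = np.convolve(snr_sorted, kernel, mode="valid")
    # Keep every `stride`-th point for plotting.
    return rolling_snr[::stride], rolling_scores[::stride]
```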
=== Regression

Following the relative success of Overlapnet in differentiating between examples with one CBC signal present and two overlapping CBC signals present, we applied the same model, with an adjusted activation function on the last layer (linear rather than softmax), to a regression problem on pairs of overlapping signals, aiming to extract useful information that could be used in a parameter estimation pipeline. The most useful parameters that can be extracted from an overlapping signal are the merger times of the two signals, A and B. Using the same training dataset and procedure, we changed only the activation function applied to the model output, the loss function, and the labels that the model was trained to output, in each case to allow for regression rather than classification. Rather than outputting a score between zero and one, we trained the model to output a merger time for signal A and a merger time for signal B. Due to an error in the data labelling procedure, which was not spotted until later experiments, regression test results were extremely poor. Initially, it was thought that this was down to using an insufficient model for the task; therefore, a much more complex and experimental network was constructed, utilizing much of the insight gained from previous experiments, as well as denoising autoencoder heads and cross-attention layers between detectors. These concepts are explained in @additional-elements. We name this more complex network structure CrossWave. It should be noted that it has not been proven that such a complex network is necessary for successful parameter estimation of overlapping CBC signals with machine learning.

== Additional Structural Elements <additional-elements>

=== Cross-Attention <cross-attention-sec>

The multi-head attention layers we have explored thus far in @skywarp-sec have all consisted of self-attention heads, which is the most natural application of attention @attention_is_all_you_need @attention_review. However, attention layers can also be used to compare two different sequences; this is a principal component of the archetypical transformer design @attention_is_all_you_need @transformer_review, wherein cross-attention layers compare the model-predicted vectors to the ground truth vectors of the training data. See @transformer-sec. Since we were not concerned with next-token prediction in @skywarp-sec, we opted not to use cross-attention layers and instead focused entirely on self-attention. However, there is a scenario in gravitational-wave data science to which cross-attention can naturally be applied --- comparison between the detector outputs of the multiple interferometers of a gravitational-wave detection network. There are two ways in which we could deal with this scenario: we could apply the appropriate temporal encoding and add an encoding element informing the model which detector each sequence element originated from, or we could simply use cross-attention between the multiple detectors. See @cross-attention-digaram for an illustration of the cross-attention mechanism. In cross-attention, query, key, and value vectors are still generated, but for two sequences instead of one @attention_is_all_you_need @transformer_review. The query vectors from one sequence are then compared with the key vectors of the other sequence, and the value vectors of that sequence are summed together in the same manner as in self-attention.
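To make this data flow concrete, the following is a minimal NumPy sketch of a single cross-attention head. The sequence lengths, dimensions, and randomly initialised matrices standing in for learned weights are illustrative assumptions, not the implementation used in this work.

```python
import numpy as np

def softmax(x, axis=-1):
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

def cross_attention_head(seq_q, seq_kv, w_q, w_k, w_v):
    # Queries come from one sequence; keys and values from the other.
    q = seq_q @ w_q                                    # (n_q, d_head)
    k = seq_kv @ w_k                                   # (n_kv, d_head)
    v = seq_kv @ w_v                                   # (n_kv, d_head)
    scores = softmax(q @ k.T / np.sqrt(q.shape[-1]))   # (n_q, n_kv)
    return scores @ v   # each query position gathers relevant values

rng = np.random.default_rng(0)
d_model, d_head, n = 16, 8, 32
h1 = rng.normal(size=(n, d_model))   # e.g. an embedded Hanford sequence
l1 = rng.normal(size=(n, d_model))   # e.g. an embedded Livingston sequence
w_q, w_k, w_v = (rng.normal(size=(d_model, d_head)) for _ in range(3))
enriched_h1 = cross_attention_head(h1, l1, w_q, w_k, w_v)   # (32, 8)
```

Swapping `seq_q` and `seq_kv` produces a different result, which is the non-commutativity discussed next.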
This allows the attention layer to accumulate information from the other sequence that is relevant to vectors in the first sequence. Because it matters which sequence provides the queries and which provides the keys and values, cross-attention is not commutative. After calculating the cross-attention between detectors, this result can then be added to the self-attention result, accumulating relevant information both from other temporal locations within one detector and from the information provided by other detectors. Overlapnet used both LIGO detectors as input, since there was no need to try to optimise for a low FAR. When attempting to improve on this network with attention layers, it is a natural choice to apply cross-attention.

#figure(
    image("cross_attention.png", width: 100%),
    caption: [Illustration of the action of a single cross-attention head. In contrast to a self-attention head, a cross-attention head takes two sequences as input: a querier sequence and a queried sequence. The querier sequence is converted into query vectors with a learned weights matrix, and the queried sequence is converted into key and value vectors. The rest of the attention mechanism functions identically to self-attention but uses query, key, and value vectors that originate from different sources. For more details on the self-attention mechanism see the description in @sec-attention.]
) <cross-attention-digaram>

=== Autoencoders and Denoising <autoencoder-sec>

Autoencoders are a family of artificial neural network architectures @autoencoder_ref. They can utilise many different layer types, including pure dense layers, convolutional layers, and attention layers, but they are defined fundamentally by their inputs and outputs and the shape of the data as it moves through the network. The vanilla autoencoder can be described as a form of unsupervised learning, since the model input is the same as the model output; therefore, although it has in some sense a label --- its own input --- the data does not need to be labelled, as it is its own label. A vanilla autoencoder attempts to compress the information content of its input into a latent vector that is typically significantly smaller than the input vector, then regenerate the original input vector from that reduced latent vector with as little loss as possible @autoencoder_ref. This has obvious use as a compression algorithm, but it also finds application in encryption and many other areas. Having access to a lower-dimensional latent space that can represent elements of a unique distribution has many uses in generative models and classifiers. Many different subtypes of autoencoder try to regularise the latent space into a more useful format, the most common of which is the Variational AutoEncoder (VAE) @variational_autoencoder. Autoencoders can also be used for anomaly rejection @autoencoder_ref, which has application in gravitational-wave analysis in both glitch rejection @autoencoder_glitch_rejection and burst detection @source_agnostic_lstm. Because an autoencoder is trained to reconstruct data from a given distribution, if it is fed a result that lies outside that distribution, this will likely result in a high reconstruction loss. The value of this loss can then be used to determine whether the autoencoder has encountered something from outside its training distribution. In the case of gravitational-wave glitches, we can train a model on known glitch types or a single known glitch type.
We can then reject glitches that the autoencoder can successfully reconstruct as specimens of known detector glitches. For anomaly detection, we can instead train the model to reconstruct a standard interferometer background; if the autoencoder fails to reconstruct a section of the background well, it could be an indication of the presence of an anomaly, which in some cases could be a gravitational-wave burst. When detected in conjunction with coherent anomalies from multiple detectors, this could lead to a confirmed burst detection once glitches have been ruled out.

An autoencoder has three parts: an encoder, a decoder, and a latent vector @autoencoder_ref. See @autoencoder-diagrams. The encoder attempts to reduce the input vector into a smaller latent space vector, performing a kind of dimensional reduction which hopefully preserves most of the information content of the input vector by representing it in a more efficient data format. In most interesting distributions, there is significant structure that can be learned and used to compress the data. As with compression algorithms, if the input data is random and has no structure, there will be no way to represent that data in a much more efficient way. The encoder commonly has a structure identical to the convolutional layers in a CNN, since a function that compresses the input data down into smaller feature maps is exactly what we require from our encoder. Encoders can also be built with dense or attention layers, and they share most of the benefits and drawbacks of these layer types discussed previously. The decoder is similar but acts in reverse, reconstructing the original input data from the reduced representation generated by the encoder. Often the decoder is a mirror image of the encoder and uses inverted layers such as transpose convolutional layers.

As well as acting as unsupervised models, it is possible to use pseudo-autoencoders, which have the same structure as autoencoders but are not autoencoders in the strictest definition, to produce an output that is not the same as the input, but instead an altered version of it. This can be used to transform the input in some way, for example adding a filter to an image or audio input, or it can be used to try to remove noise from the original input. This latter type is known as a *denoising autoencoder* @denoising_autoencoder, and it is what we will be using as part of our expanded CrossWave architecture. Denoising autoencoders are no longer considered unsupervised models, as the labels must be denoised versions of the input vectors. During training, the denoising autoencoder learns to extract the important features from the input but ignore the noise, as the noise is not present in the output label and would be unnecessary information to propagate through the model. There have been some attempts to apply denoising autoencoders to gravitational-wave data in order to remove interferometer noise and reveal hidden gravitational-wave signals.

#figure(
    grid(
        columns: 1,
        rows: 2,
        gutter: 1em,
        [ #image("dense_autoencoder.png", width: 80%) ],
        [ #image("convolutional_autoencoder.png", width: 100%) ]
    ),
    caption: [Illustration of two trivial autoencoder architectures, one using only dense layers, the other using convolutional layers. Both networks have very few neurons and would likely not see use in any real practical application but are presented for illustration only.
Autoencoders consist of an encoder, which performs dimensional reduction on an input vector to map it into a smaller latent space and produce a latent vector. This latent vector is then processed by the decoder, which attempts to perform the inverse operation and reconstruct the original input, or a slightly altered version of the input, for example a denoised version of it. Often the decoder is simply an inverted version of the encoder, which introduces the concept of transposed convolutional layers, which perform the inverse operation of convolutional layers. _Upper:_ Illustrative dense-layer autoencoder with a two-layer encoder and a two-layer decoder. The latent space of this autoencoder has two dimensions, meaning the dimensionality of the input vector has been reduced from five down to two. _Lower:_ Illustrative convolutional autoencoder with a two-layer encoder consisting of convolutional layers and a two-layer decoder consisting of transposed convolutional layers. The latent vector of this autoencoder has four elements, which means there has only been a reduction of one element between the input vector and the latent space.]
) <autoencoder-diagrams>

== CrossWave Architecture <crosswave-models>

The CrossWave architecture is the most ambitious model architecture presented in this thesis. It attempts to combine many intuitions gained throughout the research with contemporary network features that are known to work well in similar domains. We utilize several new conceptual elements: denoising autoencoder heads and cross-attention layers, which are described in more detail in @additional-elements. The CrossWave architecture has a dual-branch structure which, rather than immediately combining both detectors into one multi-dimensional input stream, deals with the input streams from both detectors in separate network branches in the first stage of the model. Each detector input is first fed into an independent denoising autoencoder head, with the idea that each autoencoder can learn to deal with the specifics of that particular detector's noise, extract only the important signal features, and filter out detector glitches. These heads could first have been trained independently to denoise signals before being used in the larger regression model, which was the original intention; however, due to time constraints, they were trained in unison with the greater model. The architecture of the autoencoder encoder is based on the model described in Gabbard _et al._ @gabbard_messenger_cnn, with the decoder consisting of the inverse of that architecture, using transpose convolutions in place of convolutions and upscaling in place of pooling layers. This model was chosen as it is known to be able to effectively extract the features of a gravitational-wave signal. In order to facilitate an effective comparison between the two detectors inside the cross-attention layers, it is desirable to have the features from both branches mapped into a shared latent space. For this reason, after the denoising heads, each branch was fed through an identical set of feature-extracting convolutional layers with shared weights. It is hoped that these shared weights will force the denoising heads to output vectors of the same form, which can then be processed by the shared feature extractor into the same latent space to allow for easier comparison by the cross-attention layers; a minimal sketch of this weight-sharing arrangement is given below.
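The following is a minimal PyTorch-style sketch of the dual-branch arrangement: two denoising heads with independent weights feeding one shared feature extractor. The layer shapes are hypothetical stand-ins, not the Gabbard _et al._-derived stacks actually used in CrossWave.

```python
import torch
import torch.nn as nn

def make_denoising_head():
    # Stand-in for a Gabbard et al.-style encoder/decoder stack; each
    # call creates a head with its own, independently trained weights.
    return nn.Sequential(
        nn.Conv1d(1, 8, kernel_size=16), nn.ReLU(),
        nn.ConvTranspose1d(8, 1, kernel_size=16), nn.ReLU(),
    )

# A single extractor instance referenced by both branches, so that both
# detector streams are embedded into the same latent space.
shared_extractor = nn.Sequential(nn.Conv1d(1, 16, kernel_size=16), nn.ReLU())

h1_head, l1_head = make_denoising_head(), make_denoising_head()

h1 = torch.randn(4, 1, 1024)   # toy (batch, channel, samples) strain segments
l1 = torch.randn(4, 1, 1024)

feat_h1 = shared_extractor(h1_head(h1))   # per-detector denoising...
feat_l1 = shared_extractor(l1_head(l1))   # ...then a shared embedding
```

Because `shared_extractor` is one module referenced in both branches, gradient updates from either detector modify the same weights, which is what pushes the two independent denoising heads toward a common output form.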
In this manner, it is hoped that the independent denoising heads can deal with and remove differences in detector noise and configuration, whilst the shared feature-extracting layers can learn to recognize the features that are shared across detectors due to the detector-agnostic intrinsic properties of the waveforms themselves. The feature-extracting layers are identical to the encoding layers inside the denoising heads, taken from Gabbard _et al._ @gabbard_messenger_cnn.

Two models were trialed, which are illustrated in @crosswave-small-diagram and @crosswave-large-diagram. The small model utilizes only a single cross-attention layer, which combines the output of multiple self-attention blocks applied independently to each detector output. The larger model computes both self-attention and cross-attention inside each attention block, summing the results of both layers in order to combine, into a single sequence for each detector, the global context from other parts of that detector's input vector and the information from the other detector. These results are finally concatenated after repetition, and a final regression is performed with two dense layers. The larger model was found to have increased performance over the smaller one, and so was used for further experimentation.

#set page(
    flipped: true
)
#set align(center)
#figure(
    image("crosswave_small_diagram_expanded.png", width: 85%),
    caption: [Diagram of the network structure of the smaller of the two trialled CrossWave models. Both CrossWave models have a novel structure with denoising heads and feature extraction layers adapted from Gabbard _et al._ @gabbard_messenger_cnn, as well as utilization of cross-attention layers. The denoising heads are composed of an autoencoder structure, with one head for each input detector. In this case, we have used simulated inputs from the LIGO Hanford and LIGO Livingston detectors, so there are two autoencoding heads. Each autoencoder has independently trained weights. It is envisioned that during network training these will adapt individually to the peculiarities of the noise in their given detector and, due to the shared weights utilized by the feature extractor, learn to output a standardized denoised version of the input from each detector, although it is expected that this will not correlate perfectly to a denoised detector stream, since the autoencoders were not independently pre-trained before the training of the larger model. After the autoencoding heads, several feature-extracting layers, also adapted from Gabbard _et al._ @gabbard_messenger_cnn, are used to embed the autoencoder outputs into two sequences that can be ingested by the attention layers. It is imagined that, because these feature-extracting layers share weights between detectors, they will map the output of the denoising layers into a shared latent space that can be interpreted similarly by the attention layers, and is therefore useful for cross-attention between detectors. The core of the small CrossWave model utilizes a block of self-attention layers applied repeatedly to each detector data stream, much like in the Skywarp transformer. These blocks are repeated three times. This repeated self-attention should help the model understand the global context of the data within each detector. After completion, these data streams are combined in a cross-attention block, and then processed by two dense layers to give the final regression output scores.
This model was trialled and was somewhat performant, but the application of cross-attention in this manner caused a lot of information to be lost, so the model was abandoned in favour of the larger variant shown in @crosswave-large-diagram.],
) <crosswave-small-diagram>

#figure(
    caption: [Diagram of the network structure of the larger of the two trialled CrossWave models. Both CrossWave models have a novel structure with denoising heads and feature extraction layers adapted from Gabbard _et al._ @gabbard_messenger_cnn, as well as utilization of cross-attention layers. The denoising heads are composed of an autoencoder structure, with one head for each input detector. In this case, we have used simulated inputs from the LIGO Hanford and LIGO Livingston detectors, so there are two autoencoding heads. Each autoencoder has independently trained weights. It is envisioned that during network training these will adapt individually to the peculiarities of the noise in their given detector and, due to the shared weights utilized by the feature extractor, learn to output a standardized denoised version of the input from each detector, although it is expected that this will not correlate perfectly to a denoised detector stream, since the autoencoders were not independently pre-trained before the training of the larger model. After the autoencoding heads, several feature-extracting layers, also adapted from Gabbard _et al._ @gabbard_messenger_cnn, are used to embed the autoencoder outputs into two sequences that can be ingested by the attention layers. It is imagined that, because these feature-extracting layers share weights between detectors, they will map the output of the denoising layers into a shared latent space that can be interpreted similarly by the attention layers, and is therefore useful for cross-attention between detectors. The core of the larger CrossWave model contains both self-attention blocks and cross-attention blocks in each iteration; this means that the model can compare data streams from both detectors multiple times, each time adding extra relevant information from the other detector into that detector's branch. Also, since the cross-attention is performed in both directions, no information is lost as it was in the small model. Again, these blocks are repeated three times. After the repeating blocks, rather than using a cross-attention block to combine the branches, the outputs from each branch were concatenated before being fed into the dense tail, which then produced the final regression outputs. ],
    image("crosswave_large_diagram_expanded.png", width: 96%)
) <crosswave-large-diagram>

#set align(left)
#set page(
    flipped: false
)

== CrossWave Dataset

We used the same independently produced dataset to provide the regression labels for the CrossWave regression models described in @crosswave-models. Fortunately, the dataset was produced with comprehensive labels of the relevant parameters, including luminosity distance, signal arrival time, companion masses, and spin parameters for each of the two waveforms in the pairs.

Compared with binary classification, there is slightly more to consider when designing the form of the output labels. Initially, we must consider whether we want to perform a classification at the same time as the regression of parameters. When training a model, the model can learn information from the input vector, but also from the labels.
At the onset of experimentation with CrossWave, it was thought that giving the model more information about the data, in the form of more detailed parameter labels, might aid in the classification process. This has several issues. Primarily, if we wish to perform classification at the same time as regression, we must include both pairs and singles in the training dataset. Therefore, we must decide what to do with the parameter regression labels for signal B in the case where there is no signal B. The natural decision might be to set these labels to zero; however, this might cause significant problems in the network. Whenever a training example without signal B is presented, the gradient descent algorithm will attempt to move the network toward producing zero for all of signal B's parameters, but zero is not an accurate representation of an undetectable signal for many of the parameters. For example, it might be more natural to set the luminosity distance very high for an invisible signal, but for other labels, it is not quite as easy to select a null value that won't disrupt the parameter estimation in some way. Joint classification-regression trials proved ineffective across several different selections of null parameters for signal B, causing parameter estimation and classification confusion in the network. A method to gate outputs by multiplication with the classification output was also trialled, but this too proved ineffective.

Due to the failure of early experiments to perform parameter estimation with datasets comprised of both pairs of signals and single signals, the decision was made to focus on the more specific task of extracting parameters from pairs of signals, without the capability to perform parameter estimation on single signals or classify between the two cases. Therefore, all single signals were removed from the validation and training datasets, shrinking the training dataset to $2 times 10^5$ examples and the validation dataset to $2 times 10^4$ examples.

The second consideration is which parameters to attempt to extract. The inclusion of additional parameters did not appear to reduce the ability of the model to correctly extract other parameters, even if those additional parameters were difficult or impossible to extract. Neither did the opposite appear to be the case; the inclusion of additional labels did not appear to improve the ability of the model to estimate other parameters.
The following 24 parameters were selected for extraction by the models: ["*Signal A Geocentric Arrival Time (s)*", "*Signal B Geocentric Arrival Time (s)*", "*Signal A H1 Arrival Time (s)*", "*Signal B H1 Arrival Time (s)*", "*Signal A L1 Arrival Time (s)*", "*Signal B L1 Arrival Time (s)*", "*Signal A Companion Mass 1 ($M_dot.circle$)*", "*Signal B Companion Mass 1 ($M_dot.circle$)*", "*Signal A Companion Mass 2 ($M_dot.circle$)*", "*Signal B Companion Mass 2 ($M_dot.circle$)*", "*Signal A Luminosity Distance (MPc)*", "*Signal B Luminosity Distance (MPc)*", "*Signal A Dimensionless Spin Component X Companion 1*", "*Signal B Dimensionless Spin Component X Companion 1*", "*Signal A Dimensionless Spin Component Y Companion 1*", "*Signal B Dimensionless Spin Component Y Companion 1*", "*Signal A Dimensionless Spin Component Z Companion 1*", "*Signal B Dimensionless Spin Component Z Companion 1*", "*Signal A Dimensionless Spin Component X Companion 2*", "*Signal B Dimensionless Spin Component X Companion 2*", "*Signal A Dimensionless Spin Component Y Companion 2*", "*Signal B Dimensionless Spin Component Y Companion 2*", "*Signal A Dimensionless Spin Component Z Companion 2*", "*Signal B Dimensionless Spin Component Z Companion 2*"].

Finally, we can consider if and how we want to normalize the values of the labels. Many of the selected parameters lie in different ranges, with the spin components between -1 and 1, the masses between 10 and 70 $M_dot.circle$, and the luminosity distances between 500 and 700 MPc. This is not necessarily a problem, and the model would still be able to produce these outputs assuming we use the right activation functions. However, this could cause the model to take a longer time to converge on correct solutions, as it would consider some parameters, with larger values, to be much more important than the others. Ideally, we want the gradient descent mechanism to treat all parameters approximately equally. For that reason, all label values were normalized between 0 and 1. The only exception to this was the merger times for signal B, as these were incorrectly normalized, leading to some ground truth values of less than zero, which the model was unable to predict due to the ReLU activation functions used on the output layer, which limit neuron outputs to values of zero or greater.

== CrossWave Results

=== Merger Time Parameter Estimation Results <crosswave-merger-time-sec>

Although a single model to predict all the aforementioned parameters was produced, we shall focus on examining the merger time predictions before we look at the other results. These are the more important outputs of the model, as they fulfil the initial goal of providing more information for use in more established parameter estimation methods. The model was trained to output merger arrival time predictions for the LIGO Hanford detector, the LIGO Livingston detector, and the Earth's centre. Results are consistent between detectors, so for the first set of comparisons we compare only the predictions for the LIGO Hanford arrival time to the ground truth LIGO Hanford arrival time. First, we can examine the arrival time predictions compared to the individual signal SNRs --- unlike in the classification case, where only a single result is output (albeit from two output neurons normalized by a SoftMax layer), the model is now tasked to output two regression values (along with the other parameters): a merger time for signal A and a merger time for signal B.
Therefore, we have generated two plots, one for each signal; see @crosswave_regression_snr. Examining the results, we observe that there are a few outliers with high errors above #box("0.25" + h(1.5pt) + "s"), but the majority of merger times were predicted with errors under this margin. On average, errors were notably worse for signal B than for signal A, but their magnitudes do not seem to be correlated with the signals' network SNR, so it is unclear whether this increased error in signal B lies in the method or is a result of systematic training degradation introduced by the normalisation error. Another possibility is that this increased error arises from the asymmetry caused because signal B always arrives in the detector before signal A (except in the very rare edge cases previously discussed); because of this, the entirety of signal B is always contaminated with the inspiral of signal A, whereas signal A often has at least some clean signal after signal B has merged and its ringdown has petered out. Some errors could originate from the misidentification of signals A and B. However, if this were the case, we would expect to see this error correlated with SNR, which we do not, at least on first inspection.

#figure(
    grid(
        columns: 2,
        rows: 2,
        gutter: 1em,
        [ #image("regression_snr_A.png", width: 100%) ],
        [ #image("regression_snr_B.png", width: 100%) ],
        [ #image("regression_snr_A_less.png", width: 100%) ],
        [ #image("regression_snr_B_less.png", width: 100%) ]
    ),
    caption: [CrossWave merger time prediction error of signal A, _upper left_, and signal B, _upper right_. Compared to the classification results, the merger time errors look more consistent. This is primarily because the model output is not restricted between one and zero as it is in classification, so a few outliers with very high errors saturate the colour map. Given this, we have also plotted the same results with all examples that have errors greater than #box("0.25" + h(1.5pt) + "s") removed, for a more granular view of the bulk of the regression prediction errors. These are the lower two plots. In these focused plots, we can see that a significant number of results have a regression error of less than #box("0.1" + h(1.5pt) + "s"), which could be helpful to aid a secondary parameter estimation method. On these lower plots, there is also a notable difference between the average error on signal A merger time predictions and the average error on signal B merger time predictions, with a higher average error on signal B. It is unclear exactly why this is the case, but we speculate that this is because signal B arrives first in the detector, meaning that the inspiral of signal A can interfere significantly with signal B, whereas the opposite is only the case when the merger separation is very small. It is also possible that sometimes signal A can be misclassified as signal B. We would expect this latter confusion to have some correlation with SNR, but this does not seem to be the case. It could also be due to the aforementioned normalisation error reducing model training efficacy for signal B merger time predictions. Interestingly, the relationship between signal SNR and regression error appears weak. This suggests that the substantive cause of regression error lies elsewhere; we plot additional comparisons to investigate further.]
) <crosswave_regression_snr>

Next, we can plot the rolling average prediction error as it changes with SNR; see @crosswave_regression_efficiencies.
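For reference, a rolling average of this kind can be computed with a short NumPy sketch like the one below. The per-example arrays of SNRs and errors, and the window size, are illustrative assumptions rather than the exact plotting code used to produce these figures.

```python
import numpy as np

def rolling_average_error(snr, error, window=500):
    """Moving average of |error| after ranking validation examples by SNR."""
    order = np.argsort(snr)                       # rank examples by SNR
    err_sorted = np.abs(np.asarray(error))[order]
    kernel = np.ones(window) / window             # uniform averaging window
    smoothed = np.convolve(err_sorted, kernel, mode="valid")
    # Trim the SNR axis so it lines up with the averaged values.
    return np.asarray(snr)[order][window - 1:], smoothed
```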
The picture is mostly as anticipated, with the determining factor for high merger time error at low SNRs ($<20$) being the SNR of the signal whose merger time is being predicted. This suggests that the expected SNR relationship is present in @crosswave_regression_snr, but is hidden under variance created by other factors. At optimal network SNRs between 20 and 60, the results are roughly consistent between signal A prediction error and signal B prediction error, with a high SNR in the opposing signal leading to higher error. Above an optimal network SNR of 60, the picture changes, with the average signal A prediction error roughly equal whether ranked by signal A SNR, signal B SNR, or maximum SNR. This suggests that estimation of the signal A merger time is independent of signal B. For signal B, however, a high signal A SNR increases the error on signal B, suggesting that, as theorized, the inspiral of signal A can interfere with the parameter estimation of signal B.

#figure(
    grid(
        columns: 1,
        rows: 2,
        gutter: 1em,
        [ #image("regression_efficiency_A.png", width: 80%) ],
        [ #image("regression_efficiency_B.png", width: 80%) ]
    ),
    caption: [CrossWave rolling average merger time prediction error plotted when ranked by different SNR combinations. Since the model now has two outputs, one for each merger time in the input example, a plot was generated for each merger time prediction: a plot showing the signal A merger time prediction error, _upper_, and a plot showing the signal B merger time prediction error, _lower_. At low SNR, the error is dominated by the SNR of the given signal, which is anticipated --- a low SNR in a given signal would, evidently, make it difficult for the model to detect, and hence estimate the merger time of, that signal. We can also see the notable difference in average prediction error between the upper signal A plot and the lower signal B plot. Interestingly, we see that the error on the signal B merger time increases when the SNR of signal A is higher. This seems to be the case regardless of the SNR of signal B. Since signal B always arrives first in the detector, this could be because a loud signal A inspiral obfuscates the presence of signal B, rendering the signal B merger time hard to identify.]
) <crosswave_regression_efficiencies>

As was the case with classification, we might expect the difference in merger times to affect merger time estimation ability. Thus, we have created similar plots to determine the effect of the difference in merger arrival time between the two signals on model prediction ability; see @crosswave_merger_times. In both cases, there is a sharp peak in the magnitude of the prediction error when the merger time separation nears zero. This is as would be expected: when it becomes difficult to determine whether there are one or two signals at a particular location, with perhaps another smaller-SNR signal hiding elsewhere, the model becomes confused when trying to predict the merger times. In the case of signal A, defined as the second signal to arrive in the detector, when incorrect, the model tends to predict that the signal will arrive later than it does, and vice versa in the case of signal B. We also note that in both cases, though more distinctly in the signal B case, a cluster of errors falls along the line where the error equals the merger time separation; we can label these events as misidentifications, where signal A has been misidentified as signal B or vice versa.
In both cases, there also seems to be a very slight uptick in error at high separations; this could be due to the smaller number of examples present in these areas of parameter space, leading the model to treat these parameter values as unlikely.

#figure(
    grid(
        columns: 2,
        rows: 1,
        gutter: 1em,
        [ #image("error_time_difference_signal_a.png", width: 100%) ],
        [ #image("error_time_difference_signal_b.png", width: 100%) ]
    ),
    caption: [CrossWave merger arrival time prediction errors compared with the time separation between signal A and signal B merger arrival times in the LIGO Hanford detector. _Left:_ Error on the signal A merger time prediction compared with the time separation between the two mergers. _Right:_ Error on the signal B merger time prediction compared with the time separation between the two mergers. The colour of the plotted examples depicts the absolute error between the model prediction and the ground truth value, and the red line shows the rolling average absolute prediction error. For both merger times, we can see a spike in erroneous merger time predictions when the time separation is near zero. This is similar behaviour to what is seen in the classification examples. It is also expected here, since if the mergers are hard to distinguish from each other, it will be difficult to determine the specific merger times. An asymmetry arises in the direction in which the model incorrectly predicts the merger: for signal A, defined as the second to arrive in the detector, the model often predicts that the signal will arrive later than it does, and for signal B, the model often predicts that it will arrive earlier than it does. Since B always arrives first, these are logical assumptions for the model to make in both cases. In both cases, we also see lines of erroneous predictions where the model error equals the time separation. These are believed to be cases where the model mistakes signal A for signal B and vice versa. This line is more pronounced for signal B errors, suggesting that signal B is more commonly mistaken for signal A than the other way around.]
) <crosswave_merger_times>

We have decided not to plot the merger times against the mass parameters as we did in the classification case, as these did not seem to have much of an effect on classification ability. Finally, we make a direct comparison of the model predictions to the ground truth values; see @crosswave_arrival_time_prediction_error. This is plotted to align with the further presentation of parameter estimation results in @crosswave-further-pe-sec. We have plotted the predicted merger time in the LIGO Hanford detector compared to the ground truth, as well as the predicted merger time in the LIGO Livingston detector compared to the ground truth. The results for Earth-centre merger arrival times are omitted, as these were not considered particularly relevant to this analysis. A full table of parameter estimation results is given in @crosswave-regression-results.

#figure(
    grid(
        columns: 2,
        rows: 2,
        gutter: 1em,
        [ #image("h1_signal_a_arrival_time.png", width: 100%) ],
        [ #image("h1_signal_b_arrival_time_2.png", width: 100%) ],
        [ #image("l1_signal_a_arrival_time.png", width: 100%) ],
        [ #image("l1_signal_b_arrival_time_2.png", width: 100%) ],
    ),
    caption: [CrossWave signal merger time parameter estimation results. Each pair of plots shows the merger time estimate of signal A (_left_) and signal B (_right_). For each validation example, the ground truth value is represented on the x-axis, and the model prediction is on the y-axis.
Each represents the signal merger time in seconds. The colour of each circle depicts the absolute difference between the ground truth value and the model prediction, which will be zero if the point falls on the line $x = y$, which is also shown on the plot as a dashed grey line. Due to an error in label normalisation, some ground truth values for signal B were less than zero. Unfortunately, due to the ReLU activation function used on the output layer, the model could not output predictions below zero; this meant that it was unable to predict these values correctly. This error may have interfered slightly with the rest of the training process; however, other than the inability to predict these examples, there does not seem to be a significant reduction in the performance of signal B merger time prediction. Validation examples with ground truth values below zero, and their associated predictions, have been omitted from the signal B plots for visual clarity. If training were to be repeated, this error could easily be rectified, either by correcting the normalization or by altering the choice of activation function. _Upper Left:_ Predicted against actual signal A merger time in the simulated LIGO Hanford output. _Upper Right:_ Predicted against actual signal B merger time in the simulated LIGO Hanford output. _Lower Left:_ Predicted against actual signal A merger time in the simulated LIGO Livingston output. _Lower Right:_ Predicted against actual signal B merger time in the simulated LIGO Livingston output. ]
) <crosswave_arrival_time_prediction_error>

=== Other Parameter Estimation Results <crosswave-further-pe-sec>

As well as attempting to predict the merger arrival times for both signal A and signal B, the model was also tasked to output several other parameters. This was initially done to attempt to increase the model's knowledge about the task at hand, but was found to have no significant positive or negative effect on the estimation of the merger time parameters. Thus, it was kept in the final model as a potential feature of interest for future development into a more advanced, fully machine-learning-based parameter estimation model for overlapping signals. A full table of the results of the CrossWave parameter estimation model, when run on the $2 times 10^4$ pair validation examples and compared to the ground truth labels, can be seen in @crosswave-regression-results. For each parameter, an $R^2$ score is presented as well as a Mean Absolute Error (MAE). The $R^2$ score, or "coefficient of determination", is a measure of the goodness of fit of a model. It provides an indication of how well the independent variables in a regression model explain the variability of the dependent variable. An $R^2$ score of one indicates a perfect predictor, whereas an $R^2$ score of zero indicates the model is doing no better than outputting the mean value, and a negative value indicates that the model is performing worse than outputting the mean, and so possibly indicates an error in training. The MAE simply indicates the average magnitude of the difference between the model prediction and the ground truth value.
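As a concrete reference, both metrics can be computed in a few lines; the values below are invented purely to illustrate the calculation.

```python
import numpy as np

def r2_score(y_true, y_pred):
    # 1 - (residual sum of squares) / (total sum of squares)
    ss_res = np.sum((y_true - y_pred) ** 2)
    ss_tot = np.sum((y_true - np.mean(y_true)) ** 2)
    return 1.0 - ss_res / ss_tot

def mean_absolute_error(y_true, y_pred):
    return np.mean(np.abs(y_true - y_pred))

y_true = np.array([0.2, 0.5, 0.9, 1.4])    # illustrative merger times (s)
y_pred = np.array([0.25, 0.45, 1.0, 1.3])
print(r2_score(y_true, y_pred))             # 1 = perfect; 0 = mean predictor
print(mean_absolute_error(y_true, y_pred))  # average |error|, here in seconds
```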
#figure(
    table(
        columns: (auto, auto, auto, auto, auto),
        inset: 10pt,
        align: horizon,
        [*Parameter*], [*$R^2$ score A*], [*Mean Absolute Error A*], [*$R^2$ score B*], [*Mean Absolute Error B*],
        [H1 Time], [0.968], [0.100 s], [0.963], [0.0967 s],
        [L1 Time], [0.963], [0.0915 s], [0.963], [0.0965 s],
        [Geocent Time], [0.963], [0.0923 s], [0.963], [0.0974 s],
        [Luminosity Distance], [-0.834], [23.2 MPc], [-0.791], [22.7 MPc],
        [Mass 1], [0.613], [#box("7.95" + h(1.5pt) + $M_dot.circle$)], [0.623], [#box("7.79" + h(1.5pt) + $M_dot.circle$)],
        [Mass 2], [0.718], [#box("5.59" + h(1.5pt) + $M_dot.circle$)], [0.715], [#box("5.47" + h(1.5pt) + $M_dot.circle$)],
        [Spin 1x], [-0.00897], [0.162], [-0.0119], [0.165],
        [Spin 1y], [-0.0780], [0.174], [-0.0749], [0.178],
        [Spin 1z], [0.268], [0.273], [0.234], [0.280],
        [Spin 2x], [-0.0117], [0.161], [-0.0114], [0.163],
        [Spin 2y], [-0.0709], [0.179], [-0.0705], [0.179],
        [Spin 2z], [0.0699], [0.311], [0.0620], [0.316],
    ),
    caption: [Results of the CrossWave parameter estimation model. For each of the model's outputted parameters, a Mean Absolute Error (MAE) along with an $R^2$ score is presented. The MAE indicates the average magnitude of the errors between the model's predictions on the validation dataset and the corresponding ground truth values. It is a measure of average prediction accuracy, though it does not distinguish between overestimation and underestimation. The $R^2$ score quantifies how well the model's predictions explain the variance of the ground truth values in the validation dataset. An $R^2$ score of one signifies perfect prediction accuracy on the validation examples used. In contrast, a score of zero suggests the model's predictive capability is no better than simply using the mean value of the validation examples. Negative $R^2$ values indicate that the model performs worse than a model that would always predict the mean, possibly signalling errors in the training process or model selection. ]
) <crosswave-regression-results>

Out of the parameters that the model was trained to predict, the most accurate are the merger times, with $R^2$ scores between 0.96 and 0.97, and MAEs between #box("0.09" + h(1.5pt) + "s") and #box("0.1" + h(1.5pt) + "s"). As we have seen from previous analyses of these results, this average is driven up by outliers. There does not seem to be a particular detector that performs worse than any other; however, the Hanford signal A MAE is notably higher than the other predictions, and whether this is driven up by statistical variance or some other factor is unknown. These results are discussed in more detail previously in @crosswave-merger-time-sec.

The parameters that the model was next most proficient at extracting were the mass parameters of each of the two component masses in each of the two binaries, giving four mass values in total: signal A mass 1, signal B mass 1, signal A mass 2, and signal B mass 2. The $R^2$ scores are lower and the MAE values are higher for mass 1 than for mass 2. This is probably because, during parameter generation, mass 1 is always forced, by convention, to be the higher mass, meaning that, in general, mass 1 has a larger range of possible values than mass 2. Because the model can use its prediction of mass 1 to constrain mass 2, it can reduce its error. This constraint also allows for a better guess at the average mass for mass 2, since its values have a smaller distribution than the mass 1 values. These results can be seen in @crosswave_mass_prediction_error.
The error margins may be low enough for these results to have some limited usefulness; however, since they lack any form of uncertainty, it is unclear exactly what that would be. Perhaps they could be used to inform the priors of another parameter estimation search.

#figure(
    grid(
        columns: 2,
        rows: 2,
        gutter: 1em,
        [ #image("signal_a_mass_1.png", width: 100%) ],
        [ #image("signal_b_mass_1.png", width: 100%) ],
        [ #image("signal_a_mass_2.png", width: 100%) ],
        [ #image("signal_b_mass_2.png", width: 100%) ],
    ),
    caption: [CrossWave companion mass parameter estimation results. Each pair of plots shows the companion mass estimates of signal A (_left_) and signal B (_right_). For each validation example, the ground truth value is represented on the x-axis, and the model prediction is on the y-axis. Each represents the companion mass in solar masses. The colour of each circle depicts the difference between the ground truth value and the model prediction, which will be zero if the point falls on the line $x = y$, which is also shown on the plot as a dashed grey line. After the merger time predictions, the mass plots show the greatest promise, with the model able to predict component masses with a moderate degree of accuracy. Without a comparison to another parameter estimation method, it is unclear exactly how useful these results would be. _Upper Left:_ Predicted against actual signal A companion 1 mass. _Upper Right:_ Predicted against actual signal B companion 1 mass. _Lower Left:_ Predicted against actual signal A companion 2 mass. _Lower Right:_ Predicted against actual signal B companion 2 mass.]
) <crosswave_mass_prediction_error>

Beyond the companion mass parameter estimation results, CrossWave's parameter estimation ability is very limited, and it is explored here as a demonstration of its lack of proficiency rather than as a suggestion of usefulness. Of the attempted extraction of the companion spins, only the Z components show any signs of successful estimation. In particular, the Z component of the larger companion shows partial predictive power, although the usefulness of this extraction is questionable. These results are shown in @crosswave_spin_error.

#figure(
    grid(
        columns: 2,
        rows: 6,
        gutter: 1em,
        [ #image("signal_a_spin_1_x.png", width: 100%) ],
        [ #image("signal_b_spin_1_x.png", width: 100%) ],
        [ #image("signal_a_spin_1_y.png", width: 100%) ],
        [ #image("signal_b_spin_1_y.png", width: 100%) ],
        [ #image("signal_a_spin_1_z.png", width: 100%) ],
        [ #image("signal_b_spin_1_z.png", width: 100%) ],
        [ #image("signal_a_spin_2_x.png", width: 100%) ],
        [ #image("signal_b_spin_2_x.png", width: 100%) ],
        [ #image("signal_a_spin_2_y.png", width: 100%) ],
        [ #image("signal_b_spin_2_y.png", width: 100%) ],
        [ #image("signal_a_spin_2_z.png", width: 100%) ],
        [ #image("signal_b_spin_2_z.png", width: 100%) ],
    ),
    caption: [CrossWave regression results for the dimensionless spin components of the two companions in each binary merger, A and B. The left plots show the parameter extracted from merger A, whereas the right plots show the same parameter extracted by CrossWave from merger B. The plots show the ground truth value of the dimensionless spin component plotted against the predicted value of the dimensionless spin component. The colour of each validation example indicates the difference between the ground truth and the predicted value, in this case equivalent to the distance of the point from the line $x = y$.
The results are in the following order from upper to lower:

+ Mass 1 Spin Component X [_Left:_ Signal A, _Right:_ Signal B]
+ Mass 1 Spin Component Y [_Left:_ Signal A, _Right:_ Signal B]
+ Mass 1 Spin Component Z [_Left:_ Signal A, _Right:_ Signal B]
+ Mass 2 Spin Component X [_Left:_ Signal A, _Right:_ Signal B]
+ Mass 2 Spin Component Y [_Left:_ Signal A, _Right:_ Signal B]
+ Mass 2 Spin Component Z [_Left:_ Signal A, _Right:_ Signal B]

There appears to be little difference in estimation ability between signal A and signal B. The X and Y components show no predictive ability, with the model finding an approximate output value to emit for all validation examples. It was known that extracting the spin parameters from the injected signals would be a challenging task, so this is anticipated. The model appears to show limited estimation ability for the Z components, with the Z component of the more massive companion extracted with a stronger correlation than that of the lower-mass companion, for which CrossWave shows only very slight predictive ability. ]
) <crosswave_spin_error>

Finally, CrossWave attempted to extract the luminosity distance of the sources. This extraction failed in an unusual manner. Not only did the model fail to correctly predict the luminosity distance to any degree, but the results also produced a negative $R^2$ score, which indicates that the model did not even find a good mean value. Why this is the case is unknown. The luminosity distance could be difficult to extract due to its degeneracy with the source inclination angle, a parameter for which prediction was not attempted. The negative $R^2$ score could be due to another normalisation error, which further investigation may reveal. These results can be seen in @crosswave_luminosity_distance_error.

#figure(
    grid(
        columns: 2,
        rows: 1,
        gutter: 1em,
        [ #image("signal_a_luminosity_distance.png", width: 100%) ],
        [ #image("signal_b_luminosity_distance.png", width: 100%) ],
    ),
    caption: [CrossWave model predicted luminosity distance vs ground truth luminosity distance of simulated BBH waveforms. _Left:_ Predicted signal A luminosity distance. _Right:_ Predicted signal B luminosity distance. The colour of each example point indicates the difference between the predicted and the ground truth value for that example. These plots indicate that there is almost no correlation between the predicted luminosity distance and the ground truth value. The model outputs a very similar value independent of luminosity distance; it is unclear whether this inability arises from a problem with model training and/or data processing, or whether luminosity distance is too difficult for the model to determine because of degeneracy with other parameters such as inclination. ]
) <crosswave_luminosity_distance_error>

== Discussion and Limitations

Overlapnet shows us that a machine learning model can be trained to distinguish between a single signal and a pair of signals with merger time separations of less than #box("2.0" + h(1.5pt) + "s") and greater than #box("0.1" + h(1.5pt) + "s"), provided the minimum SNR of the signals is sufficient for differentiation. With further hyperparameter tuning and adjustments to the training dataset, substantive improvements in differentiation ability could be made. This suggests that a machine learning method such as Overlapnet may be a good addition to a future CBC pipeline that has to contend with the possibility of overlapping signals.
It could act alone, or as one of a suite of methods, to switch between alternate parameter estimation methods designed to deal specifically with overlapping signals. CrossWave has shown that machine learning methods can be used to extract the merger times of two overlapping signals with moderate success. Again, CrossWave or an improved model could be used as part of a larger parameter estimation pipeline to provide priors to a more established parameter estimation method once a pair of overlapping signals has been identified in the data. CrossWave has also demonstrated limited parameter estimation ability of its own across a few other parameters. It was most successful at extracting predictions for the masses of the companions of signals A and B, achieving some accuracy. It also showed limited potential to extract predictions of the Z dimensionless spin components. This method of directly extracting parameters using CrossWave has limited application, as it cannot deal with uncertainty or multi-modal probability spaces, which are needed for a robust and modern parameter search. It is possible that a machine learning method could be created with these features, through the use of Bayesian Neural Networks (notably distinct from Bayesian networks), or a multimodal latent space, perhaps within an autoencoder framework, similar to the function of the VITAMIN parameter estimation method @vitamin. The large model developed for CrossWave has performed well and warrants further investigation; a comparison against other detection methods on the single-signal detection problem is recommended, and was desired, but was abandoned due to time constraints.
https://github.com/ClazyChen/Table-Tennis-Rankings
https://raw.githubusercontent.com/ClazyChen/Table-Tennis-Rankings/main/history/2006/MS-03.typ
typst
#set text(font: ("Courier New", "NSimSun")) #figure( caption: "Men's Singles (1 - 32)", table( columns: 4, [Ranking], [Player], [Country/Region], [Rating], [1], [<NAME>], [CHN], [2953], [2], [<NAME>], [CHN], [2822], [3], [<NAME>], [GER], [2788], [4], [<NAME>i], [CHN], [2678], [5], [<NAME>], [CHN], [2648], [6], [<NAME>], [BLR], [2645], [7], [<NAME>], [HKG], [2561], [8], [<NAME>], [KOR], [2529], [9], [<NAME>], [KOR], [2520], [10], [<NAME>], [SWE], [2502], [11], [#text(gray, "<NAME>zheng")], [CHN], [2485], [12], [<NAME>], [DEN], [2469], [13], [<NAME>], [CHN], [2469], [14], [<NAME>], [KOR], [2456], [15], [<NAME>], [CHN], [2453], [16], [<NAME>], [BEL], [2446], [17], [<NAME>], [FRA], [2422], [18], [KREANGA Kalinikos], [GRE], [2420], [19], [<NAME>], [ROU], [2419], [20], [KONG Linghui], [CHN], [2388], [21], [<NAME>], [NED], [2384], [22], [<NAME>], [AUT], [2381], [23], [CHUANG Chih-Yuan], [TPE], [2369], [24], [<NAME>], [AUT], [2356], [25], [<NAME>], [CZE], [2347], [26], [BLASZCZYK Lucjan], [POL], [2343], [27], [<NAME>], [KOR], [2323], [28], [<NAME>], [SWE], [2322], [29], [<NAME>], [HKG], [2315], [30], [<NAME>], [NED], [2314], [31], [WALDNER Jan-Ove], [SWE], [2310], [32], [CHEUNG Yuk], [HKG], [2303], ) )#pagebreak() #set text(font: ("Courier New", "NSimSun")) #figure( caption: "Men's Singles (33 - 64)", table( columns: 4, [Ranking], [Player], [Country/Region], [Rating], [33], [YANG Zi], [SGP], [2303], [34], [ZHANG Chao], [CHN], [2292], [35], [CHIANG Hung-Chieh], [TPE], [2282], [36], [HE Zhiwen], [ESP], [2272], [37], [QIU Yike], [CHN], [2264], [38], [CHTCHETININE Evgueni], [BLR], [2252], [39], [<NAME>], [DEN], [2237], [40], [SHMYREV Maxim], [RUS], [2234], [41], [LEUNG Chu Yan], [HKG], [2233], [42], [<NAME>], [TPE], [2221], [43], [ROSSKOPF Jorg], [GER], [2218], [44], [<NAME>], [SWE], [2206], [45], [FRANZ Peter], [GER], [2203], [46], [SMIRNOV Alexey], [RUS], [2203], [47], [<NAME>], [DEN], [2199], [48], [<NAME>], [GRE], [2198], [49], [<NAME>], [SRB], [2197], [50], [<NAME>], [JPN], [2190], [51], [<NAME>], [CRO], [2185], [52], [LEGOUT Christophe], [FRA], [2181], [53], [<NAME>], [AUT], [2180], [54], [<NAME>], [POL], [2174], [55], [<NAME>], [DOM], [2168], [56], [SUSS Christian], [GER], [2164], [57], [<NAME>], [GER], [2164], [58], [<NAME>], [KOR], [2157], [59], [<NAME>], [CRO], [2151], [60], [KARAKASEVIC Aleksandar], [SRB], [2150], [61], [MAZUNOV Dmitry], [RUS], [2148], [62], [<NAME>], [SVK], [2143], [63], [<NAME>], [BUL], [2142], [64], [<NAME>], [FRA], [2137], ) )#pagebreak() #set text(font: ("Courier New", "NSimSun")) #figure( caption: "Men's Singles (65 - 96)", table( columns: 4, [Ranking], [Player], [Country/Region], [Rating], [65], [<NAME>], [SGP], [2137], [66], [<NAME>], [CHN], [2130], [67], [<NAME>], [GER], [2109], [68], [OVTCHAROV Dimitrij], [GER], [2100], [69], [<NAME>], [SWE], [2094], [70], [<NAME>], [RUS], [2092], [71], [<NAME>], [CZE], [2091], [72], [<NAME>], [GER], [2091], [73], [<NAME>], [CZE], [2091], [74], [<NAME>], [POL], [2086], [75], [<NAME>], [CZE], [2079], [76], [YANG Min], [ITA], [2073], [77], [<NAME>], [HKG], [2068], [78], [<NAME>], [CZE], [2065], [79], [<NAME>], [BEL], [2062], [80], [WOSIK Torben], [GER], [2060], [81], [<NAME>], [SWE], [2059], [82], [ERLANDSEN Geir], [NOR], [2055], [83], [KISHIKAWA Seiya], [JPN], [2054], [84], [<NAME>], [JPN], [2050], [85], [<NAME>], [QAT], [2049], [86], [<NAME>], [SVK], [2048], [87], [<NAME>], [CRO], [2047], [88], [<NAME>], [FRA], [2043], [89], [<NAME>], [POL], [2041], [90], [CHO Jihoon], [KOR], [2037], [91], [JIANG Tianyi], [HKG], [2024], [92], 
[<NAME>], [SWE], [2019], [93], [<NAME>], [UKR], [2016], [94], [TORIOLA Segun], [NGR], [2016], [95], [<NAME>], [SLO], [2015], [96], [MATSUSHITA Koji], [JPN], [2012], ) )#pagebreak() #set text(font: ("Courier New", "NSimSun")) #figure( caption: "Men's Singles (97 - 128)", table( columns: 4, [Ranking], [Player], [Country/Region], [Rating], [97], [TUGWELL Finn], [DEN], [2008], [98], [CHO Eonrae], [KOR], [2007], [99], [RI Chol Guk], [PRK], [1998], [100], [KIM Hyok Bong], [PRK], [1994], [101], [LEE Jungsam], [KOR], [1993], [102], [<NAME>], [HUN], [1990], [103], [YOON Jaeyoung], [KOR], [1986], [104], [HOU Yingchao], [CHN], [1986], [105], [<NAME>], [NOR], [1980], [106], [<NAME>], [BRA], [1975], [107], [<NAME>], [KOR], [1974], [108], [<NAME>], [CHN], [1974], [109], [<NAME>], [CAN], [1973], [110], [<NAME>], [FRA], [1973], [111], [<NAME>], [FRA], [1971], [112], [#text(gray, "<NAME>")], [KOR], [1969], [113], [<NAME>], [HUN], [1962], [114], [<NAME>], [CZE], [1961], [115], [<NAME>], [SWE], [1954], [116], [<NAME>], [ISL], [1951], [117], [<NAME>], [POL], [1948], [118], [<NAME>], [CRO], [1947], [119], [<NAME>], [KOR], [1945], [120], [LIU Song], [ARG], [1943], [121], [#text(gray, "<NAME>")], [SWE], [1941], [122], [<NAME>], [CAN], [1940], [123], [<NAME>], [ROU], [1936], [124], [<NAME>], [CHN], [1934], [125], [<NAME>], [FRA], [1933], [126], [CABESTANY Cedrik], [FRA], [1928], [127], [<NAME>], [CZE], [1927], [128], [<NAME>], [HUN], [1925], ) )
https://github.com/Bi0T1N/typst-iconic-salmon-fa
https://raw.githubusercontent.com/Bi0T1N/typst-iconic-salmon-fa/main/iconic-salmon-fa.typ
typst
MIT License
#import "@preview/fontawesome:0.1.0": * /// A function that creates a clickable link with the passed name as text. /// In addition to this, the associated icon of the web page is inserted /// /// - name (str): The visible name of the clickable link /// - url (str): The URL to the website (it is assumed to start with the http(s) scheme) /// - icon (function): A function from fontawesome that provides the associated icon /// -> content #let icon-link-generator( name, url, icon, ..args ) = { if name.len() == 0 { panic("The name must contain at least one character") } if url.len() == 0 { panic("The url must contain at least one character") } let styled_text = text( name, ..args ) let clickable_link if url.ends-with(regex("\.(com|org|net)/@?$")) { // links where the name is only appended, i.e. https://github.com/Bi0T1N clickable_link = url + name } else { // links where the profile link is more complicated, i.e. https://stackoverflow.com/users/20742512/bi0t1n clickable_link = url } // unify all links if not clickable_link.ends-with("/") { clickable_link = clickable_link + "/" } // content icon() " " link(clickable_link)[#styled_text] } #let facebook-info( name, url: "https://www.facebook.com/", ..args ) = { icon-link-generator(name, url, fa-facebook, ..args) } #let instagram-info( name, url: "https://www.instagram.com/", ..args ) = { icon-link-generator(name, url, fa-instagram, ..args) } #let tiktok-info( name, url: "https://www.tiktok.com/@", ..args ) = { // icon-link-generator(name, url, fa-tiktok, ..args) // icon-link-generator(name, url, fa-icon.with("\u{e07b}", fa-set: "Brands"), ..args) icon-link-generator(name, url, fa-icon.with("tiktok", fa-set: "Brands"), ..args) } #let youtube-info( name, url: "https://www.youtube.com/@", ..args ) = { icon-link-generator(name, url, fa-youtube, ..args) } #let vimeo-info( name, url: "https://vimeo.com/", ..args ) = { icon-link-generator(name, url, fa-vimeo, ..args) } #let linkedin-info( name, url: "https://www.linkedin.com/", ..args ) = { icon-link-generator(name, url, fa-linkedin, ..args) } #let xing-info( name, url: "https://www.xing.com/", ..args ) = { icon-link-generator(name, url, fa-xing, ..args) } #let github-info( name, url: "https://github.com/", ..args ) = { icon-link-generator(name, url, fa-github, ..args) } #let gitlab-info( name, url: "https://gitlab.com/", ..args ) = { icon-link-generator(name, url, fa-gitlab, ..args) } #let bitbucket-info( name, url: "https://bitbucket.org/", ..args ) = { icon-link-generator(name, url, fa-bitbucket, ..args) } // TODO: no icon available // #let codeberg-info( // name, // url: "https://codeberg.org/", // ..args // ) = { // icon-link-generator(name, url, fa-question, ..args) // } // TODO: no icon available // #let sourceforge-info( // name, // url: "https://sourceforge.net/", // ..args // ) = { // icon-link-generator(name, url, fa-question, ..args) // } #let docker-info( name, url: "https://hub.docker.com/", ..args ) = { icon-link-generator(name, url, fa-docker, ..args) } #let stackoverflow-info( name, url: "https://stackoverflow.com/", ..args ) = { // icon-link-generator(name, url, fa-stack-overflow, ..args) // icon-link-generator(name, url, fa-icon.with("\u{f16c}", fa-set: "Brands"), ..args) icon-link-generator(name, url, fa-icon.with("stack-overflow", fa-set: "Brands"), ..args) } #let stackexchange-info( name, url: "https://stackexchange.com/", ..args ) = { // icon-link-generator(name, url, fa-stack-exchange, ..args) // icon-link-generator(name, url, fa-icon.with("\u{f18d}", fa-set: "Brands"), ..args) 
icon-link-generator(name, url, fa-icon.with("stack-exchange", fa-set: "Brands"), ..args) } #let skype-info( name, url: "https://www.skype.com/", ..args ) = { icon-link-generator(name, url, fa-skype, ..args) } #let discord-info( name, url: "https://discord.com/", ..args ) = { icon-link-generator(name, url, fa-discord, ..args) } #let twitter-info( name, url: "https://twitter.com/", ..args ) = { icon-link-generator(name, url, fa-twitter, ..args) } #let x-twitter-info( name, url: "https://x.com/", ..args ) = { icon-link-generator(name, url, fa-icon.with("\u{e61b}"), ..args) } #let orcid-info( name, url: "https://orcid.org/", ..args ) = { icon-link-generator(name, url, fa-icon.with("orcid", fa-set: "Brands"), ..args) }
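A minimal usage sketch for the helpers above, assuming the file is imported as a local module (the handle `Bi0T1N` and the Stack Overflow profile URL are the ones already cited in the file's own comments):

#import "iconic-salmon-fa.typ": github-info, stackoverflow-info
// Simple case: the name is appended to the base URL,
// yielding https://github.com/Bi0T1N/
#github-info("Bi0T1N")
// Complicated case: pass the full profile URL explicitly.
#stackoverflow-info("Bi0T1N", url: "https://stackoverflow.com/users/20742512/bi0t1n")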
https://github.com/MrToWy/Bachelorarbeit
https://raw.githubusercontent.com/MrToWy/Bachelorarbeit/master/Code/publicDecorator.typ
typst
```ts import { SetMetadata } from '@nestjs/common'; export const Public = () => SetMetadata('isPublic', true); ```
https://github.com/GreyCorbel/docs
https://raw.githubusercontent.com/GreyCorbel/docs/main/gcdocs.typ
typst
MIT License
#let gc-docs(title: [], version: str, logo: str, doc) = { set text(font: "Arial",lang: "cs") set page(footer: grid( columns: (1fr, 1fr, 1fr), align(left + horizon, version), align(center + horizon, image(width: 80%, logo)), align(right + horizon, datetime.today().display()), )) show heading.where(level: 1): it => [ #pagebreak(weak: true) #it ] set heading(numbering: "1.") align(center, text(25pt, strong(title))) "" pagebreak(weak: true) outline() pagebreak(weak: true) doc } #let gc-config(displayname, name, example, description) = locate(loc => { block(breakable: false)[ #let level = query(selector(heading.where(outlined: true)).before(loc), loc).last().level #heading(level: level + 1, displayname, outlined: false) #label("gc-config-" + displayname.replace(regex("\s+"), "-")) #block( width: 100%, radius: 5pt, inset: 8pt, fill: silver, breakable: false, table( columns: 2, stroke: none, "Název:", name, "Příklad:", if type(example) == str [#raw(example)] else [#example], "Popis:", description, ), ) ] }) #let azure = ( func: "Azure Function", app_service: "Azure App Service", rg: "Resource Group", event_grid: "Azure Event Grid", event_hub: "Azure Event Hub", kv: "Azure Key vault", app_ins: "Azure Application Insights", app_reg: "Azure App registration", cosmos_db: "Azure Cosmos DB" )
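A minimal usage sketch for the template above (the title, version string, logo path, and config entry are hypothetical placeholders; the Czech table labels "Název", "Příklad", and "Popis" mean "Name", "Example", and "Description"). Note that gc-config queries the last outlined heading before it, so it must appear after at least one heading:

#import "gcdocs.typ": gc-docs, gc-config, azure
#show: doc => gc-docs(title: [Example Guide], version: "v1.0", logo: "logo.png", doc)
= Configuration
// Renders a sub-heading plus a silver parameter table:
#gc-config([Tenant id], "tenantId", "00000000-0000-0000-0000-000000000000", [Tenant that hosts the #azure.app_reg.])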
https://github.com/typst/packages
https://raw.githubusercontent.com/typst/packages/main/packages/preview/unichar/0.1.0/ucd/block-1F800.typ
typst
Apache License 2.0
#let data = ( ("LEFTWARDS ARROW WITH SMALL TRIANGLE ARROWHEAD", "So", 0), ("UPWARDS ARROW WITH SMALL TRIANGLE ARROWHEAD", "So", 0), ("RIGHTWARDS ARROW WITH SMALL TRIANGLE ARROWHEAD", "So", 0), ("DOWNWARDS ARROW WITH SMALL TRIANGLE ARROWHEAD", "So", 0), ("LEFTWARDS ARROW WITH MEDIUM TRIANGLE ARROWHEAD", "So", 0), ("UPWARDS ARROW WITH MEDIUM TRIANGLE ARROWHEAD", "So", 0), ("RIGHTWARDS ARROW WITH MEDIUM TRIANGLE ARROWHEAD", "So", 0), ("DOWNWARDS ARROW WITH MEDIUM TRIANGLE ARROWHEAD", "So", 0), ("LEFTWARDS ARROW WITH LARGE TRIANGLE ARROWHEAD", "So", 0), ("UPWARDS ARROW WITH LARGE TRIANGLE ARROWHEAD", "So", 0), ("RIGHTWARDS ARROW WITH LARGE TRIANGLE ARROWHEAD", "So", 0), ("DOWNWARDS ARROW WITH LARGE TRIANGLE ARROWHEAD", "So", 0), (), (), (), (), ("LEFTWARDS ARROW WITH SMALL EQUILATERAL ARROWHEAD", "So", 0), ("UPWARDS ARROW WITH SMALL EQUILATERAL ARROWHEAD", "So", 0), ("RIGHTWARDS ARROW WITH SMALL EQUILATERAL ARROWHEAD", "So", 0), ("DOWNWARDS ARROW WITH SMALL EQUILATERAL ARROWHEAD", "So", 0), ("LEFTWARDS ARROW WITH EQUILATERAL ARROWHEAD", "So", 0), ("UPWARDS ARROW WITH EQUILATERAL ARROWHEAD", "So", 0), ("RIGHTWARDS ARROW WITH EQUILATERAL ARROWHEAD", "So", 0), ("DOWNWARDS ARROW WITH EQUILATERAL ARROWHEAD", "So", 0), ("HEAVY LEFTWARDS ARROW WITH EQUILATERAL ARROWHEAD", "So", 0), ("HEAVY UPWARDS ARROW WITH EQUILATERAL ARROWHEAD", "So", 0), ("HEAVY RIGHTWARDS ARROW WITH EQUILATERAL ARROWHEAD", "So", 0), ("HEAVY DOWNWARDS ARROW WITH EQUILATERAL ARROWHEAD", "So", 0), ("HEAVY LEFTWARDS ARROW WITH LARGE EQUILATERAL ARROWHEAD", "So", 0), ("HEAVY UPWARDS ARROW WITH LARGE EQUILATERAL ARROWHEAD", "So", 0), ("HEAVY RIGHTWARDS ARROW WITH LARGE EQUILATERAL ARROWHEAD", "So", 0), ("HEAVY DOWNWARDS ARROW WITH LARGE EQUILATERAL ARROWHEAD", "So", 0), ("LEFTWARDS TRIANGLE-HEADED ARROW WITH NARROW SHAFT", "So", 0), ("UPWARDS TRIANGLE-HEADED ARROW WITH NARROW SHAFT", "So", 0), ("RIGHTWARDS TRIANGLE-HEADED ARROW WITH NARROW SHAFT", "So", 0), ("DOWNWARDS TRIANGLE-HEADED ARROW WITH NARROW SHAFT", "So", 0), ("LEFTWARDS TRIANGLE-HEADED ARROW WITH MEDIUM SHAFT", "So", 0), ("UPWARDS TRIANGLE-HEADED ARROW WITH MEDIUM SHAFT", "So", 0), ("RIGHTWARDS TRIANGLE-HEADED ARROW WITH MEDIUM SHAFT", "So", 0), ("DOWNWARDS TRIANGLE-HEADED ARROW WITH MEDIUM SHAFT", "So", 0), ("LEFTWARDS TRIANGLE-HEADED ARROW WITH BOLD SHAFT", "So", 0), ("UPWARDS TRIANGLE-HEADED ARROW WITH BOLD SHAFT", "So", 0), ("RIGHTWARDS TRIANGLE-HEADED ARROW WITH BOLD SHAFT", "So", 0), ("DOWNWARDS TRIANGLE-HEADED ARROW WITH BOLD SHAFT", "So", 0), ("LEFTWARDS TRIANGLE-HEADED ARROW WITH HEAVY SHAFT", "So", 0), ("UPWARDS TRIANGLE-HEADED ARROW WITH HEAVY SHAFT", "So", 0), ("RIGHTWARDS TRIANGLE-HEADED ARROW WITH HEAVY SHAFT", "So", 0), ("DOWNWARDS TRIANGLE-HEADED ARROW WITH HEAVY SHAFT", "So", 0), ("LEFTWARDS TRIANGLE-HEADED ARROW WITH VERY HEAVY SHAFT", "So", 0), ("UPWARDS TRIANGLE-HEADED ARROW WITH VERY HEAVY SHAFT", "So", 0), ("RIGHTWARDS TRIANGLE-HEADED ARROW WITH VERY HEAVY SHAFT", "So", 0), ("DOWNWARDS TRIANGLE-HEADED ARROW WITH VERY HEAVY SHAFT", "So", 0), ("LEFTWARDS FINGER-POST ARROW", "So", 0), ("UPWARDS FINGER-POST ARROW", "So", 0), ("RIGHTWARDS FINGER-POST ARROW", "So", 0), ("DOWNWARDS FINGER-POST ARROW", "So", 0), ("LEFTWARDS SQUARED ARROW", "So", 0), ("UPWARDS SQUARED ARROW", "So", 0), ("RIGHTWARDS SQUARED ARROW", "So", 0), ("DOWNWARDS SQUARED ARROW", "So", 0), ("LEFTWARDS COMPRESSED ARROW", "So", 0), ("UPWARDS COMPRESSED ARROW", "So", 0), ("RIGHTWARDS COMPRESSED ARROW", "So", 0), ("DOWNWARDS COMPRESSED ARROW", "So", 0), ("LEFTWARDS HEAVY COMPRESSED ARROW", 
"So", 0), ("UPWARDS HEAVY COMPRESSED ARROW", "So", 0), ("RIGHTWARDS HEAVY COMPRESSED ARROW", "So", 0), ("DOWNWARDS HEAVY COMPRESSED ARROW", "So", 0), ("LEFTWARDS HEAVY ARROW", "So", 0), ("UPWARDS HEAVY ARROW", "So", 0), ("RIGHTWARDS HEAVY ARROW", "So", 0), ("DOWNWARDS HEAVY ARROW", "So", 0), (), (), (), (), (), (), (), (), ("LEFTWARDS SANS-SERIF ARROW", "So", 0), ("UPWARDS SANS-SERIF ARROW", "So", 0), ("RIGHTWARDS SANS-SERIF ARROW", "So", 0), ("DOWNWARDS SANS-SERIF ARROW", "So", 0), ("NORTH WEST SANS-SERIF ARROW", "So", 0), ("NORTH EAST SANS-SERIF ARROW", "So", 0), ("SOUTH EAST SANS-SERIF ARROW", "So", 0), ("SOUTH WEST SANS-SERIF ARROW", "So", 0), ("LEFT RIGHT SANS-SERIF ARROW", "So", 0), ("UP DOWN SANS-SERIF ARROW", "So", 0), (), (), (), (), (), (), ("WIDE-HEADED LEFTWARDS LIGHT BARB ARROW", "So", 0), ("WIDE-HEADED UPWARDS LIGHT BARB ARROW", "So", 0), ("WIDE-HEADED RIGHTWARDS LIGHT BARB ARROW", "So", 0), ("WIDE-HEADED DOWNWARDS LIGHT BARB ARROW", "So", 0), ("WIDE-HEADED NORTH WEST LIGHT BARB ARROW", "So", 0), ("WIDE-HEADED NORTH EAST LIGHT BARB ARROW", "So", 0), ("WIDE-HEADED SOUTH EAST LIGHT BARB ARROW", "So", 0), ("WIDE-HEADED SOUTH WEST LIGHT BARB ARROW", "So", 0), ("WIDE-HEADED LEFTWARDS BARB ARROW", "So", 0), ("WIDE-HEADED UPWARDS BARB ARROW", "So", 0), ("WIDE-HEADED RIGHTWARDS BARB ARROW", "So", 0), ("WIDE-HEADED DOWNWARDS BARB ARROW", "So", 0), ("WIDE-HEADED NORTH WEST BARB ARROW", "So", 0), ("WIDE-HEADED NORTH EAST BARB ARROW", "So", 0), ("WIDE-HEADED SOUTH EAST BARB ARROW", "So", 0), ("WIDE-HEADED SOUTH WEST BARB ARROW", "So", 0), ("WIDE-HEADED LEFTWARDS MEDIUM BARB ARROW", "So", 0), ("WIDE-HEADED UPWARDS MEDIUM BARB ARROW", "So", 0), ("WIDE-HEADED RIGHTWARDS MEDIUM BARB ARROW", "So", 0), ("WIDE-HEADED DOWNWARDS MEDIUM BARB ARROW", "So", 0), ("WIDE-HEADED NORTH WEST MEDIUM BARB ARROW", "So", 0), ("WIDE-HEADED NORTH EAST MEDIUM BARB ARROW", "So", 0), ("WIDE-HEADED SOUTH EAST MEDIUM BARB ARROW", "So", 0), ("WIDE-HEADED SOUTH WEST MEDIUM BARB ARROW", "So", 0), ("WIDE-HEADED LEFTWARDS HEAVY BARB ARROW", "So", 0), ("WIDE-HEADED UPWARDS HEAVY BARB ARROW", "So", 0), ("WIDE-HEADED RIGHTWARDS HEAVY BARB ARROW", "So", 0), ("WIDE-HEADED DOWNWARDS HEAVY BARB ARROW", "So", 0), ("WIDE-HEADED NORTH WEST HEAVY BARB ARROW", "So", 0), ("WIDE-HEADED NORTH EAST HEAVY BARB ARROW", "So", 0), ("WIDE-HEADED SOUTH EAST HEAVY BARB ARROW", "So", 0), ("WIDE-HEADED SOUTH WEST HEAVY BARB ARROW", "So", 0), ("WIDE-HEADED LEFTWARDS VERY HEAVY BARB ARROW", "So", 0), ("WIDE-HEADED UPWARDS VERY HEAVY BARB ARROW", "So", 0), ("WIDE-HEADED RIGHTWARDS VERY HEAVY BARB ARROW", "So", 0), ("WIDE-HEADED DOWNWARDS VERY HEAVY BARB ARROW", "So", 0), ("WIDE-HEADED NORTH WEST VERY HEAVY BARB ARROW", "So", 0), ("WIDE-HEADED NORTH EAST VERY HEAVY BARB ARROW", "So", 0), ("WIDE-HEADED SOUTH EAST VERY HEAVY BARB ARROW", "So", 0), ("WIDE-HEADED SOUTH WEST VERY HEAVY BARB ARROW", "So", 0), (), (), (), (), (), (), (), (), ("LEFTWARDS TRIANGLE ARROWHEAD", "So", 0), ("UPWARDS TRIANGLE ARROWHEAD", "So", 0), ("RIGHTWARDS TRIANGLE ARROWHEAD", "So", 0), ("DOWNWARDS TRIANGLE ARROWHEAD", "So", 0), ("LEFTWARDS WHITE ARROW WITHIN TRIANGLE ARROWHEAD", "So", 0), ("UPWARDS WHITE ARROW WITHIN TRIANGLE ARROWHEAD", "So", 0), ("RIGHTWARDS WHITE ARROW WITHIN TRIANGLE ARROWHEAD", "So", 0), ("DOWNWARDS WHITE ARROW WITHIN TRIANGLE ARROWHEAD", "So", 0), ("LEFTWARDS ARROW WITH NOTCHED TAIL", "So", 0), ("UPWARDS ARROW WITH NOTCHED TAIL", "So", 0), ("RIGHTWARDS ARROW WITH NOTCHED TAIL", "So", 0), ("DOWNWARDS ARROW WITH NOTCHED TAIL", "So", 0), ("HEAVY ARROW 
SHAFT WIDTH ONE", "So", 0), ("HEAVY ARROW SHAFT WIDTH TWO THIRDS", "So", 0), ("HEAVY ARROW SHAFT WIDTH ONE HALF", "So", 0), ("HEAVY ARROW SHAFT WIDTH ONE THIRD", "So", 0), ("LEFTWARDS BOTTOM-SHADED WHITE ARROW", "So", 0), ("RIGHTWARDS BOTTOM SHADED WHITE ARROW", "So", 0), ("LEFTWARDS TOP SHADED WHITE ARROW", "So", 0), ("RIGHTWARDS TOP SHADED WHITE ARROW", "So", 0), ("LEFTWARDS LEFT-SHADED WHITE ARROW", "So", 0), ("RIGHTWARDS RIGHT-SHADED WHITE ARROW", "So", 0), ("LEFTWARDS RIGHT-SHADED WHITE ARROW", "So", 0), ("RIGHTWARDS LEFT-SHADED WHITE ARROW", "So", 0), ("LEFTWARDS BACK-TILTED SHADOWED WHITE ARROW", "So", 0), ("RIGHTWARDS BACK-TILTED SHADOWED WHITE ARROW", "So", 0), ("LEFTWARDS FRONT-TILTED SHADOWED WHITE ARROW", "So", 0), ("RIGHTWARDS FRONT-TILTED SHADOWED WHITE ARROW", "So", 0), ("WHITE ARROW SHAFT WIDTH ONE", "So", 0), ("WHITE ARROW SHAFT WIDTH TWO THIRDS", "So", 0), (), (), ("ARROW POINTING UPWARDS THEN NORTH WEST", "So", 0), ("ARROW POINTING RIGHTWARDS THEN CURVING SOUTH WEST", "So", 0), ("RIGHTWARDS ARROW WITH LOWER HOOK", "So", 0), ("DOWNWARDS BLACK ARROW TO BAR", "So", 0), ("NEGATIVE SQUARED LEFTWARDS ARROW", "So", 0), ("NEGATIVE SQUARED UPWARDS ARROW", "So", 0), ("NEGATIVE SQUARED RIGHTWARDS ARROW", "So", 0), ("NEGATIVE SQUARED DOWNWARDS ARROW", "So", 0), ("NORTH WEST ARROW FROM BAR", "So", 0), ("NORTH EAST ARROW FROM BAR", "So", 0), ("SOUTH EAST ARROW FROM BAR", "So", 0), ("SOUTH WEST ARROW FROM BAR", "So", 0), (), (), (), (), ("LEFTWARDS ARROW FROM DOWNWARDS ARROW", "So", 0), ("RIGHTWARDS ARROW FROM DOWNWARDS ARROW", "So", 0), )
https://github.com/typst/packages
https://raw.githubusercontent.com/typst/packages/main/packages/preview/unichar/0.1.0/ucd/block-3000.typ
typst
Apache License 2.0
#let data = ( ("IDEOGRAPHIC SPACE", "Zs", 0), ("IDEOGRAPHIC COMMA", "Po", 0), ("IDEOGRAPHIC FULL STOP", "Po", 0), ("DITTO MARK", "Po", 0), ("JAPANESE INDUSTRIAL STANDARD SYMBOL", "So", 0), ("IDEOGRAPHIC ITERATION MARK", "Lm", 0), ("IDEOGRAPHIC CLOSING MARK", "Lo", 0), ("IDEOGRAPHIC NUMBER ZERO", "Nl", 0), ("LEFT ANGLE BRACKET", "Ps", 0), ("RIGHT ANGLE BRACKET", "Pe", 0), ("LEFT DOUBLE ANGLE BRACKET", "Ps", 0), ("RIGHT DOUBLE ANGLE BRACKET", "Pe", 0), ("LEFT CORNER BRACKET", "Ps", 0), ("RIGHT CORNER BRACKET", "Pe", 0), ("LEFT WHITE CORNER BRACKET", "Ps", 0), ("RIGHT WHITE CORNER BRACKET", "Pe", 0), ("LEFT BLACK LENTICULAR BRACKET", "Ps", 0), ("RIGHT BLACK LENTICULAR BRACKET", "Pe", 0), ("POSTAL MARK", "So", 0), ("GETA MARK", "So", 0), ("LEFT TORTOISE SHELL BRACKET", "Ps", 0), ("RIGHT TORTOISE SHELL BRACKET", "Pe", 0), ("LEFT WHITE LENTICULAR BRACKET", "Ps", 0), ("RIGHT WHITE LENTICULAR BRACKET", "Pe", 0), ("LEFT WHITE TORTOISE SHELL BRACKET", "Ps", 0), ("RIGHT WHITE TORTOISE SHELL BRACKET", "Pe", 0), ("LEFT WHITE SQUARE BRACKET", "Ps", 0), ("RIGHT WHITE SQUARE BRACKET", "Pe", 0), ("WAVE DASH", "Pd", 0), ("REVERSED DOUBLE PRIME QUOTATION MARK", "Ps", 0), ("DOUBLE PRIME QUOTATION MARK", "Pe", 0), ("LOW DOUBLE PRIME QUOTATION MARK", "Pe", 0), ("POSTAL MARK FACE", "So", 0), ("HANGZHOU NUMERAL ONE", "Nl", 0), ("HANGZHOU NUMERAL TWO", "Nl", 0), ("HANGZHOU NUMERAL THREE", "Nl", 0), ("HANGZHOU NUMERAL FOUR", "Nl", 0), ("HANGZHOU NUMERAL FIVE", "Nl", 0), ("HANGZHOU NUMERAL SIX", "Nl", 0), ("HANGZHOU NUMERAL SEVEN", "Nl", 0), ("HANGZHOU NUMERAL EIGHT", "Nl", 0), ("HANGZHOU NUMERAL NINE", "Nl", 0), ("IDEOGRAPHIC LEVEL TONE MARK", "Mn", 218), ("IDEOGRAPHIC RISING TONE MARK", "Mn", 228), ("IDEOGRAPHIC DEPARTING TONE MARK", "Mn", 232), ("IDEOGRAPHIC ENTERING TONE MARK", "Mn", 222), ("HANGUL SINGLE DOT TONE MARK", "Mc", 224), ("HANGUL DOUBLE DOT TONE MARK", "Mc", 224), ("WAVY DASH", "Pd", 0), ("VERTICAL KANA REPEAT MARK", "Lm", 0), ("VERTICAL KANA REPEAT WITH VOICED SOUND MARK", "Lm", 0), ("VERTICAL KANA REPEAT MARK UPPER HALF", "Lm", 0), ("VERTICAL KANA REPEAT WITH VOICED SOUND MARK UPPER HALF", "Lm", 0), ("VERTICAL KANA REPEAT MARK LOWER HALF", "Lm", 0), ("CIRCLED POSTAL MARK", "So", 0), ("IDEOGRAPHIC TELEGRAPH LINE FEED SEPARATOR SYMBOL", "So", 0), ("HANGZHOU NUMERAL TEN", "Nl", 0), ("HANGZHOU NUMERAL TWENTY", "Nl", 0), ("HANGZHOU NUMERAL THIRTY", "Nl", 0), ("VERTICAL IDEOGRAPHIC ITERATION MARK", "Lm", 0), ("MASU MARK", "Lo", 0), ("PART ALTERNATION MARK", "Po", 0), ("IDEOGRAPHIC VARIATION INDICATOR", "So", 0), ("IDEOGRAPHIC HALF FILL SPACE", "So", 0), )
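The same indexing convention applies here, with the block base at U+3000 (so, for example, data.at(0x3001 - 0x3000) describes IDEOGRAPHIC COMMA), as illustrated in the sketch after the previous block table.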
https://github.com/jgm/typst-hs
https://raw.githubusercontent.com/jgm/typst-hs/main/test/typ/compiler/string-15.typ
typst
Other
// Test the `replace` method with `Func` replacements. #test("abc".replace(regex("[a-z]"), m => { str(m.start) + m.text + str(m.end) }), "0a11b22c3") #test("abcd, efgh".replace(regex("\w+"), m => { upper(m.text) }), "ABCD, EFGH") #test("hello : world".replace(regex("^(.+)\s*(:)\s*(.+)$"), m => { upper(m.captures.at(0)) + m.captures.at(1) + " " + upper(m.captures.at(2)) }), "HELLO : WORLD") #test("hello world, lorem ipsum".replace(regex("(\w+) (\w+)"), m => { m.captures.at(1) + " " + m.captures.at(0) }), "world hello, ipsum lorem") #test("hello world, lorem ipsum".replace(regex("(\w+) (\w+)"), count: 1, m => { m.captures.at(1) + " " + m.captures.at(0) }), "world hello, lorem ipsum") #test("123 456".replace(regex("[a-z]+"), "a"), "123 456") #test("abc".replace("", m => "-"), "-a-b-c-") #test("abc".replace("", m => "-", count: 1), "-abc") #test("123".replace("abc", m => ""), "123") #test("123".replace("abc", m => "", count: 2), "123") #test("a123b123c".replace("123", m => { str(m.start) + "-" + str(m.end) }), "a1-4b5-8c") #test("halla warld".replace("a", m => { if m.start == 1 { "e" } else if m.start == 4 or m.start == 7 { "o" } }), "hello world") #test("aaa".replace("a", m => str(m.captures.len())), "000")
https://github.com/alexonea/cv
https://raw.githubusercontent.com/alexonea/cv/master/CV_AlexandruOnea.typ
typst
#import "lib/cv.typ" #show: doc => cv.setup( name: [<NAME>], headline: [Όσο υπάρχουν δάσκαλοι, θα βγαίνουν μαθητάδες], address: [Σαράντα Εκκλησίες, Θεσσαλονίκη, Ελλάδα], phone: "(+30) 698 905 9019", email: "<EMAIL>", debug: false, doc ) = Work experience #cv.jobrole( company: [Harman International Romania SRL], role: [Software Architect], period: [2018 - present], location: [Bucharest], )[ Leads the architecture and strategy activities aimed to fulfill the HARMAN software needs in terms of production hardware testing, design verification, product certification and qualification, in collaboration with multiple internal stakeholders. Main area of interest is the development of a unified division-wide software solution for validating the hardware of state-of-the-art multi-board multi-processor infotainment and telematics systems. where no control over the customer software is allowed or desired. Previous work includes core architecture activities in the context of product-level re-designs due to semiconductor procurement issues, as well as proof-of-concept work for the evaluation and adoption of new ideas within the department. Additionally, involved in training, mentoring and recruting activities. Key technologies: Embedded Linux with Yocto, Kernel integration and configuration, networking and inter-node communication, (Android) secure boot, virtualization, Enterprise Architect. ] #cv.jobrole( company: [Continental Automotive Romania SRL], role: [Software Project Manager / Technical Lead], period: [2017 - 2018], location: [Timisoara], )[ Main interest in the area of RFQs and conceptual work for incoming projects with a focus on cost reduction, system-level optimizations, and planning for reuse for AUTOSAR-based display and camera products. Involved in cross-division mentoring and bring-up activities in the field of system architecture and AUTOSAR configuration. ] #cv.jobrole( company: [Luxoft Professional Romania SRL], role: [Software Engineer / Technical Lead], period: [2015 - 2017], location: [Bucharest], )[ Leads the design and development of a software solution for the production testing and hardware qualification of infotainment systems. Areas of interest are: internal and external interface testing, stress testing of various system components, data path verification, environmental testing, product integration. Takes over and improves upon the design of a simple Python-based remote control and automation protocol for interogating and triggering features of a device-under-test. Develops an integration framework based on Yocto for above-mentioned software solution. 
] = Education #cv.degree( title: [MEng Computer Engineering], university: [Polytechnic University of Timisoara], location: [Romania], period: [2017 - 2019], faculty: [Faculty of Computers and Automated Control], )[ Thesis: SimuLTE Based Model for Multi-User Preemptive Scheduling of URLLC Traffic in 5G Networks ] #cv.degree( title: [BSc (Hons) Computer Science], university: [University of Manchester], location: [United Kingdom], period: [2013 - 2015], faculty: [School of Computer Science], )[ Final Project: An Over-the-Air Music Identification System ] #cv.degree( university: [Polytechnic University of Bucharest], location: [Romania], period: [2011 - 2013], faculty: [Faculty of Computers and Automated Control], )[] #pagebreak(weak: true) = Projects #cv.project(name: [Junior Development Labs])[ Co-founded in 2014, Junior Development Labs is an educational program in Brăila, aiming to inspire active involvement in the community and reward talent. The main event is a Summer School where high school students meet university students and get a chance to build a better understanding of their future study fields of choice, by working together on hands-on projects and using their already existing knowledge at a practical level. Other smaller events with social and cultural emphasis are organized, of which the most remarkable are the "Redescoperă Brăila" scavenger hunt and the "JDTalks" conferences. The program ended in 2017. ] #cv.project(name: [TEDxTraianSq])[ Taking place in 2017, TEDxTraianSq is the local TEDx event of Brăila, organized and funded by a local team of volunteers. The theme of the event was "The Time is Now" and called for everyone's contribution to their communities by making use of their skills at an early age. The event was an astonishing success given the available resources, although a second edition was never organized. ] #cv.project(name: [Byzantine Music])[ This is an ongoing effort to contribute to the Byzantine music sphere from a technology standpoint. The primary contribution is in the form of a database / collection of musical document references and their relations to one another, as well as to their authors, underlying texts and musical features. Another important backlogged sub-project is the design of a digital encoding for the Byzantine musical notation which allows automated (possibly AI-assisted) analysis and correlation of musical features. ] = Certifications #cv.certification( title: [iSAQB® Software Architecture - Foundation Level], issuer: [iSQI GmbH], date: [2020], credential: "15016097" )[] = Interests #cv.interests( items: ( ( topic: [Technology], description: [System design with 32-bit micro-controllers, layout design, embedded security, docs-as-code, architecture-as-code, open-source contributions], ), ( topic: [Education], description: [Mentoring, computer engineering lectures for audiences with no background in computers], ), ( topic: [Physics], description: [Spintronics and micro-magnetism], ), ( topic: [Music], description: [Traditional middle-eastern music (learning kanonaki), maqamat, preserving the traditions of Byzantine music, digital music notation encodings], ), ) )[A modest list of topics which can easily become the subject of endless hours of conversation.]
https://github.com/jgm/typst-hs
https://raw.githubusercontent.com/jgm/typst-hs/main/test/typ/compute/calc-32.typ
typst
Other
// Test the `lcm` function. #test(calc.lcm(112, 77), 1232) #test(calc.lcm(12, 96), 96) #test(calc.lcm(13, 9), 117) #test(calc.lcm(13, -9), 117) #test(calc.lcm(272557, 272557), 272557) #test(calc.lcm(0, 0), 0) #test(calc.lcm(8, 0), 0)
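The expected values above follow the identity lcm(a, b) = |a·b| / gcd(a, b), with lcm(0, 0) defined as 0. A cross-check in the same style, assuming the suite's #test helper:

#test(calc.lcm(112, 77), calc.quo(calc.abs(112 * 77), calc.gcd(112, 77)))
#test(calc.lcm(13, -9), calc.quo(calc.abs(13 * -9), calc.gcd(13, -9)))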
https://github.com/simon-isler/zhaw-summaries
https://raw.githubusercontent.com/simon-isler/zhaw-summaries/main/Template/template.typ
typst
#let project(title: "", authors: (), columns: 2, body) = { set document(author: authors, title: title) // page layout set page( margin: (left: 12mm, right: 12mm, top: 14mm, bottom: 14mm), numbering: "1", number-align: center, header: [ #grid( columns: (33%, 33%, 33%), title, align(center, datetime.today().display()), // join multiple authors into a single right-aligned cell align(right, authors.join(", ")) ) ], columns: columns, ) // font set text(font: "New Computer Modern", lang: "en", size: 10pt) show math.equation: set text(weight: 400) set heading(numbering: "1.1") set par(leading: 0.58em) set text(hyphenate: true) // custom headings show heading.where( level: 1 ): it => block(width: 100%, spacing: 0.2em)[ #block(fill: luma(200), inset: 0.35em, it, width: 100%) ] show heading.where( level: 2 ): it => block(width: 100%, spacing: 0.2em)[ #block(fill: luma(230), inset: 0.25em, it, width: 100%) ] show heading.where( level: 3 ): it => block(width: 100%, spacing: 0.2em)[ #block(fill: luma(250), inset: 0.1em, it, width: 100%) ] // code snippets show raw: set text(font: "JetBrainsMono NF") show raw.where(block: true): it => { set par(justify: false); set text(size: 8pt); block(radius: 1em, fill: luma(250), width: 100%, inset: 1em, it) } show par: set block(above: 0.75em, below: 0.75em) body }
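A minimal usage sketch for this template (title and author are placeholders):

#import "template.typ": project
#show: project.with(
  title: "Example Summary",
  authors: ("A. Student",),
  columns: 2,
)
= First topic
Body text flows into the two-column page layout configured above.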
https://github.com/QuadnucYard/crossregex-typ
https://raw.githubusercontent.com/QuadnucYard/crossregex-typ/main/src/process.typ
typst
MIT License
#let process-args( rows: none, row-len: none, total: none, constraints: none, constraint-size: none, answer: none, alphabet: none, rotators: (), progress-creator: auto, ) = { // transform constraints if constraints.len() != constraint-size { panic("Wrong constraint size. Expected " + str(constraint-size) + ", received " + str(constraints.len())) } constraints = constraints.map(t => if type(t) == content { t.text } else { t }) let max-len = calc.max(..constraints.map(t => t.len())) constraints = constraints.chunks(rows) // get the answer strings, and pad them let a = if answer == none { () } else if type(answer) == array { answer } else { answer.text.split("\n") } if a.len() < rows { a += ("",) * (rows - a.len()) } let filled = 0 for i in range(rows) { let len = row-len(i) if a.at(i).len() < len { a.at(i) += " " * (len - a.at(i).len()) } a.at(i) = a.at(i).slice(0, len) // count letters for c in a.at(i) { if c.match(alphabet) != none { filled += 1 } } } // build all views let aa = ( a, ..for rotator in rotators { let b = for i in range(rows) { ( for j in range(row-len(i)) { let (x, y) = rotator(i, j) (a.at(x).at(y),) }.join(), ) } (b,) }, ) if progress-creator == auto { progress-creator = (filled, total) => text(orange)[#filled/#total] } let progress = if progress-creator != none and answer != none { progress-creator(filled, total) } (constraints: constraints, max-len: max-len, answer: answer, filled: filled, a: a, aa: aa, progress: progress) }
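A minimal invocation sketch: a hypothetical 1-row, 3-cell grid with no rotations, purely to illustrate the expected argument shapes (not taken from the package's documentation):

#import "process.typ": process-args
#let out = process-args(
  rows: 1,
  row-len: i => 3,                      // every row has three cells
  total: 3,
  constraints: ("[A-C]+", ".*", "A.."), // one pattern per view and row
  constraint-size: 3,
  answer: ("ABC",),
  alphabet: regex("[A-Z]"),
)
// out.filled == 3 and out.progress renders as an orange "3/3".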
https://github.com/olligobber/friggeri-cv
https://raw.githubusercontent.com/olligobber/friggeri-cv/master/link.typ
typst
#let simpleLink(address) = link(address, address) #let email(address, display: none) = link( "mailto:"+address, if display == none {address} else {display}, )
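A usage sketch for these helpers (addresses are placeholders):

#import "link.typ": simpleLink, email
#simpleLink("https://example.org")          // text and target are the same
#email("someone@example.org")               // mailto: link showing the address
#email("someone@example.org", display: [write to me]) // custom link text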