repo
stringlengths
26
115
file
stringlengths
54
212
language
stringclasses
2 values
license
stringclasses
16 values
content
stringlengths
19
1.07M
https://github.com/TypstApp-team/typst
https://raw.githubusercontent.com/TypstApp-team/typst/master/tests/typ/text/chinese.typ
typst
Apache License 2.0
// Test chinese text from Wikipedia. --- #set text(font: "Noto Serif CJK SC") 是美国广播公司电视剧《迷失》第3季的第22和23集,也是全剧的第71集和72集 由执行制作人戴蒙·林道夫和卡尔顿·库斯编剧,导演则是另一名执行制作人杰克·本德 节目于2007年5月23日在美国和加拿大首播,共计吸引了1400万美国观众收看 本集加上插播广告一共也持续有两个小时
https://github.com/Myriad-Dreamin/typst.ts
https://raw.githubusercontent.com/Myriad-Dreamin/typst.ts/main/fuzzers/corpora/layout/par-bidi_05.typ
typst
Apache License 2.0
#import "/contrib/templates/std-tests/preset.typ": * #show: test-page // Test spacing. L #h(1cm) ריווחR \ Lריווח #h(1cm) R
https://github.com/crdevio/Livres
https://raw.githubusercontent.com/crdevio/Livres/main/Avant%20la%20MP2I/template.typ
typst
#let cpt_def = counter("cpt_def") #let cpt_prop = counter("cpt_prop") #let cpt_thm = counter("cpt_thm") #let cpt_part = counter("cpt_part") #let cpt_exos = counter("cpt_exos") #let tot_exos = counter("tot_exos") #let cpt_qst = counter("cpt_qust") #let wedge = sym.and #let equiv = sym.equiv // Size of the left "margin" (note area) #let margin-size = 0% // Spacer so that main content and notes don't rub up against each other #let margin-space = 0.1in #let imp(cont) ={ text(fill: blue.darken(50%),cont,weight: "semibold",size: 1em) } #let ita(cont) = { text(cont,size:1em,style:"italic") } #let def(desc,title: "titre") ={ set align(left) cpt_def.step() box( rect( width:100%, fill:green.lighten(99%), radius:( left:5pt, right:5pt ), stroke: ( left:green, right: green, top: black, bottom: black ) )[ #align(left)[ #box( path( fill: green.darken(60%), closed: true, ((-1pt, -2pt)), (105pt,-2pt), ((108pt,-2pt)), ((105pt,10pt)), ((-1pt,10pt),(10pt,0pt)), ) + place(top+left)[#text(white,underline(smallcaps("Définition " + cpt_part.display("1") + "-" + cpt_def.display())),size:1.1em,weight: "medium")] ) #box( path( fill: green.darken(40%), closed: true, ((0pt,-2pt)), ((200pt,-2pt)), ((198pt,10pt)), ((-5pt,10pt)), ((-2pt,-2pt)), ) + place(top+left)[#text(white,title,size:1em,weight: "semibold")] ) #box( path( fill: green.darken(80%), closed: true, ((-2pt, -2pt)), (55pt,-2pt), ((58pt,-2pt),(-10pt,0pt)), ((55pt,10pt)), ((-4pt,10pt)), ) ) ] #align(left)[ #text(black,desc,size:1em) ] ] ) } #let exo_nd( description, title: none, etoile: 1, source: "Exercice" ) = { cpt_exos.step() tot_exos.step() set align(center) box( rect( width:100%, fill:blue.lighten(99%), radius:( left:5pt, right:5pt ), stroke: ( left:blue, right: blue, top: black, bottom: black ) )[ #align(left)[ #box( path( fill: blue.darken(60%), closed: true, ((-1pt, -2pt)), (105pt,-2pt), ((108pt,-2pt)), ((105pt,10pt)), ((-1pt,10pt),(10pt,0pt)), ) + place(top+left)[#text(white,underline(smallcaps(source + " (" + 
cpt_part.display("1") + "-" + cpt_exos.display()) + ")"),size:1.1em,weight: "medium")] ) #box( path( fill: blue.darken(40%), closed: true, ((0pt,-2pt)), ((200pt,-2pt)), ((198pt,10pt)), ((-5pt,10pt)), ((-2pt,-2pt)), ) + place(top+left)[#text(white,title,size:1em,weight: "semibold")] ) #box( path( fill: blue.darken(80%), closed: true, ((-2pt, -2pt)), (55pt,-2pt), ((58pt,-2pt),(-10pt,0pt)), ((55pt,10pt)), ((-4pt,10pt)), ) + place(top+left)[#text(white,smallcaps(str(etoile) + $star$),size:1.1em,weight: "bold")] ) ] #align(left)[ #text(black,description,size:1em) ] ] ) } #let prop( description, title: none ) = { set align(center) cpt_prop.step() box( rect( width:100%, fill:blue.lighten(99%), radius:( left:5pt, right:5pt ), stroke: ( left:blue.darken(20%), right: blue.darken(20%), top: black, bottom: black ) )[ #align(left)[ #box( polygon( stroke: blue.lighten(99%), fill: blue.darken(20%), (-5%, 0.55em), (0%,-0.25em), (45%,-0.25em), (90%,-0.25em), (90%,1.15em), (45%,1.15em), (0%,1.15em), (-5%,0.55em) ) + place(top+left)[#text(white,underline(smallcaps("Proposition " + cpt_part.display() + "-" + cpt_prop.display())),size:1.1em,weight: "medium") #text(white,"(" + title + ")",size:1em,weight: "semibold") ] ) ] #align(left)[ #text(black,description,size:1em) ] ] ) } #let cb(cont,title: "titre") ={ set align(left) box( rect( width:100%, fill:blue.lighten(99%), radius:( left:5pt, right:5pt ), stroke: ( left:blue, right: blue, top: black, bottom: black ) )[ #align(left)[ #box( path( fill: blue.darken(60%), closed: true, ((-1pt, -2pt)), (105pt,-2pt), ((108pt,-2pt)), ((105pt,10pt)), ((-1pt,10pt),(10pt,0pt)), ) + place(top+left)[#text(white,underline(smallcaps("Code")),size:1.1em,weight: "medium")] ) #box( path( fill: blue.darken(40%), closed: true, ((0pt,-2pt)), ((200pt,-2pt)), ((198pt,10pt)), ((-5pt,10pt)), ((-2pt,-2pt)), ) + place(top+left)[#text(white,title,size:1em,weight: "semibold")] ) #box( path( fill: blue.darken(80%), closed: true, ((-2pt, -2pt)), (55pt,-2pt), 
((58pt,-2pt),(-10pt,0pt)), ((55pt,10pt)), ((-4pt,10pt)), ) ) ] #align(left)[ #text(black,cont,size:1em) ] ] ) } #let question( description, title: none, type: "code" ) = { cpt_qst.step() set align(center) box( rect( width:100%, fill:blue.lighten(99%), radius:( left:5pt, right:5pt ), stroke: ( left:blue, right: blue, top: black, bottom: black ) )[ #align(left)[ #box( path( fill: blue.darken(60%), closed: true, ((-1pt, -2pt)), (105pt,-2pt), ((108pt,-2pt)), ((105pt,10pt)), ((-1pt,10pt),(10pt,0pt)), ) + place(top+left)[#text(white,underline(smallcaps( "Question " + cpt_qst.display("1")) ),size:1.1em,weight: "medium")] ) #box( path( fill: blue.darken(40%), closed: true, ((0pt,-2pt)), ((200pt,-2pt)), ((198pt,10pt)), ((-5pt,10pt)), ((-2pt,-2pt)), ) ) #box( path( fill: blue.darken(80%), closed: true, ((-2pt, -2pt)), (55pt,-2pt), ((58pt,-2pt),(-10pt,0pt)), ((55pt,10pt)), ((-4pt,10pt)), ) + place(top+left)[#text(white,type,size:1em,weight: "semibold")] ) ] #align(left)[ #text(black,description,size:1em) ] ] ) } #let th( description, title: none ) = { set align(center) cpt_thm.step() box( rect( width:100%, fill:blue.lighten(70%), radius:( left:5pt, right:5pt ), stroke: ( left:blue.darken(50%), right: blue.darken(50%), top: black, bottom: black ) )[ #align(left)[ #box( polygon( fill: blue.darken(50%), (-5%, 0.55em), (0%,-0.25em), (45%,-0.25em), (90%,-0.25em), (90%,1.15em), (45%,1.15em), (0%,1.15em), (-5%,0.55em) ) + place(top+left)[#text(white,underline(smallcaps("Théorème "+ cpt_part.display() + "-" + cpt_thm.display())),size:1.1em,weight: "medium") #text(white,"(" + title + ")",size:1em,weight: "semibold") ] ) ] #align(center)[ #text(black,description,size:1em) ] ] ) } #let pl( description, title: none ) = { set align(center) cpt_thm.step() box( rect( width:100%, fill:red.lighten(70%), radius:( left:5pt, right:5pt ), stroke: ( left:red.darken(50%), right: red.darken(50%), top: black, bottom: black ) )[ #align(left)[ #box( polygon( fill: red.darken(50%), (-5%, 0.55em), 
(0%,-0.25em), (45%,-0.25em), (90%,-0.25em), (90%,1.15em), (45%,1.15em), (0%,1.15em), (-5%,0.55em) ) + place(top+left)[#text(white,underline(smallcaps("Aller plus loin ")),size:1.1em,weight: "medium") #text(white,"(" + title + ")",size:1em,weight: "semibold") ] ) ] #align(center)[ #text(black,description,size:1em) ] ] ) } #let rem( content )={ text(black,underline(smallcaps("Remarque"))) + ": " + content } #let reset_cpt()={ cpt_def.update(0) cpt_prop.update(0) cpt_thm.update(0) cpt_exos.update(0) } #let dem( content ) ={ set align(left) box( rect( width:100%, fill:blue.lighten(99%), radius:( left:5pt, right:5pt ), stroke: ( left:blue.darken(20%), ) )[ #align(left)[ #box( place(top+left)[#text(black,underline(smallcaps("Démonstration")),size:1.1em,weight: "medium") ] ) ] #align(left)[ \ #content ] ] ) } #let corr(desc,num:"nd") = { [#imp(num): #desc] } #let exo( description, title: none, ) = { cpt_exos.step() tot_exos.step() set align(center) box( rect( width:100%, fill:green.lighten(99%), radius:( left:5pt, right:5pt ), stroke: ( left:green, right: green, top: black, bottom: black ) )[ #align(left)[ #box( polygon( stroke: green.lighten(99%), fill: green.darken(20%), (-5%, 0.55em), (0%,-0.25em), (45%,-0.25em), (90%,-0.25em), (90%,1.15em), (45%,1.15em), (0%,1.15em), (-5%,0.55em) ) + place(top+left)[#text(white,underline(smallcaps("Exercice " + cpt_part.display("1") + "-" + cpt_exos.display())),size:1.1em,weight: "medium") #text(white,"(" + title + ")",size:1em,weight: "semibold") ] ) ] #align(left)[ #text(black,description,size:1em) ] ] ) } #let margin-note(dy: -1em, content) = { place( right, dx: margin-size + margin-space, dy: dy, block(width: margin-size, rect( width:100%, fill:blue.lighten(99%), radius:( left:1pt, right:1pt ), stroke: ( left:blue.darken(20%), top:blue.darken(40%) ) )[ #set text(size: 0.75em) #set align(left) #content ] )) } #let nb_exo() = { text(tot_exos.display("1")) } #let document( title: none, doc ) ={ show heading.where( level: 1 ): it => 
block(width: 100%)[ #reset_cpt() #cpt_part.step() #set align(center) #set text(1.1em, weight: "regular") #imp(counter(heading).display()) #underline(smallcaps(it.body)) ] show heading.where( level: 2 ): it => block(width: 100%)[ #set align(left) #set text(1.1em, weight: "regular") #imp(counter(heading).display()) #smallcaps(it.body) ] show heading.where( level: 3 ): it => block(width: 100%)[ #set align(left) #set text(1.1em, weight: "regular") #imp(counter(heading).display()) #smallcaps(it.body) ] show heading.where( level: 4 ): it => block(width: 100%)[ #set align(left) #set text(1.1em, weight: "regular") #imp(counter(heading).display()) #smallcaps(it.body) ] set page( paper: "us-letter", header : align(center)[Avant la MP2I], numbering : "1/1", ) set heading(numbering: "I.1.a -") set text(font: "DejaVu",size: 1em) set align(center) text(2em,smallcaps(title)) set align(left) set par(justify: true) doc }
https://github.com/kdog3682/mathematical
https://raw.githubusercontent.com/kdog3682/mathematical/main/0.1.0/src/demonstrations/beginner.typ
typst
#import "@local/typkit:0.1.0": * #let ratio(size: 12, ratios: (1, 2)) = { assert-is-ratio-divisible(size, ratios) let d = ratios.sum() let chunks = ratios.map((n) => int(size * n / d)) panic(chunks) } // #svg-test(ratio) //
https://github.com/EGmux/ControlTheory-2023.2
https://raw.githubusercontent.com/EGmux/ControlTheory-2023.2/main/prova1.typ
typst
#set heading(numbering: "1.") === 1. Pede-se para calcular *$V_o(s)/V(s)$* #figure(image("assets/2024-03-19-16-51-41.png", width: 80%)) <fig-2024-03-19-16-51-41> Aplicando análise nodal, temos #math.equation(block: true, $1. & (V-V_i)/R = V_i/(1/(s C)) + V_o/(1/(s C)) \ 1.1 & (V-V_i)/R = (s C)V_i + (s C)V_o $) #math.equation(block: true, $2. & V_o/(1/(s C)) = (V_i - V_o)/(s L) \ 2.1 & (s C)V_o = (V_i - V_o)/(s L) \ $) e isolando *$V_i$* temos #math.equation(block: true, $3. V_i = V_o (1 + s^2 C L) $) e substituindo 3 em 1.1 #math.equation(block: true, $ 4. & (V - V_i) = (s C R)(V_i + V_o) \ 4.2 & V = (s C R)(V_i + V_o) + V_i \ 4.3 & V = V_i (1 + s C R)+ V_o (s C R) \ 4.4 & V = (V_o (1 + s^2 C L)) (1 + s C R)+ V_o (s C R) \ 4.5 & V = V_o (1 + s C R + s^2 C L + s^3 C^2 R L + s C R)\ 4.6 & V = V_o (1 + 2 s C R + s^2 C L + s^3 C^2 R L)\ 4.7 & V_o/V_s =1/ (1 + 2 s C R + s^2 C L + s^3 C^2 R L)\ $) === 2. Equação estado espaço #figure(image("assets/2024-03-19-17-56-17.png", width: 50%)) <fig-2024-03-19-17-56-17> Como temos dois componentes de circuitos reativos então será necessário 2 variáveis de estado espaço. #math.equation(block: true, $ X = mat(i_l;v_c) $) e temos as seguintes relações #math.equation(block: true, $1. & (v - v_l) = i_l + (v_l - v_c) \ 2. & (v_l - v_c) = c (d v_c)/(d t) \ $) note que via substituição podemos isolar $v_l$ em 2 e substitutir em 1 de tal forma que 1 fica em função da entrada e $v_c$ que é variável de estado. #math.equation( block: true, $3. & v_l = accent(v_c, .)+v_c\ 3.1. & (v - ( accent(v_c, .) + v_c)) = i_l + (( accent(v_c, .) + v_c) - v_c) \ 3.2. & (v - ( accent(v_c, .) + v_c)) = i_l + accent(v_c, .) \ 3.3. & v - v_c - i_l =2 accent(v_c, .) \ 3.4. & accent(v_c, .) = i_l (-1/2) + v_c (-1/2) + v(1/2) \ $, ) e como *$accent(i_l, .) = v_l $*, temos #math.equation( block: true, $4.1 & accent(i_l, .) = ( i_l (-1/2) + v_c (-1/2) + v(1/2)) + v_c \ 4.2 & accent(i_l, .) 
= i_l (-1/2) + v_c (1/2) + v(1/2) \ $, ) e teremos portanto a seguinte representação matricial. #math.equation( block: true, $ mat(accent(i_l, .);accent(v_c, .)) = mat(-1/2, 1/2;-1/2, -1/2)mat(i_l;v_c) + mat(1/2;1/2)v(t) $, ) e para saída *$v_o$* que é $v_o = v_c$, temos #math.equation(block: true, $ y = v_o = v_c = mat(0, 1)mat(i_l;v_c) $) === 3. Função de transferência para equaçã estado espaço #figure(image("assets/2024-03-19-18-28-23.png", width: 50%)) <fig-2024-03-19-18-28-23> lembrar que é necessário tratar primeiro da função de transferência com numerador 1 isto é em dois blocos onde o primeiro tem a seguinte função de transferência #math.equation(block: true, $ 1. & Y(s)/R(s) = 1/(s^3 + 3s^2 + 3s + 1) \ 1.1. & Y(s)(s^3 + 3s^2 + 3s +1) = R(s) \ 1.2. & (d y^3)/(d t) + 3 (d y^2)/(d t) + 3 (d y^1)/(d t) + y = r \ $) aplicando o seguinte mapeamento para as variáveis de estado #math.equation( block: true, $ & y = x_1,\ & (d y^1)/(d t) = x_2 = accent(x_1, .) \ & (d y^2)/(d t) = x_3 = accent(x_2, .) \ & (d y^3)/(d t) = x_4 = accent(x_3, .) = r - (3(d y^2)/(d t) + 3 (d y^1)/(d t) + y) = r - (3x_3 + 3 x_2 + x_1)\ $, ) assumindo r como entrada, temos portanto a seguinte representação matricial #math.equation( block: true, $ mat(accent(x_1, .);accent(x_2, .);accent(x_3, .)) = mat(0, 1, 0;0, 0, 1;-1, -3, -3)mat(x_1;x_2;x_3) + mat(0;0;1)r $, ) temos agora a seguinte função de transferência, onde $Y(s)$ é a entrada #math.equation(block: true, $2. & C(s)/Y(s) = 2s + 1 \ 2.1. & c = 2 (d y^1)/(d t) + y \ 2.2 & c = 2x_2 + x_1\ $) e portanto a saída é #math.equation(block: true, $ c(t) = mat(1, 2)mat(x_1;x_2) $) === Conversão de estado espaço para função transferencia #figure(image("assets/2024-03-19-18-50-56.png", width: 80%)) <fig-2024-03-19-18-50-56> para chegar na formula desejada lembremos que a equação estado espaço e da forma e aplicando a transformada de Laplace temos #math.equation(block: true, $ & accent(X, .) 
= A X + B U\ & Y = C X + D U =>\ & s X = A X + B U \ & Y = C X + D U \ $) vamos isolar X #math.equation(block: true, $ & X (I s - A) = B U \ & X = (I s - A)^(-1) B U \ & Y = C((I s - A)^(-1) B U) + D U \ & Y/U = C(I s - A)^(-1) B + D\ $) temos então #math.equation( block: true, $1. & I s = mat(s, 0, 0;0, s, 0;0, 0, s) \ 2. & A = mat(0, 1, 0;0, 0, 1;-3, -2, -5) \ 3. & I s - A = mat(s, -1, 0;0, s, -1;3, 2, s+5) \ 4. & C(I s - A)^(-1)B = mat(1, 0, 0)mat( +mat(s, -1;2, s+5) -mat(-1, 0;2, s+5)+mat(-1, 0;s, -1);-mat(0, -1;3, s+5)+mat(s, 0;3, s+5)-mat(s, 0;0, -1);+mat(0, s;3, 2)-mat(s, -1;3, 2)+mat(s, -1;0, s) )mat(0;0;10) \ $, ) note que apenas a linha 1 da matriz não é nula e mais ainda que apenas o último termo de tal linha é não nulo, logo temos $10mat(-1, 0;s, -1)/det(I s - A)^(-1)$ e daí para calcular determinante, temos que #math.equation( block: true, $ det(I s - A)^(-1) = s mat(delim: "|", s, -1;2, s+5) - (-1)mat(delim: "|", 0, -1;3, s+5) = s(s^2+5s+2) - (-1)(3) = (s^3+5s^2+2s +3) $, ) e portanto a função transferência é: #math.equation(block: true, $ Y/U = 10/(s^3+5s^2+2s+3) $) === 4. Considere um sistema de segunda ordem subamortecido com tempo de pico $T_p$ de 1s e tempo de acomodação $T_s$ de 1s. Apresente os polos do sistema que tem esses tempo de pico de acomodação.
https://github.com/N3M0-dev/Notes
https://raw.githubusercontent.com/N3M0-dev/Notes/main/CS/Algorithm/Intro_to_Algor/Ch_4/ch4.typ
typst
#import "@local/note_template:0.0.1": * #import "@preview/algo:0.3.3": algo, i, d, comment, code #import "@local/tbl:0.0.4" #show: tbl.template.with(tab: "|", align: center) #set par(justify: true) #set heading(numbering: "1.1") #set page(numbering: "1", number-align: center) #frontmatter( title: "Chapter 4:\n Advanced Design and Analysis Techniques", date: "2023 Oct 31", authors: ("Nemo",) ) #outline(indent: auto) #pagebreak() = Dynamic Programming Dynamic programming applies when the subprobelms overlap. We typically apply dynamic programming to _optimization problems_. When developing a dynamic programming algorithm, we follow a sequence of four steps: 1. Characterize the structure of an optimal solution. 2. Recursively define the value of an optimal solution 3. Compute the value of an optimal solution, typically in a bottom-up fashion. 4. Construct an optimal solution from computed information. == Rod Cutting The _rod cutting problem_ is the following: Given a rod of length n inches and a table of prices $p_i$ according to the length $i$, determine the maximum revenue $r_n$ obtainable by cutting the rod and selling them. ```tbl L|CCCCCCCCCC. length $i$ |1|2|3|4|5|6|7|8|9|10 _ price $p_i$ |1|5|8|9|10|17|17|20|24|30 ``` == Matrix Chain Multiplication The problem of matrix-chain multiplication is that given a sequence of n matrices to be multiplied, we wish to conpute the product $A_1 A_2 A_3 dots A_n$ at the minimum cost. === Applying Dynamic Programming We follow the following four steps in order to apply dynamic programming: + Characterize the structure of an optimal solution. + Recursively define the value of an optimal solution. + Compute the value of an optimal solution. + Construct an optimal solution from computed information. ==== Step 1: The structure of the optimal solution First, for convenience we adopt the notation of $A_(i dots j)$ for $A_i A_(i+1) dots A_(j)$. 
For a nontrivial problem of MCM, to parenthesize the chain of matrices, we can think of it backwards: Since the operation of multiplication is defined between two matrices, the last step of MCM must be a multiplication of two matrices. Then we can break down the problem into two subproblems of smaller scales, i.e. we can split the chain at a point between $A_k$ and $A_(k+1)$. Then in the same way, we can break down the problem recursively. Since we can break down the problem recursively, it's natural to ask whether the optimal solutions to the subproblems combined together form the solution to the original problem, i.e. we need to check whether the problem contains optimal substructure, and the answer is yes. Proof: Suppose that the chain is optimally parenthesized, then the two subchains divided between $k$ and $k+1$ are also optimally parenthesized. Otherwise, if the chain $A_(i dots k)$ or $A_(k+1 dots j)$ is not optimally parenthesized, then there exists a better way to parenthesize either of the chains. Since the way we parenthesize the matrix chain does not affect the shape of the outcome matrix, the cost of multiplying the results of the two products of the chains stays the same. Then there exists a better way to parenthesize the chain, contradicting the assumption that it is the optimal way to parenthesize the chain. So the problem contains optimal substructure. Proved above that we can construct the optimal solution, then we can solve the problem by solving the subproblems. Then we can move to the next step. ==== Step 2: A recursive solution Now we define the cost of the optimal solution in terms of the optimal solutions to the subproblems. We let $m[i,j]$ be the cost of $A_(i dots j)$; then the cost of the full problem is $m[1,n]$. According to the optimal substructure, we should define the cost $m[i,j]$ as follows: - When $i=j$, there is no multiplication, so $m[i,j]=0$. 
- When $i<j$, $m[i,j]=min_(i lt.eq k lt j)(m[i,k]+m[k+1,j]+p_(i-1) p_k p_j)$, where p is the columns of matrices. ==== Step 3: Computing the Optimal Costs We can foresee that since we have relatively few distinct subproblems compared to all the possibilities, we would encounter a subproblem multiple times if we adopt a recursive method, i.e. subproblems overlap, which, together with the optimal substructure, are the two hallmarks of applying dynamic programming. Instead of using a recursive way, we use a bottom-up tabular way to calculate the cost. #algo( title:"Matrix-Chain-Order", parameters:("p",) )[ let n=p.length\ let cost[i...n, 1...n] sep[1...n, 1...n] be new tables=0\ for i from 1 to n:#i\ for j from i+1 to n:#i\ for k from i to j-1:#i\ cost[i,j]=max(cost[i,j],cost[i,k]+cost[k+1,j]+p[i]p[k]p[j])\ if cost[i,j] update: sep[i,j]=k#d#d#d\ return cost,sep ] == Elements of Dynamic Programming === Optimal Structure === Overlapping Subproblems === Reconstruct the Optimal Solution === Memoization == Longest Common Subsequence === Step 1: Characterizing a Longest Common Substructure #theorem()[ Optimal Substructure of an LCS: Let $X = angle.l x_1, x_2, dots, x_m angle.r$ and $Y= angle.l y_1, y_2, dots, y_n angle.r$ be sequences, and let $Z=angle.l z_1, z_2, dots, z_k angle.r$ be any LCS of $X$ and $Y$. + If $x_m = y_n$, then $z_k=x_m=y_n$ and $Z_(k-1)$ is an LCS of $X_(m-1)$ and $Y_(n-1)$ + If $x_m != y_n$, then $z_k != x_m$ implies that $Z$ is an LCS of $X_(m-1)$ and $Y$ + If $x_m != y_n$, then $z_k != y_n$ implies that $Z$ is an LCS of $Y_(n-1)$ and $X$ ] === Step 2: A Recursive Solution Let us define $c[i,j]$ to be the length of an LCS of the sequences of $X_i$ and $Y_j$. 
So, we have: $ c[i,j]=cases(0 &i=0 "or" j=0, c[i-1\,j-1] &"if" i\,j>0 "and" x_i=y_j,max(c[i\,j-1],c[i-1\,j]) &"if" i\,j>0 "and" x_i!=y_j) $ === Step 3: Computing the length of an LCS #algo( title:"LCS_Length", parameters:("X","Y") )[ m = X.length\ n = Y.length\ let b[1...m,1...n] and c[1...m,1...n] be new tables\ for i from 1 to m:#i\ c[i,0]=0#d\ for j from 1 to n:#i\ c[0,j]=0#d\ for i from 1 to m:#i\ for j from 1 to n:#i\ if X[i]==Y[j]:#i\ c[i,j]=c[i-1,j-1]+1\ b[i,j]=$arrow.tl$#d\ *elseif* c[i-1,j] >= c[i,j-1]:#i\ c[i,j]=c[i-1,j]\ b[i,j]=$arrow.t$#d\ else:#i\ c[i,j]=c[i,j-1]\ b[i,j]=$arrow.l$#d#d#d\ return c and b ] === Step 4: Reconstruct an LCS When b[i,j]=$arrow.tl$, add X[i] to the LCS. == Optimal Binary Search Tree Binary search tree is a rooted binary tree data structure with the key of each internal node being greater than all the keys in the respective node's left subtree and less than the ones in its right subtree. === Step 1: Optimal Substructure Easy to prove. === Step 2: A recursive Solution Let us define $e[i,j]$ be the search cost of an optimal BST containing the nodes of $k_i, ...,k_j$ (contiguous). When constructing an optimal BST containing keys $k_i, ...,k_j$ from optimal subtrees, we need to choose one key as the root, then there exists a combination cost, which is all nodes of subtrees have their depth adding 1. So the combination cost is: $ w(i,j)= sum_(l=i)^j p_l + sum_(l=i-1)^j q_l $ And so the cost is: $ e[i,j]&=p_r+(e[i,r-1]+w(i,r-1))+(e[r+1,j]+w(r+1,j))\ &=e[i,r-1]+e[r+1,j]+w(i,j) $ So, $ e[i,j]=cases(q_(i-1) &"if" j=i-1, min_(i<=r<=j) {e[i\,r-1]+e[r+1\,j]+w(i\,j)} &"if" i<=j) $ === Step 3: Computing the Expected Search Cost of an Optimal BST pseudocode = Greedy Algorithms = Amortized Analysis == Aggregate Analysis The basic idea of aggregate analysis is that each operation costs $T(n)/n$, where $T(n)$ is the worst cost of a sequence of $n$ operations. 
=== Stack Operations Consider a sequence of $n$ operations containing PUSH, POP, MULTIPOP, what is the _amortized cost_? For the worst case, all the operations are MULTIPOP, and the worst case cost of MULTIPOP is $O(n)$. So the worst cost of the sequence is $O(n^2)$ and the amortized cost is $O(n^2)/n=O(n)$. Is it correct? The answer is no. That's a quite rough estimate, there are better upper bounds. When calculating the amortized cost, we cannot ignore the inner relationship between the individual operations. In this case the number of times POP can be called is no more than the number of times PUSH is called. So $T(n)$ is at most $O(n)$, and the amortized cost is $O(n)/n=O(1)$. === Incrementing a Binary Counter Assume the cost of incrementing a binary counter is in proportion to the bits flipped in an operation. So by observation, we know that the lowest bit is flipped $n$ times in a sequence of $n$ operations and the second is flipped $n/2$ times and etc. Then we can obtain the total cost is $sum_(i=0)^(k-1) (n/2^i)<sum_(i=0)^oo (n/2^i)=2n=O(n)$, and so the amortized cost is $O(n)/n=O(1)$ == The Accounting Method The accounting method is like this: + We assign amortized cost for each type of operation in the sequence + We take the difference of the assigned amortized cost and the actual cost as a _credit_ and attach it to the data object. + If we can pay all the actual cost of the operations using the credit of the data object, then the amortized cost holds, and we can obtain the upper bound of the total actual cost. 
== The Potential Method The potential method is somewhat similar to the accounting method, but we do not assign the exceeding part to the individual data objects, instead we attach the _potential_ to the data structure as a whole and accumulate them in the _potential function_: we define $hat(c_i)=c_i+Phi(D_i)-Phi(D_(i-1))$, where $hat(c_i)$ is the assigned amortized cost, the $c_i$ is the actual cost of an operation, $Phi(D_i)$ is the potential function, inside which $D_i$ can be understood as the $i^("th")$ stage of the data structure. What we need to do is to first assign the amortized cost and ensure that after the sequence of operations the potential function is non-negative. == Dynamic Tables
https://github.com/Myriad-Dreamin/typst.ts
https://raw.githubusercontent.com/Myriad-Dreamin/typst.ts/main/fuzzers/corpora/bugs/parameter-pattern_00.typ
typst
Apache License 2.0
#import "/contrib/templates/std-tests/preset.typ": * #show: test-page #test((1, 2, 3).zip((1, 2, 3)).map(((_, x)) => x), (1, 2, 3))
https://github.com/typst/packages
https://raw.githubusercontent.com/typst/packages/main/packages/preview/chordx/0.2.0/lib.typ
typst
Apache License 2.0
#import "./src/chart.typ": new-chart-chords #import "./src/piano.typ": new-piano-chords #import "./src/single.typ": new-single-chords
https://github.com/tilman151/pypst
https://raw.githubusercontent.com/tilman151/pypst/main/docs/examples/table/main.typ
typst
MIT License
= My Section Heading #lorem(100) #include("my-table.typ") #lorem(100)
https://github.com/MrToWy/hsh-thesis
https://raw.githubusercontent.com/MrToWy/hsh-thesis/main/template/abbreviations.typ
typst
MIT License
#let hsh = "<NAME>"
https://github.com/frectonz/the-pg-book
https://raw.githubusercontent.com/frectonz/the-pg-book/main/book/201.%20nft.html.typ
typst
nft.html An NFT That Saves Lives May 2021Noora Health, a nonprofit I've supported for years, just launched a new NFT. It has a dramatic name, Save Thousands of Lives, because that's what the proceeds will do.Noora has been saving lives for 7 years. They run programs in hospitals in South Asia to teach new mothers how to take care of their babies once they get home. They're in 165 hospitals now. And because they know the numbers before and after they start at a new hospital, they can measure the impact they have. It is massive. For every 1000 live births, they save 9 babies.This number comes from a study of 133,733 families at 28 different hospitals that Noora conducted in collaboration with the Better Birth team at Ariadne Labs, a joint center for health systems innovation at Brigham and Women's Hospital and Harvard T.H. Chan School of Public Health.Noora is so effective that even if you measure their costs in the most conservative way, by dividing their entire budget by the number of lives saved, the cost of saving a life is the lowest I've seen. $1,235.For this NFT, they're going to issue a public report tracking how this specific tranche of money is spent, and estimating the number of lives saved as a result.NFTs are a new territory, and this way of using them is especially new, but I'm excited about its potential. And I'm excited to see what happens with this particular auction, because unlike an NFT representing something that has already happened, this NFT gets better as the price gets higher.The reserve price was about $2.5 million, because that's what it takes for the name to be accurate: that's what it costs to save 2000 lives. But the higher the price of this NFT goes, the more lives will be saved. What a sentence to be able to write.
https://github.com/howardlau1999/sysu-thesis-typst
https://raw.githubusercontent.com/howardlau1999/sysu-thesis-typst/master/chapters/abstract-en.typ
typst
MIT License
#let 英文关键词 = ("thesis", "typst", "template") This is English abstract of the thesis. #lorem(100)
https://github.com/0x1B05/nju_os
https://raw.githubusercontent.com/0x1B05/nju_os/main/am_notes/main.typ
typst
#import "template.typ": * #show: template.with( title: [SimpleNote], short_title: "SimepleNote", description: [ 此模板修改自 #link("https://github.com/jskherman/jsk-lecnotes")[jsk-lecnotes] \ Winter 2023 ], date: datetime(year: 2023, month: 12, day: 19), authors: ( ( name: "0x1B05", github: "https://github.com/0x1B05", homepage: "https://github.com/0x1B05", // 个人主页 affiliations: "1", ), ), affiliations: ( (id: "1", name: "NUFE"), ), bibliography_file: "refs.bib", paper_size: "a4", text_font: "Linux Libertine", sc_font: "Noto Sans CJK SC", code_font: "DejaVu Sans Mono", // 主题色 accent: orange, // 封面背景图片 cover_image: "./figures/Pine_Tree.jpg", // 图片路径或 none // 正文背景颜色 // background_color: "#FAF9DE" // HEX 颜色或 none ) #include "content/chapter1.typ" #include "content/chapter2.typ"
https://github.com/rikhuijzer/phd-thesis
https://raw.githubusercontent.com/rikhuijzer/phd-thesis/main/chapters/acknowledgements.typ
typst
The Unlicense
#import "../functions.typ": avoid_indent #pagebreak() = Acknowledgements #avoid_indent() This thesis is based on the ideas from friends, family, mentors, and teachers in major ways. I would like to thank the following people by listing some ideas that they contributed. When there are quotation marks, the text is a literal quote. Otherwise, the text is my best effort to summarize an idea that people told me or taught me through their actions. \ *Age de Wit*: Hmm yes that sounds nice in theory, but in practice it won't work because ... \ *<NAME>*: Don't focus too much on reasons why the approach might not work, but instead have some optimism and focus on why the approach might work. \ *<NAME>*: Not everything is binary. \ *<NAME>*, *<NAME>*, and *<NAME>*: Challenge all assumptions. \ *<NAME>* and *<NAME>*: With enough free parameters, you can fit an elephant. \ *<NAME>*: Be humble. \ *<NAME>*, *<NAME>*, and *<NAME>*: "You're not right or wrong because a thousand people agree with you and you're not right or wrong because a thousand people disagree with you. You are right when your facts and reasoning are right." \ *<NAME>*: Don't forget to give sincere compliments from time to time. \ *<NAME>*: Don't postpone things that you can do today. \ *<NAME>*: "Gewoon rustig doorgaan." (Just calmly continue.) \ *<NAME>* and *<NAME>*: It's okay to sometimes complain about things, but at some point you just have to get the work done. \ \ To these people and the many other people who's ideas ended up in this thesis, thank you.
https://github.com/Az-21/typst-components
https://raw.githubusercontent.com/Az-21/typst-components/main/style/1.0.0/Components/checklist.typ
typst
Creative Commons Zero v1.0 Universal
// Checklist components: square icons (via the fa-icon helper imported from
// dependencies.typ) colored with the Material 3 palette, plus a list style
// that suppresses the default marker so the icons can act as bullets.
#import "../dependencies.typ": *
#import "../Colors/m3.typ": *

// Shorthand for the Material 3 palette imported above.
#let m3 = material3

// Wrapper for checklist content: renders lists with no marker and no indent
// so a checkbox icon placed at the start of each item replaces the bullet.
// Use as `#show: checklist-mode` or wrap the list content directly.
#let checklist-mode(it) = {
  set list(marker: none, indent: 0pt)
  it
}

// Completed item: solid green check-square followed by a small gap.
#let checkbox-done() = {
  fa-icon("square-check", solid: true, fill: m3.green.light.primary)
  h(0.5em)
}

// Pending item: hollow blue square.
#let checkbox-todo() = {
  fa-icon("square", solid: false, fill: m3.blue.light.primary)
  h(0.5em)
}

// Partially-complete item: solid blue minus-square.
#let checkbox-semi() = {
  fa-icon("square-minus", solid: true, fill: m3.blue.light.primary)
  h(0.5em)
}

// Failed item: solid red x-mark square.
#let checkbox-fail() = {
  fa-icon("square-xmark", solid: true, fill: m3.red.light.primary)
  h(0.5em)
}
https://github.com/SillyFreak/typst-crudo
https://raw.githubusercontent.com/SillyFreak/typst-crudo/main/CHANGELOG.md
markdown
MIT License
# [unreleased](https://github.com/SillyFreak/typst-crudo/releases/tag/) ## Added ## Removed ## Changed ## Migration Guide from v0.1.X --- # [v0.1.1](https://github.com/SillyFreak/typst-crudo/releases/tag/v0.1.1) ## Added - `lines()` can now accept one- (or zero-)sided ranges, e.g. `"2-"`, `"-3"`, `"-"` in addition to two-sided ones. - The package now has unit tests. ## Changed - Documentation improvements: README has a simpler example and a thumbnail; fix some typos. # [v0.1.0](https://github.com/SillyFreak/typst-crudo/releases/tag/v0.1.0) Initial Release
https://github.com/FlyinPancake/bsc-thesis
https://raw.githubusercontent.com/FlyinPancake/bsc-thesis/main/thesis/pages/chapters/chapter_1_intro.typ
typst
= Introduction <intro> Kubernetes stands as the pinnacle in the realm of orchestrating reliable, scalable, and portable applications, emerging as the de facto standard for hosting cloud-native applications. With widespread support across major cloud providers and adaptability for on-premise deployments, Kubernetes excels particularly at scale, leveraging substantial resources. However, for smaller applications not harnessing Kubernetes' full potential, such as scaling and redundant pods, the overhead in cost and complexity may outweigh the benefits. Effectively operating a Kubernetes cluster entails the use of multiple nodes, with a dedicated server for the control plane, the central intelligence of the cluster. This resource-intensive configuration translates to a non-trivial cost, generally exceeding three times that of a single node — one for the control plane and two for worker nodes. While consolidating multiple applications within a single cluster offers cost efficiency, challenges arise. Merging applications within a shared Kubernetes cluster raises security concerns, primarily because Kubernetes' separation mechanism is not inherently tailored for multi-tenancy but primarily addresses naming conflicts. This amalgamation also elevates cluster complexity and demands a higher level of technical expertise, potentially resulting in version conflicts and other issues. An illustrative example that will be examined involves working with multiple versions of Kubernetes operators, each providing Custom Resource Definitions @crd[s], which may potentially lead to conflicts. The concept of virtual clusters presents a novel solution to address these challenges. Currently implemented by Loft as part of their managed Kubernetes service, the open-source incarnation, known as `vcluster`, is accessible on GitHub @vcluster-github licensed under Apache-2.0@apache-2. 
Virtual clusters introduce additional separation between applications by executing each application within its own virtual cluster atop the shared Kubernetes infrastructure. It is crucial to note that virtual clusters are not merely dockerized single-node Kubernetes clusters; rather, they represent an innovative and evolving paradigm. This thesis aims to delve into the intricacies of virtual clusters, examining their potential use-cases, limitations, and advantages. The study will elucidate the performance characteristics of virtual clusters, drawing comparisons with conventional Kubernetes clusters. Additionally, it will explore the functional benefits of virtual clusters, shedding light on how they can enhance the developer experience and reduce the operational costs associated with Kubernetes clusters. Notably, the security implications of virtual clusters will not be explored in this thesis, as this topic warrants dedicated attention in a separate research endeavour. The thesis will be structured as follows. Chapter 2 will provide a theoretical background on containerization, Kubernetes, virtual clusters, and related concepts. Chapter 3 will present the methodology used to conduct the study, including the experimental setup and the metrics used to evaluate the performance and functional characteristics of virtual clusters. Chapter 4 will present the results of the study. Finally, Chapter 5 will conclude the thesis with a discussion of the findings and an outlook on future research.
https://github.com/Jollywatt/typst-fletcher
https://raw.githubusercontent.com/Jollywatt/typst-fletcher/master/tests/label-side-auto/test.typ
typst
MIT License
#set page(width: auto, height: auto, margin: 1em) #import "/src/exports.typ" as fletcher: diagram, node, edge Default placement should be above the line. #let around = ( (-1,+1), ( 0,+1), (+1,+1), (-1, 0), (+1, 0), (-1,-1), ( 0,-1), (+1,-1), ) #diagram( spacing: 2cm, axes: (ltr, ttb), for p in around { edge(p, (0,0), $f$) }, ) Reversed $y$-axis: #diagram( spacing: 2cm, axes: (ltr, btt), for p in around { edge(p, (0,0), $f$) }, )
https://github.com/jgm/typst-hs
https://raw.githubusercontent.com/jgm/typst-hs/main/test/typ/compiler/methods-04.typ
typst
Other
// Error: 2:2-2:43 cannot mutate a temporary value #let numbers = (1, 2, 3) #numbers.map(v => v / 2).sorted().map(str).remove(4)
https://github.com/suiranruofeng/notebook
https://raw.githubusercontent.com/suiranruofeng/notebook/main/GW/GW_learn.typ
typst
= Gravitational field vs gravitational waves We may wonder how it is possible to infer the presence of an astronomical body by the gravitational waves that it emits, when it is clearly not possible to sense its much larger stationary (essentially Newtonian) gravitational potential. 静止场的潮汐力在 r 处衰减为$nabla^2 phi ~ r^(-3)$ 由引力波引起的潮汐力(波幅$h$、波长$lambda$)的衰减为$nabla^2 h ~ r^(-1) lambda^(-2)$ 因此,在靠近引力体时类似于(由于$r<<lambda$)库伦势,引力波造成的影响较小$r^(-2)$,但在远处($r>>lambda$)引力波造成的效应强大的多。 同时,潮汐场的静止部分是一个DC效应,并且简单地叠加了宇宙中所有其他物体的静止潮汐力。不能将一个来源与另一个来源区分开来。 引力波有两个偏振,加模和叉模:$h_+$和$h_times$
https://github.com/xrarch/books
https://raw.githubusercontent.com/xrarch/books/main/documents/a4xmanual/chapapt.typ
typst
#import "@preview/tablex:0.0.6": tablex, cellx, colspanx, rowspanx #box([ = A3X Partition Table The partition table format understood by this firmware is the A3X Partition Table format, or *APT*. The following is an overview of this format. If a disk is formatted with *APT*, sector 0 (bytes 0-511 on disk) will contain the partition table with the following layout: ``` STRUCT AptBootBlock // Space for 15 bytes of boot code on relevant platforms. BootCode : UBYTE[15], // Always contains 0xFF. FfIfVariant : UBYTE, // Eight partition table entries. Partitions : AptEntry[8], // The 32-bit magic number must read 0x4E4D494D. Magic : ULONG, // A 15-character, null-terminated label for the disk. Label : UBYTE[16], END STRUCT AptEntry // A 7-character, null-terminated label for the partition. Label : UBYTE[8], // A 32-bit count of sectors in the partition. SectorCount : ULONG, // The status of the partition. Contains zero if the partition // table entry is unused. Otherwise, it contains any non-zero // value. Status : ULONG, END ``` ])
https://github.com/mrcinv/nummat-typst
https://raw.githubusercontent.com/mrcinv/nummat-typst/master/00_uvod.typ
typst
#heading(outlined: false, numbering: none)[Predgovor] Knjige o numerični matematiki se pogosto posvečajo predvsem matematičnim vprašanjem. Pričujoča knjiga poskuša nasloviti bolj praktične vidike numerične matematike, zato so primeri, če je le mogoče, povezani s problemom praktične narave s področja fizike, matematičnega modeliranja ali računalništva. Za podrobnejši matematični opis uporabljenih metod in izpeljav bralcu priporočam učbenik Osnove numerične matematike <NAME> @orel. Pričujoča knjiga je prvenstveno namenjena študentom Fakultete za računalništvo in informatiko Univerze v Ljubljani kot gradivo za izvedbo laboratorijskih vaj pri predmetu Numerična matematika. Kljub temu je primerna za vse, ki bi želeli bolje spoznati algoritme numerične matematike, uporabo numeričnih metod ali se naučiti uporabljati programski jezik #link("https://julialang.org/")[Julia]. Pri sem se od bralca pričakuje osnovno znanje programiranja v kakšnem drugem programskem jeziku. V knjigi so naloge razdeljene na vaje in na domače naloge. Vaje so zasnovane za samostojno delo z računalnikom, pri čemer lahko bralec naloge rešuje z različno mero samostojnosti. Vsaka vaja se začne z opisom naloge in jasnimi navodili, kaj je njen cilj oziroma končni rezultat. Sledijo podrobnejša navodila, kako se naloge lotiti, na koncu pa je rešitev z razlago posameznih korakov. Rešitev vključuje matematične izpeljave, programsko kodo in rezultate, ki jih dobimo, če programsko kodo uporabimo. V zbirki je več vaj, kot jih je mogoče predstaviti v 15 tednih, kolikor traja en semester. Poleg tega je smiselno vsaj dvoje vaj posvetiti izdelavi domačih nalog. Nekatere vaje (na primer vaja o minimalnih ploskvah) so toliko obsežne, da potrebujemo 2 tedna, da jih v celoti obravnavamo. V praksi se je izkazalo, da je mogoče v enem semestru v celoti obravnavati 10 do 12 vaj. Domače naloge rešuje bralec povsem samostojno, zato so naloge brez rešitev. 
Odločitev, da niso vključene rešitve za domače naloge je namerna, saj bralec lahko verodostojno preveri svoje znanje le, če rešuje tudi naloge, za katere nima dostopa do rešitev. Vsekakor bralcu svetujem, da vso kodo napiše in preskusi sam. Še bolje je, če kodo razširi, jo spreminja in se z njo igra. Koda, ki je navedena v tej knjigi, je najosnovnejša različica kode, ki reši določen problem in še ustreza minimalnim standardom pisanja kvalitetne kode. Pogosto je izpuščeno preverjanje ali implementacija robnih primerov, včasih tudi obravnava pričakovanih napak. Da je bralcu lažje razumeti, kaj koda počne, sem dal prednost berljivosti pred kompletnostjo. Na tem mestu bi se rad zahvalil <NAME>, <NAME>, <NAME> in <NAME>, s katerimi sem sodeloval ali še sodelujem pri numeričnih predmetih na FRI. Veliko idej za naloge, ki so v tej knjigi, prihaja prav od njih. Prav tako bi se zahvalil članom Laboratorija za matematične metode v računalništvu in informatiki, še posebej <NAME>-Kosta in Damirju Franetiču, ki so tako ali drugače prispevali k nastanku te knjige. Moja draga žena Mojca Vilfan je opravila delo urednika, za kar sem ji izjemno hvaležen. Na koncu bi se rad zahvalil študentom, ki so obiskovali numerične predmete. Čeprav sem jih jaz učil, so bili oni tisti, ki so me naučili marsikaj novega. #outline( title:[Kazalo], indent: auto, depth: 2 )
https://github.com/jonathan-iksjssen/jx-style
https://raw.githubusercontent.com/jonathan-iksjssen/jx-style/main/0.2.0/debug.typ
typst
#import "@jx/jx-style:0.2.0": * #show: docu.with( debug: true )
https://github.com/jgm/typst-hs
https://raw.githubusercontent.com/jgm/typst-hs/main/test/typ/text/escape-02.typ
typst
Other
// Unterminated. // Error: 6 expected closing brace \u{41[*Bold*]
https://github.com/ukihot/igonna
https://raw.githubusercontent.com/ukihot/igonna/main/slide.typ
typst
#import "@preview/slydst:0.1.0": * #set text(font: "Fira Sans", weight: "light") #show math.equation: set text(font: "Fira Math") #let today = datetime.today() #import "const.typ" #show: slides.with( title: const.title, subtitle: none, date: today.display("[month repr:long] [day], [year]"), authors: (const.me,), layout: "medium", title-color: rgb("#3d9970"), ) // 要旨::Abstract #include "articles/abstract.typ" // 目次::Contents #outline(indent: auto, depth: 2) #set page(columns: 1) = Console #include "articles/shell-work/cui.typ" #include "articles/shell-work/cmd.typ" #include "articles/shell-work/reg.typ" + `Ctrl`+`c`キーを用いよ. + 別のターミナルを起動し,`ps`,`grep`で`yes`コマンドのPIDを調べ,`kill`コマンドを用いよ. + `top`コマンドを用いよ. = Coding #include "articles/rust/syntax.typ" = Algorithm #include "articles/algo/sort.typ" #include "articles/algo/search.typ" #include "articles/algo/backtracking.typ" #include "articles/algo/dp.typ" = Architecture #include "articles/design/test.typ" #include "articles/design/workflow.typ" = Git #include "articles/git/git.typ" #include "articles/git/branch.typ" = Web #include "articles/web/html.typ" = Database #include "articles/database/sql.typ" = Technical Writing #include "articles/technical-writing/sakubun.typ"
https://github.com/Myriad-Dreamin/typst.ts
https://raw.githubusercontent.com/Myriad-Dreamin/typst.ts/main/fuzzers/corpora/visualize/shape-circle_02.typ
typst
Apache License 2.0
#import "/contrib/templates/std-tests/preset.typ": * #show: test-page // Ensure circle directly in rect works. #rect(width: 40pt, height: 30pt, fill: forest, circle(fill: conifer))
https://github.com/zgzhmry/Docx-to-Typst
https://raw.githubusercontent.com/zgzhmry/Docx-to-Typst/main/README.md
markdown
# Docx-to-Typst 将doc系列模板转换为Typst的模板的工具
https://github.com/xkevio/parcio-typst
https://raw.githubusercontent.com/xkevio/parcio-typst/main/parcio-report/main.typ
typst
MIT License
#import "template/template.typ": * #import "template/tablex.typ": * #show: project.with("Title", "Subtitle", authors: ( ( name: "<NAME>", mail: "<EMAIL>" ), ( name: "<NAME>", mail: "<EMAIL>" ) ), [ #ipsum\ #h(1em)This report template is available at #link("https://github.com/parcio/templates")[`https://github.com/parcio/templates`] and consists of Sections @intro[] to @conc[]. ] ) // temp test for cite customization // changes every citation that has (...et al.) in it to use square brackets #show regex("[(].*(et al.).*[)]"): r => { r.text.replace("(", "[").replace(")", "]").replace(".", ".,") } // ------------------------------- #parcio-outline() #pagebreak() = Introduction<intro> // subfigures, needs "kind" of "sub" and a grid rn #figure(caption: "Caption")[ #grid(columns: 2)[ #figure(caption: "Left", kind: "sub", supplement: none, numbering: "a")[ #image(alt: "Blue OVGU logo", width: 75%, "template/ovgu.jpg") ]<fig1a> ][ #figure(caption: "Right", kind: "sub", supplement: none, numbering: "a")[ #image(alt: "Blue OVGU logo", width: 75%, "template/ovgu.jpg") ]<fig1b> ] ]<fig1> You can refer to the subfigures (Figures 1@fig1a and 1@fig1b) or the figure (@fig1). = Background<bg> You can comfortably reference literature #cite("DBLP:journals/superfri/DuweLMSF0B020").#footnote[This is a footnote.] #figure(caption: "Caption")[ // alignment change currently only for 3 columns, can be changed tho // scaling also dependent on header size // normal typst #table function works just fine in that regard but is less customizable rn #parcio-table(3, 3, [*Header 1*], [*Header 2*], [*Header 3*], [Row 1],[Row 1],[Row 1], [Row 2],[Row 2],[Row 2], ) ]<tb1> You can also refer to tables (@tb1). == Math<m> $ E = m c^2 $<eq1> You can also refer to _(numbered)_#footnote[Referable things *need* a numbering] equations (@eq1). 
= Evaluation<eval> #align(left)[ #figure(caption: "Caption")[ ```c printf("Hello world!\n"); // Comment for (int i = 0; i < m; i++) { for (int j = 0; j < n; j++) { sum += 'a'; } } ``` ]<code1> ] You can also refer to listings (@code1). #pagebreak() = Conclusion<conc> #todo[FIXME] #lorem(100) #lorem(100) #lorem(100) #inline-todo[FIXME:\ remove\ this] // ------------------------- #pagebreak() #bibliography("report.bib", style: "apa", title: "References")
https://github.com/fenjalien/metro
https://raw.githubusercontent.com/fenjalien/metro/main/src/impl/num/num.typ
typst
Apache License 2.0
#import "/src/utils.typ": combine-dict, content-to-string
#import "process.typ": process
#import "parse.typ": parse, to-float

// Default option set for number formatting. Keys mirror siunitx-style
// settings; callers override entries via `get-options` below.
#let default-options = (
  // parsing
  input-decimal-markers: ("\.", ","),
  retain-explicit-decimal-marker: false,
  retain-explicit-plus: false,
  retain-negative-zero: false,
  retain-zero-uncertainty: false,
  parse-numbers: auto,
  // post-processing
  drop-exponent: false,
  drop-uncertainty: false,
  drop-zero-decimal: false,
  exponent-mode: "input",
  exponent-thresholds: (-3, 3),
  fixed-exponent: 0,
  minimum-integer-digits: 0,
  minimum-decimal-digits: 0,
  round-direction: "nearest",
  round-half: "up",
  round-minimum: 0,
  round-mode: "none",
  round-pad: true,
  round-precision: 2,
  round-zero-positive: true,
  // uncertainty-round-direction: "nearest",
  // Printing
  bracket-negative-numbers: false,
  digit-group-size: 3,
  digit-group-first-size: 3,
  digit-group-other-size: 3,
  exponent-base: "10",
  exponent-product: sym.times,
  group-digits: "all",
  group-minimum-digits: 5,
  group-separator: sym.space.thin,
  output-close-uncertainty: sym.paren.r,
  output-decimal-marker: ".",
  output-exponent-marker: none,
  output-open-uncertainty: sym.paren.l,
  print-implicit-plus: false,
  print-exponent-implicit-plus: false,
  print-mantissa-implicit-plus: false,
  print-unity-mantissa: true,
  print-zero-exponent: false,
  print-zero-integer: true,
  tight-spacing: false,
  bracket-ambiguous-numbers: true,
  zero-decimal-as-symbol: false,
  zero-symbol: sym.bar.h,
  // qty
  separate-uncertainty: "",
  separate-uncertainty-unit: none,
)

// Assemble the final equation from the processed parts.
// `sign` is (coerced to) a string that may contain "-", "+" and/or "<";
// `mantissa` is the formatted digit string; `exponent`, `power` and
// `uncertainty` are pre-rendered content or none (produced by `process` —
// see process.typ). Returns a `math.equation`.
#let build(options, sign, mantissa, exponent, power, uncertainty) = {
  // Coerce `sign` to a string so the `in` checks below work uniformly.
  sign += ""
  // "-0" only counts as negative when retain-negative-zero is enabled.
  let is-negative = "-" in sign and (mantissa != "0" or options.retain-negative-zero)
  // Parenthesize mantissa+uncertainty when both an exponent and an
  // uncertainty are present, if the corresponding option is on.
  let bracket-ambiguous-numbers = options.bracket-ambiguous-numbers and exponent != none and uncertainty != none
  let bracket-negative-numbers = options.bracket-negative-numbers and is-negative
  // Return
  return math.equation({
    // options.print-mantissa is set during processing (not a default above);
    // when false the mantissa and its attached power are omitted entirely.
    let output = if options.print-mantissa {
      math.attach(mantissa, t: power)
    }
    // Exactly one leading sign is emitted: brackets, minus, or plus.
    if bracket-negative-numbers {
      output = math.lr("(" + output + ")")
    } else if is-negative {
      output = sym.minus + output
    } else if options.print-implicit-plus or options.print-mantissa-implicit-plus or ("+" in sign and options.retain-explicit-plus) {
      output = sym.plus + output
    }
    // A "<" in the parsed sign renders as a leading less-than symbol.
    if "<" in sign {
      output = math.equation(sym.lt + output)
    }
    // "repeat" mode prints the unit both after the value and after the
    // uncertainty; the unit content is supplied by the qty caller.
    if options.separate-uncertainty == "repeat" and uncertainty != none {
      output += options.separate-uncertainty-unit
    }
    output += math.equation(uncertainty)
    if bracket-ambiguous-numbers {
      output = math.lr("(" + output + ")")
    }
    output += exponent
    if options.separate-uncertainty == "bracket" and uncertainty != none {
      output = math.lr("(" + output + ")")
    }
    return output
  })
}

// Merge user-supplied options into the defaults. `only-update: true`
// presumably restricts the merge to keys already present in the defaults —
// see combine-dict in /src/utils.typ.
#let get-options(options) = combine-dict(options, default-options, only-update: true)

// Format a number with optional exponent, uncertainty and power.
// `number` is the raw input (string or content); `options` is the
// user-override dictionary merged over `default-options`.
#let num(
  number,
  exponent: none,
  uncertainty: none,
  power: none,
  options
) = {
  options = get-options(options)
  // Parse the input unless parsing is disabled; `parse` splits `number`
  // into sign, integer digits, decimal digits, exponent and power.
  let (sign, integer, decimal, exp, pwr) = if options.parse-numbers != false {
    parse(options, number, full: true)
  } else {
    (auto,) * 5
  }
  options.number = number
  // Values parsed out of the number itself override the keyword arguments.
  if exp not in (none, auto) {
    exponent = exp
  }
  if pwr not in (none, auto) {
    power = pwr
  }
  let (options, sign, mantissa, exponent, power, uncertainty) = process(options, sign, integer, decimal, exponent, power, uncertainty)
  return build(options, sign, mantissa, exponent, power, uncertainty)
}
https://github.com/Nianyi-GSND-Projects/GSND-5130-GP2
https://raw.githubusercontent.com/Nianyi-GSND-Projects/GSND-5130-GP2/master/Thesis/Extended%20Abstract.typ
typst
// Preambles #set page(paper: "us-letter", margin: 1in) #set par(justify: true) #set cite(style: "alphanumeric") #set text(font: "Times New Roman") #show text.where(lang: "zh"): set text(font: "SimSun"); #show link: set text(size: 0.9em, font: "Consolas") #set quote(quotes: true); #show quote: set text(style: "italic"); // Title #(body => { set align(left); set text(size: 19pt, weight: "bold"); v(1em); body; })[Inspecting the Usability Issues of _Fortnite: Battle Royale_'s Graphical Interface]; #{ v(-0.5em); let member(name: "", localname: "", mail: "") = { show link: set text(font: "Consolas"); set align(center); text()[#name (#localname)]; linebreak(); link("mailto:" + mail)[<#mail>]; }; table( columns: (1fr, 1fr), stroke: none, member( name: "<NAME>", localname: text(lang: "zh")[王念一], mail: "<EMAIL>" ), member( name: "<NAME>", localname: text(lang: "fa")[صدف نظام‌الدینی], mail: "<EMAIL>" ) ); } = Extended Abstract In 2017, _Epic Games_ released their online video game _Fortnite_. The game became a huge financial success quickly after being released, reaching over 10 million players within two weeks @wikipedia2024fortnite @josh2024howmany. Among its various game modes, _Battle Royale_ which is a competitive survival game mode, turns out to be the most popular one. @wikipedia2024fortnitebattleroyale. Albeit widely receiving positive reviews, there are players complaining about the game's graphical interface design. Some says that #quote[_Epic_ is intentionally inducing players into other game modes] @bt1234yt2023epicchangedui; some says that the UI design #quote[is mobile-first] and #quote[lacks proper beta tests because players definitely won't like it] @reddit2023fortniteinterface @paledot24662023epicchangedui. There are more issues with the in-game HUDs, which are used to deliver the players important information (e.g. HP, location, inventory) or indicate the interactable elements in the game world. 
Since there is too much information on the screen at the same time, the UI feels too complex and confusing. This complexity seems to disproportionately affect novice players, making the learning curve steep and often overwhelming. The game does not feature a tutorial system to guide the players. Some players might feel lost in their first round of battle, as they are unfamiliar with the goal of the _Battle Royale_ mode. Also, some important systems of the game (like the building system) could be totally missed by new players. This is because there is no in-game guidance introducing these systems to the players. The only way for players to discover them is by observing other players using these mechanics during live gameplay. But by the time they need to use the system themselves, they still don't know how to do it. The problem we aim to address is the usability of _Fortnite_'s interface for new players who are unfamiliar with the game. By interfaces, we mean the game lobby, menus, and HUD. Our study will focus on the learnability aspect of usability, as the challenges experienced by novice players are likely linked to their ability to learn and navigate the game's interface effectively. Our research sample consists of gamers with no prior experience playing _Fortnite_. Participants will engage in a brief play session (one round of the _Battle royale_ mode) and then complete a detailed survey. The survey begins with general questions to understand the participants' gaming backgrounds and preferences. We will then move to questions focused on their perceptions of different usability elements in the game, such as how easily they were able to navigate the game menus, how quickly they could find the information they needed on the screen, and what percentage of the interface elements they believe they understood. To gain deeper insights, we will include more detailed questions about specific graphical elements on the game screen.
Participants will be asked to explain the meaning and usage of these elements to verify their understanding. After all data is collected, we will analyze it by coding the player responses into categories. Then we will discuss each topic to see how they are reflected in the game and compare them to usability and design principles proposed by other researchers preliminarily. The expected outcome of this research is providing valuable insights not only for future _Fortnite_ updates but also for game developers more broadly. We hope to offer practical design recommendations that prioritize novice-friendly interfaces without compromising complexity for experienced players. = Related Works Besides players' disputes, the graphical interface of a video game is often discussed from two opposite aspects in academic literature. One aspect stands on the players' subjective stance and tries to analyze the players' experiences; the other aspect evaluates the graphical interface by applying general design principles. == Player-centric The earliest usability studies on video games could be traced back to the early 2000s. _<NAME>_'s study in 2004 examined usability challenges faced by new players of MMORPGs, the results of which showed that huge improvements for players that are not familiar with the target genre could be done @Cornett2004usability. In _<NAME>_'s study in 2012, players were asked to play a collection of high-rated FPS games for a week-long period @fricker2012game. Follow-up surveys yielded some useful understandings of certain UI features used with the FPS game that players found the most helpful. We will try to follow these understandings in our research. _<NAME>_ and _<NAME>_'s research in 2023 specifically studied the effect of one particular form of in-game GUIs---Head-up Display (HUD)---on game immersion @bergman2023effect. A group of experiment participants were asked to play _Fortnite_ while their biometric data were being measured.
The results showed no significant effect of the choice of HUD design on game immersion, but the players' subjective rating were affected by the design. == Design Principle-based This style of approach started a bit later than the previous approach. In 2008, _Pinelle at al_ collected a wide range of game reviews and developed 10 usability heuristics based on the coded categories @Pinelle2008heuristic. _Desurvire_ and _Wiberg_ furtherly proposed a more refined and complete list of heuristics on game play in 2009 @Desurvire2009gameusability, called _Heuristics of Playability_ (PLAY) which can be applied game development to aid game developers in usability/playability research during the development cycle. Similar to @bergman2023effect, _Fagerholt_ and _Lorentzon_ tried a constructive approach by studying preliminary academical literatures in various related fields and formulating hypothesis how player immersion is connected to the user interfaces @fagerholt2009beyond. After evaluated by user studies and user tests, they proposed a set of guidelines that cover different aspects of the designing of an FPS game. Although with these heuristics it shall be easier to identify the usability problems in _Fortnite_, _Cheremnykh_'s research in 2024 showed that a one-size-fits-all approach to UX design or gamification may not be universally effective @cheremnykh2024gaming. Therefore in this research, we will be using the heuristics only as a general guidance instead of a fixed rulebook. = Research Method We ask players with no prior experience playing _Fortnite_ to be our research participants. The participants would be asked to perform two tasks: + Play Session: We ask participants to play a full round of Battle Royale mode in _Fortnite_ game. This takes about 20 minutes in total. 
+ Survey: After the end of the play session, we send players a link to an online survey, which investigates on the following aspects: - Player information: The survey starts with general questions to learn the player's gaming background and their preferences. - Player feedback: In this part, we ask players for direct feedback on their playing experience including usability questions. - Player learning test: In this part, we ask more detailed questions using images of specific interface elements. This approach helps ensure their responses accurately reflect their usability experience. Refer to @table:survey-questions in the appendix for the survey questions. // Bibliography #bibliography("bibliography.bib", full: true, style: "ieee" ) = Appendix #show table.cell.where(y: 0): set text(weight: "bold") #figure( caption: [The questions used in the survey.], table( columns: (3em, 1fr), align: left, stroke: none, table.hline(stroke: 1pt), table.header([No.], [Question]), table.hline(stroke: 0.5pt), [1], [What games do you usually play and what are your favorite genres?], [2], [How many hours per week do you usually play games?], [3], [Have you played any battle royale games (like PUBG/Unturned) previously? How familiar are you with this game mode?], [4], [Have you encountered any trouble starting the game?], [5], [What elements in the game world did you interact with? Briefly describe how you interacted with them.], [6], [What weapons/props have you used in the play?], [7], [Have you used the building mechanics in the game? Under what circumstances?], [8], [Were you able to find the information you need easily on the screen?], [9], [How do you feel about the game's UI? Is it too simple, too detailed, or just right?], [10], [What is \[screenshot\] this UI element representing?], [11], [How did you know the answer to the previous question? ], [12], [Write anything you'd like to add about the game's UI.], table.hline(stroke: 1pt), ) ) <table:survey-questions>
https://github.com/mkhoatd/Typst-CV-Resume
https://raw.githubusercontent.com/mkhoatd/Typst-CV-Resume/main/README.md
markdown
MIT License
# Typst-CV-Resume This Typst CV template is inspired by the LaTeX template [Deedy-Resume](https://github.com/deedy/Deedy-Resume). You can use it for both industry and academia. In original Typst, we cannot create a reference list without citation. So I modified the code for this purpose. Currently, I only created the Chicago style citation and reference list. If you want to use other citation styles, you need to modify the code. I have created a function to import your publication list. **remember: you need to use a `json` file exported from Zotero with BetterBibTeX. I did not test other ways.** Then you can call the function by using `#chicago(json("bib.json"))`. ## Update I have updated the `macfont` version with some location changes. The `openfont` version is still the same. ## Use This project includes **three** files: - `example.typ`: the main file - `typstcv.typ`: the template file - `bib.json`: the bibliography file You can use `example.typ` as a template to create your own CV. You can also download `typstcv.typ` as a template.
Then create a new file with the following code: <details> <summary>Click me</summary> ``` #import "typstcv.typ": * // Remember to set the fonttype in `typstcv.typ` #main( name: [#lorem(2)], //name:"" or name:[] address: [#lorem(4)], lastupdated: "true", date:"2023.4.7", contacts: ( (text:"08856",link:""), (text:"example.com",link:"https://www.example.com"), (text:"github.com",link:"https://www.github.com"), (text:"<EMAIL>",link:"mailto:<EMAIL>"), ), bibfile: [bib.json], [ //About #section("About") #descript[#lorem(50)] #sectionsep #section("Education") #subsection[#lorem(4)\ ] #term[xxxx-xxxx][UK] #subsectionsep #subsection[#lorem(4)\ ] #term[xxxx-xxxx][UK] #sectionsep #section("Skills") #descript("Programming Languages") #info[Python, C++, Java, JavaScript, HTML, CSS, SQL, LaTeX] #subsectionsep #descript("Frameworks") #info[React, Node.js, Express, Flask, Django, Bootstrap, jQuery] #subsectionsep #descript("Tools") #info[Git, GitHub, Docker, AWS, Heroku, MongoDB, MySQL, PostgreSQL, Redis, Linux] #sectionsep // Award #section("Awards") #awarddetail[2018][Scholarship][University] #awarddetail[2017][Grant][Organisation] #awarddetail[2016][Scholarship][University] #sectionsep ], [ //Experience #section("Experience") #jobtitle[#lorem(4)][#lorem(2)] #term[xxxx-xxxx][UK] #jobdetail[ - #lorem(10) - #lorem(10) - #lorem(10) - #lorem(10)] #subsectionsep #jobtitle[#lorem(4)][#lorem(2)] #term[xxxx-xxxx][] #jobdetail[#lorem(30)] #subsectionsep // Projects #section("Projects") #descript[#lorem(2)] #info[#lorem(40)] #subsectionsep #descript[#lorem(2)] #info[#lorem(40)] #subsectionsep #descript[#lorem(2)] #info[#lorem(40)] #sectionsep // Publication #section("Publications") #chicago(json("bib.json")) // #apa(json("bib.json")) ], ) ``` </details> **Remember: If you want to change the font, you should change the font setting `#let fonttype = "macfont"` to `openfont` and modify the font that you prefer for each section.** ## Example I only test the template on macOS. 
If you want to use it on other platforms, you should use the template in the `openfont` folder. Then, modify the font in `typstcv.typ` to the font installed on your PC. **MacFont** ![WzzFAb](https://cdn.jsdelivr.net/gh/jxpeng98/imagerepo@main/2023/04/WzzFAb.png) **PT Sans** ![S4rnjN](https://cdn.jsdelivr.net/gh/jxpeng98/imagerepo@main/2023/04/S4rnjN.png) I also created a single-column version. You can find it in the [`typstcv_single.typ`](CV/typstcv_single.typ) and [`example_single.typ`](CV/example_single.typ): ![RWRbMA](https://cdn.jsdelivr.net/gh/jxpeng98/imagerepo@main/2023/05/RWRbMA.png) ## Todo - [x] Combine the macfont and openfont into one file. - [ ] Create one column version.
https://github.com/csimide/SEU-Typst-Template
https://raw.githubusercontent.com/csimide/SEU-Typst-Template/master/seu-thesis/parts/outline-degree-fn.typ
typst
MIT License
// Table-of-contents page for the thesis template.
//
// Parameters:
//   outline-depth: deepest heading level shown in the outline (default: 3).
//   show-self-in-outline: whether the "目录" (Contents) heading itself
//     appears as an entry in the outline (default: true).
#let outline-conf(outline-depth: 3, show-self-in-outline: true) = {
  // Front-matter pages are numbered with uppercase Roman numerals, centered.
  set page(
    numbering: "I",
    number-align: center,
  )
  set par(first-line-indent: 0pt, leading: 10pt)
  // Unnumbered "目录" (Contents) heading; always bookmarked in the PDF,
  // listed in the outline only when show-self-in-outline is true.
  heading(
    numbering: none,
    outlined: show-self-in-outline,
    bookmarked: true,
  )[目录]
  // Emphasize level-1 entries: bold, with weak vertical space above
  // (collapses against adjacent spacing).
  show outline.entry.where(level: 1): it => {
    v(1.2em, weak: true)
    strong(it)
  }
  outline(title: none, depth: outline-depth, indent: 2em)
}
https://github.com/crd2333/typst-theorem-box
https://raw.githubusercontent.com/crd2333/typst-theorem-box/master/examples/example.typ
typst
MIT License
// Demo document for the theorem-box package: exercises each environment
// (theorem, definition, corollary, lemma, proof), per-chapter counters,
// titles, footers, and label-based cross-references.
#import "../lib.typ": *
#set heading(numbering: "1.")
// Install the package's show rules (required for the boxes to render).
#show: thmrules

= First level heading
// Theorem with an explanatory footer; labelled for later reference.
#theorem(footer: [The showybox allowes you add footer for boxes, useful when giving some explanation.])[#lorem(20)] <thm1>

= Another first level heading
// Titled theorem; the body may also be passed positionally.
#theorem(title: "This is a title", lorem(20)) <thm2>

== Second level heading
#definition[The counter will be reset after the first level of heading changes (counting within one chapter).]
// The title accepts arbitrary content, e.g. coloured text.
#theorem(title: [#text(fill: green, "This is another title")])[Now the counter increases by 1 for type `Theorem`.]
// Both a positional body and a footer at once.
#corollary([One body.], footer: [As well as footer!])[Another body!]
#lemma[#lorem(20)]
#proof[By default the `Proof` will not count itself.\ And the `Proof` box will have a square at the right bottom corner.]
// Cross-references by label.
@thm1 (Use the label name to refer) @thm2
https://github.com/bombless/typst-reproduction
https://raw.githubusercontent.com/bombless/typst-reproduction/master/main.typ
typst
Apache License 2.0
BERT模型中\ BERT模型中,每个编码器由两层组成,即自注意力层和全连接层。
https://github.com/max-niederman/MATH51
https://raw.githubusercontent.com/max-niederman/MATH51/main/hw/4.typ
typst
#import "../lib.typ": * #show: homework.with( title: "Math 51 Homework #4" ) = 12.3 We have that $ f(x, y, z) &= x z + y z $ == Let $g(x, y, z) = x^2 + y^2 - 4 z^2$. We are asked to find two points among which any extrema of $f$ constrained to the level set $g = 1$ must lie. We can use Lagrange multipliers to find these points. Any extremum of $f$ constrained to the level set $g = 1$ must satisfy $ nabla g = vname(0) or nabla f = lambda nabla g $ We take this in cases. If $nabla g = vname(0)$: $ vname(0) &= nabla g \ &= vec(2 x, 2 y, -8 z) \ &= vec(x, y, z) $ But $g(vname(0)) = 0 != 1$, so this is not a candidate point. If $nabla g != vname(0)$, then $ nabla f &= lambda nabla g \ vec(z, z, x + y) &= lambda vec(2x, 2y, -8z) \ lambda 2y = &z = lambda 2x \ y &= x \ x + y &= -8 z \ 2x &= -8 z \ x = &y = -4 z $ Constraining this to $g = 1$ gives $ g(-4 z, -4 z, z) &= 1 \ 16 z^2 + 16 z^2 - 4 z^2 &= 1 \ 28 z^2 &= 1 \ z &= plus.minus 1 / sqrt(28) = plus.minus 1 / (2 sqrt(7)) $ So the extrema, if they exist, must be among $plus.minus (-2 / sqrt(7), -2 / sqrt(7), 1 / (2 sqrt(7)))$. == First, we compute the values of $f$ at the two candidate points: $ f(-2 / sqrt(7), -2 / sqrt(7), 1 / (2 sqrt(7))) &= -2 / 7 \ f(2 / sqrt(7), 2 / sqrt(7), -1 / (2 sqrt(7))) &= -2 / 7 $ Two integer solutions to $g = 1$ are $(4, 1, 2)$ and $(4, 1, -2)$. Their images under $f$ are: $ f(4, 1, 2) &= &10 > -2 / 7 \ f(4, 1, -2) &= -&10 < -2 / 7 \ $ We know from Part (a) that if $f$ constrained to $g = 1$ has extrema, they must be at $plus.minus (-2 / sqrt(7), -2 / sqrt(7), 1 / (2 sqrt(7)))$, and from the above that these points are not extrema (since there are values of $f$ on $g = 1$ both lesser and greater). Therefore, $f$ has no extrema on the hyperboloid $g = 1$. 
= 12.8 We have that $ f(x, y, z) = 2 x^2 + 6 x y + y^2 + 2z \ \ R = { (x, y, z) | x, y, z >= 0 and x + y + z <= 1 } $ == $ nabla f (x, y, z) = vec(4x + 6y, 6x + 2y, 2) != vname(0) $ Therefore, $f$ has no critical points in the interior of $R$. == Let $t(x, y, z) = x + y + z$. Then the level set $t = 1$ is the plane containing $T$, and we can use Lagrange multipliers to find extrema of $f$ on this plane. $ nabla t = vec(1, 1, 1) = vname(1) != vname(0) $ With the $nabla t = vname(0)$ case eliminated, we have $ nabla f &= lambda nabla t \ vec(4x + 6y, 6x + 2y, 2) &= lambda vname(1) \ lambda &= 2 \ mat( 4, 6; 6, 2; ) vec(x, y) &= vec(2, 2) \ mat( -14, 0; 6, 2; ) vec(x, y) &= vec(-4, 2) \ mat( 1, 0; 0, 2; ) vec(x, y) &= vec(2 / 7, 2 - 6 dot 2 / 7) \ mat( 1, 0; 0, 1; ) vec(x, y) &= vec(2 / 7, 1 / 7) \ vec(x, y) &= vec(2 / 7, 1 / 7) \ x + y + z &= 1 \ z &= 1 - x - y \ &= 4 / 7 \ vec(x, y, z) &= 1 / 7 vec(2, 1, 4) $ So the only extremum candidate is $(2 / 7, 1 / 7, 4 / 7)$. == One edge of $T$ is the line segment $L = { (s, 1 - s, 0) | s in [0, 1] }$. Let $l(s) = (s, 1 - s, 0)$, so that $L$ is the image of $[0, 1]$ under $l$. Then finding the extrema of $f$ on $L$ is equivalent to finding the maximum of $f compose l$ on $[0, 1]$. We can compute this using single-variable calculus. 
First we compute the endpoints of $L$: $ f(l(0)) &= f(0, 1, 0) &= 1 \ f(l(1)) &= f(1, 0, 0) &= 2 $ Then, we find the critical points of $f compose l$: $ (f compose l)(s) &= 2 s^2 + 6 s (1 - s) + (1 - s)^2 + 2 (0) \ 0 &= dif / (dif x) (f compose l)(s) \ &= 4 s + (6 - 12 s) - 2(1 - s) \ &= 4 s - 12 s + 2 s + 6 - 2 \ &= -6 s + 4 \ 6 s &= 4 \ s &= 2 / 3 $ We use the second-derivative test to check this is indeed a maximum: $ dif^2 / (dif x^2) (f compose l)(s) &= -6 < 0 $ Knowing $s$, we can compute the maximum: $ (f compose l)(2 / 3) &= 2 (2 / 3)^2 + 6 (2 / 3) (1 - 2 / 3) + (1 - 2 / 3)^2 \ &= 2 dot 4 / 9 + 6 dot 2 / 3 dot 1 / 3 + 1 / 9 \ &= (8 + 12 + 1) / 9 \ &= 21 / 9 \ &= 7 / 3 $ The maximum of $(f compose l)(0) = 1$, $(f compose l)(1) = 2$, and $(f compose l)(2 / 3) = 21 / 9$ is $(f compose l)(2 / 3) = (21 / 9)$, so this is the maximum value of $f$ on $L$. == #enum(numbering: "A.")[ $f(0, 0) = 0$ ][ $f(2, 2) = 14$ ][ $f(3, 1) = 15$ ][ $f(5, 3) = 29$ ] We know that the maximum of $f$ on $R$ is at either $(2 / 7, 1 / 7, 4 / 7)$ or $l(2 / 3) = (2 / 3, 1 / 3, 0)$. Computing $f$ at these values yields $ f(2 / 7, 1 / 7, 4 / 7) &= 2 (2 / 7)^2 + 6 (2 / 7) (1 / 7) + (1 / 7)^2 + 2 (4 / 7) \ &= 2 dot 4 / 49 + 6 dot 2 / 7 dot 1 / 7 + 1 / 49 + 56 / 49 \ &= (8 + 12 + 1 + 56) / 49 \ &= 77 / 49 \ &= 11 / 7 \ f(2 / 3, 1 / 3, 0) &= 7 / 3 "(from part (c))" $ $7 / 3 > 11 / 7$, so $f(2 / 3, 1 / 3, 0) = 7 / 3$ is the maximum of $f$ on $R$. = 12.9 Let $f(x, y) = 4x + 3y$. == #figure( image("../images/ex12_9_a.png", width: 40%), caption: [The region $R$.] ) Suppose that there is a maximum of $f$ on $R$ at $(x, y)$. The point $(2x, 2y)$ is also in $R$, because $ x >= 1 &arrow.r.double 2x >= 1 \ y >= 1 &arrow.r.double 2y >= 1 \ x + y >= 3 &arrow.r.double 2x + 2y >= 3 $ And $ f(2x, 2y) &= 4 (2x) + 3 (2y) \ &= 8x + 6y \ &> 4x + 3y \ &> f(x, y) $ Which is a contradiction because $f(x, y)$ is a maximum. Therefore, there is no maximum of $f$ on $R$. 
==

#figure(
  image("../images/ex12_9_b.png", width: 40%),
  caption: [The parallelogram formed by $vname(v)$, $vname(w)$, and $vname(v) + vname(w)$.]
)

We are asked to find the maximum of $4x + 3y = f(x, y)$ on this parallelogram.

$f$ is not maximized on the interior of the parallelogram, because $nabla f = vec(4, 3) != vname(0)$, so the maximum must be on the boundary.

Similarly, the maximum cannot be on the interior of any of the bounding line segments, because the projections of the gradient of $f$ onto the direction vectors of these line segments ($Proj_(vname(v)) nabla f$ and $Proj_(vname(w)) nabla f$) are both nonzero. If the maximum was on the interior of one of these line segments, we could get a greater value of $f$ by moving in the direction of these projections.

Therefore, the maximum of $f$ on the parallelogram must be at one of the vertices:

#[
#set enum(numbering: "A.")
+ $f(0, 0) = 0$
+ $f(2, 2) = 14$
+ $f(3, 1) = 15$
+ $f(5, 3) = 29$
]

So the maximum is $f(vname(v) + vname(w)) = f(5, 3) = 29$.

==

Let $g(x, y) = -x + 2y$. To minimize $g$ on the parallelogram, we can perform exactly the same steps as in Part (b):

$g$ cannot be minimized on the interior of the parallelogram, because $nabla g = vec(-1, 2) != vname(0)$, so the minimum must be on the boundary. Like with $f$, $Proj_(vname(v)) nabla g != vname(0)$ and $Proj_(vname(w)) nabla g != vname(0)$, so the minimum cannot be on the interior of any of the sides, and must be on one of the vertices:

#[
#set enum(numbering: "A.")
+ $g(0, 0) = 0$
+ $g(2, 2) = 2$
+ $g(3, 1) = -1$
+ $g(5, 3) = 1$
]

So the minimum is $g(vname(w)) = g(3, 1) = -1$.

= 12.12

Let $f(x, y) = x^2 + y^2$ and $g(x, y) = 3x + 2y$. Then the line $3x + 2y = 6$ is the level set $g = 6$. We are asked to minimize $f$ on the level set $g = 6$.

The minimum must satisfy either $nabla g = vname(0)$ or $exists lambda in RR : nabla f = lambda nabla g$.
$nabla g = vec(3, 2) != vname(0)$, so we solve: $ nabla f &= lambda nabla g \ 3x + 2y &= 6 \ vec(2x, 2y) &= lambda vec(3, 2) \ 2 / 3 x = &lambda = y \ y &= 2 / 3 x \ 3x + 4 / 3 x &= 6 \ 13x &= 18 \ x &= 18 / 13 \ y &= 12 / 13 $ Therefore, the minimum is at $(18 / 13, 12 / 13)$. = 12.14 Let $v(x, y, z) = x y z$ and $s(x, y, z) = 2 x y + 2 x z + 2 y z$, so that $v(x, y, z)$ and $s(x, y, z)$ are, respectively, the volume and surface area of the rectangular box with side lengths $x$, $y$, and $z$. We minimize $s$ on the level set $v = V$ using Lagrange multipliers: For the case $nabla v = vname(0)$: $ nabla v &= vname(0) \ vec(y z, x z, x y) &= vname(0) $ Suppose that $x$ is nonzero. Then $x z = 0 arrow.r.double z = 0$ and $x y = 0 arrow.r.double y = 0$, so $y$ and $z$ are zero. This same reasoning applies to $y$ and $z$, so $nabla v = vname(0)$ when at most one of $x$, $y$, and $z$ is nonzero. However, if any of $x$, $y$, or $z$ is zero, then $v(x, y, z) = 0 != V$, so none of these points are candidates. Therefore, we solve: $ nabla s &= lambda nabla v \ x y z &= V \ vec(2 y + 2 z, 2 x + 2 z, 2 x + 2 y) &= lambda vec(y z, x z, x y) \ lambda &= 2 (y + z) / (y z) \ &= 2 (x + z) / (x z) \ &= 2 (x + y) / (x y) \ $ Equating these expressions for $lambda$ gives $ (y + z) / (y z) = (x + z) / (x z) = (x + y) / (x y) \ x(y + z) = y(x + z) = z(x + y) \ x y + x z = y x + y z = z x + z y \ \ x y + x z = y x + y z \ x z = y z \ x = y \ \ x y + x z = z x + z y \ x y = z y \ x = z \ \ x = y = z $ Therefore, the rectangular box with minimum surface area for a given volume has equal side lengths; i.e. it is a cube. = 13.2 == The geometric effect of $T_(2, 1/3)$ on a vector $vname(v) in RR^2$ is to scale $vname(v)$ by a factor of $2$ in the $x$ direction and a factor of $1/3$ in the $y$ direction. 
#figure( image("../images/ex13_2_a.png", width: 40%), caption: [The vectors $vname(u) = vec(-1, 1)$ and $vname(v) = vec(1, 2)$, along with their images $vname(u')$ and $vname(v')$ under $T_(2, 1/3)$.] ) In general, the transformation has the effect of "squishing" $RR^2$ down towards the $x$-axis by a factor of $1/3$ while also "stretching" the $x$ axis outwards, away from the $y$-axis by a factor of $2$. == $ &(T_(1/a, 1/b) compose T_(a, b))(vname(x)) \ = &T_(1/a, 1/b)(T_(a, b)(vname(x))) \ = &mat( 1/a, 0; 0, 1/b; ) mat( a, 0; 0, b; ) vname(x) \ = &mat( 1, 0; 0, 1; ) vname(x) \ = &vname(x) $ Similarly, $ &(T_(a, b) compose T_(1/a, 1/b))(vname(x)) \ = &T_(a, b)(T_(1/a, 1/b)(vname(x))) \ = &mat( a, 0; 0, b; ) mat( 1/a, 0; 0, 1/b; ) vname(x) \ = &mat( 1, 0; 0, 1; ) vname(x) \ = &vname(x) $ This makes geometric sense because $T_(a, b)$ has the effect of "expanding" by a factor $a$ in the $x$-direction and $b$ in the $y$-direction, and $T_(1/a, 1/b)$ has the _inverse_ effect of "compressing" by a factor of $a$ in the $x$-direction and $b$ in the $y$-direction. == $ vname(v) in C &implies vec(v_1, v_2) in C \ &implies v_1^2 + v_2^2 = 1 \ &implies (a v_1)^2 / a^2 + (b v_2)^2 / b^2 = 1 \ &implies vec(a v_1, b v_2) in E_(a, b) \ &implies T_(a, b)(vname(v)) in E_(a, b) $ And in reverse, $ T_(a, b)(vname(v)) in E_(a, b) &implies vec(a v_1, b v_2) in E_(a, b) \ &implies (a v_1)^2 / a^2 + (b v_2)^2 / b^2 = 1 \ &implies v_1^2 + v_2^2 = 1 \ &implies vec(v_1, v_2) in C \ &implies vname(v) in C $ == #figure( image("../images/ex13_2_d_1.jpeg", width: 40%), caption: [A sketch of the ellipse $x^2 / 4 + y^2 / 9 = 1$.] ) #figure( image("../images/ex13_2_d_2.jpeg", width: 40%), caption: [A sketch of the ellipse $x^2 / 4 + 4y^2 = 1$.] 
) = 13.3 == #box(height: 4.5in, columns(2)[ #set enum(numbering: "(i)") + $ x^2 + 6y^2 &= 10 \ \ y = 0 implies x &= plus.minus sqrt(10) \ 3 < &sqrt(10) < 4 \ \ x = 0 implies y &= plus.minus sqrt(5 / 3) \ 1 < &sqrt(5 / 3) < 2 $ + $ 3x^2 + 5y^2 &= 13 \ \ y = 0 implies x &= plus.minus sqrt(13 / 3) \ 2 < &sqrt(13 / 3) < 3 \ \ x = 0 implies y &= plus.minus sqrt(13 / 5) \ 1 < &sqrt(13 / 5) < 2 $ 3. $ 7x^2 + 2y^2 &= 18 \ \ y = 0 implies x &= plus.minus sqrt(18 / 7) \ 1 < &sqrt(18 / 7) < 2 \ \ x = 0 implies y &= plus.minus sqrt(6) \ 2 < &sqrt(6) < 3 $ + $ 5x^2 + y^2 &= 21 \ \ y = 0 implies x &= plus.minus sqrt(21 / 5) \ 2 < &sqrt(21 / 5) < 3 \ \ x = 0 implies y &= plus.minus sqrt(21) \ 4 < &sqrt(21) < 5 $ ]) == #figure( image("../images/ex13_3_b.jpeg", width: 80%), caption: [A sketch of the ellipses i--iv.] ) == The general ellipse $A x^2 + B y^2 = C$ is bigger along the $x$-axis when $A < B$, bigger along the $y$-axis when $A > B$, and neither when $A = B$. = 13.9 == Let $vname(c)$ be any vector $vec(c_1, c_2, c_3) in RR^3$. 
To find the derivative matrix, we compute the gradients of the components of $f$: $ (f dot vname(e)_1)(vname(c)) &= -2 c_1 + 3 c_2 + c_3 + 5 \ nabla (f dot vname(e)_1)(vname(c)) &= vec(-2, 3, 1) \ \ (f dot vname(e)_2)(vname(c)) &= -4 c_1 + 0 c_2 - 2 c_3 - 7 \ nabla (f dot vname(e)_2)(vname(c)) &= vec(-4, 0, -2) \ $ Therefore, the derivative matrix is $ &(D f)(vname(c)) \ = &mat( nabla (f dot vname(e)_1)(vname(c)); nabla (f dot vname(e)_2)(vname(c)); ) \ = &mat( -2, 3, 1; -4, 0, -2; ) \ = &A $ == We again start by computing the gradients of the components of $f$: $ (f dot vname(e)_1)(vname(c)) &= a_11 c_1 + a_12 c_2 + a_13 c_3 + b_1 \ nabla (f dot vname(e)_1)(vname(c)) &= vec(a_11, a_12, a_13) \ \ (f dot vname(e)_2)(vname(c)) &= a_21 c_1 + a_22 c_2 + a_23 c_3 + b_2 \ nabla (f dot vname(e)_2)(vname(c)) &= vec(a_21, a_22, a_23) \ $ And assemble the derivative matrix: $ &(D f)(vname(c)) \ = &mat( nabla (f dot vname(e)_1)(vname(c)); nabla (f dot vname(e)_2)(vname(c)); ) \ = &mat( a_11, a_12, a_13; a_21, a_22, a_23; ) \ = &A $ == For the general case, where $A in RR^(m times n)$ and $vname(b) in RR^m$, we perform the same steps, but for all $m$ components of $f$: $ forall i <= m \ \ (f dot vname(e)_i)(vname(c)) = sum_(j = 1)^(n) a_(i j) c_j + b_i \ nabla (f dot vname(e)_i)(vname(c)) = vec(a_(i 1), a_(i 2), dots.v, a_(i n)) \ $ And assemble the derivative matrix: $ &(D f)(vname(c)) \ = &mat( nabla (f dot vname(e)_1)(vname(c)); nabla (f dot vname(e)_2)(vname(c)); dots.v; nabla (f dot vname(e)_m)(vname(c)); ) \ = &mat( a_(1 1), a_(1 2), dots.h.c, a_(1 n); a_(2 1), a_(2 2), dots.h.c, a_(2 n); dots.v, dots.v, dots.down, dots.v; a_(m 1), a_(m 2), dots.h.c, a_(m n); ) \ = &A $ = 14.3 == $vname(e)_1$ is first rotated to $vec(1 / sqrt(2), -1 / sqrt(2))$, then stretched in the $x$-direction to $vec(sqrt(2), -1 / sqrt(2))$, so $f(vname(e)_1) = vec(sqrt(2), -1 / sqrt(2))$. 
$vname(e)_2$ is first rotated to $vec(1 / sqrt(2), 1 / sqrt(2))$, then stretched in the $x$-direction to $vec(sqrt(2), 1 / sqrt(2))$, so $f(vname(e_2)) = vec(sqrt(2), 1 / sqrt(2))$. Therefore, $A = mat( sqrt(2), sqrt(2); -1 / sqrt(2), 1 / sqrt(2); )$. == $ R &= mat(R vname(e_1), R vname(e_2)) \ &= mat( 1 / sqrt(2), 1 / sqrt(2); -1 / sqrt(2), 1 / sqrt(2); ) \ \ M &= mat(M vname(e_1), M vname(e_2)) \ &= mat( 2, 0; 0, 1; ) \ \ A &= M R \ &= mat( sqrt(2), sqrt(2); -1 / sqrt(2), 1 / sqrt(2); ) $ == Composing the transformations in the opposite order gives a different transformation because rotation changes the effect of stretching a given vector. For example, consider the vector $vec(0, 1)$. If we first stretch it (apply $M$), it remains the same and rotation yields $vec(1/sqrt(2), 1/sqrt(2))$; but if we rotate it first (apply $R$), _then_ stretch it, we get $M vec(1/sqrt(2), 1/sqrt(2)) = vec(sqrt(2), 1/sqrt(2))$. $ R M &= mat( 1 / sqrt(2), 1 / sqrt(2); -1 / sqrt(2), 1 / sqrt(2); ) mat( 2, 0; 0, 1; ) \ &= mat( sqrt(2), 1 / sqrt(2); -sqrt(2), 1 / sqrt(2); ) $ = 14.5 == $ A &= mat( 0, -1, 0; 1, 0, 0; 0, 0, 1; ) \ A' &= mat( 1, 0, 0; 0, 0, -1; 0, 1, 0; ) $ == By matrix multiplication: $ (T' compose T)(vname(x)) &= A' A vname(x) \ &= mat( 1, 0, 0; 0, 0, -1; 0, 1, 0; ) mat( 0, -1, 0; 1, 0, 0; 0, 0, 1; ) vname(x) \ &= mat( 0, -1, 0; 0, 0, -1; 1, 0, 0; ) vname(x) $ By evaluation of $(T' compose T)(vname(e)_i)$: $ (T' compose T)(vname(x)) &= mat( (T' compose T)(vname(e)_1), (T' compose T)(vname(e)_2), (T' compose T)(vname(e)_3); ) vname(x) \ &= mat( 0, -1, 0; 0, 0, -1; 1, 0, 0; ) vname(x) $ == By matrix multiplication, $ (T compose T')(vname(x)) &= A A' vname(x) \ &= mat( 0, -1, 0; 1, 0, 0; 0, 0, 1; ) mat( 1, 0, 0; 0, 0, -1; 0, 1, 0; ) vname(x) \ &= mat( 0, 0, 1; 1, 0, 0; 0, 1, 0; ) vname(x) $ By evaluation of $(T compose T')(vname(e)_i)$: $ (T compose T')(vname(x)) &= mat( (T compose T')(vname(e)_1), (T compose T')(vname(e)_2), (T compose T')(vname(e)_3); ) vname(x) \ &= mat( 
0, 0, 1; 1, 0, 0; 0, 1, 0; ) vname(x) $ = 14.6 == We can compute the columns of the matrix $A$ representing $p$ by taking $p$'s effect on the basis vectors: $ A &= mat( p(vname(e)_1), p(vname(e)_2), p(vname(e)_3), p(vname(e)_4), ) \ &= mat( 0, 0, 1, 0; 0, 0, 0, 1; ) $ And we can similarly find the matrix $B$ representing $i$: $ B &= mat( i(vname(e)_1), i(vname(e)_2), ) \ &= mat( 1, 0; 0, 1; 0, 0; 0, 0; ) $ == $(p compose i)$ has the effect of simply zeroing its argument, since $i$ always returns vectors with zero third and fourth components, and $p$ returns a vector of the third and fourth components. Therefore, the matrix $A B$ corresponding to $(p compose i)$ is the zero matrix $mat(0, 0; 0, 0)$. We can also compute the matrix $A B$ by matrix multiplication: $ A B &= mat( 0, 0, 1, 0; 0, 0, 0, 1; ) mat( 1, 0; 0, 1; 0, 0; 0, 0; ) \ &= mat( 0, 0; 0, 0; ) $ == $(i compose p)$ has the effect of replacing the first and second components with the third and fourth, respectively, and zeroing the third and fourth components. This is because $p$ returns a vector of the third and fourth components, and $i$ returns a vector with the same first and second components, but zero third and fourth components. Therefore, the matrix $B A$ corresponding to $(i compose p)$ is the matrix $ mat( 0, 0, 1, 0; 0, 0, 0, 1; 0, 0, 0, 0; 0, 0, 0, 0; ) $. We can also compute $B A$ by matrix multiplication: $ B A &= mat( 1, 0; 0, 1; 0, 0; 0, 0; ) mat( 0, 0, 1, 0; 0, 0, 0, 1; ) \ &= mat( 0, 0, 1, 0; 0, 0, 0, 1; 0, 0, 0, 0; 0, 0, 0, 0; ) $ = 14.9 == $ R vname(v) in H_+ &iff vec((v_1 + v_2) / sqrt(2), (-v_1 + v_2) / sqrt(2)) in H_+ \ &iff ((v_1 + v_2) / sqrt(2))^2 - ((-v_1 + v_2) / sqrt(2))^2 = 1 \ &iff 1/2 ((v_1 + v_2)^2 - (-v^1 + v_2)^2) = 1 \ &iff (v_1^2 + 2 v_1 v_2 + v_2^2) - (v_1^2 - 2 v_1 v_2 + v_2^2) = 2 \ &iff 2v_1 v_2 + 2v_1 v_2= 2 \ &iff v_1 v_2 = 1 / 2 $ Therefore, $v$ lies on the curve defined by $x y = 1/2$ if and only if $R vname(v)$ lies on the curve $H_+$ defined by $x^2 - y^2 = 1$. 
We can perform the same steps for $H_-$ and the curve $x^2 - y^2 = -1$: $ R vname(v) in H_- &iff vec((v_1 + v_2) / sqrt(2), (-v_1 + v_2) / sqrt(2)) in H_- \ &iff ((v_1 + v_2) / sqrt(2))^2 - ((-v_1 + v_2) / sqrt(2))^2 = -1 \ &iff 1/2 ((v_1 + v_2)^2 - (-v^1 + v_2)^2) = -1 \ &iff (v_1^2 + 2 v_1 v_2 + v_2^2) - (v_1^2 - 2 v_1 v_2 + v_2^2) = -2 \ &iff 2v_1 v_2 + 2v_1 v_2= -2 \ &iff v_1 v_2 = -1/2 $ == #columns[ #figure( image("../images/ex14_9_b_+.png", width: 80%), caption: [The curve $H_+$.] ) #colbreak() #figure( image("../images/ex14_9_b_-.png", width: 80%), caption: [The curve $H_-$.] ) ] We know that the coordinate axes are asymptotes of the curves $x y = plus.minus 1/2$, and that the curves $H_+$ and $H_-$ are 45#sym.degree rotations of $x y = plus.minus 1/2$, so the rotated coordinate axes, and the lines $y = plus.minus x$ are therefore asymptotes of $H_+$ and $H_-$. == $T_(2, 3)$ carries $H_+$ to the graph of $x^2/4 - y^2/9 = 1$, and the asymptotes of $H_+$, $y = plus.minus x$, to $1/3 y = plus.minus 1/2 x$. Similarly, $T_(2, 1/2)$ carries $H_-$ to the graph of $x^2/4 - 4y^2 = -1$ and the asymptotes of $H_-$ to $2y = plus.minus 1/2 x$. We can use this knowledge to graph: #columns[ #figure( image("../images/ex14_9_c_1.png", width: 80%), caption: [The graph of $x^2/4 - y^2/9 = 1$.] ) #colbreak() #figure( image("../images/ex14_9_c_2.png", width: 80%), caption: [The graph of $x^2/4 - 4y^2 = 1$.] ) ] = 15.3 == We compute $A M$ by rote matrix multiplication: $ A M &= mat( 1, a, 0; 0, 1, 0; 0, 0, 1; ) mat( m_11, m_12, m_13; m_21, m_22, m_23; m_31, m_32, m_33; ) \ &= mat( m_11 + a m_21, m_12 + a m_22, m_13 + a m_23; m_21, m_22, m_23; m_31, m_32, m_33; ) $ The first row of the matrix $A M$ is also the first row of $M$ plus $a$ times the second row of $M$. $M A$ is related to $M$ in that it is the result of adding $a$ times the first _column_ of $M$ to the second column of $M$. 
== We again compute the entries of the product: $ B M &= mat( 0, 1, 0; 1, 0, 0; 0, 0, 1; ) mat( m_11, m_12, m_13; m_21, m_22, m_23; m_31, m_32, m_33; ) \ &= mat( m_21, m_22, m_23; m_11, m_12, m_13; m_31, m_32, m_33; ) $ This matrix product, $B M$, is also the result of swapping the first and second rows of $M$. The matrix $M B$ is related to $M$ in that it is the result of swapping the first and second _columns_ of $M$. = 15.5 == $ f(A) &= 2A^2 + 3A - I \ &= 2 mat( 1, 2; -5, 2; )^2 + 3 mat( 1, 2; -5, 2; ) - mat( 1, 0; 0, 1; ) \ &= 2 mat( -9, 6; -15, -6; ) + 3 mat( 1, 2; -5, 2; ) - mat( 1, 0; 0, 1; ) \ &= mat( -16, 17; -46, -7; ) \ \ f(B) &= 2B^2 + 3B - I \ &= 2 mat( 2, 1, 0; 0, -1, 0; 0, 0, 3; )^2 + 3 mat( 2, 1, 0; 0, -1, 0; 0, 0, 3; ) - mat( 1, 0, 0; 0, 1, 0; 0, 0, 1; ) \ &= 2 mat( 4, 1, 0; 0, 1, 0; 0, 0, 9; ) + 3 mat( 2, 1, 0; 0, -1, 0; 0, 0, 3; ) - mat( 1, 0, 0; 0, 1, 0; 0, 0, 1; ) \ &= mat( 13, 5, 0; 0, -2, 0; 0, 0, 26; ) $ == $ g(A) &= A^2 - 3A + 12I \ &= mat( 1, 2; -5, 2; )^2 - 3 mat( 1, 2; -5, 2; ) + 12 mat( 1, 0; 0, 1; ) \ &= mat( -9, 6; -15, -6; ) -3 mat( 1, 2; -5, 2; ) + 12 mat( 1, 0; 0, 1; ) \ &= mat( 0, 0; 0, 0; ) \ &= 0 $ == #columns[ $ h(C) &= C^2 + 2C + 1 \ &= mat( 3, -1; 0, 1; )^2 + 2 mat( 3, -1; 0, 1; ) + mat( 1, 0; 0, 1; ) \ &= mat( 9, -4; 0, 1; ) + 2 mat( 3, -1; 0, 1; ) + mat( 1, 0; 0, 1; ) \ &= mat( 16, -6; 0, 4; ) $ #colbreak() $ (C + I_2)^2 &= ( mat( 3, -1; 0, 1; ) + mat( 1, 0; 0, 1; ) )^2 \ &= mat( 4, -1; 0, 2; )^2 \ &= mat( 16, -6; 0, 4; ) $ ] So $h(C) = (C + I_2)(C + I_2)$. 
= 15.9 We can compute the values of $A B$ and $B A$ for any $b_1, b_2, b_4, b_5, b_9 in RR$: #columns[ $ A B &= mat( 2, 0, 0; 0, 2, 0; 0, 0, 6; ) mat( b_1, b_2, 0; b_4, b_5, 0; 0, 0, b_9; ) \ &= mat( 2 b_1, 2 b_2, 0; 2 b_4, 2 b_5, 0; 0, 0, 6 b_9; ) \ $ #colbreak() $ B A &= mat( b_1, b_2, 0; b_4, b_5, 0; 0, 0, b_9; ) mat( 2, 0, 0; 0, 2, 0; 0, 0, 6; ) \ &= mat( 2 b_1, 2 b_2, 0; 2 b_4, 2 b_5, 0; 0, 0, 6 b_9; ) $ ] Therefore, $A B = B A$ for any $b_1, b_2, b_4, b_5, b_9 in RR$. However, this is not the case with $A B'$ and $B' A$. We can use the fact that $B' = B + mat( 0, 0, 1; 0, 0, 0; 0, 0, 0; )$ to compute the products: #columns[ $ A B' &= A (B + mat( 0, 0, 1; 0, 0, 0; 0, 0, 0; )) \ &= A B + A mat( 0, 0, 1; 0, 0, 0; 0, 0, 0; ) \ &= A B + mat( 0, 0, 2; 0, 0, 0; 0, 0, 0; ) \ $ #colbreak() $ B' A &= (B + mat( 0, 0, 1; 0, 0, 0; 0, 0, 0; )) A \ &= B A + mat( 0, 0, 1; 0, 0, 0; 0, 0, 0; ) A \ &= A B + mat( 0, 0, 6b_9; 0, 0, 0; 0, 0, 0; ) $ ] But $6b_9 != 2$ in general, so $A$ and $B'$ do not commute for all $b_1, b_2, b_4, b_5, b_9 in RR$.
https://github.com/jgm/typst-hs
https://raw.githubusercontent.com/jgm/typst-hs/main/test/typ/compiler/string-16.typ
typst
Other
// Error: 23-24 expected string, found integer #"123".replace("123", m => 1)
https://github.com/Tiggax/zakljucna_naloga
https://raw.githubusercontent.com/Tiggax/zakljucna_naloga/main/references.typ
typst
#set heading(numbering: "1.1")
#let done(body) = text(fill: green, body)

= Reference summary key sections

== Kim 2007 / q: specific productivity

#done[
Recent advances in cell culture technology for rCHO cells have achieved significant improvement in protein production leading to titer of more than 10 g/L to meet the huge demand from market needs @Kim_2012[p. 917].
]

#done[
The popularity of CHO cells can be attributed to the following reasons. Firstly, since CHO cells have been demonstrated as safe hosts for the past two decades, it may be easier to obtain approval to market the therapeutic proteins from regulatory agencies like the FDA. Secondly, low specific productivity (q), which is one of the disadvantages of using mammalian cells for protein production, can be overcome by gene amplification in CHO cells. For CHO cells, powerful gene amplification systems, such as dihydrofolate reductase (DHFR)-mediated or glutamine synthetase (GS)-mediated gene amplification, are available. Thirdly, CHO cells have the capacity for efficient post-translational modification, and they produce recombinant proteins with glycoforms that are both compatible with and bioactive in humans. Finally, CHO cells can be easily adapted to growth in the regulatory-friendly serum-free (SF) suspension conditions, a characteristic preferred for large-scale culture in bioreactors.
]

#done[
Currently, stirred tank bioreactors over 10,000 L scale are readily used for SF suspension cultures of recombinant CHO (rCHO) cells producing therapeutic antibody @Kim_2012[p. 917-918].
]

Over the past two decades, a more than 100-fold yield improvement of titers in rCHO cell culture has been observed, and this improved product yield has been largely attributed to the development of serum-free medium as well as the optimization of feeding strategies (Hacker et al. 2009) @Kim_2012[p. 918].
For example, clones suitable for SF suspension fed-batch culture are selected for large-scale commercial production of therapeutic antibody. In the same context, many biotech companies have their own therapeutic protein production platform, and clones suitable for that platform are selected to facilitate and speed up the process development of new therapeutic proteins @Kim_2012[p. 918].

Once the DNA enters the host cell nucleus, it integrates into the chromosome at a random location. If necessary, high producing parental clones are subjected to gene amplification for further enhancement of q @Kim_2012[p. 918].

Clones are selected on the basis of their high expression levels, and their performance is further tested in SF suspension fed-batch culture, which is most widely used for large-scale commercial production of therapeutic antibody. Clones showing the best performance in SF suspension fed-batch culture in lab-scale bioreactors with pre-developed feeding cocktails are usually selected as production cell lines @Kim_2012[p. 918].

As described earlier, the establishment of a high-producing, stable rCHO cell line by gene amplification followed by an extensive screening is time-consuming and labor-intensive. Vector engineering which modulates transcriptional activity facilitates rCHO cell line development @Kim_2012[p. 918].

Cell death occurs in two forms—necrosis or two types of programmed cell death (PCD), namely, apoptosis and autophagy. While necrosis is a sudden and passive form, PCD is an active, genetically controlled process. Many researchers have recognized PCD as a target to overcome the problem mentioned above. Apoptosis has gained importance in mammalian cell culture, including that of rCHO cells for therapeutic protein production over the last two decades. The prevention of apoptosis by establishing apoptosis-resistant rCHO cells appears to be beneficial @Kim_2012[p. 920].
== Ritacco 2018

Ham's F12 nutrient mixture was an early successful example of a fully synthetic, chemically defined, and serum-free medium which could support clonal growth of Chinese hamster ovary (CHO) cells. This medium was designed for single cell cultivation and expansion, but was not well suited to support growth to high cell densities (greater than $10^5$ cells per ml). Sato and colleagues later determined that this deficiency could be remedied by mixture of Ham's F12 with Dulbecco's medium, plus supplementation with hormones, growth factors, and transferrin, creating a new formulation called DMEM/F12. Building upon this work, <NAME> and colleagues identified four critical additives necessary for the replacement of serum in chemically defined media: insulin, transferrin, ethanolamine, and selenium. Combined together, these four additives formed a supplement (ITES), which was used in place of serum in early serum-free media @Ritacco_2018[p. 1-2].

== Cui 2023

Quantitative modeling integrates both kinetics for some key reaction steps and optimization-driven metabolic flux allocation, using flux balance analysis; this is known to lead to certain mathematical inconsistencies. Here, we propose a physically-informed data-driven hybrid model (a “gray box”) to learn models of the dynamical evolution of Chinese Hamster Ovary (CHO) cell bioreactors from process data. The approach incorporates physical laws (e.g. mass balances) as well as kinetic expressions for metabolic fluxes @Cui_2023[p.1]

#done[The advantage of using CHO cells is that the correct (i.e., mammalian-specific) glycosylation patterns are achieved for the protein therapeutics (e.g., therapeutic antibodies) @Cui_2023[p.1].]

== Butler 2005

#done[
There has been a rapid increase in the number and demand for approved biopharmaceuticals produced from animal cell culture processes over the last few years.
In part, this has been due to the efficacy of several hu manized monoclonal antibodies that are required at large doses for therapeutic use. ] Characterization of cellular metabolism and physiology has enabled the design of fed-batch and perfusion bioreactor processes that has allowed a significant improvement in product yield, some of which are now approaching 5 g/L. Many of these pro cesses are now being designed in serum-free and animal component-free media to ensure that products are not contaminated with the adventitious agents found in bovine serum. The first human therapeutic protein to be licensed from this technology in 1982 was recombinant insulin (Humulin from Genentech) but the relative structural simplicity of this molecule allowed its large-scale production to be developed in Escherichia coli, which is fast growing and robust compared to mammalian cells. It was soon realised that the subsequent targets for recombinant therapeutics were more complex and required the post-translational metabolic machinery only available in eukaryotic cells. @Butler_2005[p. 283] #done[ Chinese hamster ovary (CHO) cells have become the standard mammalian host cells used in the production of recombinant proteins, although the mouse myeloma (NS0), baby hamster kidney (BHK), human em bryonic kidney (HEK-293) or human-retina-derived (PER C6) cells are alternatives. All these cell lines have been adapted to grow in suspension culture and are well suited for scale-up in stirred tank bioreactors. ] The advantage of CHO and NSO cells is that there are well-characterised platform technologies that allow for transfection, amplifi cation and selection of high-producer clones. @Butler_2005[p. 284] The most successful strategies involve feeding concentrates of nutrients based upon the predicted requirements of the cells for growth and production. This can involve slow feeding of low concentrations of key nutrients. 
#done[The maintenance of low concentration set points of the major carbon substrates enables a more efficient primary metabolism with leads to lower rates of production of metabolic by-products, such as ammonia and lactate. As a result the cells remain in a productive state over extended time frames. *The strategic use of fed-batch cultures has enabled considerable enhancement of yields from these processes. *] This is often combined with a biphasic strategy of production in which cell proliferation is allowed in the first phase so that high cell densities accumulate, followed by a phase in which cell division is arrested to allow cells to attain a high specific productivity. By directly supplying cells with a balanced nutrient feed, a fed-batch culture can now be expected to yield upwards of 2 g/L of recombinant protein, which is probably at least tenfold higher than the maximum that could be expected by a simple batch culture in standard culture medium. Producer cells can be made to be sufficiently robust in this environment if they are provided with suitable growth media and gas sparging is carefully controlled. The capacity of commercial bioreactors for animal cells has gradually increased over the past two decades, with capacities now reported up to 20,000 L from some of the larger biopharmaceutical companies. Airlift bioreactors have also been applied to large-scale animal cells and these have been shown to be efficient for protein production. @Butler_2005[p. 285] #done[Bovine serum was used as a supplement of cell culture media for several decades. It is a rich source of hormones, growth factors and trace elements that promote rapid cell growth and also its high albumin content ensures that the cells are well protected from potentially adverse conditions such as pH fluctuations or shear forces. However, the composition of serum is variable and undefined, which leads to inconsistent growth and productivity.] 
#done[However, the mad cow crisis in the beef industry alerted a concern for the use of animal serum and any other animal derived components in the production of biotherapeutics. This has now led to a strong demand for cell culture for mulations that are free of all animal components.] It turns out that producer cell lines are quite fastidious in their growth requirements and that such requirements vary considerably from one cell line to another. Therefore, it has not been possible to design a single serum free formulation to act as a serum substitute suitable for the growth of all cell lines. In fact even different clones of CHO cells may require different formulations for optimal growth. It is of considerable value to be able to prevent or inhibit apoptosis in culture in order to extend the time of high cell viability and prolong protein production. There are two strategies that can be used for this. The cellular environment can be manipulated through media supplementation or the intracellular environment can be modified by genetic engineering. @Butler_2005[p. 286] With an increase in the number and demand for recombinant biopharmaceuticals, there is a requirement for greater biomanufacturing capacity. This created a major problem in 2001 when the demand for Enbrel, a recombinant fusion protein commercialized by Immunex for the treatment of rheumatoid arthritis, exceeded expectations. One of the reasons for this extra demand for biomanufacturing capacity is the dose requirement for the novel therapeutic humanized monoclonal antibodies that are now being commercialized. The requirement for hundreds of kilograms per annum far exceeds other recombinant therapeutics such as erythropoietin, which is more potent at smaller doses. The extra demand for production is being met by the construction of increased bioreactor capacity by some biopharmaceutical companies such as Biogen, Lonza Biologics and Genentech. 
However, the requirement for large-capacity bioreactors may be offset by an increased productivity of cell culture systems, some of which maybe capable of producing up to 5 g/L and is up to 100 times the productivity that would have been expected a few years ago. Clearly, a tenfold enhancement of cell line productivity reduces the volumetric capacity required of the bioreactor for manufacture by an equivalent factor. @Butler_2005[p.288] == Kretzmer 2002 #done[ The developments of the 1970s – fusion of cells to form hybridomas, and genetic engineering – triggered a second wave of products. Monoclonal antibodies and recombinant proteins for diagnosis and therapy set new challenges for the inventors. ] Products with high value and small demand can be produced in multiple unit systems whereas “bulk” products for vaccination and therapy may need large-scale bioreactors to be cost effective. #done[In 1907, <NAME> was the first to record the maintenance and growth of nerve cells in a hanging drop over a period of up to 30 days. These experiments (Harrison 1907) showed that normal cell functions can continue in vitro and therefore the year 1907 is commonly regarded as marking the beginning of cell cultivation. ] Harrison and his successors observed that strict aseptic conditions were crucial for the success of such experiments. During the next four decades, the progress of cell cultivation was limited due to the stringent sterility controls necessary. The development of antibiotics in the late 1940s was another milestone. The addition of antibiotics eased the handling of complex undefined culture media. At the same time, the development of sterility techniques took place. During the following decade, great progress was made towards mass cultivation of animal cells and production processes. #done[The most important breakthrough towards large scale cultivation was achieved by Earle and Eagle, who made an extensive analysis of the requirements of cells in vitro. 
In 1955, Eagle reported a chemically defined medium known as EMEM (Eagle’s minimum essential medium; Eagle 1955) which could replace the biological fluids used thus far. The only handicap was the necessity for the addition of undefined blood serum.] @Kretzmer_2002[p. 135] The success of Capstick and co-workers (1962) in culturing BHK (baby hamster kidney) cells in suspension, like microorganisms, meant the breakthrough to industrial usage of animal cells. This, together with the establishment of permanent cell lines, drove the development of large-scale processes in industry. The replacement of the primary monkey kidney cells with safer cell lines (WI-38 and MRC-5) increased the number of licensed human vaccines: measles (1963), rabies (1964), mumps (1969) and rubella (1969). All these processes were batch type since the cells should grow to high density and then be infected with the virus. After virus propagation the cells are of no further interest and the virus is harvested. Examples of the success of this strategy are the now-licensed monoclonal antibodies for human therapy, e.g. for immunosuppression in organ transplant operations (Zenapax, Simulect), against non-Hodgkin lymphoma (Rituxan), against Morbus Crohn (Remicade), against breast cancer (Herceptin) and angioplastic surgery (Reopro). #done[In the early 1980s, a new kind of product was introduced to the pharmaceutical industry: recombinant proteins.] Genetic engineering had progressed to the point that stable insertion of target DNA into mammalian cells was possible. The first non-native product produced by animal cells was tissue plasminogen activator (tPA), a protein necessary for dissolving blood clots for the treatment of cardiac infarction. @Kretzmer_2002[p. 136] Inserting the human tPA gene into CHO (Chinese hamster ovary) cells and growing the cells in agitated-tank reactors increased productivity to 50 mg per $10^9$ CHO $"cells"^(-1)"day"^(-1)$ or even higher. 
Developing a new product and studying the growth and production characteristics of the cell line used is carried out on the laboratory scale, normally starting with small flasks and then changing to spinner flasks or roller bottles depending on the growth characteristics of the cell line (anchorage dependent or not). For initial quality studies of the new product this scale is sufficient but a scale-up is necessary as soon as preclinical studies will follow. Depending on the product requirements there are two possibilities: keeping the scale and working in multiple units or transferring the process to larger reactors. @Kretzmer_2002[p. 137] === BATCH REACTOR #done[Antibodies for research and some diagnostic applications may be produced in the small-scale reactors described above. ] Antibody doses for some therapeutic applications can range from 0.5 to more than 5 mg/kg and this can translate into a requirement for tens to hundreds of kilograms per year. Large scale (tens of grams upwards) needs systems with the potential for scale up. The ideal reactor for optimal growth and/or production provides a homogenous environment and can be easily controlled. #done[ These conditions can be achieved in stirred-tank reactors, which are the preferred reactors for scaling up processes with suspension cells. ] #done[ The basic parameters for biotechnological processes, like temperature, pH value, dissolved oxygen, etc., are measured and controlled by standard devices implemented in every commercial stirred tank. ] #done[For optimal mixing, various types of impellers are available as well as standard sampling probes for monitoring the environment and the cells during the process. ] A disadvantage of the stirred tank reactor compared to the small-scale reactors is the oxygen supply. At small-scale, the ratio of surface area to culture volume is high enough to maintain the oxygen level in the supernatant. 
Using a stirred-tank reactor, this ratio is negligible with respect to aeration. Devices such as spargers, baffles and low-shear impellers have been developed to supply oxygen to cultures in stirred-tank reactors. @Kretzmer_2002[p. 138] Another possibility to overcome the problem of the rising air bubbles is bubble-free aeration using membranes for indirect aeration. The supply of oxygen is diffusion-controlled and no bubbles arise. The length of the membrane is limited. The depletion of oxygen increases with increasing length of the membrane, i.e. the average oxygen transfer rate is decreasing. On the other hand the pressure inside the tubing is limited. If the critical transmembrane pressure difference (“bubble point”) is reached, bubbles are formed. A 2-l reactor needs a membrane length of at least 6 m to reach an oxygen transfer rate of 30–40 mg $("l" dot "h")^(-1)$. Several hundred metres of membrane for one reactor makes handling complicated and disturbances by rupture and contamination probable. Therefore, the scale-up of this system is limited to bioreactors of about 500 l but it works very well for small-scale tank reactors. @Kretzmer_2002[p. 138-139] However, problems and challenges still remain for the optimisation of this kind of process. #done[The nature of a batch process generates a gradient of nutrients during the run. This leads not only to a depletion of the nutrients, and therefore a drop in viability and death of the cells, but also to a steady increase in toxic metabolites.] Cell density and culture life-span can be increased by systematic modification of the culture medium with addition of the most important nutrients to keep the level constant. Fed-batch operation increases the product output of the process. Nevertheless, batch reactors ultimately result in a toxification of the process since the toxic metabolites are not removed. 
The main challenges to be addressed are the cell demands concerning nutrients (requiring study of cell metabolism), developing optimised feeding strategies, and detoxification of the culture supernatant. === Perfusion Stirred-tank reactors can also be operated in continuous mode. This technique is well-known from microorganisms, where large quantities of product are produced. Unfortunately, the technique is not directly transferable to animal cell cultures. In contrast to microorganisms, animal cells have very slow growth rates and this makes cell retention inevitable. This has led to the development of a variety of internal and external separation devices like spin filters, external tangential flow filtration units, acoustic resonance sedimentation, cell settlers and continuous centrifuges. Perfusion cultures, on the other hand, combine homogenous mixing with control of the environment and detoxification of the reactor. The continuous exchange of medium allows maintenance of very low concentrations of toxic metabolites. The overall advantage of perfusion cultures is the very small scale required compared with batch cultures in order to obtain the desired amount of product. @Kretzmer_2002[p. 139] Validation of the perfusion process is much more complicated and time-consuming than for batch cultures. The validation has to be done for the whole culture run. It has to be shown that the process is stable throughout the whole run (viability, product release and cell-related performance), the product quality is the same at different cultivation times and reactor scale, and finally that the cells are genetic stability for the entire process run. This disadvantage is compensated for by the advantages of perfusion cultures. They provide a high degree of control: conditions for optimal medium requirement can be kept constant and a real steady state is achieved. A high cell density perfusion culture can be a very effective and economic large-scale process. 
Before more industrial processes can be transferred to perfusion mode, some challenges remain to be addressed. For example, cell retention has to be more sophisticated. The devices available so far are a major bottleneck because of fouling and blocking. Another important topic is the run-time of the perfusion. This depends on the cell line and culture medium. Therefore, cell metabolism under production conditions has to be studied carefully to gain more knowledge for developing better strategies for process control. @Kretzmer_2002[p. 140] == Gibbons 2021 The Quality by Design (QbD) approach to the production of therapeutic monoclonal antibodies (mAbs) emphasizes an understanding of the production process ensuring product quality is maintained throughout. Current methods for measuring critical quality attributes (CQAs) such as glycation and glycosylation are time and resource intensive, often, only tested offline once per batch process. @Gibbons_2021[p. 1] == mandenius 2016 the bioreactor is the designed space where biological reactions take place. The bioreactor should create a biosphere that as profoundly and adequately as possible provides the ideal environment for the biological reaction. The path for reaching, attaining, and maintaining this is the main task for bioreactor engineers to find. @mandenius_2016[p. 1] #done[ The bioreactor is a historical apparatus known since ancient times. Old antique cultures were able to solve bioengineering design challenges for practical purposes such as wine and beer making from mere experience and observations. This paved the way for the evolvement of biotechnological processes, primarily for preparation and production of food products @soetaert_2010. ] #done[In the early twentieth century, large-scale fermentation processes were set up with impact onto the war-time industry of that period. Glycerol production for use in the manufacture of explosives, using yeast for conversion from glucose, was established.] 
#done[Another contemporary example is the large-scale production of butanol and acetone by butyric acid bacteria, as developed by ChaimWeizmann, used first for explosives and then for rubber manufacture in the emerging car industry @santangelo_1996.] However, these bioprocesses were soon abandoned for petroleum based products that had better process economy. @mandenius_2016[p. 2] The original discovery in 1929 by <NAME> of the antibiotic effect of a Penicillium culture was in a series of steps for amplifying the yield and activity of cultures transferred into large-scale production @brown_2005. And other renowned scientists such as <NAME>, <NAME>, <NAME>, <NAME>, and others in close collaboration with pharmaceutical companies managed to identify, stabilize, exploit, select strains, exploit genetics, mutational methods and, finally, establish large-scale bioproduction in bioreactors for meeting global medical needs for curing infections @aminov_2010. It also gave ample examples of how knowledge and skills from one group of products could be transferred into others and, by that, pave way for other antibiotics such as cephalosporins, streptomycins, and aminoglycosides. In parallel with the progress of developing antibiotics, other microbial primary and secondary products were realized. These included amino acids (e.g., glutamate and lysine) and organic acids (e.g., vitamins) used as food ingredients and commodity chemicals and reached considerable production volumes. Microbial polymers such as xanthan and polyhydroxyalkanoates are other examples of bio process unfolding during the mid-1950s @ratledge_2006. Protein manufacture, especially industrial enzymes, became comparatively soon a part of the industrial biotechnology with large-scale production sites at a few specialized companies (e.g., Novo, Genencor, Tanabe). At these upscaled processes, very important findings and experiences were reached concerning bioreactor design and operation. 
Although not yet exploiting gene transfer between species for these proteins, significant technology development for later use was accomplished @soetaert_2010. Subsequently, the emerging industrial use of animal cells came about. Culturing at large scale, at lower cell densities than fungi and yeasts, and with much lower product titers posed a next challenge to bioreactor engineering @freshney_2015. @mandenius_2016[p. 4] #done[However, it was the genetic engineering and recombinant DNA technology that created a revolution in the field of industrial biotechnology with macromolecular products from cells, first in bacteria and yeast and subsequently in animal and human cells @butler_2012.] In California, Cetus and Genentech were established in the early 1970s. In the years thereafter, Biogen, Amgen, Chiron, and Genzyme followed, all with successful biotherapeutic products in their pipelines – insulin, erythropoietin, interferons, growth hormones, blood coagulation factors, interleukins, and others reached the therapeutic market with relatively short development times, in spite of regulatory requirements and the multitude of novel production conditions spanning from clinical considerations to new manufacturing methodology. Especially, the later embodied numerous challenges for bioprocess and bioreactor engineering to disentangle. The controllability demands of bioreactors for these purposes are higher due to more vulnerable cell types, more complicated growth behavior, and substantially different operations. This addresses again the critical issues of mass transfer and barriers of oxygen, nutrients, and sterility of the cultures. @mandenius_2016[p. 5] #done[In textbooks, a bioreactor is typically described as an apparatus shaped like a chamber for growing organisms such as bacteria or yeasts that can be used for the production of biomolecular metabolites or biopolymers or for the conversion of organic wastes.] 
This very general bioreactor description clearly highlights the main purpose of the design efforts: to accomplish conditions where diverse cell types are able to grow efficiently and produce a variety of biological products with a wide range of molecular sizes in a single unit. The diversity of the design mainly caused by the time factor; due to the fact that rates differ largely from one organism to another, in reproduction rates, in rates of molecular processing in the individual organisms, and transfer across biological barriers of the cellular systems. The time factor also applies to the operational procedures. When cells grow, the design must adapt to compensate for the magnification of the dynamics due to higher cell numbers. This mostly concerns supply of nutrients and growth factors. However, it may also be about removal of mass and energy to avoid overloading the system with any of these. Sterilization is an operational procedure that differs only slightly depending on the organism but must be carefully adapted to the bioreactors’ geometrical shape and construction materials. The prevalence of single-use units made in plastic materials highlights the actuality of this issue. @mandenius_2016[p. 8] == Maria 2020 #bibliography("references.bib", style: "ieee")
https://github.com/hongjr03/shiroa-page
https://raw.githubusercontent.com/hongjr03/shiroa-page/main/book.typ
typst
// shiroa book manifest: declares site metadata and the full chapter tree,
// then re-exports the shared page template for individual chapter files.
#import "@preview/shiroa:0.1.0": *

#show: book

#book-meta(
  title: "shiroa-page",
  repository: "https://github.com/hongjr03/shiroa-page",
  // {path} is substituted by shiroa with the chapter's source path.
  repository-edit: "https://github.com/hongjr03/shiroa-page/edit/main/{path}",
  language: "zh",
  // Sidebar structure: `=` lines are section headings, `- #chapter(...)`
  // entries map source files to numbered chapters.
  summary: [
    #prefix-chapter("main.typ")[Hello, shiroa]
    - #chapter("24spring.typ", section: none)[2024 春季学期]
    = 数字图像处理 | DIP
    - #chapter("DIP/chapters/1导论.typ", section: "1")[导论]
    - #chapter("DIP/chapters/2数字图像处理基础.typ", section: "2")[数字图像处理基础]
    - #chapter("DIP/chapters/3空间域图像增强.typ", section: "3")[空间域图像增强]
    - #chapter("DIP/chapters/4频率域图像增强.typ", section: "4")[频率域图像增强]
    - #chapter("DIP/chapters/5图像复原.typ", section: "5")[图像复原]
    - #chapter("DIP/chapters/6多分辨率处理.typ", section: "6")[多分辨率处理]
    - #chapter("DIP/chapters/7图像压缩.typ", section: "7")[图像压缩]
    - #chapter("DIP/chapters/8形态学处理.typ", section: "8")[形态学处理]
    - #chapter("DIP/chapters/9图像分割.typ", section: "9")[图像分割]
    - #chapter("DIP/chapters/10特征提取和模式识别.typ", section: "10")[特征提取和模式识别]
    = 数据结构与算法 | DSA
    - #chapter("DSA/chapters/1绪论.typ", section: "1")[绪论]
    - #chapter("DSA/chapters/2线性表.typ", section: "2")[线性表]
    - #chapter("DSA/chapters/3栈和队列.typ", section: "3")[栈和队列]
    - #chapter("DSA/chapters/4串.typ", section: "4")[串]
    - #chapter("DSA/chapters/5数组和广义表.typ", section: "5")[数组和广义表]
    - #chapter("DSA/chapters/6树和二叉树.typ", section: "6")[树和二叉树]
    - #chapter("DSA/chapters/7图.typ", section: "7")[图]
    - #chapter("DSA/chapters/8查找.typ", section: "8")[查找]
    - #chapter("DSA/chapters/9排序.typ", section: "9")[排序]
    - #chapter("DSA/chapters/10Exercise.typ", section: "10")[Exercise]
    = 移动软件开发 | WXAPP
    - #chapter("WXAPP/lab1/main.typ", section: "1")[实验 1:第一个微信小程序]
    - #chapter("WXAPP/lab2/main.typ", section: "2")[实验 2:天气查询小程序]
  ],
)

// Output directory for the generated site.
#build-meta(dest-dir: "docs")

#get-book-meta()

// re-export page template
#import "/templates/page.typ": project, heading-reference
#let book-page = project
#let cross-link = cross-link
#let heading-reference = heading-reference
https://github.com/hugo-s29/typst-algo
https://raw.githubusercontent.com/hugo-s29/typst-algo/master/docs/manual.typ
typst
MIT License
// typst-algo manual — document setup, helper definitions, and title block.

//#import "../algo.typ": *
#import "algo.typ": *

// Package version, displayed in the running page header below.
#let version = "1.0.0"

#set page(
  numbering: "1/1",
  header: align(right, smallcaps[the typst-algo package, version #version]),
)

// Reviewer marker: renders its argument in red at 12pt.
#let todo = (it) => text(red, 12pt, it)

#set heading(numbering: "1.")
#set par(justify: true)
// Unlabelled raw blocks default to Typst syntax highlighting.
#set raw(lang: "typ")
#show math.equation: set text(font: "Latin Modern Math")

// The following code has been extracted from "Typst Math for Undergrads"
// Negative weak-spacing helper used to tuck letters together in the logos.
#let kern(length) = h(length, weak: true)
#let normalsize = 10pt
// Hand-kerned "TeX" logo: the E is lowered by half its own height.
#let TeX = style(styles => {
  let e = measure(text(normalsize, "E"), styles)
  let T = "T"
  let E = text(normalsize, baseline: e.height / 2, "E")
  let X = "X"
  box(T + kern(-0.1667em) + E + kern(-0.125em) + X)
})
// Hand-kerned "LaTeX" logo: a small raised A, followed by the TeX logo.
#let LaTeX = style(styles => {
  let l = measure(text(10pt, "L"), styles)
  let a = measure(text(7pt, "A"), styles)
  let L = "L"
  let A = text(7pt, baseline: a.height - l.height, "A")
  box(L + kern(-0.36em) + A + kern(-0.15em) + TeX)
})
// End of extracted code

// Inline code: light grey rounded box.
#show raw.where(block: false): box.with(
  fill: luma(250),
  inset: (x: 3pt, y: 0pt),
  outset: (y: 3pt),
  radius: 2pt,
)
// Code blocks: slightly darker grey rounded panel.
#show raw.where(block: true): block.with(
  fill: luma(240),
  inset: 10pt,
  radius: 4pt,
)

// Every literal occurrence of "LaTeX" in body text is replaced by the logo.
#show "LaTeX" : (_) => LaTeX

// Title block.
#align(center)[
  #set par(leading: 0.8em)
  #text(20pt)[*The `typst-algo` package.*]\
  #text(14pt)[_Typeset algorithms in Typst._]\
  Hugo #smallcaps[Salou] --- #link("https://github.com/hugo-s29/typst-algo")[https:/\/github.com/hugo-s29/typst-algo]\
  (last documentation update : #datetime.today().display())
]

#v(1fr)

*Goals.* This project aims to be a Typst equivalent of the LaTeX package `algpseudocode`. There is already a Typst package aimed at algorithm typsetting, but `typst-algorithms`'s style is a lot closer to code than algorithms. The main objective of this package is to be able to render an algorithm like~@sample-algorithm. A step-by-step breakdown of the code (in @sample-algorithm-code) is available in @principles.
#figure( algorithm[ $n <- "length"(T)$\ #algo_while[$T$ isn't sorted] #algo_block[ $i <- cal(U)({1, ..., n})$\ $j <- cal(U)({1, ..., n})$\ Swap elements at index $i$ and index $j$ in $T$ ] #algo_end_while ], caption: [A _very efficient_ sorting algorithm], kind: "algo", supplement: [Algorithm], ) <sample-algorithm> #v(1fr) #outline() #v(6fr) #pagebreak() = Principles <principles> To typeset an algorithm with `typst-algo`, you use functions for each "instruction" you want to show. In order to better understand, I'll explain step-by-step the code (@sample-algorithm-code) used to typeset @sample-algorithm. In @examples, there are more complex examples (procedures, for loops, "blocks within blocks," _etc_). #figure( [ ``` #algorithm[ $n <- "length"(T)$\ #algo_while[$T$ isn't sorted] #algo_block[ $i <- cal(U)({1, ..., n})$\ $j <- cal(U)({1, ..., n})$\ Swap elements at index $i$ and index $j$ in $T$ ] #algo_end_while ] ``` ], caption: [The code used to typeset @sample-algorithm], ) <sample-algorithm-code> Firstly, the whole algorithm is wrapped in a function named `algorithm`. This function takes only one argument, the algorithm's content. To write simple lines like $n <- "length"(T)$, you don't need special instructions; you can just add it inside the algorithm's content. However, remember to add a `\` at the end of your line to add a line break. To write the _while_ loop, you use the `algo_while` function. This function takes one argument, the "test" used by the while loop. The while loop's content needs to be added afterwards. If the content cannot be displayed after the while instruction, you need to use the `algo_block` function. (You can look at more examples in @examples.) In our case, the while loop's body contains three lines so we need to add a _block_. The `algo_block` function works in a similar manner to the `algorithm` function: you can directly write text, or add instructions (see more complex examples in @examples). 
You don't need to add a line break after the while instruction, since `algo_block` does it automatically. After the block is filled with instructions, we can call the `algo_end_while` function, it'll add "End While." All other instructions work similarly, there's a list of usable functions in @reference. = First examples <examples> In this section, there will be some examples of algorithms typeset with `typst-algo` and the code used. == An algorithm to approximate $pi$. #grid( columns: (1fr, 1fr), gutter: 1cm, algorithm[ Input a value $n$.\ $m <- 0$\ #algo_for[$i in {1,...,n}$] #algo_block[ $x <- cal(U)([0,1])$\ $y <- cal(U)([0,1])$\ #algo_if[$x^2 + y^2 <= 1$] $m <- m + 1$ ] #algo_end_for #algo_return $4 dot m \/ n$ ], [ ``` #algorithm[ Input a value $n$.\ $m <- 0$\ #algo_for[$i in {1,...,n}$] #algo_block[ $x <- cal(U)([0,1])$\ $y <- cal(U)([0,1])$\ #algo_if[$x^2 + y^2 <= 1$] $m <- m + 1$ ] #algo_end_for #algo_return $4 dot m \/ n$ ] ``` ] ) == The Quine–McCluskey algorithm for solving #smallcaps[sat]. #figure( algorithm[ #algo_procedure(args: [$F,p,v$])[Assume] #algo_block[ This procedure will return $F[p |-> v]$ where $F$ is written in #smallcaps[cnf], $p$ is one of its variables, and $v in BB$ is a boolean. #footnote[For boolean values, we'll write $bold(F)$ for false, and $bold(T)$ for true, and thus, $BB = {bold(F), bold(T) }$.] 
The notation $F[p |-> v]$ means we are substituting the variable $p$ with the value~$v$.\ #v(0.5cm) Let $ell_bold(T)$ be the literal $p$ if $v = bold(T)$, otherwise $not p$.\ Let $ell_bold(F)$ be the literal $p$ if $v = bold(F)$, otherwise $not p$.\ #algo_for[$C in F$] #algo_block[ #algo_if[$ell_bold(T) in C$] we remove $C$ from $F$.\ #algo_else_if[$ell_bold(F) in C$] we remove $ell_bold(F)$ from $C$.\ #algo_end_if ] #algo_end_for ] #algo_end_procedure #v(0.5cm) #algo_procedure(args: [$F$])[Quine] #algo_block[ #algo_if[$nothing = F$] #algo_return $bold(T)$\ #algo_else_if[$nothing in F$] #algo_return $bold(F)$\ #algo_else_if[$exists {ell} in F$] #algo_block[ #algo_if[$ell = p$, with $p in "vars"(F)$] #algo_return #algo_call([Quine], args: algo_call([Assume], args: [$F,p,bold(T)$]))\ #algo_else_if[$ell = not p$, with $p in "vars"(F)$] #algo_return #algo_call([Quine], args: algo_call([Assume], args: [$F,p,bold(F)$]))\ #algo_end_if ] #algo_else #algo_block[ Let $p in "vars"(F)$.\ #algo_return #algo_call([Quine], args: algo_call([Assume], args: [$F,p,bold(T)$])) $or$ #algo_call([Quine], args: algo_call([Assume], args: [$F,p,bold(F)$]))\ ] #algo_end_if ] #algo_end_procedure ], caption: [The Quine–McCluskey algorithm for solving #smallcaps[sat]], kind: "algo", supplement: [Algorithm], ) <example1> #figure( [ ``` #algorithm[ #algo_procedure(args: [$F,p,v$])[Assume] #algo_block[ This procedure ... 
the variable $p$ with the value~$v$.\ #v(0.5cm) Let $ell_bold(T)$ be the literal $p$ if $v = bold(T)$, otherwise $not p$.\ Let $ell_bold(F)$ be the literal $p$ if $v = bold(F)$, otherwise $not p$.\ #algo_for[$C in F$] #algo_block[ #algo_if[$ell_bold(T) in C$] we remove $C$ from $F$.\ #algo_else_if[$ell_bold(F) in C$] we remove $ell_bold(F)$ from $C$.\ #algo_end_if ] #algo_end_for ] #algo_end_procedure #v(0.5cm) #algo_procedure(args: [$F$])[Quine] #algo_block[ #algo_if[$nothing = F$] #algo_return $bold(T)$\ #algo_else_if[$nothing in F$] #algo_return $bold(F)$\ #algo_else_if[$exists {ell} in F$] #algo_block[ #algo_if[$ell = p$, with $p in "vars"(F)$] #algo_return #algo_call([Quine], args: algo_call([Assume], args: [$F,p,bold(T)$]))\ #algo_else_if[$ell = not p$, with $p in "vars"(F)$] #algo_return #algo_call([Quine], args: algo_call([Assume], args: [$F,p,bold(F)$]))\ #algo_end_if ] #algo_else #algo_block[ Let $p in "vars"(F)$.\ #algo_return #algo_call([Quine], args: algo_call([Assume], args: [$F,p,bold(T)$])) $or$ #algo_call([Quine], args: algo_call([Assume], args: [$F,p,bold(F)$]))\ ] #algo_end_if ] #algo_end_procedure ] ``` ], caption: [Code used to typeset @example1] ) = Reference <reference> - *Conditionals.* `#algo_if[condition]` will produce "#algo_if[condition]". This should be followed by `#algo_end_if` (after the _if_ instruction's content). - *Block.* `#algo_block[block\ content]` will produce #algo_block[block\ content.] This can be used inside between any pairs of instructions (_e.g._ "if", "for", "while", ...) if the content needs to be on multiple lines. - *Procedures.* `#algo_procedure(args: [args])[name]` will produce "#algo_procedure(args: [args])[name]". This should be followed by `#algo_end_procedure` (after the procedure's content). - *Functions.* Similar to procedures, but using `#algo_function` and `#algo_end_function` instead. 
- *Calling procedures or functions.* `#algo_call(args: [args])[name]` will appear in your document as "#algo_call(args: [args])[name]". This can be used to call a procedure or a function. - *For loops.* `#algo_for[loop_iteration]` will result in "#algo_for[loop_iteration]". This should be followed by `#algo_end_for` (after the for loop's content). - *While loops.* Similar to for loops, but using `#algo_while` and `#algo_end_while` instead. If some instruction is missing, please see @contributing to know how to contribute to `typst-algo`. = Contributing <contributing> This project is open-source (MIT-licensed). Feel free to contribute if you think a feature is missing, the code could be improved, or anything else.
https://github.com/Fr4nk1inCs/typreset
https://raw.githubusercontent.com/Fr4nk1inCs/typreset/master/tests/chinese-font.typ
typst
MIT License
// Smoke test: apply the library's CJK font setup, then render Chinese text
// in regular, italic, bold, and bold-italic variants.
#import "../src/lib.typ": font
#show: font.set-font.with(lang: "zh-cn")
测试 _测试_ *测试* *_测试_*
https://github.com/PA055/5839B-Notebook
https://raw.githubusercontent.com/PA055/5839B-Notebook/main/Entries/drivetrain/mecanum-drive-testing.typ
typst
#import "/packages.typ": notebookinator #import notebookinator: * #import themes.radial.components: * #show: create-body-entry.with( title: "Mecanum Drive Testing", type: "test", date: datetime(year: 2024, month: 3, day: 19), author: "<NAME>", witness: "<NAME>" ) While building the Mecanum Drive a large oversight was made. Vex wheels may be advertised in standard sizes like 4in but that is seldom the case. For the wheels, various forum posts and the Purdue Vex Sig Robotics website placed both the older 4in omni wheels and 4in mecanum wheels as having a 4.125in diameter. However, once we built the drive we found the omni wheel to be slightly bigger. Going into Inventor confirmed this, as the Mecanum wheels measured .0625in smaller than the omni wheels. #figure( rect(fill: black.lighten(10%))[ #image("./Wheel Comparison.png", width: 80%) ], caption: [ Comparison of Mecanum Wheels and both the new (right) and old (left) omni wheel diameters ] ) This means that for now, until we can get the newer omni wheels which should be compatible with the mecanum wheels, they along with the PTO attached to them will be left out of the build. The older wheels cannot be used as they cause only one of the mecanum wheels to be in contact with the ground, which defeats the purpose of the drive. Additionally, weight plates needed to be screwed into the front to ensure the drive was balanced, allowing it to properly strafe. Overall the drive was a success after a few minor tweaks and will serve as a good test base until the season starts.
https://github.com/kdog3682/2024-typst
https://raw.githubusercontent.com/kdog3682/2024-typst/main/src/exponent-expr.typ
typst
#import "/home/kdog3682/2024/base-utils.typ": *

// Typesets the identity x^a · x^b · … = x^(a + b + …) for the integer range
// given by `interval` (a (start, end) pair, inclusive, consumed via step()).
// Each exponent is colored from a fixed palette; after the last factor the
// parenthesized sum of exponents and its evaluated value are appended.
//
// Parameters:
//   interval: a two-element array (start, end) of integers.
// Returns: joined content produced by the loop body (Typst for-loop join).
#let exponent-expr(interval) = {
  let store = ()
  let numbers = ()
  // Loop-invariant lookup table: hoisted out of the loop. Maps an operation
  // key to the delimiter shown between stored exponents and the aggregator
  // applied to the collected numbers. Only "plus" is used at present.
  let ref = (
    "plus": (
      "delimiter": "",
      "aggregator": sum,
    ),
    "times": (
      "delimiter": "exp",
      "aggregator": multiply,
    ),
  )
  let key = "plus"
  let (delimiter, aggregator) = ref.at(key)
  let palette = (red, blue, green, green, red, blue)
  for n in step(..interval) {
    // Cycle through the palette with a modular index. The original indexed
    // palette.at(n - 1) directly, which panics (out of bounds) as soon as
    // n exceeds the palette length — e.g. for interval (5, 7).
    let exp = colored(n, palette.at(calc.rem(n - 1, palette.len())))
    store.push(exp)
    numbers.push(n)
    $x^#exp$ // the exponent is colored by the palette
    if (is-last(n, interval.at(1))) {
      // Last factor: emit "= x^(a + b + …)" then "= x^(total)".
      let expr-value = wrap(store.join([$space #delimiter space$]), "()")
      $space = space x^#expr-value$
      $space = space x^#aggregator(..numbers)$
    } else {
      // Between factors: a spaced multiplication dot.
      wrap($dot$, " ")
    }
  }
}

// NOTE(review): #panic is used here as a debug harness — it aborts
// compilation and prints the produced content. Remove once verified.
#panic(exponent-expr((5, 7)))
https://github.com/ayoubelmhamdi/typst-phd-AI-Medical
https://raw.githubusercontent.com/ayoubelmhamdi/typst-phd-AI-Medical/master/chapters/ch05-the.typ
typst
MIT License
#import "../functions.typ": heading_center, images, italic,linkb #let finchapiter = text(size: 24pt, fill:rgb("#1E045B"),[■]) /* * * THESE 05 * */ = DETECTING LUNG CANCER NODULES. = Introduction Medical images are extensively used in oncology for diagnosis, therapy planning and monitoring of tumors. Oncologists analyze images to locate tumors and assess their different characteristics. Different types of medical images are used, depending on the task (search of metastases, radiotherapy planning) and the region of interest (brain, lungs, digestive system). The commonly used types of imaging include computed tomography (CT), magnetic resonance imaging (MRI) and positron-emission tomography (PET). Positron-emission tomography [Gambhir 2002] is based on injection of a radioactive tracer in the blood of the patient in order to observe the metabolism of different tissues. A commonly used tracer is fludeoxyglucose which is a structural analog of glucose. As cancer cells need an important glucose supply due to their divisions, the tumoral tissues may be detected by their abundant absorption of the radioactive tracer. PET scan is particularly useful for diagnosis and staging of tumors, for detecting cancer metastases and monitoring effects of a therapy. However, due to physical limitations, PET scans have usually a considerably lower spatial resolution than MRI and CT scans. Computed tomography [Hsieh 2009] measures the absorption of X-rays of different tissues in the body. The radiation is emmited from different angles in order to acquire a series of $2 D$ radiographic images from which a $3 D$ scan is then reconstructed. Even if CT scans have generally a better spatial resolution than MRI, they offer a significantly weaker contrast between soft tissues such as the ones present in (...the brain...). Morever, the exposure to X-rays may induce cancers by damaging DNA of body cells. 
=== Radiotherapy planning and organs at risk Treatment of brain tumors often includes radiotherapy [Khan 2014], which uses a ionizing radiation to kill cancer cells or to stop their division by damaging their DNA. The most common type of radiotherapy is external beam radiotherapy, in which the radiation is emitted from the exterior of the patient. Radiotherapy planning is a particularly important application of automatic segmentation. The objective of radiotherapy planing is to compute optimal irradiation doses, i.e. to deliver a radiation which destroys tumoral cells while sparing healthy structures. The segmentation process requires medical expertise and takes typically several hours per patient for an experienced clinican. It represents therefore a considerable cost and eventually delays the therapy. The objective of this rapport is to propose efficient methods for segmentation tasks in neuro-oncology. = Deep learning in medical imaging The methods presented in this thesis are mainly based on deep learning, which is a branch of machine learning. In this section, we briefly present the general principles of deep learning, we motivate its use for segmentation tasks in neuro-oncology and we discuss its limitations, some of which are addressed in this thesis. Given an input space $X$ and a label space $Y$, the objective of supervised machine learning is to find a predictive function $f: X arrow Y$, using a database of training examples $x_{i}, y_{i}$, where $x_{i} \in X$ and $y_{i} \in Y$. To achieve this goal, three main elements have to be defined: - Family of candidate functions $f_{\theta}$, parametrized by a vector of parameters $\theta \in \Theta$ - Loss function $L: \Theta arrow \mathbb{R}$, which quantifies the mismatch between the outputs predicted by a candidate function $f_{\theta}$ and the ground truth. 
- Training algorithm, which minimizes the loss function (with respect to the parameters $\theta$ ) over the training data The main particularity of deep learning is the nature of the considered candidate functions. The term deep is related to multiple compositions of functions. The considered composed functions are differentiable and organized in layers, with the idea to progressively transform the input vector, extracting more and more complex information. The term neural network is related to the considered family of functions, represented typically by a graph. Training of the model (minimization of the loss function) is typically based on iterative optimization with variants of the stochastic gradient descent. Convolutional Neural Networks (CNN) [LeCun 1995] are a commonly used type of neural networks for image processing and analysis (classification, segmentation). They exploit spatial relations between pixels (or voxels, in 3D) and are based on application of local operations such convolution, pooling (maximum, average) and upsampling. The objectives of such design are to limit the number of parameters of the network and to limit computational costs, as images correspond generally to very large inputs. CNNs for image segmentation are usually trained in an end-to-end manner, i.e. their input is the image and the output is the segmentation. With an end-to-end training, the model automatically learns to extract relevant information from images, using the training database. Despite the progress of GPU capacities, computational costs still severely limit the potential of CNNs for segmentation tasks in medical imaging. A typical segmentation network, such as U-net [Ronneberger 2015] performs thousands of convolutions, max-poolings and upsamplings. Outputs of these operations have to be stored in the memory of the GPU during each iteration of the the training, in order to compute gradients of the loss function by the Backpropagation algorithm. 
A typical CT is composed of several millions of voxels. Training of neural networks for an end-to-end segmentation on entire CTs requires therefore a huge amount of GPU memory and is often impossible using the currently available GPUs. For this reason, current segmentation models are usually trained on subvolumes of limited size and have limited receptive fields. Another important problem is the cost of the ground truth annotations necessary to train neural networks, and machine learning models in general. Manual segmentation of tumors is particularly costly as it is not only time-consuming but also requires medical exprtise and therefore has to be performed by experienced clinicians. Other difficulties are related to the use of multimodal data. = Approach overview The main idea of our approach is (...) We address the problem of missing image modalities (...) Our method was tested on a publicly available database of the (...) challenge and obtained one of the best performances of the challenge. We assume that the training database contains a small (... number of segmented images and a large number of images with global labels ...), simply indicating presence or absence of a tumor tissue within the image (without any information on the location of the tumor, if present). The main idea of our approach is to (...). = Convolutional Neural Networks for Tumor Segmentation. = Contents = Introduction = Methods Our generic 2D-3D approach is illustrated on Fig. 2.2. The main components of our method are described in the following. First, we introduce an efficient 2D-3D model with a long-range 3D receptive field. Second, we present our neural network architecture with modality-specific subnetworks. The channels of a layer are called feature maps whose points represent neurons. === Training of the model === Loss functions and dealing with class imbalance To train our models, we use a weighted cross-entropy loss. 
In the 3D case, given a training batch $b$ and the estimated model parameters $\theta$, the loss function penalizes the output of the classification layer: // \[ // \operatorname{Loss}_{b}^{3 D}(\theta)=-\sum_{i=1}^{|b|} \sum_{(x, y, z)} \sum_{c=0}^{C-1} \deltaG_{(x, y, z)}^{i, b}, c W_{c, b} \log p_{i,(x, y, z)}^{c}(\theta) // \] where $delta$ denotes the Kronecker delta, $W_{c, b}$ is a voxelwise weight of the class $c$ for the batch $b, p_{i,(x, y, z)}^{c}(\theta)$ is the classification softmax score given by the network to the class $c$ for the voxel at the position $(x, y, z)$ in the $i^{\text {th }}$ image of the batch and $G_{(x, y, z)}^{i, b}$ is the ground truth class of this voxel. The purpose of using weights is to counter the problem of severe class imbalance, tumor subclasses being considerably under-represented. In contrast to common approaches, the voxelwise weights are set automatically depending on the composition of the batch (number of examples of each class greatly varies accross batches). We suppose that in each training batch there is at least one voxel of each class. Let's note $C$ the number of classes and $N_{b}^{c}$ the number of voxels of the class $c$ in the batch $b$. For each class $c$ we set a target weight $t_{c}$ with $0 \leq t_{c} \leq 1$ and $\sum_{c=0}^{C-1} t_{c}=1$. Then all voxels of the class $c$ are assigned the weight $W_{c, b}=t_{c} / N_{b}^{c}$ so that the total sum of their weights accounts for the proportion $t_{c}$ of the loss function. To better understand the effect of this parameter, note that in the standard non-weighted cross-entropy each voxel has a weight of 1 and the total weight of the class $c$ is proportional to the number of voxels labeled $c$. It implies that setting a target weight $t_{c}$ larger than the proportion of voxels labeled $c$ increases the total weight of the class $c$ (favoring its sensitivity) and conversely. 
The same strategy is applied in the $2 D$ case, for each classification layer of the model. The final loss of the 2D model is a convex combination of all intermediate losses, associated respectively with the main network and all subnetworks: \[ \operatorname{Loss}_{b}^{2 D}(\theta)=c^{\text {main }} \operatorname{Loss}_{b}^{\text {main }}(\theta)+\sum_{k=1}^{K+1} c^{k} \operatorname{Loss}_{b}^{k}(\theta) \]where $K$ is the number of input channels, $0 \leq c^{\text {main }} \leq 1,0 \leq c^{k} \leq 1 \forall k \in[1 . . K+1]$ and $c^{\text {main }}+\sum_{k=1}^{K+1} c^{k}=1$ === Training algorithm Our training algorithm is a modified version of Stochastic Gradient Descent (SGD) with momentum [Rumelhart 1988]. In each iteration of the standard SGD with momentum, the loss is computed on one batch $b$ of training examples and the vector $v$ of updates is computed as a linear combination of the previous update and the gradient of the current loss with respect to the parameters of the network: $v^{t+1}=$ $\mu v^{t}-\alpha_{t} \nabla \operatorname{Loss}_{b}\theta^{t}$ where $\theta^{t}$ are the current parameters of the network, $\mu$ is the momentum and $\alpha_{t}$ is the current learning rate. The parameters of the network are then updated: $\theta^{t+1}=\theta^{t}+v^{t+1}$. We apply two main modifications to this scheme. First, in each iteration of the training, we minimize the loss over several training batches in order to take into account a large number of training examples while bypassing hardware constraints. In fact, due to GPU memory limits, backpropagation can only be performed on a training batch of limited size. For large models, training batches may be too small to correctly represent the training database, which would result in large oscillations of the loss and a difficult convergence. 
If we note $N$ the number of training batches per iteration, the loss at one iteration is given by $\operatorname{Loss}^{N}(\theta)=\sum_{b=1}^{N} \operatorname{Loss}_{b}(\theta)$ where $\operatorname{Loss}_{b}(\theta)$ is the loss over one training batch. Given the linearity of derivatives, the gradient of this loss with respect to the parameters of the network is simply the sum of gradients of losses over the $N$ training batches: $\nabla \operatorname{Loss}^{N}(\theta)=\sum_{b=1}^{N} \nabla \operatorname{Loss}_{b}(\theta)$. Each of the $N$ gradients is computed by backpropagation. The second modification is to divide the gradient by its norm. With the update rule of the standard SGD, strong gradients would cause too high updates of the parameters which can even result in the divergence of the training and numerical problems. Conversely, weak gradients would result in too small updates and then a very slow training. We want therefore to be independent of the magnitude of the gradient in order to guarantee a stable training. To summarize, our update vector $v$ is computed as following: \[v^{t+1}=\mu v^{t}-\alpha_{t} \frac{\nabla \operatorname{Loss}^{N}\theta^{t}}{\|\nabla \operatorname{Loss}^{N}\theta^{t}\|} \]In order to converge to a local minimum, we decrease the learning rate automatically according to the observed convergence speed. We fix the initial value $\alpha_{\text {init }}$ and the minimal value $\alpha_{\text {min }}$ of the learning rate. After each $F$ iterations we compute the mean loss accross the last $F / 2$ iterations (Loss current) $^{\text {) and we }}$ compare it with the mean loss accross the previous $F / 2$ iterations (Loss previous) . We fix a threshold $0<d_{\text {loss }}<1$ on the relative decrease of the loss: if we observe Loss $_{\text {current }}>d_{\text {loss }} \times$ Loss $_{\text {previous }}$ then the learning rate is updated as follows: $\alpha_{t+1}=\max \frac{\alpha_{t}}{2}, \alpha_{\min }$. 
Given that the loss is expected to decrease slower with the progress of the training, the value of $F$ is doubled when we observe an insufficient decrease of the loss two times in a row. For the training of our models we fixed $\alpha_{\text {init }}=0.25, \alpha_{\text {min }}=0.001, F=200$ and $d_{\text {loss }}=0.98$, i.e. initially we expect a $2 \%$ decrease of the loss every 200 iterations. The high values of the learning rate are due to the fact that we divide gradients by their norm. The values of these hyperparameters were chosen by observing the convergence of performed trainings for different values of $\alpha_{\text {init }}$ and choosing a high value for which the convergence is still observed. Subsequently, the value of the learning rate is automatically adapted by the algorithm following the observed relative decrease of the loss (if the loss stops to decrease, the learning rate is halved). The parameter $\alpha_{\min }$ (minimal value of the learning rate) was introduced in order to prevent the learning rate to decrease infinitely after convergence. === Fusion of multiclass segmentations In order to be robust to limitations of particular choices of neural network architectures (kernels, strides, connectivity between layers, numbers of features maps, activation functions) we propose to combine multiclass segmentations produced by several models. The final segmentation is obtained by a voxelwise voting strategy exploiting the following relations between tumor subclasses: - Whole tumor region includes tumor-induced edema (class 2) and tumor core- Tumor core region includes contrast-enhancing core (class 3) and nonenhancing core (class 1) ![](https://cdn.mathpix.com/cropped/2023_06_21_375ec9675e3c660da887g-029.jpg?height=877&width=1054&top_left_y=621&top_left_x=521) Figure 2.7: Tree representing our decision process: leaves represent classes and nodes represent decisions according to aggregated votes for tumor subregions. 
The class of a voxel is progressively determined by thresholding on proportions of models which voted for given subregions. Suppose we have $n$ multiclass segmentations produced by different models and let's note $v_{c}$ the number of models which classified voxel $(x, y, z)$ as belonging to the class $c$, with $c \in\{0,1,2,3\}$. The main idea is to aggregate the votes for classes according to their common regions and to take the decision in the hierarchical order, progressively determining the tumor subregions. The number of votes for one region is the sum of votes for all classes belonging to the region (for example the votes for 'tumor core' are either votes for 'enhancing core' or 'non-enhancing core'). We define the following quantities:- $P_{\text {tumor }}=v_{1}+v_{2}+v_{3} /v_{0}+v_{1}+v_{2}+v_{3}$ (proportion of votes for the whole tumor region in the total number of votes)- $P_{\text {core }}=v_{1}+v_{3} /v_{1}+v_{2}+v_{3}$ (proportion of votes for the 'tumor core' region among all votes for tumor subclasses)- $P_{\text {enhancing }}=v_{3} /v_{1}+v_{3}$ (proportion of votes for the contrast-enhancing core among all votes for the tumor core) The decision process can be represented by a tree (Fig. 2.7) whose internal nodes represent the application of thresholding on the quantities defined above and whose leaves represent classes (final decision). The first decision is therefore to determine if a given voxel represents a tumor tissue, given the proportion of networks which voted for one of the tumor subclasses. If this proportion is above a chosen threshold, we consider that the voxel represents a tumor tissue and we apply the same strategy to progressively determine the tumor subclass. For each internal node $R$ (corresponding to a tumor subregion) of the decision tree, we therefore have to choose a threshold $T_{R}$ with $0<T_{R} \leq 1$. 
A high $T_{R}$ implies that a large proportion of models have to vote for this tumor subregion in order to consider its presence. The choice of this threshold therefore allows the user to control the trade-off between sensitivity and specificity of the corresponding tumor subregion. A low threshold gives priority to the sensitivity while a high threshold gives priority to the specificity. A voting strategy was also used by the organizers of the BRATS 2015 challenge [Menze 2015] to combine multiclass segmentations provided by few experts. In the merging scheme of BRATS 2015, the tumor subregions are ordered and the votes for different subregions are successively thresholded by the number of total votes divided by 2. In contrast to this approach, in each step of our decision process we only consider the votes for the 'parent' region in the decision tree and we consider varying thresholds. = $2.3 Experiments We perform a series of experiments in order to analyze the effects of the main components of our method and to compare our results with the state of the art. Our method is evaluated on a publicly available database of the BRATS 2017 challenge. === Data and evaluation The datasets of BRATS 2017 contain multisequence MR preoperative scans of patients diagnosed with malignant brain tumors. For each patient, four MR sequences were acquired: T1-weighted, post-contrast (gadolinium) T1-weighted, T2-weighted and FLAIR (Fluid Attenuated Inversion Recovery). The images come from $19 imag-$ ing centers and were acquired with different MR systems and with different clinical protocols. The images are provided after the pre-processing performed by the organizers: skull-stripped, registered to the same anatomical template and interpolated to $1 ~mm^{3}$ resolution. The Training dataset contains 285 scans (210 high grade gliomas and 75 low grade gliomas) with provided ground truth segmentation. 
The Validation dataset consists of 46 patients without provided segmentation and without provided information on the tumor grade. The evaluation on this dataset is performed via a public benchmark. The first test dataset used in our experiments is composed of 50 randomly chosen patients from the Training dataset and the networks are trained on the remaining 235 patients. We refer to this dataset as 'test dataset' in the remainder (locally generated split training/test). We then evaluate our method on the Validation dataset of BRATS 2017 (networks are trained on all 285 patients of the Training dataset). The ground truth corresponds to voxelwise annotations with 4 possible classes: non-tumor (class 0), contrast-enhancing tumor (class 3), necrotic and non-enhancing tumor (class 1), tumor-induced edema (class 2). The performance is measured by the Dice score between the segmentation $\tilde{Y}$ produced by the algorithm and the ground truth segmentation $Y$ : \[ \operatorname{DSC}(\tilde{Y}, Y)=\frac{2|\tilde{Y} \cap Y|}{|\tilde{Y}|+|Y|} \]We perform t-tests (paired, one-tailed) to measure statistical significance of the observed improvements provided by the main components of our method (2D-3D model, modality-specific subnetworks, merging strategy). We consider the significance level of $5 \%$. === Technical details The ranges of image intensities highly vary between the scans due to image acquisition differences. We perform therefore a simple intensity normalization: for each patient and each MR sequence separately, we compute the median value of non-zero voxels, we divide the sequence by this median and we multiply it by a fixed constant. In fact, median is likely to be more stable than the mean, which can be easily impacted by the tumor zone. Experimentation with other normalization approaches such as histogram-matching methods [Nyúl 2000] will be a part of the future work. Another potentially useful pre-processing could be bias field correction [Sled 1998]. 
Models are trained with our optimization algorithm described previously. In each iteration of the training, gradients are computed on 10 batches (parameter $N$ introduced in section 2.2.3.2) in the $2 D$ case and on 5 batches in the 2D-3D case. Batch normalization [Ioffe 2015] was used in the 2D model but was not required to train the 2D-3D model. In the latter case, we normalized the input images to approximatively match the ranges of values of extracted $2 D$ features. To train the $2 D$ model, the following target weights (defined in section 2.2.3.1) were fixed: $t_{0}=0.7, t_{1}=0.1, t_{2}=0.1, t_{3}=0.1$, corresponding respectively to 'non-tumor', 'non-enhancing core', 'edema' and 'enhancing core' classes. The choice of these values has an influence on the sensitivity to different tumor subclasses, however, the final segmentation performance in terms of Dice score was not found to be very sensitive to these hyperparameters. We fixed the same target weight for all tumor subclasses and we fixed a relatively high target weight for the non-tumor class to limit the risk of oversegmentation. However, given that non-tumor voxels represent approximately $98 \%$ of voxels of the batch, we significantly decreased the weight of the non-tumor class compared to a standard cross-entropy loss (0.98 vs 0.7). In the $3 D$ case, the following weights were fixed: $t_{0}=0.4, t_{1}=0.2, t_{2}=0.2$, $t_{3}=0.2$. We observe a satisfying convergence of the training both for the $2 D$ and the 2D-3D model. Fig. 2.8 shows the evolution of the training loss of the 2D model along with Dice scores of tumor subclasses. The weights of the classification layers of the 2D model (section 2.2.3.1) were the following: $c^{\text {main }}=0.75, c^{k}=0.05 \forall k \in[1 . .5]$ (4 modality-specific subnetworks, one subnetwork combining all modalities and the main part of the network having a weight of 0.75 in the loss function). 
A high weight was given for the main classification layer as it corresponds to the final output of the 2D model. The classification layers of subnetworks were all given the same weight. === Training with missing modalities We test our 2D model with modality-specific subnetworks in the context of missing MR sequences in the training database. In this setting, we suppose that the four MR sequences are available only for $20 \%$ of patients and that for the remaining patients, one MR sequence out of the four is missing. More precisely, we randomly split the training set of 235 patients in five equal subsets (47 patients in each) and we consider that only the first subset contains all the four MR sequences whereas the four other subsets exclusively miss one MR sequence (T1, T1c, T2 or T2-FLAIR). We previously noted that modality-specific subnetworks can be trained independently: in this case, a subnetwork specific to a given MR sequence can be trained on $80 \%$ of the training database (on all training images except the ones for which the MR sequence is missing). The goal of the experiment is to test if the training of these subnetworks improves the segmentation performance in practice. We first evaluate the performance obtained by 2D model 1 (version CNN-2DAxl) trained only on the training subset containing all MR sequences ( 47 patients). Then we evaluate the performance obtained when the subnetworks are pretrained, each of them using $80 \%$ of the training database. The results are reported in Table 2.1. Pretraining of the modality-specific subnetworks improved the segmentation performance on the test set for all tumor sub-![](https://cdn.mathpix.com/cropped/2023_06_21_375ec9675e3c660da887g-032.jpg?height=324&width=1380&top_left_y=2046&top_left_x=314)Figure 2.8: Evolution of the loss and of Dice scores of tumor subclasses during the training of the $2 D$ model. regions. 
Even if the multiclass segmentation problem is very difficult for a small network using only one MR sequence, this pretraining forces the subnetwork to learn the most relevant features, which will then be used by the main part of the network, trained on the subset of training cases for which all MR sequences are available. The improvement was found statistically significant ( $p$-value $<0.05$ ) for all the three tumor subregions (Table 2.5). === Using long-range $2D$ context We perform a series of experiments to analyze the effects of using features learned by $2 D$ networks as an additional input to 3D networks. In the first step, 2D model 1 is trained separately on axial, coronal and sagittal slices and the standard 3D model is trained on $70 \times 70 \times 70$ patches. Then we extract the features produced by the $2 D$ model for all images of the training database and we train the same 3D model on 70x70x70 patches using these extracted features (Fig. 2.9) as an additional input (2D-3D model A specified on Fig. 2.4). The experiment is performed on two datasets: the test dataset of 50 patients (networks trained on the remaining 235 patients) and the Validation dataset of BRATS 2017 (networks trained on 285 patients). The results on the two datasets are reported respectively in Table 2.2 and Table 2.3. Further experiments, involving varying $2 D$ and $3 D$ architectures are presented in section 2.3.5. Qualitative analysis is performed on the first dataset, for which the ground truth segmentation is provided. For comparison, we also display the scores obtained by U-net processing axial slices, using our implementation (with batchnormalization). On the two datasets and for all tumor subregions, our 2D-3D model obtained aTable 2.1: Mean Dice scores on the test dataset (50 patients) in the context of misssing MR sequences in the training database. EC, TC and WT refer respectively to 'Enhancing Core', 'Tumor Core' and 'Whole Tumor' regions. 
The numbers in brackets denote standard deviations. \begin{tabular}{|c|c|c|c|} \hline & EC & TC & WT \\ \hline 2D model 1, missing data & $70.2(22.3)$ & $68.6(27.9)$ & $83.0(14.6)$ \\ \hline 2D model 1 missing data + pretrained subnetworks & $\mathbf{7 1 . 9}(20.9)$ & $\mathbf{7 3 . 7}(23.7)$ & $\mathbf{8 4 . 1}(13.6)$ \\ \hline 2D model 1 full data & $73.6(19.8)$ & $79.4(15.7)$ & $86.6(11.1)$ \\ \hline \end{tabular} Table 2.2: Mean Dice scores on the test dataset (50 patients). The numbers in brackets denote standard deviations. \begin{tabular}{|c|c|c|c|} \hline & EC & TC & WT \\ \hline Unet axial slices & $73.9(19.7)$ & $78.1(17.9)$ & $86.5(11.6)$ \\ \hline 2D model 1 axial slices & $73.6(19.8)$ & $79.4(15.7)$ & $86.6(11.1)$ \\ \hline Standard 3D model (without 2D features) & $73.7(19.9)$ & $77.0(18.5)$ & $85.7(8.3)$ \\ \hline 2D-3D model A, features from 2D model 1 & $\mathbf{7 7 . 4}(16.6)$ & $\mathbf{8 0 . 9}(16.9)$ & $\mathbf{8 7 . 3}(11.7)$ \\ \hline \end{tabular} Chapter 2. 3D Convolutional Neural Networks for TumorSegmentation using Long-range $2 D$ Context![](https://cdn.mathpix.com/cropped/2023_06_21_375ec9675e3c660da887g-034.jpg?height=840&width=1354&top_left_y=358&top_left_x=318)Figure 2.9: $2 D$ features computed for three different patients from the test set. These features correspond to unnormalized outputs of the final convolutional layers of three versions of a 2D model (CNN-2DAxl, CNN-2DSag, CNN-2DCor). The values of these features are used as an additional input to a 3D CNN. Each feature highlights one of the tumor classes (columns 3-6) and encodes a rich information extracted from a long-range 2D context within an axial, sagittal or coronal plane (rows 1-3). Each row displays a different case from the test set (unseen by the network during the training).better performance than the standard 3D CNN (without the use of 2D features) and than 2D model 1 from which the features were extracted (Table 2.2 and Table 2.3). The qualitative analysis (Fig. 
2.10) of outputs of 2D networks highlights two main problems of $2 D$ approaches. First, as expected, the produced segmentations show discontinuities which appear as patterns parallel to the planes of processing. The second problem are false positives in the slices at the borders of the brain and containing artefacts of skull-stripping. Segmentations produced by the standard 3D model are more spatially consistent but the network suffers from a limited input information from distant voxels. The use of learned features as an additional input toTable 2.3: Mean Dice scores on the Validation dataset of BRATS 2017 (46 patients). \begin{tabular}{|c|c|c|c|} \hline & EC & TC & WT \\ \hline Unet axial slices & $71.4(27.4)$ & $76.6(22.4)$ & $87.7(10.6)$ \\ \hline 2D model 1 axial slices & $71.1(28.8)$ & $78.4(21.3)$ & $88.6(8.7)$ \\ \hline Standard 3D model (without 2D features) & $68.7(30.0)$ & $74.2(23.7)$ & $85.4(10.9)$ \\ \hline 2D-3D model A, features from 2D model 1 & $\mathbf{7 6 . 7}(27.6)$ & $\mathbf{7 9 . 5}(21.3)$ & $\mathbf{8 9 . 3}(8.5)$ \\ \hline \end{tabular} $MRI$ T2![](https://cdn.mathpix.com/cropped/2023_06_21_375ec9675e3c660da887g-035.jpg?height=1486&width=1378&top_left_y=380&top_left_x=360)Figure 2.10: Examples of segmentations obtained with models using a different spatial context. Each row represents a different patient from the local test dataset (images unseen during the training). From left to right: MRI T2, '2D model 1' processing the image by axial slices, standard 3D model (without 2D features), '2D-3D model A' using the features produced by '2D model 1', ground truth segmentation. Orange, blue and green zones represent respectively edema, contrast-enhancing core and non-enhancing core.the network gives a considerable advantage by providing rich information extracted from distant points. The difference of performance is particulary visible for 'tumor core' and 'enhancing core' subregions. 
The improvements of our 2D-3D approach compared to the standard 3D CNN (without the use of 2D features) were found statistically significant ($p$-value $<0.05$) in all cases except the 'whole tumor' region

MRI T2

![](https://cdn.mathpix.com/cropped/2023_06_21_375ec9675e3c660da887g-036.jpg?height=810&width=352&top_left_y=434&top_left_x=314)![](https://cdn.mathpix.com/cropped/2023_06_21_375ec9675e3c660da887g-036.jpg?height=223&width=354&top_left_y=608&top_left_x=657)![](https://cdn.mathpix.com/cropped/2023_06_21_375ec9675e3c660da887g-036.jpg?height=166&width=363&top_left_y=428&top_left_x=661)![](https://cdn.mathpix.com/cropped/2023_06_21_375ec9675e3c660da887g-036.jpg?height=402&width=368&top_left_y=428&top_left_x=656)

MRI T1

![](https://cdn.mathpix.com/cropped/2023_06_21_375ec9675e3c660da887g-036.jpg?height=974&width=1050&top_left_y=434&top_left_x=658)

Figure 2.11: Results obtained by the 2D-3D model, displayed for each available MR sequence. While both T2 and T2-FLAIR highlight the edema, T2-FLAIR allows for distinguishing it from the cerebrospinal fluid. T1 with injection of a gadolinium-based contrast agent highlights the degradation of the blood-brain barrier induced by the tumor.

in the first dataset (Table 2.5).

=== Varying network architectures and combining segmentations

We perform experiments with varying architectures of 2D and 2D-3D models. The first objective is to test if the use of 2D features provides an improvement when different 2D and 2D-3D architectures are used. The second objective is to test our decision process combining different multiclass segmentations. The third goal is to compare performances obtained by different models. The experiments are performed on the Validation set of BRATS 2017; the performance is evaluated by the public benchmark of the challenge. In our experiments we use two architectures of our 2D model and three architectures of the 2D-3D model.
The main difference between the two 2D networks used in experiments is the architecture of subnetworks processing the input MR sequences. In the first 2D model, the subnetworks correspond to reduced versions ![](https://cdn.mathpix.com/cropped/2023_06_21_375ec9675e3c660da887g-037.jpg?height=312&width=1398&top_left_y=378&top_left_x=360) 2D model 2![](https://cdn.mathpix.com/cropped/2023_06_21_375ec9675e3c660da887g-037.jpg?height=360&width=1350&top_left_y=665&top_left_x=366)Figure 2.12: Architectures of complementary networks used in our experiments.of U-Net (Fig. 4.2) whereas in the second model, the subnetworks are composed of three convolutional layers (Fig. 2.12, top). In the remainder, we refer to these models as ' $2 D$ model 1 ' and ' $2 D$ model 2'. The difference between the two first 2D-3D models is the choice of the layer in which the $2 D$ features are imported: in the first layer of the network (Fig. 2.4) or before the final sequence of convolutional layers (Fig. 2.12, bottom left). The third 2D-3D model (Fig. 2.12, bottom right) is composed of two streams, one processing only the 3D image patch and the other stream taking also the $2 D$ features as input. We refer to these models as 2D-3D model A, 2D-3D model B and 2D-3D model C. Please note that the two first models correspond to a standard 3D model with the only difference of taking an additional input. Each of the 2D-3D models is trained twice using respectively features learned by $2 D$ model 1 or features learned by $2 D$ model 2 . We combine the trained 2D-3D models with the voting strategy described in section 2.2.4. As we observe that $2 D$ model 1 performs better than 2D model 2, we consider two ensembles: combination of all trained 2D-3D models and combination of three models using features from 2D model 1. We use the following thresholds for merging (defined in section 2.2.4): $T_{\text {tumor }}=0.4, T_{\text {core }}=0.3, T_{\text {enhancing }}=0.4$. The results are reported in Table 2.4. 
In all experiments, the 2D-3D models obtain better performances than their standard 3D counterparts and than 2D networks from which the features were extracted. The merging of segmentations with our decision rule further improves the performance. For all tumor subregions, the ensemble of 6 models (the last row of Table 2.4) outperforms each of the individual models. The improvement over the main 2D-3D model (2D-3D model A with features from 2D model 1) was found statistically significant (p-value $<0.05$ ) for 'whole tumor' and 'tumor core' subregions, as reported in the last row of Table 2.5. Table 2.4: Mean Dice scores on the Validation dataset of BRATS 2017 (46 patients). \begin{tabular}{|c|c|c|c|} \hline & EC & TC & WT \\ \hline 2D model 1 axial slices & 71.1 & 78.4 & 88.6 \\ \hline 2D model 2 axial slices & 68.0 & 78.3 & 88.1 \\ \hline Standard 3D model (without 2D features) & 68.7 & 74.2 & 85.4 \\ \hline$*$ 2D-3D model A, features from 2D model 1 & 76.7 & 79.5 & 89.3 \\ \hline * 2D-3D model B, features from 2D model 1 & 76.6 & 79.1 & 89.1 \\ \hline$*$ 2D-3D model C, features from 2D model 1 & 76.9 & 78.3 & 89.4 \\ \hline * 2D-3D model A, features from 2D model 2 & 73.4 & 79.5 & 89.7 \\ \hline *2D-3D model B, features from 2D model 2 & 74.1 & 79.4 & 89.5 \\ \hline$*$ 2D-3D model C, features from 2D model 2 & 74.3 & 79.4 & 89.6 \\ \hline Combination of models A-C features from model 1 & 76.7 & 79.6 & 89.4 \\ \hline Combination of all models * (final segmentation) & $\mathbf{7 7 . 2}$ & $\mathbf{8 0 . 8}$ & $\mathbf{9 0 . 0}$ \\ \hline \end{tabular} Table 2.5: p-values of the t-tests (in bold: statistically significant results, with $p$ $<0.05)$ of the improvement provided by the different components of our method. To lighten the notations, '2D' refers to '2D model 1 axial slices' and '2D-3D' refers to '2D-3D model A, features from 2D model 1'. 'Combination of 2D-3D' refers to the result obtained by merging 6 models with our hierarchical decision process. 
\begin{tabular}{|c|c|c|c|} \hline & EC & TC & WT \\ \hline 2D vs 2D with pretrained subnetworks, missing data & $\mathbf{0 . 0 0 5 4}$ & $\mathbf{0 . 0 0 0 3}$ & $\mathbf{0 . 0 0 7 4}$ \\ \hline Standard 3D vs 2D-3D, dataset 1 & $\mathbf{0 . 0 0 8 2}$ & $\mathbf{0 . 0 0 1 6}$ & 0.0729 \\ \hline Standard 3D vs 2D-3D, dataset 2 & $\mathbf{0 . 0 0 7 7}$ & $\mathbf{0 . 0 0 0 5}$ & $<\mathbf{0 . 0 0 0 1}$ \\ \hline 2D-3D vs combination of 2D-3D & 0.1058 & $\mathbf{0 . 0 1 3 8}$ & $\mathbf{0 . 0 4 9 6}$ \\ \hline \end{tabular} While the three 2D-3D architectures yield similar performances, 2D model 1 (subnetworks similar to U-net) performs better than $2 D$ model 2 for all three tumor regions. However, the 2D-3D models trained with the features from 2D model 2 are useful for the merging of segmentations: the ensemble of all models yields better performances than the ensemble of three models (two last rows of Table 2.4). === Comparison to the state of the art We have evaluated our segmentation performance on the public benchmark of the challenge to compare our results with few dozens of teams from renowned research institutions worldwide. Our method compares favorably with competing methods of BRATS 2017 (Table 2.6): among 55 teams which evaluated their methods on all test patients of the Validation set, we obtain top-3 performance for 'core' and 'enhancing core' tumor subregions. We obtain mean Dice score of 0.9 for the 'whole tumor' region, which is almost equal to the one obtained by the best scoring team $(0.905)$. 
The winning method of UCL-TIG [Wang 2017] proposes to sequentially use three Table 2.6: Mean Dice scores of the 10 best scoring teams on the validation leaderboard of BRATS 2017 (state of January 22, 2018) \begin{tabular}{|c|c|c|c|c|c|c|c|} \hline & EC & TC & WT & Rank EC & Rank TC & Rank WT & Average rank \\ \hline UCL-TIG & 78.6 & 83.8 & 90.5 & $1 / 55$ & 1 & 1 & 1.0 \\ \hline MIC_DKFZ & 77.6 & 81.9 & 90.3 & $2 / 55$ & 2 & 2 & 2.0 \\ \hline inpm (our method) & 77.2 & 80.8 & 90.0 & $3 / 55$ & 3 & 7 & 4.3 \\ \hline UCLM_UBERN & 74.9 & 79.1 & 90.1 & $9 / 55$ & 6 & 3 & 6.0 \\ \hline biomedia1 & 73.8 & 79.7 & 90.1 & $12 / 55$ & 5 & 5 & 7.3 \\ \hline stryker & 75.5 & 78.3 & 90.1 & $6 / 55$ & 10 & 6 & 7.3 \\ \hline xfeng & 75.1 & 79.9 & 89.2 & $8 / 55$ & 4 & 11 & 7.7 \\ \hline Zhouch & 75.4 & 77.8 & 90.1 & $7 / 55$ & 12 & 4 & 7.7 \\ \hline tkuan & 76.5 & 78.2 & 88.9 & $4 / 55$ & 11 & 13 & 9.3 \\ \hline Zhao & 75.9 & 78.9 & 87.2 & $5 / 55$ & 7 & 16 & 9.3 \\ \hline \end{tabular} Table 2.7: Distribution of Dice scores (final result). The numbers in brackets denote standard deviations. \begin{tabular}{|c|c|c|c|} \hline & EC & TC & WT \\ \hline Mean & $77.2(24.4)$ & $80.8(18.9)$ & $90.0(8.1)$ \\ \hline Median & 85.4 & 88.3 & 91.8 \\ \hline Quantile $25 \%$ & 76.9 & 75.0 & 89.6 \\ \hline Quantile $75 \%$ & 90.0 & 93.5 & 94.5 \\ \hline \end{tabular} 3D CNNs in order to progressively determine the tumor subclass. Each of the networks performs a binary segmentation (tumor/not tumor, core/edema, enhancing core/non-enhancing core) and was designed for one tumor subregion of BRATS. A common point with our method is the hierarchical process, however in our method all models perform multiclass segmentation. The method of the team MIC_DKFZ, according to [Isensee 2017], is based on an optimized version of 3D U-net and an extensive use of data augmentation. The leaderboard of BRATS 2017 only shows mean performances obtained by participating teams. 
However, the benchmark individually provides detailed scores and complementary statistics, in particular quartiles and standard deviations reported in Table 2.7. Our method yields promising results with median Dice score of 0.918 for the whole tumor, 0.883 for the tumor core and 0.854 for the enhancing core. While the Dice scores for the whole tumor region are rather stable (generally between 0.89 and 0.95 ), we observe a high variability of the scores obtained for the tumor subregions. In particular the obtained median Dices are much higher than the means, due to the sensitivity of Dice score to outliers. = Discussion and conclusion In this work, we presented a deep learning system for multiclass segmentation of tumors in multisequence MR scans. The goal of our work was to propose elements to improve performance, robustness and applicability of commonly used CNN-based systems. In particular, we proposed a new methodology to capture a long-range 3D context with CNNs, we introduced a network architecture with modality-specific subnetworks and we proposed a voting strategy to merge multiclass segmentations produced by different models. First, we proposed to use features learned by 2D CNNs (capturing a long-range 2D context in three orthogonal directions) as an additional input to a 3D CNN. Our approach combines the strengths of $2 D$ and 3D CNNs and was designed to capture a very large spatial context while being efficient in terms of computations and memory load. Our experiments showed that this hybrid 2D-3D model obtains better performances than both the standard 3D approach (considering only the intensities of voxels of a subvolume) and than the $2 D$ models which produced the features. Even if the use of the additional input implies supplementary reading operations, the simple importation of few features to a CNN does not considerably increase the number of computations and the memory load. 
In fact, in typical CNNs performing hundreds of convolutions, max-poolings and upsamplings, the data layer represents typically a very small part of the memory load of the network. One solution to limit the reading operations could be to read downsampled versions of features or to design a 2D-3D architecture in which the features are imported in a part of the network where the feature maps are relatively small. The improvement provided by the 2D-3D approach has the cost of increasing the complexity of the method compared to a pure 3D approach as it requires a twostep processing (first 2D, then 3D). However, its implementation is rather simple as the only supplementary element to implement is the extraction of 2D features, i.e. computation of outputs of trained 2D networks (with a deep learning software such as TensorFlow) and saving the obtained tensors in files. In the 3D part, the extracted features are then simply read as additional channels of the input image. Despite the important recent progress of GPUs, pure 3D approaches may be easily limited by their computational requirements when the segmentation problem involves an analysis of a very large spatial 3D context. In fact, Convolutional Neural Networks require an important amount of GPU memory and a high computational power as they perform thousands of costly operations on images (convolutions, maxpoolings, upsamplings). The main advantage of our 2D-3D approach is to considerably increase the size of the receptive field of the model while being efficient in terms of the computational load. The use of our 2D-3D model may therefore be particularly relevant in the case of very large 3D scans. Second, we proposed a novel approach to process different MR sequences, using an architecture with modality-specific subnetworks. Such design has the considerable advantage of offering a possibility to train one part of the network on databases containing images with missing MR sequences. 
In our experiments, training of modality-specific subnetworks improved the segmentation performance in the setting with missing MR sequences in the training database. Moreover, the fact that our 2D model obtained promising segmentation performance is particularly encouraging given that 2D networks are easier to apply for the clinical use where images have a variable number of acquired slices. Our approach can be easily used with any deep learning software (e.g. Keras). In the case of databases with missing MR sequences, the user only has to perform a training of a subnetwork (on images for which the given MR sequence is provided) and then read the learned parameters for the training of the main part of the network (on images for which all MR sequences are available). In order to be less prone to limitations of particular choices of neural network architectures, we proposed to merge outputs of several models by a voxelwise voting strategy taking into account the semantics of labels. In contrast to most methods, we do not apply any postprocessing on the produced segmentations. Our methodological contributions can be easily included separately or jointly into a CNN-based system to solve specific segmentation problems. The implementation of our method will be made publicly available on https://github.com/PawelMlynarski.

= Related work

In the literature, there are several works related to weakly-supervised and semi-supervised learning for object segmentation or detection. Most of the related works were applied to natural images.

=== Deep learning model for binary segmentation

We designed a novel deep learning model, which aims to take advantage of all available voxelwise and image-level annotations. We propose to extend a segmentation CNN with an additional subnetwork performing image-level classification and to train the model for the two tasks jointly.
Most of the layers are shared between the classification and segmentation subnetworks in order to transfer the information between the two subnetworks. In this work we present the $2 D$ version of our model, which can be used on different types of medical images such as slices of a CT scan or a multisequence MRI. The proposed network takes as input a multimodal image of dimensions 300x300 and extends U-Net [Ronneberger 2015] which is currently one of the most used architectures for segmentation tasks in medical imaging. The different image modalities (e.g. sequences of a MRI) correspond to channels of the data layer and are the input of the first convolutional layer of the network (as in most of the currently used CNNs for image segmentation). U-Net is composed of an encoder part and a decoder part which are connected by concatenations between layers at the same scale, in order to combine low-level and local features with high-level and global features. This design is well suited for the tumor segmentation task since the classification of a voxel as tumor requires to compare its value with its close neighborhood but also taking into account a large spatial context. The last convolutional layer of U-net produces pixelwise classification scores, which are normalized by softmax function during the training phase. We apply batch normalization [Ioffe 2015] in all convolutional layers except the final layer. We propose to add an additional branch to the network, performing image-level classification (Fig. 3.2), in order to exploit the information contained in weaklyannotated images during the training. This classification branch takes as input the second to last convolutional layer of U-net (representing a rich information extracted from a local and a long-range spatial context) and is composed of one mean-pooling, one convolutional layer and 7 fully-connected layers. 
The goals of taking a layer from the final part of U-Net as input of the classification branch are both to guide the image-level classification task and to force the major part of the segmentation network to take into account weakly-annotated 3.3. Joint classification and segmentation with Convolutional Neural Networks![](https://cdn.mathpix.com/cropped/2023_06_21_375ec9675e3c660da887g-049.jpg?height=557&width=1396&top_left_y=367&top_left_x=364)Figure 3.2: Architecture of our model for binary segmentation. The numbers of outputs are specified below boxes representing layers. The height of rectangles represents the scale (increasing with pooling operations). = Experiments === Data We evaluate our method on the challenging task of brain tumor segmentation in multisequence MR scans, using the Training dataset of BRATS 2018 challenge. It contains 285 multisequence MRI of patients diagnosed with low-grade gliomas or high-grade gliomas. For each patient, manual ground truth segmentation is provided. In each case, four MR sequences are available: T1, T1+gadolinium, T2 and FLAIR (Fluid Attenuated Inversion Recovery). Preprocessing performed by the organizers includes skull-stripping, resampling to $1 ~mm^{3}$ resolution and registration of images to a common brain atlas. The resulting volumes are of size 240x240x155. The images were acquired in 19 different imaging centers. In order to normalize image intensites, each image is divided by the median of non-zero voxels (which is supposed to be less affected by the tumor zone than the mean) and multiplied the image by a fixed constant. Each voxel is labelled with one of the following classes: non-tumor (class 0), contrast-enhancing core (class 3), non-enhancing core (class 1), edema (class 2). The benchmark of the challenge groups classes in three regions: whole tumor (formed by all tumor subclasses), tumor core (classes 1 and 3, corresponding to the visible tumor mass) and enhancing core (class 3). 
Given that all 3D images of the database contain tumors (no negative cases to train a 3D classification network), we consider the 2D problem of tumor segmentation in axial slices of the brain.

=== Test setting

The goal of our experiments is to compare our approach with the standard supervised learning. In each of the performed tests, our model is trained on fully-annotated and weakly-annotated images and is compared with the standard U-Net trained on fully-annotated images only. The goal is to compare our model with a commonly used segmentation model on a publicly available database. We consider three different training scenarios, with a varying number of patients for which we assume a provided manual tumor segmentation. In each scenario we perform a 5-fold cross-validation. In each fold, 57 patients are used for test and

T2![](https://cdn.mathpix.com/cropped/2023_06_21_375ec9675e3c660da887g-053.jpg?height=694&width=400&top_left_y=434&top_left_x=360)T2-FLAIR![](https://cdn.mathpix.com/cropped/2023_06_21_375ec9675e3c660da887g-053.jpg?height=680&width=278&top_left_y=441&top_left_x=774)T1![](https://cdn.mathpix.com/cropped/2023_06_21_375ec9675e3c660da887g-053.jpg?height=684&width=284&top_left_y=440&top_left_x=1047)T1c![](https://cdn.mathpix.com/cropped/2023_06_21_375ec9675e3c660da887g-053.jpg?height=680&width=280&top_left_y=442&top_left_x=1339)

Figure 3.4: Examples of multisequence MRI from the BRATS 2018 database. While T2 and T2-FLAIR highlight the edema induced by the tumor, T1 is suitable for determining the tumor core. In particular, T1 acquired after injection of a contrast agent (T1c) highlights the tumor angiogenesis, indicating presence of highly proliferative cancer cells.

228 patients are used for training. Among the 228 training images, few cases are assumed to be fully-annotated and the remaining ones are considered to be weakly-annotated, with slice-level labels. The fully-annotated images are different in each fold.
If the 3D volumes are numbered from 0 to 284, then in the $k^{\text{th}}$ fold, the test images correspond to the interval $[(k-1) \times 57,\, k \times 57 - 1]$, the next few images correspond to fully-annotated images and the remaining ones are considered as weakly-annotated (the folds are generated in a circular way). In the following, FA denotes the number of fully-annotated cases and WA denotes the number of weakly-annotated cases (with slice-level labels). In particular, note that the split training/test is on 3D MRIs, i.e. the different slices of the same patient are always in the same set (training or test). In the first training scenario, 5 patients are assumed to be provided with a manual segmentation and 223 patients have slice-level labels. In the second and the third scenario, the numbers of fully-annotated cases are respectively 15 and 30 and the numbers of weakly-annotated images are therefore respectively 213 and 198. The three training scenarios are independent, i.e. folds are re-generated randomly (the list of all images is permuted randomly and the folds are generated). In fact, results are likely to depend not only on the number of fully-annotated images but also on qualitative factors (for example the few fully-annotated images may correspond to atypical cases), and the goal is to test the method in various settings. Overall, our approach is compared to the standard supervised learning on 60 tests (5-fold cross-validation, three independent training scenarios, three binary problems and one multiclass problem). We evaluate our method both on binary segmentation problems (separately for each of three tumor regions considered in the challenge) and on the end-to-end multiclass segmentation problem. In each binary case, the model is trained for segmentation and classification of one tumor region (whole tumor, tumor core or enhancing core).
Segmentation performance is expressed in terms of Dice score quantifying the overlap between the ground $\operatorname{truth}(Y)$ and the output of a model $(\tilde{Y})$ : \[ \operatorname{DSC}(\tilde{Y}, Y)=\frac{2|\tilde{Y} \cap Y|}{|\tilde{Y}|+|Y|} \]In order to measure the statistical significance of obtained results, we perform two-tailed and paired t-tests. Pairs of observations correspond to segmentation scores obtained with the standard supervised learning (U-Net trained on fullyannotated images) and with our approach. Dice scores for all patients from 5 folds are concatenated to form a set of 285 pairs of observations. The statistical test is performed for each training scenario and for each segmentation task (three binary problems and one multiclass problem). We consider a significance level of $5 \%$. === Model hyperparameters === Loss function and training of the model The main introduced training hyperparameter is the parameter $a$, corresponding to the trade-off between classification and segmentation losses. We report mean Dice scores obtained with a varying value of the parameter $a$, on a validation set of 57 patients ( $20 \%$ of the database used for testing and $80 \%$ used for training) in the case with 5 fully-annotated cases and 223 weakly-annotated cases. Segmentation accuracy obtained for the whole tumor in the binary case is reported on Fig. 3.5. The peak of performance is observed for $a=0.7$ (improvement of approximately 12 points of Dice over the standard supervised learning on this validation set), i.e. for the configuration where the segmentation loss accounts for $70 \%$ of the total loss. With high values of $a$, the improvement over the standard supervised learning is limited: around 2.5 points of Dice for $a=0.9$. In fact, setting a high value of $a$ corresponds to giving less importance to the image-level classification task and therefore ignoring weakly-annotated images. 
For too low values of $a$, segmentation accuracy decreases too, probably because the model focuses on the secondary task of image-level classification. In the end-to-end multiclass case (Fig. 3.6), lower values of $a$ seem more suitable, possibly because of an increased complexity of the image-level classification task. In all subsequent tests, we fix $a=0.7$ for binary segmentation problems and $a=0.3$ for the end-to-end multiclass segmentation. Training batches in our experiments contain 10 images, including 8 images with tumors (4 images with provided tumor segmentation and 4 without provided segmentation) and 2 images without tumors. The number of images was fixed to fit in the memory of the used GPUs (Nvidia GeForce GTX 1080 Ti), i.e. to form training batches for which backpropagation can be performed using the memory of the GPU. In each training batch there are only 2 images without tumors because most of the pixels of tumor images correspond to non-tumor zones. The parameters $t_{c}$, corresponding to target weights of classes in the segmentation loss, were fixed manually. Both in binary and multiclass cases, we chose $t_{0}=0.7$, which corresponds to giving a target weight of $70 \%$ to non-tumor voxels. In fact, tumor pixels represent approximately $1 \%$ of pixels of the training batch and therefore non-tumor pixels account approximately for $99 \%$ of the non-weighted cross-entropy segmentation loss. With $t_{0}=0.7$, the relative weight of non-tumor pixels is therefore decreased compared to the standard, non-weighted cross-entropy, while still giving the non-tumor class a high weight in order to avoid oversegmentation. In the multiclass setting, we fixed the same target weight to all three tumor subclasses, i.e. $t_{1}=0.1, t_{2}=0.1, t_{3}=0.1$. As a good convergence of the training was obtained in terms of Dice scores of tumor subclasses, we did not further need to optimize these hyperparameters.
Moreover, U-Net trained with these weights and using 228 fully-annotated images obtained a mean Dice score of almost 0.87 for whole tumor (last row of Table 3.1), which is a satisfactory performance for a model independently processing axial slices without any postprocessing.

Mean Dice (validation set 5 FA + 223 WA)

![](https://cdn.mathpix.com/cropped/2023_06_21_375ec9675e3c660da887g-055.jpg?height=608&width=1191&top_left_y=1598&top_left_x=455)

Figure 3.5: Mean Dice scores for the 'whole tumor' region obtained with a varying value of the parameter 'a', corresponding to the trade-off between segmentation and image-level classification losses. Segmentation scores are evaluated on a validation set of 57 MRI in the training scenario where 5 fully-annotated MRI and 223 weakly-annotated MRI are available for training. The case $a=1.0$ corresponds to ignoring the classification loss and therefore ignoring weakly-annotated images.

![](https://cdn.mathpix.com/cropped/2023_06_21_375ec9675e3c660da887g-056.jpg?height=648&width=1136&top_left_y=407&top_left_x=423)

Figure 3.6: Mean Dice scores for 'whole tumor' and 'tumor core' regions obtained with a varying value of the parameter 'a' in the multiclass case. Segmentation scores are evaluated on a validation set of 57 MRI in the training scenario where 5 fully-annotated MRI and 223 weakly-annotated MRI are available for training. The case $a=1.0$ corresponds to ignoring the classification loss and weakly-annotated images.

=== Model architecture

One of the most important attributes of our method is the architecture of classification branches extending segmentation networks. We perform experiments to compare our model with alternative types of architectures of classification subnetworks. We report the segmentation accuracy obtained on the previously defined validation set of 57 patients. In the binary case, we consider two alternative architectures of classification subnetworks.
The first one is composed of four fully-connected layers having respectively 2000, 500, 100 and 2 neurons. It corresponds therefore to a shallow variant of the classification subnetwork with a relatively high number of parameters. We name this architecture Shallow model. The second variant has the same architecture as our model (7 fully-connected layers) but with removed concatenation between the first and the fifth fully-connected layer. We name this architecture Deep-sequential. The comparison of segmentation accuracy for whole tumor obtained by these two variants and by our model is reported on Fig. 3.7. All three models using mixed level of supervision obtain a better segmentation accuracy than the standard U-Net using 5 fully-annotated images (64.48). Among the three architectures, the shallow variant yields the lowest accuracy (72.29). Our model obtains the highest accuracy (76.56) and performs slightly better than its counterpart with removed concatenation, Deep-sequential model (75.78). The improvements over the standard model and the Shallow model were found statistically significant (two-tailed and paired t-test). ![](https://cdn.mathpix.com/cropped/2023_06_21_375ec9675e3c660da887g-057.jpg?height=642&width=1131&top_left_y=373&top_left_x=474)Figure 3.7: Mean Dice scores for the 'whole tumor' region obtained by the standard U-Net and by different models using mixed level of supervision. Standard deviations are represented by error bars. The segmentation scores are evaluated on a validation set of $57 MRI$ in the training scenario where 5 fully-annotated MRI and 223 weaklyannotated MRI are available for training. Our model corresponds to U-Net extended with a classification branch composed of 7 fully-connected layers and containing one skip-connection. We also report results obtained with an alternative architecture of the multiclass model. In our model, we considered separate classification branches for all tumor subclasses. 
We consider an alternative architecture, having only one classification branch (with the same architecture as our model for binary segmentation and classification) shared between the three final fully-connected layers performing image-level classification. In this configuration, the classification layer of each tumor subclass takes as input the 6th fully-connected layer of the shared classification branch. We name this architecture Shared classification. The comparison with our multiclass model (separate classification branches for all tumor subclasses) on the same validation set as previously is reported on Fig. 3.8. Our model obtains the highest accuracy for the three tumor subregions while the alternative model (Shared classification) obtains higher accuracy than the standard multiclass U-Net for whole tumor and tumor core. The improvements of our model over the standard model were found statistically significant for whole tumor and tumor core regions. The improvements over the alternative model with mixed supervision (Shared classification) were not found statistically significant ( $p$-values $>0.05$ ). === Results The main observation is that our model with mixed supervision provides a significant improvement over the standard supervised approach (U-Net trained on fullyannotated images) when the number of fully-annotated images is limited. In the two Chapter 3. Deep Learning with Mixed Supervision for Brain Tumor Segmentation = Mean Dice (validation set 5 FA + 223 WA) ![](https://cdn.mathpix.com/cropped/2023_06_21_375ec9675e3c660da887g-058.jpg?height=699&width=1379&top_left_y=473&top_left_x=316)Figure 3.8: Mean Dice scores for the 'whole tumor' region obtained by the standard multiclass U-Net and by different multiclass models using mixed level of supervision. The error bars represent standard deviations. 
The segmentation scores are evaluated on a validation set of 57 MRI in the training scenario where 5 fully-annotated MRI and 223 weakly-annotated MRI are available for training. Our model is multiclass U-Net extended with three separate classification branches (one for each tumor subclass), each branch having the same architecture as in the binary segmentation/classification problem. In the two first training scenarios (5 FA and 15 FA), our model outperformed the supervised approach on the three binary segmentation problems (Table 3.1) and in the multiclass setting (Table 3.3). The largest improvements are in the first scenario (5 FA) for the whole tumor region, where the improvement is of 8 points of the mean Dice score in the binary setting and of 9 points of Dice in the multiclass setting. Results on different folds of the second scenario (intermediate case, 15 FA) are displayed in Table 3.2 for the binary problems and in Table 3.4 for the multiclass problem. Our approach provided an improvement in all folds of the second scenario and for all tumor regions, except one fold for enhancing core in the binary setting. In the third scenario (30 FA + 198 WA), our approach and the standard supervised approach obtained similar performances. Furthermore, we observe that standard deviations are consistently lower with our approach, in all training scenarios and for all tumor subregions. The results obtained with mixed supervision are therefore more stable than the ones obtained with the standard supervised learning. All improvements were found statistically significant for the binary segmentation problems. In the multiclass case, all improvements were found statistically significant except for enhancing core in the first training scenario and for whole tumor in the third training scenario. Qualitative results are displayed on Figures 3.9, 3.10 and 3.11. 
Each figure shows segmentations of one tumor region (whole tumor, tumor core, enhancing core) produced by models trained with a varying number of fully-annotated and weakly-annotated images available for training. Segmentation performance increases quickly with the first fully-annotated cases, both for the standard supervised learning and the learning with mixed supervision. For instance, the mean Dice score obtained by the supervised approach for whole tumor increases from 70.39, in the case with 5 fully-annotated images, to 77.9 in the case with 15 fully-annotated images. Our approach using 5 fully-annotated images and 223 weakly-annotated images obtained a slightly better performance (78.3) than the supervised approach using 15 fully-annotated cases (77.9). This result is represented on Fig. 3.12. On Fig. 3.13, we report cross-validated results obtained with a varying number of weakly-annotated images while keeping a fixed number of fully-annotated images. This complementary experiment is performed for segmentation of whole tumor in the first training scenario (5 fully-annotated images). We observe that the improvement slows down with the number of added weakly-annotated scans. Inclusion of the first 100 weakly-annotated MRIs yields an improvement of approximately 5 points of the cross-validated mean Dice score (from 70.39 to 75.28), while addition of the remaining 123 weakly-annotated images improves this score by 3 points (from 75.28 to 78.34). Note that each fully-annotated case corresponds to a large 3D volume with voxelwise annotations. Each manually segmented axial slice of size $240 \times 240$ corresponds to 57600 labels, which indeed represents a huge amount of information compared to one global label simply indicating presence or absence of tumor tissue within the slice. 
Table 3.1: Mean Dice scores (5-fold cross-validation, 57 test cases in each fold) in the three binary segmentation problems obtained by the standard supervised approach and by our model trained with mixed supervision. The numbers in brackets denote standard deviations computed on the distribution of Dice scores for all patients of the 5 folds. In terms of the annotation cost, manual delineation of tumor tissues in one MRI may take about 45 minutes for an experienced oncologist using a dedicated segmentation tool. Determining the range of axial slices containing tumor tissues may take 1-2 minutes but can be done without specialized software. More importantly, determining global labels may require less medical expertise than performing an exact tumor delineation and can therefore be performed by a larger community. Table 3.3: Mean Dice scores (5-fold cross-validation, 57 test cases in each fold) obtained by the standard supervised approach and by our model in the multiclass setting. The numbers in brackets denote standard deviations computed on the distribution of Dice scores for all patients of the 5 folds. The asterisks represent statistically significant improvements ($p$-value $<0.05$) provided by our method compared to the standard supervised learning. The results are displayed on T1-weighted MRI with gadolinium. The observations are similar to the problem of binary segmentation of the 'whole tumor' region. In particular, in the first training scenario, the standard supervised approach does not detect the tumor core zone, in contrast to our method. Figure 3.11: Comparison of our approach with the standard supervised learning for binary segmentation of the 'enhancing core' region. Each row corresponds to a different training scenario (5, 15 or 30 fully-annotated scans available for training). The example shows false positives obtained by the model trained with standard supervision. 
The number of false positives decreases with the number of fully-annotated images available for training. No false positives are observed for our model trained with mixed supervision, in any of the three training scenarios.
https://github.com/lebinyu/typst-thesis-template
https://raw.githubusercontent.com/lebinyu/typst-thesis-template/main/main.typ
typst
Apache License 2.0
// Main entry point of the thesis document: imports the style templates and
// the thesis content, then renders the cover page, abstract, table of
// contents, bibliography, and chapters in order.

#import "template/cover_style.typ": *
// #import "chapter1.typ"
// #import "chapter2.typ"
// #import "abstract.typ"
// Style templates for the individual document parts.
#import "template/global_style.typ"
#import "template/abstract_style.typ": *
#import "template/tableofcontent_style.typ": *
#import "template/chapter_style.typ": *
// test import
#import "template/reference_style.typ": *
// Import thesis metadata (title, author, supervisor, etc.) and content modules.
#import "thesis/metadata.typ": *
#import "thesis/abstract.typ"
#import "thesis/chapter1.typ"
#import "thesis/chapter2.typ"
// #include "chapter1_test.typ"
// #include "chapter2.typ"

// Cover page, populated from the metadata module imported above.
#cover(
  title: title,
  supervisor: supervisor,
  group_name: group_name,
  institute: institute,
  author: author,
  ID: ID,
  address: address,
  email: email,
  closing_date: closing_date,
)

// Abstract page; the body text comes from thesis/abstract.typ.
#abstractpage(mainbody: abstract.mainbody)

// Auto-generated table of contents.
#tableofcontent()

// Bibliography rendered from the BibTeX file; title suppressed because the
// reference page template supplies its own heading.
#referencepage(mainbody: bibliography("reference.bib", title: none))

// Chapters: each chapter module provides its heading, number, introduction
// and main body; `title` is passed through for running headers.
#chapterpage(
  chapterheading: chapter1.title,
  chaptnumber: chapter1.chaptnumber,
  introduction: chapter1.introduction,
  mainbody: chapter1.mainbody,
  title: title
)

#chapterpage(
  chapterheading: chapter2.title,
  chaptnumber: chapter2.chaptnumber,
  introduction: chapter2.introduction,
  mainbody: chapter2.mainbody,
  title: title
)
https://github.com/polarkac/MTG-Stories
https://raw.githubusercontent.com/polarkac/MTG-Stories/master/stories/046%20-%20Streets%20of%20New%20Capenna/009_Episode%205%3A%20Hymn%20of%20the%20Angels.typ
typst
#import "@local/mtgstory:0.2.0": conf #show: doc => conf( "Episode 5: Hymn of the Angels", set_name: "Streets of New Capenna", story_date: datetime(day: 05, month: 04, year: 2022), author: "<NAME>", doc ) = STREETS OF NEW CAPENNA "What do you think?" Elspeth asked her companions, perching on the same ledge she had used when tailing the informant. It was hard to believe her tests with Xander were already a few weeks ago. Somehow, they seemed both like yesterday and years past. "It's the quietest section of the city we've seen so far," Vivien admitted. #figure(image("009_Episode 5: Hymn of the Angels/01.jpg", width: 100%), caption: [Art by: <NAME>], supplement: none, numbering: none) "Giada?" The young woman looked bone-tired, so Elspeth wasn't surprised in the slightest when she said, "I think staying here is a good idea." "You look after Giada. I'll take a quick survey of the perimeter," Vivien offered. "Thank you." Elspeth pointed to the side of the building. "The raccoonfolk were in the alley adjacent; keep an eye out." Vivien leapt off the ledge, descending into the inky night. She continued to impress Elspeth. Every motion was sure and deliberate. Even though New Capenna was new to her as well, she walked like she owned every bit of glass and concrete without seeming boastful or prideful. She had adapted to the city more seamlessly than Elspeth had in what seemed like the same amount of time, even though it was supposed to be Elspeth's home. "Elspeth." Giada broke the silence, saving Elspeth from personal turmoil and doubt. "Yes?" When Giada didn't immediately speak, Elspeth turned her gaze to her. Giada was stretched out on the ledge, hands folded under her chin, worrying her bottom lip between her teeth. Elspeth had felt that level of uncertainty before and placed her palm lightly between the teen's shoulders to offer some consolation. Giada continued to stare beyond the hazy skyline of New Capenna. "I'm scared." "What are you scared about?" 
Elspeth could think of several thousand reasons for Giada to be scared. But she wanted to hear which one of them weighed on her small shoulders. "What if I'm not enough?" "Enough of what?" Elspeth gently prodded. "What if I can't help New Capenna? Can my magic really be enough? What happens if—#emph[when] —it runs out?" Giada shook her head. "I don't know how much more I have to give, and I don't really know if it'll make a difference if I try. This city is so~broken." The words came out as a rush of air. As though, somewhere within Giada, a dam broke and these questions that had been slowly eating her from the inside now found a free moment for release. Elspeth listened intently to her companion's heartbreaking inquiries. Each question colored one of their past interactions in a new light—tainted it with a shade of fear that Elspeth had seen in Giada but had never understood until this moment. So much had been placed on Giada's shoulders, without giving her autonomy or credit. The Cabaretti, the families, the Adversary, all saw her as a solution to their problems, a tool. They would wring her dry for their cheap solution to the dwindling Halo supply until there was no more blood in her veins, marrow in her bones, or magic in her soul, and they would do it without a second thought for her well-being. Elspeth should have done so much more for Giada so much earlier. Giada turned her eyes to Elspeth, who searched for answers she didn't know if she could give. She wondered if this was how Ajani felt all those times she had gone to him in search of answers she knew, deep down, he didn't have. She wondered if Giada would resent her for her answer as much as she had resented her dear friend in those long-ago moments. "You're right," Elspeth started softly. "New Capenna #emph[is] broken, and Halo is a flimsy bandage on the wounds of this city." 
#emph[Real peace, real prosperity, had to come from within] —by addressing the demons that literally built the city and the figurative ones that still haunted its streets. "What do I do, then? I still want to help—I want a purpose." "Fulfillment~purpose~" Elspeth started softly, getting lost in her own thoughts as she had for months on this topic. But, for the first time, her chest didn't ache. The hollow feeling was not as gaping as it had once been. "Those things have to come from within you. I can't give them to you. No one can." Giada frowned, resting her chin back on her hands dejectedly. Elspeth lightly rubbed between her shoulders. "But I'll tell you this, Giada. You will have the opportunity to find those answers—for yourself. You will find your purpose." #emph[Just as I will.] "And I will personally ensure you are safe to take the time you need to do so, however long that is." "You promise?" Giada turned hopeful eyes back to her. "I swear it." Their conversation was cut short by Vivien's return. She landed lightly on the platform Elspeth and Giada were stretched upon. "It looks sufficiently abandoned. No signs of life within." "Good." Elspeth stood. "Then we'll stay here tonight and catch our breath." = WAREHOUSE Elspeth woke to a weight on her shoulder. The hazy light of dawn, spilling down through a skylight above, cast Giada's face in a warm glow as she slumbered against Elspeth's side. The three of them had huddled in a small office near the back of the warehouse. There was only one entrance and exit, easy enough to secure. And a mirror on the wall opposite the window overlooking the warehouse floor gave them a line of sight without exposing themselves. "We should get moving. I don't think we're alone here," Vivien whispered, eyes darting to sounds Elspeth didn't hear. She was propped against the wall opposite them. "Likely just the raccoonfolk. Give her a little bit longer." Elspeth had yet to move. She had never seen Giada look so peaceful. 
In every previous interaction, Giada had been haunted by turmoil that Elspeth hadn't understood until last night's revelations. What was Giada's history? Had she always been trapped with the Cabaretti? How had she discovered her power to make Halo? All questions Elspeth would keep wondering. Giada had enough asked of her already; Elspeth didn't need to be another making demands of her. For as long as she was able, Elspeth would keep her vow and protect her. That was enough. Metal rang out, sharp and grating. Giada jolted upright, and Elspeth's hand flew to her mouth. Her other arm wrapped tightly around her shoulders, holding the teen to her. "Stay silent," Elspeth hissed, her eyes scanning the mirror for movement. Vivien was on her knees, reaching for her bow as the door swung open and glass rained down around them, their peaceful morning shattering with the window. Jinnie stood in the doorway. Two Cabaretti enforcers stood behind her, brandishing axes. In the mirror, Elspeth could see three more with swords drawn. Before Vivien could draw an arrow, Jinnie threw a dagger at her. Vivien raised her arm to block the dagger, which dug a gash down her forearm. One of the men rushed around Jinnie, knocked the bow from Vivien's wounded arm, and claimed it for himself. "Not so tough without this, are you?" Vivien's eyes shone with a challenge for him to find out just how deadly she could be, even without her bow. She smiled slowly, almost placidly. The sort of smile that promised to be the last he would ever lay eyes on. "You didn't really think you could escape us, did you?" Jinnie stalked over, a knife in hand that she placed under Elspeth's chin. #emph[How did Jinnie find them?] The warehouse was abandoned, the city huge. There had to be an explanation, something they overlooked that allowed Jinnie to track them. "To think, I trusted you." The blade was warmer than the cold hard stare Jinnie regarded Elspeth with. 
Even though she was still breathing, to Jinnie, she was very much dead. "I was trying to—" "Spare me your lies," Jinnie snapped. "You in league with the Adversary?" The edge of her knife bit into Elspeth's neck. "Never." Jinnie probed with her stare. Finally believing her, she asked, "Then, why?" "I was keeping Giada safe." "Liar. You wanted the Font for yourself." Jinnie thrust the knife forward. A little further, and she'd hit the vein in Elspeth's neck. Elspeth didn't even dare to swallow. "Jinnie, isn't it up to Jetmir on how we deal with traitors?" Giada found her voice. Hurt and confusion flashed across Jinnie's face. Elspeth glanced toward Giada. Did she know what she was doing defending her? But Giada clearly had been learning from the Cabaretti as she deftly wielded her words. "Let him decide what to do with the two of them. He always has a clear head. But #emph[I'm] #emph[so glad] to see you again. Thank you for saving me." Jinnie's grip relaxed, and the knife eased away from Elspeth's throat. "It's good to see you, too. I thought we'd lost the Font forever." "I'm right here." Giada smiled weakly. "Yes." Jinnie exhaled anger, a cooler head prevailing. But when she looked back to Elspeth, the same hatred still burned. "Shackle them. We're taking them back to Jetmir." "Are we going back to the Vantoleone?" Giada stood. "No, it's compromised. We're going to friends," Jinnie answered ambiguously. "No family wants to see the Adversary take New Capenna for his own. And now that the Font is secure once more, we have a bargaining chip to make sure the others will work with us." #emph[The Font. Bargaining chip. . .] She#emph[ has a name] , Elspeth wanted to shout. "We can all make it out of this alive." Jinnie's gentle tone was in stark contrast to the men roughly shackling Elspeth and Vivien. Elspeth felt a tingle of magic singe her wrists as they were locked into place. Vivien seemed to be following Elspeth's lead. 
She paid careful attention to the Cabaretti man who collected her sword, keeping her protests behind firmly closed lips. Going along with this, for now, was the best way to remain close to Giada. "And then you can bring balance to our plane," Jinnie finished. Giada nodded, her lips pressed in a hard line. Jinnie grabbed her hand, and Giada's eyes drifted back to Elspeth, who dipped her chin slowly. #emph[I will keep my vow to you] , Elspeth vowed silently. She hoped Giada understood. But Giada kept her face blank as Jinnie escorted them out of the warehouse. = FORTUNE TELLER Elspeth and Vivien shared wary glances as they were escorted through the Mezzio. Elspeth didn't dare say anything with the Cabaretti so close. She'd find a quiet moment to regroup with Vivien whenever they got to this "safe place." The aroma of sandalwood and orange tickled Elspeth's nose. "Jinnie," Elspeth called out, stopping in her tracks. "Keep moving." One of the Cabaretti shoved her. Elspeth stumbled forward, trying to use it to conceal the wide steps she now used to close the gap between her and Jinnie. "This is an Obscura hideout," Elspeth said. "You don't think I know that?" Jinnie arched her eyebrows. "Who do you think my friends are?" Elspeth's heart began to race. "You were at the Crescendo. The Maestros, Cabaretti, and Riveteers—every family was infiltrated. This could be a trap." "Unlike #emph[you] , some are actually loyal." Jinnie came to a stop before a door and knocked. It was identical to the knock Elspeth used when delivering Xander's package. Elspeth stepped back to stand next to Vivien, catching her eyes. "Stay alert," Elspeth murmured. Vivien nodded. Elspeth didn't exactly have the most faith in Jinnie's ability to identify a loyal follower versus someone operating in their own interest. After all, she had thought Elspeth was nothing more than an eager sycophant. 
The door opened before Jinnie could say anything more, revealing a cephalid woman in a navy trench coat embellished with gold. Her clothing and cloche hat were of a similar make to the Obscura Elspeth had delivered the package to. The hairs on the back of Elspeth's neck stood on edge. "Kamiz," Jinnie said with relief. "How is Jetmir?" #figure(image("009_Episode 5: Hymn of the Angels/02.jpg", width: 100%), caption: [Art by: <NAME>], supplement: none, numbering: none) "Stable, though ailing. I see you secured the Font. Excellent, I knew you would. Come in, before anyone sees you." "Thank you for offering us refuge." Jinnie stepped inside, the rest of them following. "Who are they?" Kamiz glanced between Elspeth and Vivien. "Traitors—the ones who took the Font. Jetmir will know how best to deal with them." They entered a receiving area with a desk and a few chairs. Behind a curtain was a square table covered with a navy silk cloth. At the table's center was a large crystal ball and a deck of cards. Kamiz pulled aside a curtain to reveal a secret door that opened to a much larger back room. Judging from the crates and bookshelves that no doubt contained precious secrets, it was a resting place for Obscura spies. "I see you were successful." Jetmir was settled in a lower bunk, a Cabaretti healer tending to him. The woman stepped aside as Jinnie rushed over. "Father, how do you feel?" "You worry too much." It was a non-answer. Even from across the room, Elspeth could see that Jetmir's eyes were dull. There was the heavy scent of blood in the air, a saturated cloth discarded at his bedside. He'd need a miracle to pull through. "Giada, come, #emph[come] ." Jinnie waved the young woman over, fumbling with a satchel of supplies and producing a small vial. "Please, heal him." Giada took the vial, and her eyes fluttered closed. There was a small flair of light, and Giada swayed, holding out the Halo to Jinnie. The vial never had a chance to exchange hands. 
Both the door they had entered through and the back door were rammed open at the same time, revealing a dozen brawlers in aprons and heavy workers' clothing. Chaos erupted. "Riveteers?" Jinnie whirled, her expression souring instantly to pure hate. "Riveteer #emph[traitors] ." Vivien seized the moment. She balled both hands into fists and spun toward the man still holding her bow—arms straight like a battering ram—to strike him clean across the face. Her bow dropped to the floor, and she scooped it up. Still cuffed, she was unable to shoot with it, but she could, and did, use it to strike the other Cabaretti in the temple. Elspeth forced power into her hands. It shimmered the air around her shackles. With a thought, her magic cracked open the locks, and the metal fell to the floor with a clang. Swinging her hand, she cast the spell Vivien's way, her ally's shackles dropping as well. "Where have you been hiding that trick?" Vivien appraised, collecting Elspeth's sword from the fallen man and tossing it over. Elspeth caught the blade by the scabbard. "Tried to keep a low profile and save my skills for when I really needed them. Now seems like the time." "Glad you finally joined the fight properly." Vivien shifted her grip on her bow, reaching for an arrow in her quiver. "Get Giada. I'll clear a path." "Thank you." Elspeth scrambled forward, drawing her sword as green flashed. Giada was cornered with Jinnie and Jetmir. Jinnie was putting up a good fight, but she was vastly outnumbered against the Maestro assassins and Riveteer brawlers. #emph[Were these brutes going to kill Giada? Didn't they want the Font for themselves?] Elspeth wasn't going to wait to find out. She dashed to the right, swinging her sword for the Riveteer hoisting his hammer overhead. Her blade met his shoulder, and he dropped his weapon before he could strike. In her periphery, another lunged for her. She caught his arm and disarmed him. 
Hooking his dagger with the toe of her shoe, she tossed it up, grabbing the hilt in time to parry another. A third was lunging for her, and Elspeth dodged, sinking her elbow into his gut as she reared back to jab with the blade, sinking it between the ribs of the first attacker. The fight was claustrophobic. Every movement Elspeth made had to account for the actions of several others. In addition, she had to keep an eye on Giada. Elspeth had made a promise that she would keep Giada safe until her last breath. But there were too many, and she could hardly tell friend from foe. It was only a matter of time until Elspeth made a mistake. She ducked away from one man's swing, stepping back to get distance enough to swing her heavy blade. She didn't see the mallet until it was too late. It smashed into her ribs as Elspeth had been straining to hoist her sword, knocking the wind from her. She could feel her chest compress, bones shatter. Elspeth coughed blood. A dagger skewered through her shoulder. Giada's screams were distant. She knew this cold. The creeping of Erebos's cold, bony finger up her spine. He would wrap it around her neck and hold until the last breath left her. #emph[Giada, I'm sorry. I tried to protect you.] Just as her vision tunneled, the man who had been rearing back to deal the final blow on Elspeth crumpled. Worker's clothing was replaced with deep green, the practical coat Vivien wore. A new figure appeared as the sounds of battle faded. An arm wrapped around Elspeth's shoulders, hoisting her upright. A familiar pair of dark eyes looked on worriedly. "Giada?" Elspeth blinked, trying to make sense of what she saw. "Take this." Giada thrust something to Elspeth's mouth, leaving her no choice but to swallow. Warmth radiated through her. Her bones shifted, knitting. Wounds mended. Invisible hands put her broken body back together, slowly restoring consciousness and clarity and shaking Erebos's grasp. The world had never looked sharper. 
The lights were brighter and— "Giada~" Elspeth lightly touched the young woman's cheek. "You're radiant." Giada's lips parted slightly with surprise. "You see it, too?" she whispered. "I—" Elspeth didn't have a chance to ask if "it" was the hazy aura surrounding Giada. "This way!" Kamiz shouted. Jinnie grabbed Giada, hoisting her by the arm. "Don't help #emph[them] , we have to flee." "Wait." Elspeth was back on her feet, the Halo surging through her veins making her nimble and strong once more. "We're coming, too." "You think—" Jinnie's rage was cut short as she had to dodge an attack. She cursed loudly and glared between Elspeth and her fallen Cabaretti soldiers. "Fine. You're too good in a scrap. Keep up, and don't try anything funny." Led by Kamiz, they escaped into a back alley, Riveteers and Maestros charging after them. Elspeth and Vivien took up the rear, holding off the attackers. Eventually no more followed. "I think we lost them." Jinnie heaved a sigh of relief. "Quick, in here." Kamiz opened a door, and they all plunged into near total darkness. "These are Obscura tunnels," she explained as they trudged upward, winding through New Capenna. "We use them to get around without being seen." "How do you know they're not compromised?" Vivien stole Elspeth's question. "I don't," Kamiz answered honestly. "That's why we need to keep moving." "Where are we moving toward?" Jinnie asked. "Park Heights. The main Obscura stronghold, the Cloud Spire. If it's not safe there, we won't be safe anywhere." #figure(image("009_Episode 5: Hymn of the Angels/03.jpg", width: 100%), caption: [Art by: <NAME>], supplement: none, numbering: none) = PARK HEIGHTS CATHEDRAL As they continued to climb, Elspeth grabbed Giada's hand and gave it a light squeeze. #emph[Thank you for saving me] , Elspeth mouthed, hoping Giada could see in the blue-tinted light that emitted from strips along the tops of the walls. The young woman's lips curled into a weary smile, and she nodded. 
As Elspeth's fingers slipped from hers, Giada's attention remained on her own hand. Giada slowly slid a bracelet off her wrist and glanced between Jinnie and Elspeth. After confirming Jinnie's focus was ahead, she dropped the bracelet and pointed to it, mouthing two words, #emph[Tracking spell] . Elspeth ground the bracelet under her boot. Giada had been asking the same questions that plagued Elspeth about how Jinnie had found them. A swell of pride rushed through Elspeth. Giada was clever, becoming stronger and more confident by the moment. The move to rid herself of the bracelet was also all the confirmation Elspeth needed to know that Giada still wanted to go with her. At the first opportunity, Elspeth would take her away again. The passage came to a dead end. Kamiz slid open a door, and the familiar earthy scents of Park Heights greeted them. Elspeth blinked into the afternoon light, already growing angry with a dusk that promised to be as bloody as New Capenna. "Not much farther," Kamiz said, leading through carefully manicured hedges. "Right around here." Vivien stopped short. Elspeth heard footsteps and the clanking of weapons, too. "Wait, it's a tr—" Giada and Jinnie had already rounded the corner of the hedges. "What is the meaning of this?" Jinnie shouted. Vivien readied her bow, turning to the Obscura enforcers circling them in from behind. Elspeth trusted her to watch her back as she raced ahead. More Obscura enforcers were waiting in a clearing. Jinnie had already engaged with them as a wounded Kamiz attempted to crawl away from Jinnie's feet. Jinnie had no doubt turned her rage on Kamiz immediately upon realizing her deception. This had all been a setup. The Obscura were as compromised as everyone else was. The fight in the hideout had just been an excuse to separate them from the other Cabaretti and lure them into a trap. But she'd bet that Kamiz hadn't counted on Elspeth and Vivien still being around. "Let's go." Elspeth grabbed Giada. 
"But Jinnie—" "She made her choice." Elspeth practically scooped Giada up. "We have to leave or we're going to die." Giada complied. #figure(image("009_Episode 5: Hymn of the Angels/04.jpg", width: 100%), caption: [Art by: <NAME>], supplement: none, numbering: none) Vivien at their back, they raced through the park, branches reaching out, clawing at their faces and arms as they bolted in search of a reprieve. #emph[Let there be one safe place in this city, one sanctuary] , Elspeth silently beseeched the cruel and uncaring gods. They emerged back onto a path that connected to a nearby cathedral suspended below angel statues. "In there." Elspeth decided, dashing into the antechamber. Her steps slowed as they echoed into the nave. The cathedral was a masterpiece. Countless angel statues lined the aisles leading up to the transept. Each had their hands upward, reaching toward the skylights above that highlighted them with a column of sunlight cutting through the relative shadow of the cathedral itself. Elspeth blinked, several times. It wasn't a trick of the light. These statues gave off their own glow. Much like Giada. Like— She stared at her palms. How had she not seen it before now? Dimmer than the rest~but Elspeth was also emitting the faintest golden haze. "Do you hear it?" Giada whispered. "I do." The chorus resonated from deep within every statue. It reverberated around the choir and ambulatory to fill the whole cathedral with a solemn requiem. There were no words, just sound, wrought from turmoil and a pain so deep that it made Elspeth's eyes prickle. A high soprano soared over the rest, singing notes in a language of reckless hope that they all so desperately needed. It was warmth and goodness. It was fulfilling yet longing. It was~ "What is it?" Elspeth whispered. "My family. I'm home," Giada said reverently, as if struck with unexpected clarity. 
#figure(image("009_Episode 5: Hymn of the Angels/05.jpg", width: 100%), caption: [Art by: <NAME>], supplement: none, numbering: none) Suddenly, the word "home" held meaning. Elspeth shared a long stare with Giada who wore an enigmatic smile. She radiated like the angels of the cathedral. Her very form seemed to belong here, as though it were a piece finally returning to its spot. "Home," Elspeth echoed. Home was purpose. It was defending those who needed her. Ajani had been right; home had never been about place. And for the first time, Elspeth felt like she had found somewhere she belonged—she had found purpose, someone and something to believe in and defend. A low rumbling filled the cathedral and interrupted the song, followed by thunderous steps. Elspeth turned to see a hulking, horned man. Two wings of membrane, the color of old blood, stretched behind him. The Adversary. "Get behind me, Giada." Elspeth drew her sword. "Did you really think you could escape #emph[me] ?" "Good of you to show yourself, <NAME>." Vivien didn't wait for a response. She fired the first arrow. <NAME>ilis smashed his fist straight through the face of the ghostly wolf. It dissipated with a yelp. Vivien had two fresh arrows knocked when more enforcers lined the cathedral behind him. "Take care of him, I'll deal with the rest. And be careful, he's #emph[like us] !" Vivien shouted. #emph[A planeswalker] , she meant. Elspeth gripped her sword tighter. "Run," Ob Nixilis growled as Vivien passed. His voice was sandpaper and fire. "Play with my agents until I'm ready to torment you." He kept his focus solely on Elspeth and Giada, wearing a cruel smile. Self-satisfied. As though everything was kindling for him to burn. "You thought you could best me? I'll show you what happens to people who dare contradict me—who even think of getting in the way of my power. Once the Font is in my possession, I'll end you both, one at a time, #emph[slowly] ." 
#figure(image("009_Episode 5: Hymn of the Angels/06.jpg", width: 100%), caption: [Art by: <NAME>], supplement: none, numbering: none) Elspeth could feel the immense power radiating from him. "Giada, if this goes sideways, run. Do it before I fall," Elspeth whispered. "I'll hold him off as long as I can, but you must get away while I can distract him." Ob Nixilis moved with the speed of a much lither man. Elspeth had incorrectly assumed that all his bulging muscle would slow him down. But using his wings for balance, he could propel himself forward at alarming speeds. "I'm going to enjoy destroying you!" Elspeth focused on defensive attacks. She had to wear him down. He had her beat on strength and speed, and her only shot was using those against him. Whenever she saw an opening, she jabbed or slashed. But she landed nothing more than glancing blows. Enough to encumber and frustrate. But not enough to slow or stop. The sword was too large for her and unruly to wield. She had to wait for the right opening to present itself. Reaching his tolerance for her diversions, Ob Nixilis unleashed a burst of power. It shot out from him, sending Elspeth flying back. Her head cracked against the stone, and everything spun. Nausea shot up from her stomach. "Giada," Elspeth wheezed. She tipped her head back, but it only made more stars pop into her vision. "Run." "No, there is nowhere for me to run." Giada hovered; her form was growing hazy, glowing even brighter than before. Ob Nixilis's rumbling footsteps drew closer. His rough laughter rattled Elspeth's bones. "First, you. Then the Font. Then the other planeswalker~and then nothing will stop me." "Run," Elspeth pleaded, eyes burning. She had sworn she would protect Giada. She had found duty and purpose only to be met with more failure. "Don't fear for me any longer, Elspeth. There is more for me out there—I am going to be with my family now." Giada rounded to Elspeth's side, kneeling. 
She was more of an outline of shimmering magic now than a physical being. #emph[Family] ~it was something Elspeth once knew through Ajani, Daxos~Giada's words ignited something deep within her. A flickering candle of hope, no brighter than the faint glow she'd seen covering her earlier. Ob Nixilis's footsteps stopped. "What do you think you're doing?" Giada's focus remained solely on Elspeth. "Thank you, for everything. I found my answers. Let me protect you now." She tipped her chin up and looked just like the rest of the sculpted angels. "I'm ready," she whispered to unseen ears. Light filled the room. It shot out in every direction from Giada's body, the force of the blast strong enough to throw Ob Nixilis away. Elspeth, however, was unaffected. She looked on in awe as Giada was transformed into the radiant magic of Halo. #figure(image("009_Episode 5: Hymn of the Angels/07.jpg", width: 100%), caption: [Art by: <NAME>], supplement: none, numbering: none) Elspeth breathed it in, allowing it to coat her flesh like armor and sink into her bones. The singing returned, a full chorus in which every part was in perfect harmony. It reached a true crescendo of joy, as if to overwrite the screams of the Cabaretti celebration that had dared to take the same name. Slowly, as the swirling lights began to fade, Elspeth sat up. Ob Nixilis groaned, still prone. Vivien and the men outside were also on their backs. Elspeth wondered if Giada's transformation had stunned all of New Capenna. #emph[Elspeth] , Giada's voice whispered from a great distance, barely audible over the fading chorus. She was fading, but she wasn't alone. Giada was framed by the shining outlines of others like her. #emph[End this, protect New Capenna. You have the weapon you need. It's been at your side all along, following you, waiting until you were ready.] #emph[I'm not strong enough.] #emph[You are] , Giada insisted. #emph[Your failures do not define you. 
Don't give up now, not when you're so close to everything you've ever wanted. Fight!] Elspeth heard Ajani in Giada's words. Her friend still spoke to her, across time and place. Her eyes fluttered closed, and she sighed softly. #emph[Home was duty.] #emph[Family was those she chose to defend.] #emph[She'd always had everything she ever needed.] Elspeth opened her eyes and stood. In one fluid motion, she hoisted the sword at her side. The weapon was no longer a clunky broadsword. It had been transformed into a narrower weapon, one far more suited in length and weight for her frame. The pommel wasn't a practical steel guard but an orb of Halo, shining with colors that shifted so often, it was every color at once. The Halo seeped into the blade, running up tracks in the fuller. From middle to edges, the weapon glowed with the same faint light as Giada. Elspeth brought the hilt to her nose, blade upright, knowing that somehow, somewhere, Giada watched the salute and could finish her metamorphosis now that Elspeth would carry on her vow. Elspeth would defend New Capenna. #figure(image("009_Episode 5: Hymn of the Angels/08.jpg", width: 100%), caption: [Art by: <NAME>], supplement: none, numbering: none) She lunged forward, wielding her new sword with both hands. Ob Nixilis barely had time to roll away from her thrust. As he dodged, he raised up a hand, pointing at her. She could feel the magic collecting in the air and narrowly avoided his shot. Fueled by Halo and purpose, wielding the blade gifted by Giada, Elspeth could go toe to toe with the mighty Adversary. Her blows were no longer clumsy and glancing but purposeful and skilled. She felt like her old self. #emph[No.] These weren't the movements of the woman she once was. These were the movements of someone stronger, better. Who she had been meant little when compared to who she would become. Ob Nixilis grew increasingly frustrated every time she landed a strike on him. 
He roared, dodged back, and tried to raise his hand for one more attack. Elspeth didn't allow it. She closed the gap, jabbing straight for his chin. At the last second, he attempted to get out of the way, but he wasn't fast enough. Steel met flesh, and she grazed off the side of his neck. Ob Nixilis gasped, though it only made the wound worse. Blood flowed from between his fingers as he applied futile compression. Elspeth withdrew, determined to slash again. She'd cut through his fingers if that's what it took to finish the job. But Ob Nixilis staggered backward. The air wavered, distorting everything around him. It folded in on itself, collapsing Ob Nixilis's form beyond the realm of perception. In a blink, he was gone, having planeswalked away. Elspeth stared at the now empty spot where Ob Nixilis once stood. Several curses were about to escape her lips when a groan jolted her back to reality.#emph[ Vivien!] Elspeth raced over to the entry, helping her friend up. Vivien massaged her head. "What happened?" "Giada saved us all. But Ob Nixilis got away. This is far from over." = EPILOGUE – MUSEUM Elspeth and Vivien walked up the steps of the museum. Unsurprisingly, the ash had already been cleaned. But there were still pockmarks in the marble of the main hall. Maestros were hard at work putting their museum back together. "I've been expecting you." Anhelo crossed over from where he had been ordering around a few younger members of the now much smaller family. "Expecting me?" Elspeth asked, hand resting lightly on the pommel of the Halo sword looped through her belt. She didn't come here for a fight. But she wasn't about to take no for an answer when it came to finally getting access to Xander's archives. "Yes. I found this stitched in my coat on the night of the Crescendo. As soon as I read it, I came straight back, but I was already too late~though it spared me from the bloodbath in the Vantoleone." Anhelo fished out a letter from his pocket. It bore Xander's seal. 
Elspeth unfolded it, reading its contents: #emph[Anhelo,] #emph[I thank you for your years of obedient service. You are as good a man as you are an assassin. But, my friend, I fear this will be where we part.] #emph[The Maestros are now in your hands, and I trust that after the long night ahead, you will help them usher in a new era. Everything that I have collected over the years should serve you well. Make our family your own as you see fit. It is well past time for the guiding vision of a younger leader.] #emph[Finally, should she survive the night ahead, which I have little doubt she will, Elspeth will come seeking my Archives. Give her access and try not to ask too many questions.] #emph[For one last time,] #emph[Yours,] #emph[Xander] "He knew I would come." Elspeth scanned the letter twice more before handing it back to Anhelo. "Xander always knew what would happen in New Capenna, often before any of the rest of us." Anhelo folded his hands at the small of his back. "This way." She followed Anhelo through the museum and up to Xander's office. He pulled back a curtain in the far corner of the room to expose a door. Unlocking it, he motioned for Vivien and Elspeth to enter. "Anything in Xander's archives is yours. Come whenever you please." Anhelo left them. They spent the day searching and scouring. No page was left unturned. Anhelo was kind enough to bring them lunch~and dinner, as twilight descended on the city. Histories of Capenna hidden in Xander's office spelled out the story: In the distant past, the Phyrexians made an attempt on this plane. The angels tried to stop the invasion, but the threat was too great for them to face alone. In desperation, they formed an alliance with the Demon Lords. In the face of the Phyrexians, Capenna's own rivalries were petty; however, those rivalries would not be forgotten. 
The demons ultimately betrayed the angels, trapping them in a kind of stasis from which they could convert the angels' bodies into Halo, an essence that—as Xander had told her—could be taken to help protect the city. It was messy, but it worked. The Demon Lords used Halo to defeat the Phyrexians and then disappeared themselves. Halo had been the key. If it held the Phyrexians at bay here~this could be the answer Ajani sought, even if the supply of Halo was dwindling. Luckily, among the archives was a small cache of Halo that Xander was no doubt saving for a rainy day. This seemed as rainy as things could be. Elspeth would take it and all the information back to Ajani and the Gatewatch. Elspeth and Vivien stood side by side, staring out the massive window in the same spot Xander had. They hadn't said a word for hours. The silence was filled with revelations and knowledge. "New Capenna will continue to fight over Halo," Elspeth said finally. "The supply is almost used up, and if it runs dry, they'll tear themselves apart." Her mind briefly wandered to Giada and the shining figures the young woman had departed with. Would the angels return to New Capenna to usher in a new age if the city were in dire need? Or had they all gone on to something greater? "There's more to this plane than one city. If New Capenna's fate is to destroy itself, then nature will reclaim it. Life will persist on this plane." Vivien's words weren't cold, but thoughtful. Perhaps even intended to be reassuring that #emph[something] would thrive long after the fall of the city. "Even still, I can't turn my back on them." "You will never be able to make them safe if you stay. The Phyrexians are now a threat to everyone." "I know," Elspeth said. "But now we can fight them," Vivien said, with a nod to Elspeth's sword and then to the cache of Xander's hidden Halo. "And if Urabrask is to be believed~" "What did he tell you?" Vivien folded her arms. "Revolution." She furrowed her brow in intense thought. 
"On New Phyrexia. It might give us the window we need to stop them." #emph[To stop ] her. The porcelain rictus of the Grand Cenobite—her former prison and the Phyrexian capital—loomed large in Elspeth's memory. Walking away from New Capenna meant heading once more into that metallic hell. #emph[Koth, Melira, Karn] ~going back meant facing her nightmares. It meant another battle, another war. Was she strong enough? Did she have a choice? Did anyone? All she wanted to do was rest, but how could she step away from battle now? She had to get back to Dominaria and tell Ajani all she'd learned, show him the Halo, and ready herself. #emph[There was too much to be done.] Elspeth nodded, determination fueling her as hot as the Halo swirling in the sword at her hip. If she had purpose and people to defend, she would find her home along the way. That was the best path forward for her. "We should go," Elspeth declared. "Where to?" Elspeth was pleased it seemed like Vivien was ready to continue this journey with her. If war was coming, she could use more powerful allies. "Dominaria." Elspeth turned away from the city skyline of New Capenna. "Time to meet some old friends." #v(0.35em) #line(length: 100%, stroke: rgb(90%, 90%, 90%)) #v(0.35em) #grid( columns: (1fr, 1fr), gutter: 2em, figure(image("009_Episode 5: Hymn of the Angels/09.png", width: 100%), caption: [], supplement: none, numbering: none), figure(image("009_Episode 5: Hymn of the Angels/10.png", width: 100%), caption: [], supplement: none, numbering: none), )
https://github.com/lcharleux/LCharleux_Teaching_Typst
https://raw.githubusercontent.com/lcharleux/LCharleux_Teaching_Typst/main/src/sandbox/demo.typ
typst
MIT License
// TEMPLATE IMPORT #import "../templates/conf.typ": conf, todo, comment, idea, note, important #import "../templates/drawing.typ": dvec, dpoint, dangle3p, dimension_line, arotz90, arrnumprod, arrsub, anorm, normalize, rotmat2D, dispvcol, arradd, mvec, arrcrossprod, arrdotprod, torseur1, torseur2, torseur6, part_label #import "@preview/unify:0.6.0": num, qty, numrange, qtyrange #import "@preview/cetz:0.2.2" #import "@preview/showybox:2.0.1": showybox #import "@preview/chic-hdr:0.4.0": * #import cetz.draw: * // #set math.equation(numbering: "(1)") // DOCUMENT SETUP #let course = "Demo - Sandbox" #let block = "Expérimentations" #let section = "PAC" #let teacher = "<NAME>" #let email = "<EMAIL>" #show: doc => conf( course: course, block: block, section: section, teacher: teacher, email: email, doc, ) = Calculs sur les vecteurs #let a0 = (1, 1, 0) #let a1 = (6, 5, 4) On peut afficher des vecteus formels: $ #dispvcol(($theta$, $alpha$, 0)) $ On peut calculer des choses avec:$ a_0 = dispvcol(#a0) $ Et: $ a_1 = dispvcol(#a1) $ On peut additionner les vecteurs: $ a_0 + a_1 = dispvcol(arradd(a0, a1)) $ Et les soustraire: $ a_0 - a_1 = dispvcol(arrsub(a0, a1)) $ Ou normaliser le vecteur #mvec($a_0$) en #mvec($v_0$): $ mvec(v) = dispvcol(normalize(#a0)) $ #arrnumprod(a0, 10) = Dessins avancés #align(center)[ #cetz.canvas({ let A = (0, 0) let B = (5, 5) dpoint(A, label: "A", anchor: "south") dpoint(B, label: "B", anchor: "south") dimension_line(A, B, label: [$L$], inv: true, offs: 2, ratio: 90%) // let label = [$L$] // let inv = true // let offs = 2 // let ratio = 90% // let offset = 10pt // line(A, B, stroke: (paint: blue, thickness: 1pt)) // dpoint(A, label: "A", anchor: "south") // dpoint(B, label: "B", anchor: "south") // let AB = arrsub(B, A) // let u = normalize(AB) // let v = arotz90(u, inv:inv) // let v2 = arrnumprod(v, offs) // let C = arradd(B, v2) // let D = arradd(A, v2) // line(A, D, stroke: (paint: black, thickness: 0.5pt), name: "l0") // line(B, C, stroke: 
(paint: black, thickness: 0.5pt), name: "l1") // // dpoint(C, label: "C", anchor: "north") // dvec((name: "l0", anchor: ratio), (name: "l1", anchor: ratio) , label: label, color: black, shrink: 1pt, rotate_label: true, mark: (end: "straight", start: "straight"), thickness: 1pt) }) ] = Gregoire = Classes #let data = ("name": "tutu", "label": "hello") #data.name #( for i in (1, 2, 3) { str(i) } ) = Repères en 3D #align(center)[ #cetz.canvas({ ortho( x: 30deg, y: 30deg, // z: 0deg, { let O0 = (0, 0, 0) let x = (3, 0, 0) let y = (0, 3, 0) let z = (0, 0, 3) let M = (10, 5, 1) catmull((12,3, 1), (10,6,1), (8,4,1), (10,2,1), tension: .4, stroke: black, close:true, name:"potato", fill: yellow.lighten(50%)) content((name:"potato", anchor:90%), anchor: "north", padding: .3)[$(S_1)$] dpoint(O0, label: [$O_0$], anchor: "north") dvec(O0, x, label: [$#mvec[x]_0$], color: green, shrink: 0, rotate_label: false, thickness: 1pt) dvec(O0, y, label: [$#mvec[y]_0$], color: green, shrink: 0, rotate_label: false, thickness: 1pt) dvec(O0, z, label: [$#mvec[z]_0$], color: green, shrink: 0, rotate_label: false, thickness: 1pt) dvec(O0, M, label: [$#mvec[$O_0M$]$], color: blue, shrink: 4pt, rotate_label: false) dpoint(O0, label: [$O_0$], anchor: "north") dpoint(M, label: [$M$], anchor: "north-west", color:red) }, ) }) ] = Torseurs $ torseur6(rx: F_y , mz: L F_y ) $ $ torseur2(R: #mvec[F]_y , p:A ) $ $ torseur1(T: T_0 ) $ = RDM #let load(x) = 1 + x - calc.pow(x, 2) / 8 #align(center)[ #cetz.canvas({ ortho( x: 30deg, y: 30deg, // z: 0deg, { let L=10 let Nv= 20 let O0 = (0, 0, 0) let A = (L, 0, 0) let x = (3, 0, 0) let y = (0, 3, 0) let z = (0, 0, 3) dpoint(O0, label: [$O_0$], anchor: "north") dpoint(A, label: [$A$], anchor: "west") line(O0, A, stroke: (paint: black, thickness: 1pt)) for i in range(Nv+1) { let x = L* i / Nv let y = load(x) let B = (x, 0, 0) let C = (x, y, 0) // dpoint(B, label: [$B B_#str(i)$], anchor: "north") dvec(C, B, label: none, color: red, shrink: 0, rotate_label: false, 
thickness: 1pt) } for i in range(Nv) { let x = L* i / Nv let x2 = L* (i+1) / Nv let y = load(x) let y2 = load(x2) let P = (x, y, 0) let P2 = (x2, y2, 0) // dpoint(B, label: [$B B_#str(i)$], anchor: "north") line(P, P2, stroke: (paint: red, thickness: 1pt)) } dvec(O0, x, label: [$#mvec[x]_0$], color: green, shrink: 0, rotate_label: false, thickness: 1pt, anchor:"north", label_fill:none) dvec(O0, y, label: [$#mvec[y]_0$], color: green, shrink: 0, rotate_label: false, thickness: 1pt) dvec(O0, z, label: [$#mvec[z]_0$], color: green, shrink: 0, rotate_label: false, thickness: 1pt) }, ) }) ] = Champ de moment d'un torseur #align(center)[ #cetz.canvas({ ortho( // x: 30deg, // y: 30deg, // z: 0deg, { let R = (0, 0, 7) let MO = (0, 0, 20) let O0 = (0, 0, 0) let nr = 10 let ntheta = 4 let rmax = 5 let rmin = 1 let mscale = .1 for i in range(nr) { let r = rmin + (rmax - rmin) * i / nr for j in range(ntheta){ let theta = 2 * calc.pi * j / ntheta let P = (r * calc.cos(theta), r * calc.sin(theta), 0.) dpoint(P, color: black) let PO0 = arrsub(O0, P) let M = arrnumprod(arradd(arrcrossprod(PO0, R), MO), mscale) dvec(P, arradd(M, P), color: red, shrink: 0, rotate_label: false, thickness: 1pt) } } dvec(O0, R, label: [$#mvec[R]$], color: blue, shrink: 0pt, rotate_label: false, thickness: .5pt) dpoint(O0, label: none, anchor: "north") if MO.at(2) != 0 { dvec(O0, arrnumprod(MO, mscale), color: red, shrink: 0, rotate_label: false, thickness: 1pt) } }, ) }) ] = Champ équiprojectif Cas du champ de vitesse d'un solide indéformable. 
#align(center)[ #cetz.canvas({ let O0 = (-8, 0, 0) let x = (3, 0, 0) let y = (0, 3, 0) let A = (1, 2, 0) let B = (-2.5, 5, 0) let BA = arrsub(A, B) let AB = arrsub(B, A) let lAB = anorm(AB) let u = normalize(BA) let v = arotz90(u) let R = (0, 0, -.35) let VA = arradd(arrnumprod(u, 1.5), arrnumprod(v, -2.5)) let VB = arradd(VA, arrcrossprod(BA, R)) let ua = normalize(VA) let va = arotz90(ua, inv: true) let ub = normalize(VB) let vb = arotz90(ub, inv: true) let Ha = arradd(A, arrnumprod(u, arrdotprod(VA, u))) let Hb = arradd(B, arrnumprod(u, arrdotprod(VB, u))) let construction_line_thickness = 0.5pt catmull((2, 3), (0, 6), (-4, 4), (-1, 2), tension: .4, stroke: black, close: true, name: "potato", fill: yellow.lighten(50%)) content((name: "potato", anchor: 10%), anchor: "west", padding: .3)[$(S_1)$] dpoint(A, label: "A", anchor: "west") dpoint(B, label: "B", anchor: "south") dvec(A, arradd(A, VA), color: blue, label: [$#mvec[V] (A in 1 slash 0)$], rotate_label: false, thickness: 2pt, anchor: "north-west", label_fill: none, anchor_at: 100%) dvec(B, arradd(B, VB), color: blue, label: [$#mvec[V] (B in 1 slash 0)$], rotate_label: false, thickness: 2pt, anchor: "north", label_fill: none, anchor_at: 100%) dpoint(O0, label: [$O_0$], anchor: "north") dvec(O0, arradd(O0, x), label: [$#mvec[x]_0$], color: green, shrink: 0, rotate_label: false, thickness: 1pt) dvec(O0, arradd(O0, y), label: [$#mvec[y]_0$], color: green, shrink: 0, rotate_label: false, thickness: 1pt) line(A, arradd(A, arrnumprod(va, 10)), stroke: (paint: black, thickness: construction_line_thickness), name: "perpA") line(B, arradd(B, arrnumprod(vb, 10)), stroke: (paint: black, thickness: construction_line_thickness), name: "perpB") line(arradd(A, arrnumprod(u, .5 * lAB)), arradd(B, arrnumprod(u, -.5 * lAB)), stroke: (paint: black, thickness: construction_line_thickness)) intersections("i", "perpA", "perpB") dpoint("i.0", label: $I$, anchor: "north") dangle3p(A, arradd(A, ua), arradd(A, va), right: true, radius: 
.25) dangle3p(B, arradd(B, ub), arradd(B, vb), right: true, radius: .25) line(arradd(A, VA), Ha, stroke: (paint: black, thickness: construction_line_thickness)) line(arradd(B, VB), Hb, stroke: (paint: black, thickness: construction_line_thickness)) dangle3p(Ha, A, arradd(A, VA), right: true, radius: .25, color: red) dangle3p(Hb, B, arradd(B, VB), right: true, radius: .25, color: red) dimension_line(Ha, A, label: [$V_"AB"$], inv: true, offs: 3, ratio: 90%) dimension_line(Hb, B, label: [$V_"AB"$], inv: true, offs: 3, ratio: 90%) hide(line("i.0", A, stroke: (paint: black, thickness: 2pt), name : "IA")) line("i.0", arradd(A, VA), stroke: (paint: blue, thickness: .5pt), name : "IA2") hide(line("i.0", B, stroke: (paint: black, thickness: 2pt), name : "IB")) line("i.0", arradd(B, VB), stroke: (paint: blue, thickness: .5pt), name : "IB2") let nv = 5 let pv = 100% / nv for i in range(1, nv) { dvec((name:"IA", anchor:i * pv), (name:"IA2", anchor:i * pv), color: blue, label: none, thickness: .5pt, anchor: "north-west", label_fill: none, anchor_at: 100%) dvec((name:"IB", anchor:i * pv), (name:"IB2", anchor:i * pv), color: blue, label: none, thickness: .5pt, anchor: "north-west", label_fill: none, anchor_at: 100%) } // dvec(O0, z, label: [$#mvec[z]_0$], color: green, shrink: 0, rotate_label: false, thickness: 1pt) // dvec(O0, M, label: [$#mvec[$O_0M$]$], color: blue, shrink: 4pt, rotate_label: false) // dpoint(O0, label: [$O_0$], anchor: "north") // dpoint(M, label: [$M$], anchor: "north-west", color:red) }) ] = Liaisons 3D #let polygon(verts, ..args) = { let n = verts.len() for i in range(n) { let A = verts.at(i) let B = verts.at(calc.rem(i + 1, n)) line(A, B, ..args) } } #align(center)[ #cetz.canvas({ ortho( // x: 30deg, // y: 30deg, // z: 0deg, { let R = (0, 0, 7) let MO = (0, 0, 20) let O0 = (0, 0, 0) let nr = 10 let ntheta = 4 let rmax = 5 let rmin = 1 let mscale = .1 let x = (3, 0, 0) let y = (0, 3, 0) let z = (0, 0, 3) let ntheta = 20 let radius = 2 // circle(O0, 
radius:radius) for theta in range(ntheta){ let theta0 = 2 * calc.pi * theta / ntheta let theta1 = 2 * calc.pi * (theta + 1) / ntheta let verts = ( (radius*calc.cos(theta0), radius*calc.sin(theta0), 0), (radius*calc.cos(theta1), radius*calc.sin(theta1), 0), (radius*calc.cos(theta1), radius*calc.sin(theta1), 2), (radius*calc.cos(theta0), radius*calc.sin(theta0), 2), ) polygon(verts, stroke: (paint: black, thickness: 1pt), fill:white) } circle((0,0, 2), radius:radius, stroke: (paint: black, thickness: 1pt), fill:white) dvec(O0, x, label: [$#mvec[x]_0$], color: green, shrink: 0, rotate_label: false, thickness: 1pt, anchor:"north", label_fill:none) dvec(O0, y, label: [$#mvec[y]_0$], color: green, shrink: 0, rotate_label: false, thickness: 1pt) dvec(O0, z, label: [$#mvec[z]_0$], color: green, shrink: 0, rotate_label: false, thickness: 1pt) }, ) }) ]
https://github.com/pavelbezpravel/cv
https://raw.githubusercontent.com/pavelbezpravel/cv/main/README.md
markdown
# cv ## Build ```bash typst compile main.typ cv-talashchenko.pdf ``` ## Links [Download latest build](https://github.com/pavelbezpravel/cv/releases/latest/download/cv-talashchenko.pdf)
https://github.com/mem-courses/calculus
https://raw.githubusercontent.com/mem-courses/calculus/main/homework-2/homework13.typ
typst
#import "../template.typ": * #show: project.with( course: "Calculus II", course_fullname: "Calculus (A) II", course_code: "821T0160", title: "Homework #13: 格林公式 & 路径无关性", authors: ( ( name: "<NAME>", email: "<EMAIL>", id: "#198", ), ), semester: "Spring-Summer 2024", date: "June 4th, 2024", ) = 习题10-1 == P212 4(1) #prob[ 利用格林公式计算第二类曲线积分: $ intcb(C) (x+y)^2 dx - (x^2 + y^2) dy $ 其中围线 $C$ 依正方向经过以 $A(1,1)$,$B(3,2)$,$C(2,5)$ 为顶点的三角形 $Delta A B C$。 ] 设围线 $C$ 是区域 $D$ 的边界曲线,由题意知 $C$ 是正向的。根据格林公式得 $ & intcb(C) (x+y)^2 dx - (x^2 + y^2) dy = iintb(D) ((diff (x^2 + y^2)) / (diff x) - (diff ((x+y)^2)) / (diff y)) dx dy\ =& iintb(D) (2x - 2(x+y)) dx dy = -2 iintb(D) y dx dy = -2 (int_1^2 7 / 4 (y-1) y dy + int_2^5 (-7 / 12 y + 35 / 12) y dy)\ =& -2 (49 / 12 - 21 / 8 - 819 / 36 + 735 / 24) = -56 $ #correction[ 注意 $P = - (x^2+y^2)$ 而不是 $P= x^2 + y^2$。所以应用格林公式后得到的积分应是: $ iintb(D) (-(diff (x^2 + y^2)) / (diff x) - (diff ((x+y)^2)) / (diff y)) dx dy = iintb(D) (-4x-2y) dx dy $ ] == P212 4(4) #prob[ 利用格林公式计算第二类曲线积分: $ intb(accent(A B O, paren.t)) (e^x sin y - m y) dx + (e^x cos y - m) dy $ 其中 $accent(A B O, paren.t)$ 为由点 $A(a,0)$ 至点 $O(0,0)$ 的上半圆周 $x^2 + y^2 = a x$。 ] 设区域 $D$ 的边界曲线为 $accent(A B O, paren.t)$ 与 $overline(O A)$ 拼接而成的曲线,记为 $L^+$。由格林公式得: $ & intb(accent(A B O, paren.t)) (e^x sin y - m y) dx + (e^x cos y - m) dy\ =& iintb(D) (e^x cos y - (e^x cos y - m)) dx dy - intb(overline(O A)) (e^x sin y - m y) dx + (e^x cos y - m) dy\ =& iintb(D) m dx dy - 0 = m dot 1 / 2 dot pi (a / 2)^2 = (pi m a^2) / 8 $ == P213 5 #prob[ 计算: $ I=intcb(C) (x dy - y dx) / (x^2 + y^2) $ 式中 $C$ 为依正向而不经过坐标原点的简单封闭曲线。 ] 设 $D$ 为正向封闭曲线 $C$ 所围成的区域,则依格林公式得: $ I &= iintb(D) ((diff display(x/(x^2+y^2))) / (diff x) + (diff display(y/(x^2+y^2))) / (diff y)) dx dy = intb(D) ((-x^2 + y^2) / ((x^2+y^2)^2) + (x^2 - y^2) / ((x^2+y^2)^2)) dx dy \ &= 0 $ #correction[ 这是坐标原点在区域 $D$ 外部的情况,有 $I=0$;还需要考虑坐标原点在 $D$ 内部的情况,这时候由于 $P,Q$ 在坐标原点无定义,不能直接应用格林公式。 取 $a>0$ 使得中心在原点半径为 $a$ 的圆周 $L_a$:$x^2+y^2=a^2$ 完全位于围线 $C$ 之内,用 $D'$ 来表示 
$C$ 与 $L_a$ 之间的环形闭区域,显然,在 $D'$ 上,$P,Q$ 及其偏导数均连续,可用格林公式: $ (intc_C + intc_(-L_a)) P dx + Q dy = iintb(D') ((diff Q) / (diff x) - (diff P) / (diff y)) dx dy = 0 $ 故 $ I =& intcb(L_a) P dx + Q dy = intcb(L_a) (x dy - y dx) / (x^2 + y^2) $ 应用极坐标变换得 $ I =& int_0^(2 pi) (a cos theta dif (a sin theta) - a sin theta dif (a cos theta)) / ((a cos theta)^2 + ( a sin theta )^2) dif theta\ =& int_0^(2 pi) (a^2) / (a^2) dif theta = 2pi $ ] == P213 6 #prob[ 设位于点 $(0,1)$ 的质点 $A$ 对质点 $M$ 的引力大小为 $G"/"r^2$($G>0$ 为万有引力系数,$r$ 为质点 $A$ 与 $M$ 之间的距离),质点 $M$ 沿曲线 $y=sqrt(2x - x^2)$ 自 $B(2,0)$ 运动到 $O(0,0)$,求在此过程中点 $A$ 对质点 $M$ 的引力所做的功。 ] 万有引力的方向即两点间连线的方向,故: $ W &= int_accent(B M O, paren.t) arrow(A) dot arrow(T^circle.small) dif l = int_accent(B M O, paren.t) G / r^3 (x dx + (y-1) dy) $ 其中 $display(r = sqrt(x^2 + (y-1)^2))$。从物理意义出发,可验证 $display((diff Q)/(diff x) = (diff P)/(diff y))$ 且连续,故积分关于路径无关: $ W &= int_overline(B O) G / r^3 (x dx + (y-1) dy) = int_2^0 (x dx) / ((sqrt(x^2+1))^3) = k(1-1 / sqrt(5)) $ // #align(center, image("images/2024-06-12-11-51-46.png", width: 100%)) == P213 7(1) #prob[ 利用第二类曲线积分计算曲线所围的面积:(星形线) $ x = a cos^3 t, quad y = b sin^3 t quad (0 <= t <= 2pi) $ ] 设 $D$ 为星形线所围成的区域,由格林公式得(取 $display(P = -1/2\;space Q=1/2)$): $ S =& iintb(D) dx dy = 1 / 2 intc_C x dy - y dx = (3 a b) / 2 int_0^(2 pi) (cos^4 t sin^2 t + cos^2 t sin^4 t) dif t \ =& 3 / 8 a b int_0^(2 pi) sin^2 t dif t = 3 / 8 pi a b $ == P213 8(1) #prob[ 计算第二类曲线积分: $ int_((0,1))^((2,3)) (x+y) dx + (x-y) dy $ ] $P = x + y;space Q = x-y$ 都连续。且 $display((diff Q)/(diff x) = (diff P)/(diff y) = 1)$ 都连续。故原积分关于路径无关: $ & int_((0,1))^((2,3)) (x+y) dx + (x-y) dy = int_((0,1))^((2,1)) (x+y) dx + int_((2,1))^((2,3)) (x-y) dy\ =& int_0^2 (x+1) dx + int_1^3 (2-y) dy = 4 + 0 = 4 $ == P213 8(3) #prob[ 计算第二类曲线积分: $ int_((0,-1))^((1,0)) (x dy - y dx) / ((x-y)^2) $ 沿着与直线 $y=x$ 不相交的路径。 ] 由于 $display((diff Q)/(diff x)=(diff P)/(diff y))$ 且连续,故原积分关于路径无关: $ int_((0,-1))^((1,0)) (x dy - y dx) / ((x-y)^2) =& int_(0)^1 dx / ((x+1)^2) + 
int_(-1)^0 dy / ((1-y)^2) =& (atpos(-1/(1+x), 0, 1)) + (atpos(1/(1-y), -1, 0)) = 1 $ == P213 9(1) #prob[ 求原函数 $u$: $ dif u = (x^2 + 2x y - y^2) dx + (x^2 - 2 x y - y^2) dy $ ] $ (diff Q) / (diff x) = 2x - 2y; quad (diff P) / (diff y) = 2x - 2y $ 故 $display((diff Q) / (diff x) = (diff P)/(diff y))$ 且连续, 原积分关于路径无关。可以求得其原函数: $ u(x,y) &= int_0^x P(x,0) dx + int_0^y Q(x,y) dy + C = int_0^x x^2 dx + int_0^y (x^2 - 2x y - y^2) dy + C\ &= x^3 / 3 + x^2 y - x y^2 - y^3 / 3 + C $ == P213 9(2) #prob[ 求原函数 $u$: $ dif u = (y dx - x dy) / (3x^2 - 2x y + 3y^2) $ ] $ (diff Q) / (diff x) &= diff / (diff x) ((-x) / (3x^2 - 2 x y + 3 y^2)) = (-(3x^2 - 2 x y + 3 y^2) + x(6x - 2y)) / ((3x^2 - 2 x y + 3 y^2)^2) = (3(x^2-y^2)) / ((3x^2 - 2 x y + 3 y^2)^2)\ (diff P) / (diff y) &= diff / (diff y) ((y) / (3x^2 - 2 x y + 3 y^2)) = ((3x^2 - 2 x y + 3 y^2) - y(2x + 6y)) / ((3x^2 - 2 x y + 3 y^2)^2) = (3(x^2 - y^2)) / ((3x^2 - 2 x y + 3 y^2)^2) $ 故 $display((diff Q) / (diff x) = (diff P)/(diff y))$ 且连续, 原积分关于路径无关。可以求得其原函数: $ u(x,y) &= int_0^x P(x,0) dx + int_0^y Q(x,y) dy + C = int_0^y (-x dy) / (3x^2 -2x y + 3y^2) + C\ &= -x / 3 int_0^y dy / (y^2 - 2 / 3 x y + x^2) + C = -1 / (2 sqrt(2)) arctan (3y-x) / (2 sqrt(2) x) + C // TBD:好难算 $
https://github.com/chendaohan/bevy_tutorials_typ
https://raw.githubusercontent.com/chendaohan/bevy_tutorials_typ/main/02_behaviors/behaviors.typ
typst
#set page(fill: rgb(35, 35, 38, 255), height: auto, paper: "a3") #set text(fill: color.hsv(0deg, 0%, 90%, 100%), size: 22pt, font: "Microsoft YaHei") #set raw(theme: "themes/Material-Theme.tmTheme") = 1. 行为 Bevy 中的行为被称为系统。每个系统都是你编写的 Rust 函数(```Rust fn```)或闭包(```Rust FnMut```),它接受特殊的参数类型来指示它需要访问哪些数据。 这是一个系统的样子,通过查看函数参数,我们就可以知道访问了哪些数据。 ```Rust fn enemy_detect_player( // 创建/删除实体、资源等 mut commands: Commands, // 查询实体/组件的数据 players: Query<&Translation, With<Player>>, mut enemies: Query<&mut Translation, (With<Enemy>, Without<Player>)>, // 访问资源的数据 game_mode: Res<GameMode>, mut ai_settings: ResMut<EnemyAiSettings>, ) { // 游戏行为 } ``` = 2. 并行系统 基于你编写的系统中的参数,Bevy 知道每个系统可以访问哪些数据,以及是否与其他系统冲突。没有冲突的系统将自动并行运行。这样就可以有效的利用多核 CPU。 为了获得最佳的并行性,建议你保持功能和数据的细粒度。在一个系统中放入过多的功能,或在单个组件或资源中放入过多的数据,会限制并行性。 = 3. 独占系统 独占系统为您提供了可以获得对 ECS World 完全直接访问的权限。它们无法与其他系统并行运行,因为它们可以访问任何内容并执行任何操作。 ```Rust fn save_game(world: &mut World) { // 游戏行为 } ``` = 4. Schedules == 4.1 Schedule Bevy 将系统存储在 Schedule 中。Schedule 包含系统及所有相关元数据,以组织它们,告诉 Bevy 何时以及如何运行它们。Bevy App 中通常包含多个 Schedule ,每个 Schedule 都是在不同场景中调用的系统集合(每帧更新、固定时间更新、应用程序启动时、在状态转换时)。 ```Rust #[derive(Debug, Default, Clone, PartialEq, Eq, Hash, States)] enum GameState { #[default] Menu, Start, Paused, } App::new() .add_plugins(MinimalPlugins) .init_state::<GameState>() // 应用程序启动时运行 .add_systems(Startup, system_1) // 每帧运行 .add_systems(Update, system_2) // 固定时间运行 .add_systems(FixedUpdate, system_3) // 状态转换时运行 .add_systems(OnEnter(GameState::Start), system_4) .add_systems(OnExit(GameState::Menu), system_5) .add_systems( OnTransition { exited: GameState::Start, entered: GameState::Paused, }, system_6) .run() ``` == 4.2 系统元数据 存储在计划中的元数据使你能够控制系统的运行方式: - 添加运行条件以控制系统在 Schedule 运行期间是否运行。 - 添加排序约束,如果一个系统依赖于另一个系统完成后才能运行。 ```Rust App::new() .add_plugins(MinimalPlugins) // 运行条件 .add_systems(Update, system_1.run_if(system_2)) // 排序约束 .add_systems(Update, (system_3, system_4.before(system_3))) .add_systems(Update, (system_5, system_6.after(system_5))) .add_systems(Update, 
(system_7, system_8, system_9).chain()) .run() ``` == 4.3 系统集 在 Schedule 中,系统可以被分组为集合。集合允许多个系统共享公共配置/元数据。 ```Rust #[derive(Debug, Clone, PartialEq, Eq, Hash, SystemSet)] enum MySystemSet{ First, Last, } App::new() .add_plugins(MinimalPlugins) .configure_sets(Update, MySystemSet::First.before(MySystemSet::Last)) .add_systems(Update, system_1.in_set(MySystemSet::First)) .add_systems(Update, (system_2, system_3).in_set(MySystemSet::Last)) .run() ```
https://github.com/katamyra/Notes
https://raw.githubusercontent.com/katamyra/Notes/main/Compiled%20School%20Notes/CS2110/Quiz3StudyGuide.typ
typst
#import "../../template.typ": * #show: template.with( title: [CS 2110 Quiz 3 Study Guide], authors: ( ( name: "<NAME>", ), ), ) = Pseudo-Ops (Assembler Directives) #note[ * Pseudo-ops* (or assembler directives) do not refer to operations performed by assembly, but rather it is a message from the assembly language to the assembler to help the assembler during its process. ] == .ORIG #definition[ *.ORIG* tells the assembler where in memory to place the LC-3 program. So .ORIG x3050 says place the first LC-3 ISA instruction at location x3050 ] == .FILL #definition[ *.FILL* tells the assembler to set aside the next location in the program and initialize it with the value of an operand. The value can either be a number or label. ] == .BLKW #definition[ *.BLKW* tells the assembler to set aside some number of sequential memory locations (BLocK of Words) in the program. A common use of BLKW is to set aside a piece of memory and then have another section of code produce a number and store it at that memory. ] == .STRINGZ #definition[ *.STRINGZ* tells the assembler to initalize a sequence of n+1 memory locations. It takes in a string as an argument, enclosed in double quotes. `.STRINGZ "Hello, World!"` ] == .END #definition[ *.END* tells the assembler it has reached the end of the program and need never look at everything after ] = Two Pass Process In order to work properly, assembly has to go through the code in two separate passes, otherwise it will encounter errors with not understanding symbolic names or labels, such as `PTR`. #theorem[ The objective of the *first pass* is to identify the actual binary addresses corresponding to symbolic labels. These set of correspondences is stored in the _symbol table_. In the first pass, we construct the symbol table. In pass two, we translate the individual assembly language instructions into their corresponding language instructions. 
] For example: ```yasm GETCHAR ADD R3, R3, #-1 LDR R1, R1, #0 BRnzp TEST ``` In this case, if we only had a 1 pass technique, assembly wouldn't be able to recognize what `GETCHAR` and `TEST` refer to, so it would fail. Instead on the first pass we add `GETCHAR` & `TEST` to the symbol table with their respective address. Now on the second pass, we can go through each line and substitute a symbol for its value in the symbol table. = Subroutines #definition[ *Subroutines* are program fragments that are reusable, similar to functions in other languages. ] The *call/return mechanism* allows us to execute instruction sequences only once by requiring us to include it as a subroutine in our program only ones. THere are two instructions that use this call/return mechanism: == JSR(R) #definition[ *JSR(R)* calls the subroutine. - It loads the PC with the starting address of the subroutine and it loads R7 with the address immediately after the address of the JSR(R) instruction. R7 now holds the address we want to come back to after our subroutine is done, also known as the _return linkage_. ] #theorem[ JSR(R) uses two addressing modes for computing the starting address of the subroutine, PC-Relative addressing or Base Register addressing. You can use JSR/JSRR depending on which mode you are using. ] JSR uses PC offset, while JSRR uses baseR offset. #note[ Because subroutines destroy/overwrite the values in registers, we must save the values in the registers we are using, and then restore them after. ] = Stack #definition[ *Stacks* are _LIFO_, last in first out ] The stack consists of a *stack pointer*, which keeps track of the top of the stack. We use R6 as the stack pointer. == Stack Buildup Stack buildup is the first half of the full calling convention. 
*Caller Prepares the Subroutine* + Push arguments to the stack in reverse order (from last arg to first) + JSR/JSRR to subroutine *Callee preserves values and allocates space* + Allocate space for return value + Store the return address + Store old frame pointer(R5) + Set frame pointer to be the space above the old frame pointer (which holds the first local variable) + Allocate space for local variable + Store R0-R4 == Stack Teardown *Callee prepares for return* + Store return value in previously allocated space + Restore R0-R4 to previous value and pop from stack
https://github.com/pluttan/typst-g7.32-2017
https://raw.githubusercontent.com/pluttan/typst-g7.32-2017/main/gost7.32-2017/styles/raw.typ
typst
MIT License
#import "../g7.32-2017.config.typ":config

// Wraps the document in a show rule that renders every `raw` (code)
// element inside a rounded, filled box, styled per the GOST 7.32-2017
// configuration (`config.rawBg`).
#let style_raw(content) = {
  show raw: it => {
    box(
      fill: config.rawBg,
      inset: (x: 6pt, y: 0pt),
      outset: (y: 3pt),
      radius: 4pt,
      // Bug fix: `align(left)[it]` produced the literal text "it" —
      // a content block `[it]` does not interpolate the binding.
      // Pass the raw element itself so the code is actually rendered.
      align(left, it),
    )
  }
  content
}
https://github.com/polarkac/MTG-Stories
https://raw.githubusercontent.com/polarkac/MTG-Stories/master/stories/015%20-%20Commander%20(2014%20Edition)/001_Loran's%20Smile.typ
typst
#import "@local/mtgstory:0.2.0": conf #show: doc => conf( "Loran's Smile", set_name: "Commander (2014 Edition)", story_date: datetime(day: 27, month: 10, year: 2014), author: "<NAME>", doc ) #emph[This short story originally ran in ] The Colors of Magic#emph[ anthology published in 1999. It retells portions of the story of ] Antiquities#emph[, including the story of Feldon, who received his own legendary creature card in ] Commander (2014 Edition)#emph[, previewed today in an article by <NAME> and <NAME>. Ethan took a great deal of Feldon's design from this very story, so we thought we'd share it with you all.] #emph[Enjoy!] #v(0.35em) #line(length: 100%, stroke: rgb(90%, 90%, 90%)) #v(0.35em) Loran died ten years after the devastation—after Urza and Mishra destroyed most of the world with their war, after the tumultuous explosion that eliminated Argoth and altered the rest of the world forever. Loran died in part because of that devastation. She did not die in battle, for she was not a warrior. Nor did she die in a duel of magical forces, for though her lover Feldon had mastered the study of magic, she found she could not. She did not die of intrigue, or of passion, or of some fatal flaw. She died in bed, weakened by wounds suffered over a decade previous—wounds inflicted by Ashnod the Uncaring, Mishra's assistant. She was weakened by the lengthening winters and the cold mountain air, weakened by her own great age, weakened, and eventually defeated, by the world that the brothers, Urza and Mishra, had created. At first she just winded easily when in the garden or cooking, and Feldon would put aside his own work to help. Then she had trouble working in the garden at all, and Feldon did the best he could, under her direction, to substitute for her. Later she could not work around the house, and Feldon brought in servants from the nearby town to aid. 
When she could not get out of bed, Feldon sat beside her and read to her, told her stories of his own youth, and listened to hers. After a time he had to feed her as well. At length she died in bed in her sleep, Feldon sitting beside her, asleep as well from his long guardianship. When he awoke her flesh was cold and pale, and the breath had long-since left her body. He commanded the servants to dig a grave behind the house, among the now weed-choked garden that Loran had begun with Feldon's grudging, grumbling aid shortly after they first arrived. She had kept it going through several seasons by sheer force of will, but when she took ill that last, final time, she had to surrender the garden to the weeds and the cold rains. It was raining when they laid her to rest, wrapped in her bed sheets and sealed within a coffin of thick oak planks. Feldon and the servants uttered a few prayers, then the old mage watched as the servants methodically piled the dirt atop the lid. Feldon's tears were lost in the rain. For days afterward Feldon stayed by the fire, and the servants brought him his meals, much as they had brought Loran hers. Feldon's library and workshop stood empty for the nonce, the books closed, the forges cold, the various reagents and solutions settling quietly in their glass jars. He stared into the fire and sighed. Feldon remembered: the touch of Loran's hand, the Argivian lilt to her voice, and her thick, dark hair. Most of all, he thought of the smile that she gave. It was a slightly sad, slightly knowing smile. It was a soft smile, and it warmed Feldon whenever he saw it. Now, Feldon was a practitioner of the Third Path, the way that was neither Urza nor Mishra, charting a new course between the two warring brothers and their technological miracles. He could pull from his mind great magics, fueled by the memories of his mountain home, and work wonders with them. 
He could cause fire to appear or the land itself to shift or summon the strokes of a lightning storm and bend them to his will. Yet he could not heal Loran's body or dying spirit. He could not keep the life within her. His magics had failed him and had failed his love. The old man sighed and raised a hand toward the fire. He unlocked a part of his brain that held the memories of the mountains around them. He pulled the energies from those lands, as he learned to do in Terisia City with Drafna, Hurkyl, the archimandrite, and the other mages of the Ivory Towers. He concentrated, and the flames writhed as they rose from the logs, twisting upon themselves until they finally formed a soft smile. Loran's smile. It was the most that he could do. For five days and five nights Feldon sat by the fire, and for a brief time the servants wondered if they would soon have to tend the master as they had tended the mistress. Indeed, Feldon was never fully healthy himself, overweight and walking only with the aid of a silver cane he had rescued from the heart of a glacier. His dark beard was now streaked with silver, and the corners of his eyes drooped from grief and age. The servants wondered if he would ever rise from the fireside again. On the sixth day, Feldon retreated from the hearth to his workshop. Soon afterwards a short note appeared for the servants—a list of items that they were to procure as soon as possible. The list called for thin sheets of copper, iron rivets, cords made of various spun metals, brass gears if they could get them, steel otherwise, glass blown into a variety of shapes (with illustrations and dimensions). And there was a letter to be delivered to a place far to the south and west. For the next two months the workshop clattered. Feldon brought the forge to life, and the small anvil rang with ear-splitting blows. Fire was within the domain of mountain magics, and Feldon was its master. 
He could cause it to heat a precise location with the exact amount of heat needed merely by ordering it to do so. Such was the nature of the old mage's magic. The wire arrived, and the gears (iron, not brass), sheets of copper, and some of bronze. The glass was sub-standard, and Feldon had to resort to teaching himself how to blow it to form the shapes he needed. More wire arrived, this new amount spun with horsehair to form thick, long cords like braids of human hair. At the end of two months, Feldon looked at his work and shook his head. The joints were stiff, and the arms jutted in the wrong directions. The head was too large, and the hair looked like what it was: a collection of wire and horsehair. The eyes were little more than badly-crafted glass spheres. It was too tall at the shoulders and too large in the hips. The creation looked nothing like Loran. Only around the mouth, where there was the ghost of a smile, came the hint of a memory. Feldon shook his head, and thick tears gathered at the corners of his eyes. He took a sledge and knocked the automaton to pieces. And he began again. He pored over Loran's journals in the library. She had studied with Urza himself and knew something of artifice. He restrung the wires and ligatures through the arms and legs, building first miniature models, then full-fledged mock-ups before proceeding to the final version. He worked in animal bone and wood as well as metal and stone. His glasswork became better, so he could provide a glass eye for an old woman in the village that matched her good one. Slowly he built the automaton in the shape of Loran, sculpting her out of myriad materials. After six months she was finished. The statue missed only the heart. Feldon waited patiently for that organ to appear. He spent his days in the workshop, polishing, testing, and rebuilding the automaton. When he first met Loran, she had use of both arms. Later she lost the use of one of them, crippled by Ashnod. 
He went back and forth, removing and replacing the arm. Finally he restored the statue to its complete state. A month later a package arrived from a place far to the south and west, from a scholar whom Loran and Feldon had known when they were at Terisia City, at the Ivory Towers. The package contained a small chip of a crystal, glowing softly—a powerstone, the heart of artifice. There were fewer and fewer stones of this type in the years since the devastation, but this was one. The package contained a note as well, signed by Drafna, master of the School of Lat-Nam. It said simply, "I understand." Feldon held the powerstone and noticed that his fingers were trembling. Cradling the crystal in both hands, he went to the automaton, standing guard in the center of the workshop. He had placed the bracket for the crystal where the heart would be in a living woman. Feldon set the crystal within its framework, and closed the compartment door. He reached behind the automaton's left ear and touched a small switch. The automaton jerked to life like a puppet whose strings had suddenly been pulled. Its head shook then cocked slightly to one side. One leg tensed, the other relaxed. One shoulder dipped slightly. Feldon nodded and raised a hand, pointing to the far side of the room. The automaton in the shape of Loran walked gingerly, like a woman finding her land legs after a long sea voyage. By the time she had reached the end of the workshop she was walking normally. She reached the opposite side, turned, and walked back. She smiled, hidden wires rippling the lips over ivory teeth. The smile was perfect. Feldon smiled back, the first time he had truly smiled since Loran had left him. Every day the automaton stood patiently in his workshop. He talked to it but had to point to command it. For the first month it was enough. But it was silent, save for the high-pitched whirring of gears and wire spooling and unspooling. 
At first Feldon thought he could live with it, but after the first month it became an irritant. After the second it was insufferable. The silence, its metallic lips crafted into that perfect smile, was more than he could bear. It seemed to mock him, to taunt him. He asked it questions, then reprimanded himself for he knew it could not answer. The Loran he had built was a creature of copper skin and geared muscles. It was not the woman he had loved. At last he reached behind her ear and touched the small toggle, deactivating her. She stiffened as the power left her, though the smile remained on her lips. He removed the powerstone from her heart, set the stone on the shelf, and placed the inactive automaton in the garden standing guard over Loran's grave. Within a week the steel gears had rusted solid, locking it forever in its stance, its glass eyes seeing but not recording the world around it. In the week that followed Feldon returned to the fireside, staring into the flickering flames as if they held some secret. At the end of the week, under a cold rain, he departed, leaving his servants to keep up the house in his absence. He left the town in a small wagon, heading eastwards into the lands most affected by the devastation of the Brothers' War. As he traveled, he asked questions. Did anyone know of mages, of spellcasters, of individuals with wondrous power? Before the destruction of the Ivory Towers, there had been many who had explored the paths of magic, but they had been scattered when Terisia City had fallen. Surely some survived, somewhere. He asked merchants and mendicants, farmers and priests. Some looked at him as if he were mad, and some were frightened, terrified that he was seeking to bring back the powers that created the devastation in the first place. But enough understood what he was looking for, and of those a few knew of this wise man or that shaman who walked the Third Path. 
In time he heard of the Hedge Wizard, and he turned his wagon to the east. He found the Hedge Wizard near the wreckage that had been Sarinth, one of the great cities that had resisted Mishra and was destroyed for its sin. Most of the great forests of that land had been later lumbered and its mountains stripped to feed the war machines of the brothers' battles. Now it was a barren landscape, its soil runneled and ravined by eternal rain. What forests that survived were overrun by a tangle of briars and young trees. In one of those briar-choked shambles Feldon found a hermit. The man had defended his patch of ground from Mishra's armies, and the strain had nearly broken both his mind and his spirit. He was a hunched figure, bent nearly double with age, with a drooling grin and a cackling laugh. Feldon approached him with open hands, showing he was weaponless. The hermit had heard of the Council of Mages at Terisia City and had known of Feldon's name among them. He laughed and capered and allowed Feldon to come within his forest, to study the hermit's magics. Feldon offered to teach the hermit his own spells in return, but the hunched madman would have nothing to do with the mountains or their power. Instead, he taught Feldon of the woods, and they crossed and re-crossed his small domain, which he had so laboriously held against all invaders. Over the course of the next month Feldon felt he knew the land as well as the old hermit. They spoke of many things—of plants, of trees, and of the seasons. The hermit felt the world was getting colder beyond his borders, and Feldon agreed. It seemed to him that the glaciers of his home were swelling slightly with every passing year. Finally, they spoke of magic. Feldon showed his power, summoning images from the flames of birds, mythical dragons, and, finally, a simple, knowing smile. When Feldon had finished, the hermit cackled and nodded. The madman stood, arms folded in front of him. 
Feldon started to say something, but the hermit held up a hand to quiet him. For a moment there was silence in the forest. Then there was a noise, or rather, a sensation, a rumble that pounded through the ground and into Feldon's bones. The ground quaked beneath his feet, and the campfire collapsed in on itself from the shuddering ground. Feldon cried out despite himself, but the hermit did not move. Then the wurm appeared. It was a great, ancient creature, as large as one of Mishra's dragon engines of old. Its scales were golden and green, and it had baleful, red eyes that glimmered in the dark. It loomed above them for an instant, and was gone. A wall of scales surged past them—the wurm's elongated body hurtling before them. After a long time, the wurm's whiplike tail spun out, smashing the trees like a line pulled from a runaway wagon. The ground stopped shaking. The old hermit turned and bowed deeply. Feldon returned the bow and understood how the ancient mage had kept this patch of forest for all these years. Carefully, Feldon outlined his problem: He had lost someone dear to him, and his own magics lacked the power to restore her. Did the power of the hermit hold more? The old hermit rocked back on his heels and grinned. "Is this one who is dear still alive?" he asked. Feldon shook his head, and the hermit's grin faded. He, too, shook his head. "I can only summon the living—that is the power of the growing briar. But perhaps I can send you to someone who might have the power you seek." Feldon left the hermit's forest the next morning, heading north. Ronom Lake bordered the lands of Sarinth, and the lake had faired as badly as the land. Where once there were expanses of white beach now only leprous gray moss flourished, and the lake itself was little more than wide expanses of stagnant, oily water broken by pungent algae blooms in greasy shades of green and red. Feldon guided his small wagon along the perimeter of the lake. 
The hermit said he would recognize the signs when he reached the domain of the sorceress who ruled part of the shore. Indeed he did. The gray moss began to fade and at last retreated fully, leaving only a cascade of white sand as pure as any Feldon had seen. It was broken at the shore by a thin line of rounded black stones, themselves smoothed by the rolling surf. Feldon took a deep breath and smelled the fresh spray, without a tinge of musty fog. He found her at the foot of a crystalline waterfall, in a small pavilion that seemed to be spun from golden threads. She was taller than he, dressed in a shimmering robe that looked like a translucent rainbow. She granted him an audience as muscular servants brought a simple meal of cheese and dried apples. The provender seemed insufficient for such opulent surroundings, but Feldon said nothing and accepted the sorceress's hospitality. She asked him his quest, and he told her: He sought to regain a love that had been lost. She nodded, and a tight smile appeared on her face. "Such matters have a price," she said. Feldon bowed his head and asked her to name the price. "Stories," she said. "You must tell me the stories of Loran, so I may better grant your wish." Slowly, Feldon began to tell the tale. He recounted what he knew of Loran from her own tales and her journals—of her life in the far east, in the distant land of Argive, of her early life with the brothers, and how she eventually rejected their war to seek another path. He spoke of how she came to Terisia City and joined a band of scholars looking for that path—scholars that included Feldon. He stumbled a few times, but the sorceress said nothing. He told of how the two met, how they studied together, and how they had fallen in love. He explained how they had separated when Mishra attacked their city and what had happened to Loran at Ashnod's hands. She seemed to heal slowly in their time together before spiraling downward into her eventual death. 
As he spoke, he halted fewer times, and his mind was alive with her memory. He recalled her black hair, her lithe figure, her touch, and her smile—always that knowing smile. He spoke of how she had died, and what he had done afterward. He recounted his construction of the automaton and his trip to the hermit and now his visit to her. As he spoke, he forgot the sorceress was there. Loran was alive for him. At last he came to the end of the tale and looked at the enchantress. Her face was impassive, but a single tear trickled down her cheek. "I rule in the sea and sky," she said, "much as you rule in the mountains, and the hermit the growing vegetation. You have paid my price with a story. Now let me see what I can do." She shut her eyes, and for a moment, it seemed that outside the golden pavilion the sun passed behind a cloud. Then it brightened again, and Loran stood before Feldon. She was young again, and whole, her black hair shimmering like a dark waterfall. She smiled that knowing, secretive smile she always had for him. Feldon rose and reached out to embrace her. His hands passed through her like smoke. The relief in his heart was replaced with fire, and he turned toward the sorceress. She had risen from her divan now and held up her hands as if to ward off a blow. "She isn't real," cried Feldon, spitting out the words. "I rule in the blue," said the sorceress, "and blue is the stuff of air and water, of mind and imagination. I cannot bring back that which is gone, only create its image. If you want her truly back, you must seek another." "Who is this other?" asked Feldon, and the sorceress hesitated. Again, Feldon asked, "Who is this other?" The sorceress looked at him with cold crystalline eyes. "There is a swamp farther north. He who lives there rules in the black. He can bring back what you seek. But be warned"—and here her voice softened—"his price is higher than mine." And another tear appeared on the sorceress's cheek. 
Feldon bowed, and the enchantress offered him her hand, which the old man kissed. While the sorceress's flesh appeared young and supple, to Feldon's lips it felt leathery and ancient. He re-boarded his wagon and continued. A short distance beyond the golden pavilion, he dismounted on the pristine white beach and felt the ground. It looked like pure white sand but felt like rocks covered with gray moss. Feldon gave an understanding grunt and set out for the swamp. Here along the northern border of Ronom Lake there had been a village, but the land of the village had settled, or the lake had risen, so that it was nothing more than a collection of buildings rotting in a ruined swamp. Great dark birds hovered through the arch-rooted trees. No, Feldon corrected himself. Bats. They were bats, which no longer feared the light in this land of eternal gloom. The village had a rough, rotting palisade, little more than a collection of sharpened logs driven into the muck. The guards at the gate were sallow, hollow-eyed men dressed in tattered armor. They threatened Feldon with capture, but he summoned fire in a great wall between him and them. After the guards stepped back from the flames, and after a quick consultation with each other, they chose to escort Feldon to their master. Their master was an aged spider of a man who received his visitors on a throne carved from a gigantic skull. Feldon thought briefly of the great wurm that the green hermit had summoned, and wondered if the fleshless skull before him was of the same type. The ruler of the swamp was short, pot-bellied, and bald, and slouched in a corner of the throne as Feldon explained his quest. He had lost someone dear, said Feldon, and was told that the master could find a way to return her. The man gave a watery, choking laugh. "I am the master of black magics, redling," he said. "I know the powers of life and death. Are you willing to pay my price?" "And your price is?" asked Feldon. 
The master stroked his hairless chin. "I want your walking stick." Feldon gripped his silver cane tightly. "I cannot part with it. I pulled it from a glacier many years ago. It is like a part of me." "Ah," said the master, "and your love is such a pale, insubstantial thing that you cannot part with a hunk of metal for it." Feldon looked at the twisted spider of a man, and then at his rune-carved cane. He held it out. "Your price is met." "Excellent," hissed the master of the swamp, taking the cane. "Let us begin." For three days and three nights Feldon studied at the feet of the master. He memorized the marshes around the village, and felt the thick, viscous pull of the land in his mind. It was very different than the cold, clear mountains that he normally used. It left him feeling soiled and unclean. At the end of the third day the hollow-eyed guards escorted Feldon to a small, windowless hut at the edge of the village, just within the walls of the palisade. Here Feldon worked the spell that the master of the swamp gave him. In the light of a single tallow candle, Feldon cleared his mind and meditated. Normally he would think of the mountains, but now he thought of the bogs around him. He felt their watery pull, sucking him down, embracing him with their power. He spoke the words of the spell and called forth Loran. The candle flickered for a moment, scattering Feldon's shadow behind him on the wall. Far above him, the wind coursed through the mangrove branches and sounded as if the lake itself had built a great wave to swallow the village. Everything grew quiet. There was the sound of footsteps outside. They moved slowly and ploddingly, the thick mud pulling at heavy feet as the sound approached. It was the sound of a figure staggering and sloshing through the muck. For a moment Feldon's heart leaped. Had he succeeded? Something heavy and wet thumped against the door, sounding like a bag of wet earth. 
Slowly Feldon pulled himself to his feet (he no longer had his cane) and shuffled to the door. The door gave another sloshing thud and then another, as Feldon reached it and grasped the knob. The stench hit him. It was a moldering, heavy smell, of rotted flesh and damp earth. It was the smell of death. Feldon's heart sank as he realized what he had done with the master of the swamp's spell. There was another thump, and the door shifted, but Feldon was leaning against it now, seeking now to keep whatever was on the far side out. He did not want to see if the spell had succeeded. He did not want to know. There was another thud and a gurgling cry that sounded like sloshing water. Feldon's heart shattered as he reached inside himself and willed the spell to end, to send whatever was beyond the door back where it had come from. The smell of death was gone, and with it the sounds. Feldon stayed pressed against the door, holding it shut with all his might, until morning. When morning came, he slowly opened the door. There were no footprints in the muck outside the door. Indeed, the entire village had been abandoned. There were no hollow-eyed guards, no master of the swamp. Nothing called his name in a gurgling voice like sloshing water. Feldon staggered to his wagon, pausing only to use a piece of black driftwood as a makeshift walking stick. He did not look back. In time, as he traveled, the ground began to rise, and dry. He had circumnavigated the lake now, and all that was left was to return home. He dreaded that, for fear of what he would find in the garden. He was three days from his village when he heard of the scholar in a small town further west. Propelled in part by curiosity, in part by dread, Feldon turned his wagon westward. He found the scholar in the musty remains of a temple library. The building had been shattered long ago by an earthquake, and the snows and rains had rotted most of the books. 
Yet among the tattered remains of books and scrolls, the scholar hopped like a bird-shaped automaton. He was a spindly thing and regarded Feldon from behind thick lenses of crystal. Feldon spoke of his tale—of his loss, of his resolve to regain what he had lost. He told of the hermit, the sorceress, and the master of the swamp. And when he finished his story, the scholar blinked at him behind heavy lenses. "What do you want?" he said at last. Feldon let out an exasperated sigh. "I want to have Loran back. If magic can do everything, why can it not do this?" "Of course it can do this," said the scholar. "The question is—do you want it to?" Now it was Feldon's turn to blink, and the scholar gave a thin, amused smile. "Green calls to the living," he said. "Black calls to the dead. Blue creates the shadow of life. Red consumes, and that's very important as well, because you must often destroy before you can build. I study, and the magic I wield is White, which is the magic of comprehension and understanding." "Can you bring her back to life?" asked Feldon, his voice catching. The memory of the swamp was still with him. "No, I can't," said the scholar, and, despite himself, Feldon sighed in relief. "But I can help you to create an exact duplicate." "I tried that with the automaton," said Feldon. "I speak of a creation not of gears and wires but of magic," replied the scholar, "identical in every way." "I don't understand," said Feldon. "When you cast a spell using fire," explained the scholar, "I believe you do not create fire. Rather you take the magical energy and form it into the shape of fire, which then does your bidding. It is for all intents and purposes fire, but it is made of magic." "But what about when I use fire," asked Feldon, "or when the hermit calls a great wurm?" The scholar waved his hand, "Different uses for the same tools. Yes, in those cases it is a real fire and a real wurm, but the magic alters it.
For the moment, assume that you can create something made of magical energy." Feldon thought about it and nodded slowly. "So if you study an object, you can create the object over time," said the scholar. Again, Feldon nodded. "If you study me," he said, "you would be studying that which makes me a scholar. Therefore you could call at a later time that part of me which is my scholarliness and rely on its advice." Feldon shook his head. "I'm not sure I understand," he said. "Study me for two weeks," said the scholar, "and then see if you understand. Don't talk to me. Just bring me my meals. Two weeks. That's my price. That, and later you'll have to let me and other scholars into your library. Is it a bargain?" For the next two weeks Feldon brought the scholar his meals, in much the same way as he had brought Loran hers when she was bedridden. Feldon used his magic to keep a small flame going and to cook for the scholar as he pawed through the rotting texts and decaying scrolls of the ruined temple. For the first two days the scholar seemed little more than an amusing bird, hopping from one location to another. But soon Feldon noticed there was method to the madness, that there was intent behind each of the scholar's movements. He began to see how the man thought and knew. Through it all the scholar ignored him, save at meal times. At the end of the two weeks the little man turned to Feldon and said, "Summon me." Feldon shook his head. "Pardon?" he asked. "You have watched me for two weeks," said the scholar. "Now see if you can use your magics to bring me into being." Feldon blinked. "But you're already here." "So bring another me," said the scholar. "You've got the power. Use it." Feldon took a deep breath and called upon the powers of the land. He thought of the nervous scholar in his thick spectacles, rummaging relentlessly through the decaying paper and rotting vellum. He tried to call a being that summed up the nature of the creature in one place. 
There was a pause, and then an identical duplicate of the scholar appeared. No, not identical. It was taller, and its flesh had a ruddier hue. But it was thin and nervous and had thick spectacles and a knowing manner. The scholar (the real one), walked up to the created being and looked over his glasses at it. The duplicate did the same. Feldon was amazed. "Is it real?" he choked out at last. The scholar reached out and touched the quasiduplicate, and the duplicate touched back. "Feels like it," said the scholar. "A lot of the little details are wrong, but you aren't just summoning me. You're summoning the essence of my me-ness as a scholar. You can keep this me around by keeping that part of your mind aware of me, but it isn't. Me, that is." Feldon worked his way around the scholar's thinking process. "But what can I do with this—you." "What you would expect a scholar to do," returned the bespectacled man, "research, investigate, know certain things." In a slightly more excited voice he added, "but I wouldn't know anything about fighting or lands I had never visited or anything like that. It would be beyond my nature as a scholar." "And I could do the same with… Loran?" asked Feldon. Both scholars nodded. Feldon found the duplication unnerving and dismissed the part of the spell that held the magical scholar in place. He faded from view like snow in the rain. "You can summon your lost love back," said the scholar, "if that's what you truly want." Feldon thought about the scholar's words on the way back to his home, the wagon shuddering through the deep ruts in the road. It was raining again by the time he returned, and the servants had kindled a fire in the hearth. Before he entered the house, he checked Loran's grave, beneath the inert, rusting form of the automaton. The earth was undisturbed, and that made him feel slightly better. He thanked the servants and retreated to his workshop. 
There, among the tables draped with cloth and the reagents settled into multicolored layers in their beakers, he allowed himself to remember. He remembered Loran. Not just the feel of her touch or the way her hair moved like a dark waterfall. He remembered her: when she was happy, when she was angry, when she was gardening. When she was dying. Feldon thought of Loran and the life she spent with him, of the tales of her youth, of their work and lives together. The joy of life with her and the sadness of her departure felt like a great bubble rising within him. He fed his memories of the land into that bubble, memories of the mountains, the forests and shore, the swamps and the temple, and he filled it with power and life. When Feldon opened his eyes, Loran was there. She was perfect and whole and as young as when he first met her at the gates of Terisia City. She gave him a knowing smile and said, "Why am I here?" "You died," said Feldon, his voice choking as he spoke. She nodded and said, "I seem to remember that. Why am I here?" "You're here because I missed you," said Feldon. "I missed you as well," replied the spell-Loran, and she reached out to him. Despite himself, Feldon shrank from her embrace. She paused, then asked, "What's wrong?" "You're not her," he said at last. "No, I am not," she said, her voice in the lilting Argivian accent he remembered. "We both know that, and you know that I could be nothing less than what you remember of her. You remember her as being honest and strong. I am the sum of her, taken through your feelings. I am what you remember." "You are memories," sighed Feldon, "and though you are pleasant memories, I must leave you as memories. If you are here, you are no more than the automaton in the garden—un-living, an imitation of what was. I'm sorry. I went to so much trouble to bring you about, but I know that I cannot keep you." "Then why am I here?" she said. 
"You are here," said Feldon, taking a deep breath, "so that I can say good-bye." The spell-Loran paused, then smiled slightly. "I understand," she said at last. Feldon crossed to her and embraced her. She felt very much like Loran as he had known her. All that was Loran in his memories was encased in the spell-creature he had created. When they parted, there were tears in both of their eyes. "Good-bye," he said, his voice thick with emotion. "Good-bye," she replied. Feldon allowed the spell to elapse, and the form of Loran began to dissolve. "I understand," he said to her vanishing form. "At last, I think I understand." All that was left was a knowing, soft smile. Then that was gone as well. Feldon returned to the work in his library and workshop, taking up small matters that had been abandoned ages ago. In a few weeks, the scholar appeared at Feldon's doorstep and was amused to see that save for the servants, Feldon was alone. After a meal the birdlike scholar asked, "What became of your lost love?" "She was lost," said Feldon with a deep sigh, "and it was beyond my power to bring her back. It was beyond my desire. But I had a chance to say good-bye." "That is what you truly wanted?" asked the scholar. "That is what I truly wanted," said Feldon. The scholar spent three weeks in Feldon's library and then left, but he promised to send interested students to the grizzled man's home. Every so often, some would-be scholar or mage would appear, and Feldon, remembering his promise, would let the wizard go through the library. Over dinner he would tell his own story of what he had learned about magic. Sometimes the aspiring mage would listen politely, sometimes intently. Occasionally, after everyone had gone to bed, a mage would creep down and find Feldon sitting by the fire. The flames twisted into the form of a smile, a soft and knowing smile. And Feldon, the ancient wizard, seemed to be content. 
#figure(image("001_Loran's Smile/01.jpg", width: 100%), caption: [Feldon of the Third Path | Art by Chase Stone], supplement: none, numbering: none)
https://github.com/NIFU-NO/nifutypst
https://raw.githubusercontent.com/NIFU-NO/nifutypst/main/_extensions/nifu_pub/typst-show.typ
typst
MIT License
// Quarto/Pandoc glue file: every `$...$` placeholder below is substituted by
// Pandoc from the document metadata before Typst compiles this file. It only
// forwards those metadata values to the NIFU_report template function.
#show: doc => NIFU_report(
  title: "$title$",
  subtitle: "$subtitle$",
  abstract: "$abstract$",
  // One ("name",) entry per author, joined by Pandoc's $sep$ separator.
  authors: (
$for(by-author)$
    ( "$it.name.literal$" )$sep$,
$endfor$
  ),
  report_type: "$report_type$",
  report_no: "$report_no$",
  project_no: "$project_no$",
  isbn: "$isbn$",
  issn: "$issn$",
  funder: "$funder$",
  funder_address: "$funder_address$",
  date: "$date$",
  preface: "$preface$",
  signer_1: "$signer_1$",
  signer_2: "$signer_2$",
  signer_1_title: "$signer_1_title$",
  signer_2_title: "$signer_2_title$",
  // These two are substituted unquoted: they are booleans/values, not strings.
  figure_table: $figure_table$,
  table_table: $table_table$,
  references: "$references$",
  doc,
)
https://github.com/polarkac/MTG-Stories
https://raw.githubusercontent.com/polarkac/MTG-Stories/master/stories/025_Eternal%20Masters.typ
typst
// Anthology entry point: applies the shared `conf` layout (titled with the
// set name) and then includes each story chapter in reading order.
#import "@local/mtgset:0.1.0": conf
#show: doc => conf("Eternal Masters", doc)

#include "./025 - Eternal Masters/001_The Prodigal Sorcerers.typ"
#include "./025 - Eternal Masters/002_All That Came Before.typ"
https://github.com/soul667/typst
https://raw.githubusercontent.com/soul667/typst/main/PPT/typst-slides-fudan/themes/polylux/book/src/polylux.md
markdown
![logo](logo2.png)

**Polylux** is a package for the typesetting system [*Typst*](https://typst.app) to create presentation slides, just like you would use the _beamer_ package in LaTeX. (So far, it is much less advanced than beamer, obviously.)

If you haven't heard of things like LaTeX's beamer before, here is how this works: As a rule of thumb, one slide becomes one PDF page, and most PDF viewers can display PDFs in the form of a slide show (usually by hitting the F5-key).

Polylux gives you:
- Elegant yet powerful typesetting by the ever-improving Typst.
- Fully customisable slides.
- Dynamic slides (or *overlays* or (dis-)appearing content, or however you want to call it).
- Decently looking themes.

If you like it, consider [giving a star on GitHub](https://github.com/andreasKroepelin/polylux)!

## Why the name?

A [*polylux*](https://en.wikipedia.org/wiki/Polylux_(overhead_projector)) is a brand of overhead projectors very common in Eastern German schools (where the main author of this package grew up). It fulfils a similar function to a projector, namely projecting visuals to a wall to aid a presentation. The German term for projector is *beamer*, and now you might understand how it all comes together. (The original author of the aforementioned LaTeX package is German as well.)

## Contributing

This package is free and open source. You can find the code on [GitHub](https://github.com/andreasKroepelin/polylux) where you can also create issues or pull requests.

## License

Polylux is released under the [MIT license](https://github.com/andreasKroepelin/polylux/blob/main/LICENSE).
https://github.com/Otto-AA/definitely-not-tuw-thesis
https://raw.githubusercontent.com/Otto-AA/definitely-not-tuw-thesis/main/src/translations/translations.typ
typst
MIT No Attribution
#import "@preview/linguify:0.4.1": linguify, set-database

// Ensure `obj` carries an entry for every key in `keys`; keys that are
// missing are filled with `default`, existing values are left untouched.
#let set-defaults = (obj, keys, default) => {
  let filled = obj
  for key in keys {
    if key not in filled {
      filled.insert(key, default)
    }
  }
  filled
}

// Merge caller-supplied translations into the bundled TOML database and
// register the result with linguify. Entries whose value is `none` are
// skipped; a missing "en" or "de" variant defaults to the empty string.
#let init_translations = (additional-translations) => {
  let database = toml("translations.toml")
  for (key, entry) in additional-translations {
    if entry != none {
      let entry = set-defaults(entry, ("en", "de"), "")
      database.lang.en.insert(key, entry.en)
      database.lang.de.insert(key, entry.de)
    }
  }
  set-database(database)
}

// Look up `key` in the active linguify database.
#let translate = key => linguify(key)
https://github.com/jamesrswift/springer-spaniel
https://raw.githubusercontent.com/jamesrswift/springer-spaniel/main/src/package/drafting.typ
typst
The Unlicense
// Wildcard import of the `drafting` package: brings all of its public names
// into this module's scope so they are re-exported from here.
#import "@preview/drafting:0.2.0": *
https://github.com/jgm/typst-hs
https://raw.githubusercontent.com/jgm/typst-hs/main/test/typ/compiler/break-continue-06.typ
typst
Other
// Test continue outside of loop. // Error: 12-20 cannot continue outside of loop #let x = { continue }
https://github.com/Aariq/mycv
https://raw.githubusercontent.com/Aariq/mycv/main/_extensions/mycv/typst-template.typ
typst
// This is an example typst template (based on the default template that ships
// with Quarto). It defines a typst function named 'cv' which provides
// various customization options. This function is called from the
// 'typst-show.typ' file (which maps Pandoc metadata function arguments)
//
// If you are creating or packaging a custom typst template you will likely
// want to replace this file and 'typst-show.typ' entirely. You can find
// documentation on creating typst templates and some examples here:
//   - https://typst.app/docs/tutorial/making-a-template/
//   - https://github.com/typst/templates

#import "@preview/fontawesome:0.4.0": *

#let darkblue=rgb("#1F3A7F")

// CV layout: a two-column header (name/title left, contact details right)
// followed by the document body with styled headings and dark-blue list marks.
#let cv(
  title: none,
  author: none,
  affiliation: none,
  email: none,
  phone: none,
  website: none,
  github: none,
  linkedin: none,
  font: none,
  doc,
) = {
  set list(marker: text(darkblue)[○])
  set text(font: font)

  // Level-1 headings: large, medium weight (used for the author's name).
  show heading.where(
    level: 1
  ): it => block(width: 100%)[
    #set align(left)
    #set text(
      28pt,
      weight: "medium"
    )
    #it.body
  ]

  // Level-2 headings: dark-blue section titles preceded by a short rule.
  show heading.where(
    level: 2
  ): it => block(width: 100%)[
    #set text(
      darkblue,
      16pt,
      weight: "regular"
    )
    #grid(
      columns: (0.15fr, 0.85fr),
      box(baseline: 0.4em, line(stroke: 3pt + darkblue, length: 90%)),
      [#it.body]
    )
  ]

  //header
  grid(
    columns: (1fr, 1fr),
    [
      = #author
      #text(
        weight: "light",
        size: 14pt,
        luma(80),
        style: "oblique"
      )[#title]
    ],
    [
      #set align(right)
      #set text(weight: "light", luma(80), style: "oblique")
      #affiliation \
      //possibly easier to have people include icons in quarto YAML with shortcodes than to hard-code this.
      #fa-icon("phone") #phone \
      //these regexes are janky and there's probably a better way to deal with escape characters
      #fa-icon("envelope") #link("mailto:" + email)[#email.replace(regex("\\\@"), "@")] \
      #fa-icon("linkedin") #link(linkedin)[#linkedin.replace(regex("https?:\\\/\\\/www\."), "")] \
      #fa-icon("globe") #link(website)[#website.replace(regex("https?:\\\/\\\/"), "")] \
      // FIX: this link was previously built with backslashes
      // ("https:\\\github.com\\"), producing an invalid URL target.
      #fa-icon("github") #link("https://github.com/" + github)[#github] \
    ]
  )

  doc
}
https://github.com/Rhinemann/mage-hack
https://raw.githubusercontent.com/Rhinemann/mage-hack/main/src/chapters/Credits.typ
typst
#import "../templates/interior_template.typ": chapter #show: chapter.with(chapter_name: "Credits") #show: columns.with(2, gutter: 1em) // #columns(gutter: 1em) #let subh(body) = { set text(font: "Abbess", size: 14pt) set block(below: 0.5em) body } #subh[Credits] #par(first-line-indent: 0pt)[ *Written By:* Rhinemann\ *Developed By:* Rhinemann\ *Edited By:* Rhinemann ] #v(1fr) #align(center)[#image("../../assets/images/interior/WW_Logo.svg", width: 50%)] #colbreak() #subh[Special Thanks to:] Jeremy.Forbing, <NAME>, Vecna and the entire Cortex Prime discord for the input, brainstorming, advice and help. <NAME> for Manual of Monsters, Minions & Mountebanks Jeremy.Forbing, <NAME>, <NAME> and <NAME> for Cortex Lite. Riley Routh for The Arcanist's Toolkit. Cam Banks for designing Cortex Prime. #v(1fr) Cortex Prime is a trademark and IP of Direwolf Digital. World of Darkness, Vampire: The Masquerade, Vampire: The Dark Ages, Victorian Age: Vampire, Werewolf: The Apocalypse, Werewolf: The Wild West, Mage: The Ascension, Mage: The Sorcerers Crusade, Wraith: The Oblivion, Wraith: The Great War, Changeling: The Dreaming, Hunter: The Reckoning, Demon: The Fallen, Mummy: The Resurrection, Orpheus, Exalted, Chronicles of Darkness, Vampire:The Requiem, Werewolf: The Forsaken, Mage: The Awakening, Changeling: The Lost, Hunter: The Vigil, Giest: The Sin Eaters, Demon: The Descent, Mummy: The Curse, Beast: The Primordial, Promethean: The Created, World of Darkness, Storyteller System™, Storytelling System™, and Storytellers Vault™ and their respective logos, icons and symbols are trademarks or registered trademarks of Paradox Interactive AB. All rights reserved. #align(center)[#image( "../../assets/images/interior/Cortex Prime Community - Light Background.png", width: 70%, ) ]
https://github.com/xbunax/tongji-undergrad-thesis
https://raw.githubusercontent.com/xbunax/tongji-undergrad-thesis/main/README.md
markdown
MIT License
# :page_facing_up: 同济大学本科生毕业设计论文 Typst 模板(理工类) 中文 | [English](README-EN.md) > [!CAUTION] > 由于 Typst 项目仍处于密集发展阶段,且对某些功能的支持不完善,因此本模板可能存在一些问题。如果您在使用过程中遇到了问题,欢迎提交 issue 或 PR,我们会尽力解决。 > > 在此期间,欢迎大家使用[我们的 $\LaTeX$ 模板](https://github.com/TJ-CSCCG/tongji-undergrad-thesis)。 ## 样例展示 以下依次展示 “封面”、“中文摘要”、“目录”、“主要内容”、“参考文献” 与 “谢辞”。 <p align="center"> <img src="https://media.githubusercontent.com/media/TJ-CSCCG/TJCS-Images/tongji-undergrad-thesis-typst/preview/main_page-0001.jpg" width="30%"> <img src="https://media.githubusercontent.com/media/TJ-CSCCG/TJCS-Images/tongji-undergrad-thesis-typst/preview/main_page-0002.jpg" width="30%"> <img src="https://media.githubusercontent.com/media/TJ-CSCCG/TJCS-Images/tongji-undergrad-thesis-typst/preview/main_page-0004.jpg" width="30%"> <img src="https://media.githubusercontent.com/media/TJ-CSCCG/TJCS-Images/tongji-undergrad-thesis-typst/preview/main_page-0005.jpg" width="30%"> <img src="https://media.githubusercontent.com/media/TJ-CSCCG/TJCS-Images/tongji-undergrad-thesis-typst/preview/main_page-0019.jpg" width="30%"> <img src="https://media.githubusercontent.com/media/TJ-CSCCG/TJCS-Images/tongji-undergrad-thesis-typst/preview/main_page-0020.jpg" width="30%"> </p> ## 使用方法 ### 本地编译 #### 1. 安装 Typst 参照 [Typst](https://github.com/typst/typst?tab=readme-ov-file#installation) 官方文档安装 Typst。 #### 2. clone 本项目 ```bash git clone https://github.com/TJ-CSCCG/tongji-undergrad-thesis-typst.git cd tongji-undergrad-thesis-typst ``` #### 3. 下载字体 请到本仓库的 [`fonts`](https://github.com/TJ-CSCCG/tongji-undergrad-thesis-typst/tree/fonts) 分支下载字体文件,并将其放置在 `fonts` 文件夹中,或者将字体文件安装到系统中。 #### 4. 编译 按照需求修改相关文件,然后执行以下命令以编译。 ```bash typst --font-path ./fonts compile main.typ ``` > [!TIP] > 若您发现字体无法正常显示,请将 `fonts` 文件夹中的字体文件安装到系统中,再执行编译命令。 ### 在线编译 在 [Typst App](https://typst.app) 上使用本模板进行在线编译。 ## 如何为该项目贡献代码? 
还请查看 [How to pull request](CONTRIBUTING.md/#how-to-pull-request)。 ## 开源协议 该项目使用 [MIT License](LICENSE) 开源协议。 ### 免责声明 本项目使用了方正字库中的字体,版权归方正字库所有。本项目仅用于学习交流,不得用于商业用途。 ## 有关突出贡献的说明 * 该项目起源于 [FeO3](https://github.com/seashell11234455) 的初始版本项目 [tongji-undergrad-thesis-typst](https://github.com/TJ-CSCCG/tongji-undergrad-thesis-typst/tree/lky)。 * 后来 [RizhongLin](https://github.com/RizhongLin) 对模板进行了完善,使其更加符合同济大学本科生毕业设计论文的要求,并增加了针对 Typst 的基础教程。 我们非常感谢以上贡献者的付出,他们的工作为更多同学提供了方便和帮助。 在使用本模板时,如果您觉得本项目对您的毕业设计或论文有所帮助,我们希望您可以在您的致谢部分感谢并致以敬意。 ## 致谢 我们从顶尖高校的优秀开源项目中学到了很多: * [lucifer1004/pkuthss-typst](https://github.com/lucifer1004/pkuthss-typst) * [werifu/HUST-typst-template](https://github.com/werifu/HUST-typst-template) ## 联系方式 ```python # Python [ 'rizhonglin@$.%'.replace('$', 'epfl').replace('%', 'ch'), ] ``` ### QQ 群 * TJ-CSCCG 交流群:`1013806782`
https://github.com/lucifer1004/leetcode.typ
https://raw.githubusercontent.com/lucifer1004/leetcode.typ/main/problems/p0018.typ
typst
#import "../helpers.typ": *
#import "../solutions/s0018.typ": *

= 4Sum

Given an array `nums` of `n` integers, return an array of all the unique quadruplets `[nums[a], nums[b], nums[c], nums[d]]` such that:

- `0 <= a, b, c, d < n`
- `a`, `b`, `c`, and `d` are *distinct*.
- `nums[a] + nums[b] + nums[c] + nums[d] == target`

You may return the answer in *any order*.

// Sort-and-two-pointer solution: fix the two smallest positions `a` and `b`,
// then close in on the remaining pair from both ends of the tail. Duplicate
// values are skipped at every level so each quadruplet is emitted exactly
// once. O(n^3) time, O(1) extra space beyond the result. (The original block
// was an unimplemented stub.)
#let _4sum(nums, target) = {
  let sorted-nums = nums.sorted()
  let n = sorted-nums.len()
  let quads = ()
  for a in range(n) {
    // Skip repeated choices for the first element.
    if a > 0 and sorted-nums.at(a) == sorted-nums.at(a - 1) { continue }
    for b in range(a + 1, n) {
      // Skip repeated choices for the second element.
      if b > a + 1 and sorted-nums.at(b) == sorted-nums.at(b - 1) { continue }
      let lo = b + 1
      let hi = n - 1
      while lo < hi {
        let total = sorted-nums.at(a) + sorted-nums.at(b) + sorted-nums.at(lo) + sorted-nums.at(hi)
        if total == target {
          quads.push((sorted-nums.at(a), sorted-nums.at(b), sorted-nums.at(lo), sorted-nums.at(hi)))
          lo += 1
          hi -= 1
          // Step past duplicates adjacent to the matched pair.
          while lo < hi and sorted-nums.at(lo) == sorted-nums.at(lo - 1) { lo += 1 }
          while lo < hi and sorted-nums.at(hi) == sorted-nums.at(hi + 1) { hi -= 1 }
        } else if total < target {
          lo += 1
        } else {
          hi -= 1
        }
      }
    }
  }
  quads
}

#testcases(
  _4sum,
  _4sum-ref,
  (
    (nums: (1, 0, -1, 0, -2, 2), target: 0),
    (nums: (2, 2, 2, 2), target: 8),
    (nums: range(-5,5), target: 3),
  )
)
https://github.com/nafkhanzam/typst-common
https://raw.githubusercontent.com/nafkhanzam/typst-common/main/src/common/binary.typ
typst
#import "@preview/oxifmt:0.2.1": *

// Render integer `v` in base 2; when `n` is given, zero-pad to width `n`.
#let decimal-to-binary(v, n: none) = {
  let spec = if n == none { "{:b}" } else { "{:0" + str(n) + "b}" }
  strfmt(spec, v)
}

// Number of significant bits in `v` (0 for v == 0).
#let binary-count(v) = {
  let bits = 0
  let rest = v
  while rest > 0 {
    rest = rest.bit-rshift(1)
    bits += 1
  }
  bits
}

// Bit `k` of `v`, counting from the least-significant bit (k = 0).
#let k-bit(v, k) = v.bit-rshift(k).bit-and(1)
https://github.com/ckunte/typst-snippets-vim
https://raw.githubusercontent.com/ckunte/typst-snippets-vim/master/README.md
markdown
MIT License
# Custom Typst snippets for use in Vim and Neovim Typesetting documents, letters, reports, or even books in [Typst] is not as verbose as LaTeX, but certainly error-prone, given the need for strict syntax. A handful of Vim snippets provided in this repository try to reduce this tedium to as low as practicable. This repository contains the following custom snippets: | Snippet | Inserts | | ------------------------- | ------------------ | | `apdx` + <kbd>tab</kbd> | appendix block | | `bib` + <kbd>tab</kbd> | bibliography entry | | `cod` + <kbd>tab</kbd> | code file | | `fig` + <kbd>tab</kbd> | figure block | | `file` + <kbd>tab</kbd> | file | | `hd` + <kbd>tab</kbd> | set heading number | | `letter` + <kbd>tab</kbd> | letter block | | `lnk` + <kbd>tab</kbd> | add link | | `ltmpl` + <kbd>tab</kbd> | letter template | | `note` + <kbd>tab</kbd> | note block | | `ntmpl` + <kbd>tab</kbd> | note template | | `pb` + <kbd>tab</kbd> | page break | | `ref` + <kbd>tab</kbd> | bibliography block | | `tbl` + <kbd>tab</kbd> | table block | ## What are snippets and how do they work? The concept of a snippet is simple. Think of a block of pre-formatted text (i.e., a template) that one needs to use often. One can of-course type or copy-paste such blocks of text repeatedly the hard way, or one could instead assign such common blocks of text with an abbreviated keyword, which in turn calls the entire block of text. To ensure such blocks do not accidentally appear while typing the actual content of the note, paper, or report, a trigger is required. The trigger in this case is a <kbd>tab</kbd> key. In Vim's insert mode, typing `note` and hitting <kbd>tab</kbd> key on the keyboard inserts the following block, and focuses on the first user input _Title_ in the template. Type the title. To jump to the next placeholder _Author_, hold <kbd>ctrl</kbd> and press <kbd>j</kbd> (`c-j` in Vim parlance). The shortcut to jumping between placeholders can be set in `~/.vimrc` file. 
```typst #import "${1:template_incl_path}": note #show note.with( title: [${2:title}], author: "${3:author}", date: [${4:date}], ) // content hereon ``` ## Requirements To be able to use snippets, the following are required: 1. Vim or Neovim with python3 support. 2. [UltiSnips][us] 3. [typst-snippets-vim][ck] ## Installation 1. Set up [vim-plug][vp] plug-in manager 2. Set the required UltiSnips plug-in for [typst-snippets-vim][ck] by adding the following to `.vimrc` file: ```vim call plug#begin('~/.vim/plugged') " UltiSnips for snippets Plug 'sirver/ultisnips' " Typst snippets for Vim using UltiSnips (downloads only tagged releases) Plug 'ckunte/typst-snippets-vim', { 'tag': '*' } call plug#end() let g:UltiSnipsExpandTrigger = '<tab>' let g:UltiSnipsJumpForwardTrigger = '<c-j>' let g:UltiSnipsJumpBackwardTrigger = '<c-k>' ``` Reload `.vimrc` and `:PlugInstall` to install plug-ins. [Typst]: https://typst.app [us]: https://github.com/SirVer/ultisnips [vp]: https://github.com/junegunn/vim-plug [ck]: https://github.com/ckunte/typst-snippets-vim
https://github.com/jgm/typst-hs
https://raw.githubusercontent.com/jgm/typst-hs/main/test/typ/text/raw-00.typ
typst
Other
// No extra space. `A``B`
https://github.com/Arsenii324/matap-p2
https://raw.githubusercontent.com/Arsenii324/matap-p2/main/t-repo/lecture5.typ
typst
#import "macros.typ" : * = Лекция == Степенные ряды === Опр. степенного ряда Фукнциональный ряд вида $limits(sum)_(n = 0)^infinity c_n (x - x_0)^n$, где $x_0, c_n in RR$ фиксированные числа - называется степенным рядом с центром в точке $x_0$. === Опр. радиус сходимости Пусть $D = {"множество точек сходимости ряда"}$. Тогда $R = limits(sup)_(x in D){abs(x)}$ - называется радиусом сходимости ряда. Множество $(-R, R)$ - называется интервалом сходимости ряда. === Теорема о радиусе сходимости *Пусть:* Дан степенной ряд $limits(sum)_(k = 0)^infinity c_k x^k$ и R - радиус его сходимости. *Тогда:* 1) $abs(x) < R => limits(sum)_(k = 0)^infinity c_k x^k$ сходится абсолютно. 2) $abs(x) > R => limits(sum)_(k = 0)^infinity c_k x^k$ расходится. 3) $forall [a, b] subset (-R, R) => limits(sum)_(k = 0)^infinity c_k x^k$ сходится равномерно (и абсолютно) на $[a, b]$. 4) Если $limits(sum)_(k = 0)^infinity c_k R^k$ - сходится, то на $[0, R]$ сходится равномерно (и абсолютно). 5) Если $limits(sum)_(k = 0)^infinity c_k R^k$ - расходится, то на $[0, R)$ сходится неравномерно. *Proof:* 1) $abs(x) < R = sup {abs(x) : x in D} => exists y in D : abs(y) > abs(x)$ Возьмем ряд $limits(sum)_(k = 0)^infinity abs(c_k x^k) = limits(sum)_(k = 0)^infinity abs(c_k) abs(y)^k abs(x/y)^k $ $limits(sum)_(k = 0)^infinity c_k y^k$ - сходится, т.к. $y in D => c_k y^k --> 0 => exists M : abs(c_k y^k) < M," "forall k in NN$ Тогда $limits(sum)_(k = 0)^infinity abs(c_k) abs(y)^k abs(x/y)^k <= underbrace(M dot limits(sum)_(k = 0)^infinity abs(x/y)^k, "сходится, геом. прогр.") => $ исходный ряд сходится абсолютно. 2) $abs(x) > R$. Предположим обратное - ряд $limits(sum)_(k = 0)^infinity c_k x^k$ сходится. Тогда $x in D => R >= abs(x)$. Противоречие. 3) $abs(c_k x^k) <= abs(c_k) dot underbrace((max{abs(a), abs(b)}), lambda)^k$ $limits(sum)_(k = 0)^infinity abs(c_k) lambda^k - "сходится по пункту (1)" => limits(sum)_(k = 0)^infinity c_k x^k underbrace(" сх. 
равномерно", [a, b])$ по признаку Вейрштрасса, т.к.\ $abs(c_k x^k) <= abs(c_k) lambda^k $ и $limits(sum)_(k = 0)^infinity abs(c_k) lambda^k - "сходится"$. 4) $limits(sum)_(k = 0)^infinity c_k x^k = limits(sum)_(k = 0)^infinity c_k R^k (x/R)^k$. Положим $a_k = c_k R^k$ - сходится равномерно, т.к. не зависит от иксов. Положим $b_k = (x/R)^k. forall k, forall x : abs(b_k (x)) <= 1 $. Тогда по признаку Абеля ряд сходится равномерно. 5) Предположим, что сходится равномерно. Тогда заметим, что $R$ - предельная точка $E = [0, R)$. Пусть также $u_n (x) = c_n x^n$ и $exists limits(lim)_(x -> R-0) u_n (x) = c_n R^n$. Тогда по теореме о почленном переходе к пределу ряд $c_n R^n$ - сходится. Противоречие. === Теорема Коши-Адамара *Пусть:* a) $limits(sum)_(k = 0)^infinity c_k x^k$ b) $R = sup{ abs(x) : limits(sum)_(k = 0)^infinity c_k x^k - "сходится"}$ c) $tau = limits(overline(lim))_(k -> infinity) abs(c_k)^(1/k)$ *Тогда:* 1) $tau = 0 => R = infinity$ 2) $tau = infinity => R = 0$ 3) $tau in (0, infinity) => R = 1/tau$ R - радиус сходимости. *Proof:* Положим $x != 0$, если $x = 0$, то всё очевидно. Далее будем пользоваться радикальным признаком Коши. 1) $tau = 0$ Положим $a_k = abs(c_k x^k) => L = limits(overline(lim))_(k -> infinity) abs(c_k x^k)^(1/k) = abs(x) tau$ $L = abs(x) tau < 1 => "сходится"$. Т.к. $tau = 0$, то сходится для любого $x => R = infinity$. 2) $tau = infinity$ $L = abs(x) tau < 1 $. Т.к. $tau = infinity$, то ряд сходится только в точке $x = 0 ==> R = 0$. 3) $tau in (0, infinity)$ Ряд сходится, если $abs(x) < 1/tau ==> R >= 1/tau$. Пусть $R > 1/tau => exists y in D : abs(y) > 1/tau => abs(y) tau > 1 =>$ в точке y ряд расходится. Противоречие. Значит $R = 1/tau$.
https://github.com/typst-community/mantodea
https://raw.githubusercontent.com/typst-community/mantodea/main/src/example.typ
typst
MIT License
#import "/src/_pkg.typ"
#import "/src/_valid.typ"
#import "/src/theme.typ" as _theme

/// Show a source code frame.
///
/// - theme (theme): The theme to use for this code frame.
/// - ..args (any): The args to pass to `showybox`.
/// -> content
#let frame(
  theme: _theme.default,
  _validate: true,
  ..args,
) = {
  if _validate {
    import _valid as z
    _ = z.parse(theme, _theme.schema(), scope: ("theme",))
    // NOTE: intentionally left empty, we only validate our own inputs
  }
  // Delegate rendering to showybox, with the border and title bar taken
  // from the theme's primary color.
  _pkg.showybox.showybox(
    frame: (
      border-color: theme.colors.primary,
      title-color: theme.colors.primary,
      thickness: .75pt,
      radius: 4pt,
      inset: 8pt
    ),
    ..args
  )
}

/// Shows example code and its corresponding output in a frame.
///
/// - side-by-side (bool): Whether or not the example source and output should
///   be shown side by side.
/// - scope (dictionary): The scope to pass to `eval`.
/// - breakable (bool): Whether the frame can break over multiple pages.
/// - result (content, auto): The content to render as the example result. If
///   `auto` then `source` is evaluated and used as result.
/// - source (content): A raw element containing the source code to evaluate.
/// - theme (theme): The theme to use for this code example.
/// -> content
#let code-result(
  side-by-side: false,
  scope: (:),
  breakable: false,
  result: auto,
  source,
  theme: _theme.default,
  _validate: true,
) = {
  if _validate {
    import _valid as z
    _ = z.parse(side-by-side, z.boolean(), scope: ("side-by-side",))
    _ = z.parse(breakable, z.boolean(), scope: ("breakable",))
    _ = z.parse(scope, z.dictionary((:)), scope: ("scope",))
    _ = z.parse(result, z.either(z.content(), z.auto_()), scope: ("result",))
    _ = z.parse(source, z.content(), scope: ("source",))
    _ = z.parse(theme, _theme.schema(), scope: ("theme",))
  }

  // Pick the eval mode from the raw block's language tag; for any other
  // language an explicit `result` must be supplied, otherwise panic.
  let mode = if source.lang == "typc" {
    "code"
  } else if source.lang in ("typ", "typst") {
    "markup"
  } else if result == auto {
    panic("cannot evaluate " + source.lang + " code")
  }

  // Evaluate the example source only when no explicit result was given.
  if result == auto {
    result = eval(mode: mode, scope: scope, source.text)
  }

  frame(
    breakable: breakable,
    theme: theme,
    grid(
      // Two equal columns when side by side, otherwise stacked in one.
      columns: if side-by-side { (1fr, 1fr) } else { (1fr,) },
      gutter: 12pt,
      source,
      // In stacked mode, separate source and result with a themed rule.
      if not side-by-side {
        grid.hline(stroke: 0.75pt + theme.colors.primary)
      },
      result,
    ),
  )
}
https://github.com/frectonz/the-pg-book
https://raw.githubusercontent.com/frectonz/the-pg-book/main/book/175.%20disc.html.typ
typst
disc.html The Risk of Discovery January 2017Because biographies of famous scientists tend to edit out their mistakes, we underestimate the degree of risk they were willing to take. And because anything a famous scientist did that wasn't a mistake has probably now become the conventional wisdom, those choices don't seem risky either.Biographies of Newton, for example, understandably focus more on physics than alchemy or theology. The impression we get is that his unerring judgment led him straight to truths no one else had noticed. How to explain all the time he spent on alchemy and theology? Well, smart people are often kind of crazy.But maybe there is a simpler explanation. Maybe the smartness and the craziness were not as separate as we think. Physics seems to us a promising thing to work on, and alchemy and theology obvious wastes of time. But that's because we know how things turned out. In Newton's day the three problems seemed roughly equally promising. No one knew yet what the payoff would be for inventing what we now call physics; if they had, more people would have been working on it. And alchemy and theology were still then in the category Marc Andreessen would describe as "huge, if true."Newton made three bets. One of them worked. But they were all risky.Japanese Translation
https://github.com/Enter-tainer/typstyle
https://raw.githubusercontent.com/Enter-tainer/typstyle/master/tests/assets/serve.typ
typst
Apache License 2.0
#import "/github-pages/docs/book.typ": book-page #show: book-page.with(title: "CLI Serve Command") = The serve command The serve command is used to preview a book by serving it via HTTP at `localhost:25520` by default: ```bash typst-book serve ``` // The `serve` command watches the book's `src` directory for // changes, rebuilding the book and refreshing clients for each change; this includes // re-creating deleted files still mentioned in `book.typ`! A websocket // connection is used to trigger the client-side refresh. ***Note:*** *The `serve` command is for testing a book's HTML output, and is not intended to be a complete HTTP server for a website.* == Specify a directory The `serve` command can take a directory as an argument to use as the book's root instead of the current working directory. ```bash typst-book serve path/to/book ``` == Build options The `serve` command will build your book once before serving the content. It is hence including all of the #link("https://myriad-dreamin.github.io/typst-book/cli/build.html")[options] from `build` command. == Server options The `serve` address defaults to `localhost:25520`. Either option can be specified on the command line: ```bash typst-book serve path/to/book --addr 8000:127.0.0.1 ``` === --open When you use the `--open` flag, typst-book will open the rendered book in your default web browser after building it. // == Specify exclude patterns // The `serve` command will not automatically trigger a build for files listed in // the `.gitignore` file in the book root directory. The `.gitignore` file may // contain file patterns described in the [gitignore // documentation](https://git-scm.com/docs/gitignore). This can be useful for // ignoring temporary files created by some editors. // ***Note:*** *Only the `.gitignore` from the book root directory is used. Global // `$HOME/.gitignore` or `.gitignore` files in parent directories are not used.*
https://github.com/jamesrswift/blog
https://raw.githubusercontent.com/jamesrswift/blog/main/_posts/2024-07-09-packages.md
markdown
MIT License
--- layout: post title: "Typst packages - Why and How?" --- As of writing (version 0.11.1), Typst is rapidly approaching feature parity with LaTeX. However, as must necessarily be the case with anything new: the ecosystem to support it is still growing. Therefore, those who choose Typst will run into a problem sooner or later: "The package I need just doesn't exist yet" If at this point you choose nonetheless to persist with Typst, you'll generally have a crack at implementing something that suits your specific use case, as was the case for me in a previous blog post about sparklines or table notes. As I've recently been working on putting together some packages, now seems like a good a time as any to discuss the step that follows: sharing your implementation so others don't run into the same speedbump. # The directory layout At a minimum, a package consists of a manifest file (`typst.toml`), and an entry point (a typst file, generally `lib.typ` but you can choose whatever name). Generally though, you might want to include other files too, and very quickly it becomes a challenge to keep track of which files belong where. For this reason, the typst-community github organisation have put together a package-template repository. This section will go through how to get it setup and explain the lay of the land. The first task is to fork the template repository. Navigate to [https://github.com/typst-community/typst-package-template](https://github.com/typst-community/typst-package-template), and give it a name in the next menu. As of writing, there is no reason to change any of the defaults. ![Expression]({{ "/assets/2024-07-09-packages/setup-1.png" | relative_url }}) You should then have a repository to version control your package and to collaborate with others (just because it works for your use-case doesn't mean other people can't expand upon it). 
There are between 2 and 3 different ways your package will end up being bundled, each a slimmed down version of the previous: - This repository: All files - Package repository: manifest, source, documentation PDF, README.md and any files it depends on, LICENSE - What is sent to the consumer of your package when imported: source and assets that are required by your package such as images and fonts - (for templates) Files that should make up a newly created project using the template: an entry point and example assets? Files are filtered at certain levels where they have been listed in the exclusion blacklist, and so by default, all files are sent to the consumer. Therefore, it is important to keep in mind which files should make it to which stage, or, keep your directory structures meaningful. The template repository we picked opts for the directory structure: - `/docs/` contains `manual.typ` (this repository) and `manual.pdf` (also package repository) - `/scripts/` are kept only on this repository - `/src/` goes all the way to the consumer - `/tests/` this repository only This behaviour is defined explicitly in `.typstignore` (which concerns which files shouldn't move from this repository to the package repository) and the manifest file `typst.toml` (which concerns which files shouldn't move from the package repository to the consumer) There are instructions for how to use the repository in `README.md`, so this guide will only go through things I found tricky when doing it myself. # `just` Do It This template repository ships with some nifty scripts that are invoked using `just`. I won't cover how to install it, but I will detail what scripts are made available to you and why they might be useful.
- `just ci` will make sure your tests don't fail and then compile the documentation for you - `just doc` will compile the documentation - `just install` is the first step to testing that your package is shippable: It installs it locally under the `@local` namespace, so you can test importing it. - `just install-preview` does the same but under the `@preview` namespace - `just package` bundles everything while excluding things that shouldn't be sent to the package repository. - `just test` and `just update` concern running tests and updating their reference images respectively - `just uninstall` and `just uninstall-preview` will uninstall packages from your local computer. I don't end up running these commands manually very often because of the release workflow I'll discuss in the next section, but if you're unsure and you want to dry-run a release, `ci` and `package` are the commands which are quite diagnostic. # The release workflow Once you've taken care of everything, and then taken care of the things you forgot about (incrementing the package version in the manifest, updating the changelog, making sure the documentation is up to date), and tested that your package works when imported locally, it's time to make a pull request into the package repository. This can be a faff to do manually, so the package template comes bundled with some github actions that will do the heavy lifting for us (as long as we do some lifting to get it setup first). First things first, if we are doing a pull request into the package repository, we are going to need our own fork, which you can get started on here: [https://github.com/typst/packages/fork](https://github.com/typst/packages/fork). I strongly recommend naming the fork repository "typst-packages". Now, in our previous repository (the one for our package), navigate to `.github/workflows/release.yml` and make sure that line 10 properly points to this fork we made.
We need to give our workflow permission to do this heavy lifting for us (`ci`, `package`, make a new branch on the fork, copy our release into the right folders and commit), which is achieved by creating a fine-grained personal access token ([https://github.com/settings/tokens?type=beta](https://github.com/settings/tokens?type=beta)). By generating a new token, selecting "Only select repositories" and choosing the "typst-packages" repo, and giving read-write access to `contents` as shown in the picture: ![Expression]({{ "/assets/2024-07-09-packages/setup-2.png" | relative_url }}) Once you've generated it, you will be given a key. Copy this (as you won't be shown this again, you'll need to delete it and redo the steps if you lose it) and keep it secret (as in, same level as "password" secret). The one shown in the picture is ![Expression]({{ "/assets/2024-07-09-packages/setup-3.png" | relative_url }}) The last thing to do now is to tell our github actions this key (which is in effect giving it those permissions). On GitHub, navigate to `settings/secrets/actions` on your repository, and add the new key ![Expression]({{ "/assets/2024-07-09-packages/setup-4.png" | relative_url }}) We've just done a whole lot of steps and I haven't really said why: Now, we can publish our package by creating a release on GitHub! When we do, the workflow will automatically make a new branch on the fork we made, bundle our package, and commit it. All we need to do then is make the pull request. Easy as. To "press go" make a new release, and create a tag on release that has the version number you're releasing: (e.g., `v0.1.0` as the tag to release version 0.1.0). This is also a good place to list your changes! ![Expression]({{ "/assets/2024-07-09-packages/release-1.png" | relative_url }}) # Adding pictures to `README.md` One thing you might find yourself wanting to do is add an image to the front page of your package.
After all, it's what's shown on your repository, on the package repository, and on Typst Universe, and first impressions matter. The way I went about doing this was by creating a folder in my project's root directory (which I called `examples` because that's what the pictures were going to be), and include the pictures using html: ```<img alt="Light" src="./examples/file.png">``` Because of the multiple ways our project might be bundled, we now need to look into the `.typstignore` file and the `typst.toml` manifest to make sure it is being included/discard where needed. Importantly to my case (and therefore to the cases of others, probably), my `file.png` is generated from a typst file in the same directory, and we aren't interested in distributing that. To make sure that it gets included in the package repository (so it is shown on Typst Universe), we will edit the `.typstignore` file to add the following lines: ``` # Don't include anything from the examples directory examples/* # ... except png files !examples/*.png ``` We don't need these examples to be sent to the consumer because they don't affect the functionality of the package, so we will make sure to exclude these from that point on by specifying as much in the manifest ``` # ... exclude = [ # ... "examples", # exclude examples ] ``` And that's all: png files in `examples` are bundled but not sent to the consumer. # Release, Promote, Collaborate, Repeat Creating a package is just the first step. As I mentioned earlier, the problem you were trying to solve when you wrote the package is most likely not a problem that is exclusive to you. Saving others any wasted time includes making the package, but it includes making them aware of the package (meaning they can spend more time writing packages for the problems they've run into in their usecases). The Typst Discord server is a great place to do this! The other thing I mentioned earlier is that your use-case might just be a narrowing of a more general problem. 
Being open to suggestions and pull requests means you can go from a good package to a great package. That being said, not everyone has the time to maintain a project, so you might also want to consider asking the community if there's anyone who would like to take the reins from you. # Further Reading What this post didn't cover is how to make a nice documentation PDF (see the `tidy` or `mantys` packages). It didn't talk about what makes for an intuitive API within the typst ecosystem (see Tingerrr's guidelines on the typst-community GitHub organisation [https://github.com/typst-community/guidelines](https://github.com/typst-community/guidelines)). Typst's is still being actively developed, and in the pipeline is custom types. This will likely change how we lay our packages out, and so this guide might become out of date.
https://github.com/jw2476/cslog
https://raw.githubusercontent.com/jw2476/cslog/master/index.typ
typst
#import "@preview/codelst:0.0.3": sourcecode #set page(fill: rgb(17, 17, 27)) #set text(fill: rgb(202, 211, 245), font: "JetBrains Mono") #set page(height: auto) #set heading(numbering: "1.") #show raw.where(block: false): code => box[ #set text(size: 10pt, fill: rgb(250, 179, 135), font: "JetBrains Mono") #box( stroke: 1pt + rgb(69, 71, 90), fill: rgb(30, 30, 46), radius: 3pt, outset: (x: 2pt, y: 4pt), inset: (x: 2pt), code ) ] #show raw.where(lang: "pretty-rs"): code => block[ #set text(size: 11pt, fill: rgb(202, 211, 245), font: "JetBrains Mono") #block( stroke: 1pt + rgb(69, 71, 90), fill: rgb(30, 30, 46), radius: 4pt, inset: (x: 5pt, y: 5pt), sourcecode( numbers-style: (i) => text(font: "JetBrains Mono", size: 9pt, fill: rgb(255, 255, 255), i), raw(code.text, lang: "rs") ) )] #set raw( theme: "Mocha.tmTheme" ) #set table(stroke: rgb(137, 180, 250)) #show outline.entry: it => { if it.at("label", default: none) == <modified-entry> { it // prevent infinite recursion } else { [#outline.entry( it.level, it.element, it.body, [], // remove fill [] // remove page number ) <modified-entry>] } } #outline(indent: auto) = Introduction MMOs (massively multiplayer online games) are a genre of video games focusing on connecting thousands of players in one central shared world where all players can interact and affect each other's experiences. Games from this genre include World of Warcraft, RuneScape, Final Fantasy XIV, Guild Wars 2 and many more, many of these games however were released around a decade ago and don't take advantage of modern computing. Furthermore, many of these games struggle to stay relevant, especially to younger audiences due to the outdated graphics and systems. My project is a game that hopes to explore what a possible MMORPG taking advantage of modern technologies could look like, focusing on immersion, simulation and player interaction. 
= Analysis == Why Computation Computers are well suited to MMO servers as they can simulate a world with thousands of players in real-time while generating content for players to explore, a human wouldn't be able to keep up with so many individuals at once. Computers are also well suited for MMO clients as modern computer graphics can render realistic environments real-time. In addition computers are good at communicating with each other quickly and from long distances which is a needed feature of an MMO client otherwise it cannot talk to the server, humans are too slow at communication for it to be efficient on a large scale. == Stakeholders The main stakeholders for my game would be the players as they are the target audience. This group can be split into two main demographics: people who are new to MMOs, and those who are coming from an existing MMO. To deal with this range of experience levels, I will need to make sure there is a good set of tutorials to make sure players new to the genre can understand the game. For gamers more experienced with MMOs, I will need to look at existing games and implement similar feature sets that these players will be expecting, while still adding something new to the game to make it stand out. I'll also need to be considering both casual and competitive players, and make mechanics and content to keep both happy. To get a representative sample for each demographic, I will be talking to people from varying experience ranges with the genre. In addition I will be sending out testing samples throughout the development process, asking both groups for feedback. == Research === Final Fantasy XIV Final Fantasy XIV(FFXIV) is an MMORPG released by <NAME> in 2010. The game revolves around its single-player story which is mainly comprised of voice-acted cutscenes with various NPC characters. The story unlocks most of the other mechanics in the game such as gathering and crafting, dungeons, raids and mounts. 
Gathering and crafting in FFXIV is unique thanks to its systems: players are given a series of abilities that either increase Progression, Quality, or increase the potential of other abilities. Once Progression reaches 100% the item is gathered/ crafted, the Quality value at this time determines the item's chance to be High Quality, meaning the item will be valued higher. This creates a fun minigame for gathering and crafting which helps to disrupt the monotony of many video game gathering/ crafting systems, this is something I am hoping to replicate in my game. === Guild Wars 2 Guild Wars 2 is an MMORPG developed by ArenaNet in 2012, compared to Final Fantasy XIV the game is much more open ended in its progression, instead of a central story the player is given a level which determines the content they can access, XP can be acquired from many sources such as crafting, questing, PvP and exploration. Guild Wars 2 has had three expansions added to it: Heart of Thorns, Path of Fire and End of Dragons, each of these expansions have added classes, specialisations, new chapters to the Living World and new zones for players to explore. Heart of Thorns also changed the progression system from level-based to the Mastery System. The Mastery System is a huge tree of achievements, tasks and challenges that each reward the player with items and Mastery Levels which can be used like skill points in many games. This creates a completely open ended, horizontal progression system that rewards players for exploring the game while allowing them to pick and choose what rewards they want, this is something I am hoping to base my game's progression on. === Palia Palia is an unreleased MMO being developed by Singularity 6, it focuses on providing a more casual, laid-back experience that many other MMORPGs which can fixate on combat and a grand story when many players just want to craft items for their house, farm crops and trade with their friends. 
Not much has been released about Palia yet, but I would like to try and incorporate this focus on more casual features into the final game. === Spiritfarer Spiritfarer is an 2-player indie RPG developed by Thunder Lotus Games. Its a game about running a boat for spirits to live on while they prepare to move on into the afterlife, the core gameplay is about making sure these spirits are happy, well-fed and housed by gathering and crafting materials to be used for the construction of the boat. While the story is fantastic, its not something I am planning to focus on. The part that interests me is the minigames, there's one for foraging, mining, smithing, smelting, cutting down trees, weaving, cooking and more, each of these minigames are fun and engaging and your performance in the minigame determines the yield of the output, if your timing is off when your cutting a tree, you'll get less wood, if you time a pickaxe swing badly, you'll get less ore, etc. Spiritfarer is going to be main inspiration for minigames for otherwise tedious mechanics. === Stardew Valley Stardew Valley made by ConcernedApe presents itself as a basic indie farming game, and while the farming part of the game is great, the part where it is really fantastic is making the player feel like they are part of the game world. The village throws festivals, has birthday parties for the NPCs and as the player becomes friendlier with the villagers they start getting invited to these events, they get integrated into the village and made to feel at home. This personal connection with the player is what many feel Stardew Valley is all about, and something I would like to try and emulate in the final game. === Ashes of Creation Ashes of Creation is an unreleased MMORPG being developed by Intrepid Studios. It is focusing on making a dynamic world where player actions result in huge changes to the game's world. 
The world is split up into areas called nodes, each node has a development level which determines the type and level of buildings players are allowed to build in a city. Nodes also have abundances of certain resources and scarcity of others, for example one node may have a surplus of food but lack ore, so must trade with another nearby node for those resources to keep its economy alive. Events like this are called emergent gameplay as they emerged from other mechanics the game developers designed, rather than being designed directly. This emergent gameplay is something I am looking to achieve in the final game. === TODO: Path of Eternity === TODO: Runescape == Essential Features == Limitations Due to limitations in time and budget, the game will not target smartphones or consoles, nor macOS as I don't own a Mac, however both Windows and Linux should be supported. In addition, I'm not great at art, so I will use a simplistic, low-poly style so the assets are simpler to create, I'm also hoping to use music licenced under Creative Commons so I can avoid making my own, I'm more interested in the game design and programming. Also due to the goal of making the game run on as many devices as possible, I'm not going to be able to expect cutting-edge hardware, the game will need to be performant and optimised for older machines, my test for this will be my 6 year old laptop. Finally I don't have a large budget for servers, so the server code needs to be well optimised and efficient so it can run on a machine like a Raspberry Pi.
== Success Criteria To succeed my game will need to: - Run on as many devices as possible - Have low-latency, reliable networking - Have intuitive, fun mechanics and UI - Value the players time - Have many mechanics to allow players to play the parts they enjoy while interacting with the rest of the world resulting in emergent gameplay - Have an immersive world-wide story delivered through events and cutscenes Many of these criteria are very subjective, so there will be phases of playtesting with stakeholders during development where the stakeholders get to play the game for a while and will evaluate it against this criteria. I will also be evaluating against this criteria when designing mechanics and systems. == Requirements === Hardware - A computer with standard peripherals like keyboard, mouse/trackpad, etc - A GPU capable of running Vulkan, this is mainly driver dependant unless the GPU is 10+ years old - More concrete requirements will be decided when the game is closer to completion === Software Because the game needs to be able to run on as many devices as possible, I've tried to keep the requirements as basic as possible: - Up-to-date graphics driver will be needed as I'll be using modern graphics APIs like Vulkan - Windows 10 or Linux = Design == Gameplay Loops The majority of the game can be split into different gameplay loops: #image("design/gameplay_loops.png") === Gathering The gathering system will be responsible for the collection of most resources for crafting and trading, from wood to ores to passive monster drops. === Crafting === Combat === Trading #include "devlogs/mod.typ" // #include "AutoGen.typ" $ integral^(sum_(n = 0)^infinity ((-1)^n)/(n+1))_(sum_(n = 0)^infinity sqrt(n) - sqrt(n+1)) (lim_(t -> infinity) (1 + 1/e^t)^(e^t))^(d/"dx" ( x^2/(sin^2x + cos^2x))) $
https://github.com/neunenak/typst-leipzig-glossing
https://raw.githubusercontent.com/neunenak/typst-leipzig-glossing/master/README.md
markdown
MIT License
# Leipzig Glossing in Typst `leipzig-glossing` is a [Typst](https://github.com/typst/typst) library for creating interlinear morpheme-by-morpheme glosses according to the [Leipzig glossing rules](https://www.eva.mpg.de/lingua/pdf/Glossing-Rules.pdf). # Documentation Run `typst compile documentation.typ` in the root of the repository to generate a pdf file with examples and documentation. This command is also codified in the accompanying [justfile](https://github.com/casey/just) as `just build-doc`. The definitions intended for use by end users are the `gloss` and `numbered-gloss` functions, and the `abbreviations` submodule. # Contributing The canonical repository for this project is on the [Gitea instance](https://code.everydayimshuflin.com/greg/typst-lepizig-glossing). The repository is also [mirrored on Github](https://github.com/neunenak/typst-leipzig-glossing/). Bug reports and code contributions are welcome from all users. ## License This library uses the MIT license; see `LICENSE.txt`. ## Contributors Thanks to [<NAME>](https://github.com/betoma) for a number of suggestions and improvements. Thanks to [<NAME>](https://github.com/rwmpelstilzchen) for the labeling functionality.
https://github.com/Myriad-Dreamin/typst.ts
https://raw.githubusercontent.com/Myriad-Dreamin/typst.ts/main/fuzzers/corpora/text/deco_07.typ
typst
Apache License 2.0
#import "/contrib/templates/std-tests/preset.typ": * #show: test-page // Test underline background #set underline(background: true, stroke: (thickness: 0.5em, paint: red, cap: "round")) #underline[This is in the background]
https://github.com/DieracDelta/presentations
https://raw.githubusercontent.com/DieracDelta/presentations/master/polylux/book/src/IMPORT.typ
typst
#import "@preview/polylux:0.3.1": *
https://github.com/01mf02/jq-lang-spec
https://raw.githubusercontent.com/01mf02/jq-lang-spec/main/icfp.typ
typst
#import "@preview/diagraph:0.2.1" #import "@preview/ctheorems:1.1.0" #import "acm.typ": acmart #import "common.typ": * #show: ctheorems.thmrules #show: acmart.with( format: "acmsmall", title: [A formal specification of the jq language], authors: ( ( name: "<NAME>", email: "<EMAIL>", orcid: "0000-0003-1634-9525", affiliation: none, ), ), authors-short: "Färber", anonymous: false, ccs: ( ([Software and its engineering], ( (500, [Semantics]), (500, [Functional languages]), )), ), abstract: [ jq is a widely used tool that provides a programming language to manipulate JSON data. However, the jq language is currently only specified by its implementation, making it difficult to reason about its behaviour. To this end, we provide a formal syntax and denotational semantics for a large subset of the jq language. Our most significant contribution is to provide a new way to interpret updates that allows for more predictable and performant execution. ], keywords: ("jq", "JSON", "semantics"), pub: none, /*( journal: "Journal of the ACM", journal-short: "J. ACM", volume: 37, number: 4, article: 1, month: 8, year: 2018, doi: "XXXXXXX.XXXXXXX", ), */ copyright: pub => [ #line(length: 30%, stroke: 0.5pt) #link("https://creativecommons.org/licenses/by/4.0/")[ #image("cc-by.svg", width: 10%) This work is licensed under a Creative Commons Attribution 4.0 International License. ] \ © 2024 Copyright held by the owner/author(s). ], ) #set raw(lang: "jq") #set figure(placement: auto) /* TODO: - completeness is if we can construct any valid value - explain `main` */ = Introduction UNIX has popularised the concept of _filters_ and _pipes_ #cite(label("DBLP:journals/bstj/Ritchie84")): A filter is a program that reads from an input stream and writes to an output stream. Pipes are used to compose filters. JSON (JavaScript Object Notation) is a widely used data serialisation format @rfc8259. 
A JSON value is either null, a boolean, a number, a string, an array of values, or an associative map from strings to values. jq is a tool that provides a language to define filters and an interpreter to execute them. Where UNIX filters operate on streams of characters, jq filters operate on streams of JSON values. This allows to manipulate JSON data with relatively compact filters. For example, given as input the public JSON dataset of streets in Paris @paris-voies, jq retrieves the number of streets (6528) with the filter "`length`", the names of the streets with the filter "`.[].nomvoie`", and the total length of all streets (1574028 m) with the filter "`[.[].longueur] | add`". jq provides syntax to update data; for example, to remove geographical data obtained by "`.[].geo_shape`", but leaving intact all other data, we can use "`.[].geo_shape |= empty`". // jq -c was used for both formatting the original dataset and the "shrunk" one. This shrinks the dataset from \~25 MB to \~7 MB. jq provides a Turing-complete language that is interesting on its own; for example, "`[0, 1] | recurse([.[1], add])[0]"` generates the stream of Fibonacci numbers. This makes jq a widely used tool. We refer to the program jq as "jq" and to its language as "the jq language". The jq language is a dynamically typed, lazily evaluated functional programming language with second-class higher-order functions @jq-description. The semantics of the jq language are only informally specified, for example in the jq manual @jq-manual. However, the documentation frequently does not cover certain cases, and historically, the implementation often contradicted the documentation. /* For example, the documentation stated that the filter `limit(n; f)` "extracts up to `n` outputs from `f`". However, `limit(0; f)` extracts up to 1 outputs from `f`, and for negative values of `n`, `limit(n; f)` extracts all outputs of `f`. 
*/ The underlying issue is that there existed no formally specified semantics to rely on. Having such semantics allows to determine whether certain behaviour of a jq implementation is accidental or intended. However, a formal specification of the behaviour of jq would be very verbose, because jq has many special cases whose merit is not apparent. Therefore, we have striven to create denotational semantics (@semantics) that closely resemble those of jq such that in most cases, their behaviour coincides, whereas they may differ in more exotic cases. The goals for creating these semantics were, in descending order of importance: - Simplicity: The semantics should be easy to describe, understand, and implement. - Performance: The semantics should allow for performant execution. - Compatibility: The semantics should be consistent with jq. We created these semantics experimentally, by coming up with jq filters and observing their output for all kinds of inputs. From this, we synthesised mathematical definitions to model the behaviour of jq. The most significant improvement over jq behaviour described in this text are the new update semantics (@updates), which are simpler to describe and implement, eliminate a range a potential errors, and allow for more performant execution. The structure of this text is as follows: @tour introduces jq by a series of examples that give a glimpse of actual jq syntax and behaviour. From that point on, the structure of the text follows the execution of a jq program as shown in @fig:structure. @syntax formalises a subset of jq syntax and shows how jq syntax can be transformed to increasingly low-level intermediate representations called HIR (@hir) and MIR (@mir). After this, the semantics part starts: @values defines the type of JSON values and the elementary operations that jq provides for it. Furthermore, it defines other basic data types such as errors, exceptions, and streams. 
@semantics shows how to evaluate jq filters on a given input value. @updates then shows how to evaluate a class of jq filters that update values using a filter called _path_ that defines which parts of the input to update, and a filter that defines what the values matching the path should be replaced with. The semantics of jq and those that will be shown in this text differ most notably in the case of updates. Finally, we show how to prove properties of jq programs by equational reasoning in @obj-eq. #figure(caption: [Evaluation of a jq program with an input value. Solid lines indicate data flow, whereas a dashed line indicates that a component is defined in terms of another. ], diagraph.render(read("structure.dot"))) <fig:structure> #include "tour.typ" #include "syntax.typ" #include "values.typ" #include "semantics.typ" = Equational reasoning showcase: Object Construction <obj-eq> We will now show how to prove properties about HIR filters by equational reasoning. For this, we use the lowering in @mir and the semantics defined in @semantics. As an example, we will show a few properties of object construction. Let us start by proving a few helper lemmas, where $c$ and $v$ always denote some arbitrary context and value, respectively. #lemma[ For any HIR filters $f$ and $g$ and any Cartesian operator $cartesian$ (such as addition, see @tab:binops), we have $floor(f cartesian g)|^c_v = sum_(x in floor(f)|^c_v) sum_(y in floor(g)|^c_v) stream(x cartesian y)$. ] <lem:cart-sum> #proof[ The lowering in @tab:lowering yields $floor(f cartesian g)|^c_v = (floor(f) "as" var(x') | floor(g) "as" var(y') | var(x') cartesian var(y'))|^c_v$. Using the evaluation semantics in @tab:eval-semantics, we can further expand this to $sum_(x in floor(f)|^c_v) sum_(y in floor(g)^c{var(x') |-> x}_v) (var(x') cartesian var(y'))|^c{var(x') |-> x, var(y') |-> y}_v$. 
Because $var(x')$ and $var(y')$ are fresh variables, we know that they cannot occur in $floor(g)$, so $floor(g)^c{var(x') |-> x}_v = floor(g)^c_v$. Furthermore, by the evaluation semantics, we have $(var(x') cartesian var(y'))|^c{var(x') |-> x, var(y') |-> y}_v = stream(x cartesian y)$. From these two observations, the conclusion immediately follows. ] #lemma[ For any HIR filters $f$ and $g$, we have $floor({f: g})|^c_v = sum_(x in floor(f)|^c_v) sum_(y in floor(g)|^c_v) stream({x: y})$. ] <lem:obj-sum> #proof[Analogously to the proof of @lem:cart-sum.] We can now proceed by stating a central property of object construction. #theorem[ For any $n in NN$ with $n > 0$, we have that $floor({k_1: v_1, ..., k_n: v_n})|^c_v$ is equivalent to $ sum_(k_1 in floor(k_1)|^c_v) sum_(v_1 in floor(v_1)|^c_v) ... sum_(k_n in floor(k_n)|^c_v) sum_(v_n in floor(v_n)|^c_v) stream(sum_i {k_i: v_i}). $ ] #proof[ We will prove by induction on $n$. The base case $n = 1$ directly follows from @lem:obj-sum. For the induction step, we have to show that $floor({k_1: v_1, ..., k_(n+1): v_(n+1)})|^c_v$ is equivalent to $ sum_(k_1 in floor(k_1)|^c_v) sum_(v_1 in floor(v_1)|^c_v) ... sum_(k_(n+1) in floor(k_(n+1))|^c_v) sum_(v_(n+1) in floor(v_(n+1))|^c_v) stream(sum_i^(n+1) {k_i: v_i}). $ We start by $ & floor({k_1: v_1, ..., k_(n+1): v_(n+1)})|^c_v =^"(lowering)" \ = & floor(sum_i {k_i: v_i})|^c_v = \ = & floor(sum_(i = 1)^n {k_i: v_i} + {k_(n+1): v_(n+1)})|^c_v =^#[(@lem:cart-sum)] \ = & sum_(x in floor(sum_(i=1)^n {k_i: v_i})|^c_v) sum_(y in floor({k_(n+1): v_(n+1)})|^c_v) stream(x + y). $ Here, we observe that $floor(sum_(i=1)^n {k_i: v_i})|^c_v = floor({k_1: v_1, ..., k_n: v_n})|^c_v$, which by the induction hypothesis equals $ sum_(k_1 in floor(k_1)|^c_v) sum_(v_1 in floor(v_1)|^c_v) ... sum_(k_n in floor(k_n)|^c_v) sum_(v_n in floor(v_n)|^c_v) stream(sum_i^n {k_i: v_i}). 
$ We can use this to resume the simplification of $floor({k_1: v_1, ..., k_(n+1): v_(n+1)})|^c_v$ to $ sum_(k_1 in floor(k_1)|^c_v) sum_(v_1 in floor(v_1)|^c_v) ... sum_(k_n in floor(k_n)|^c_v) sum_(v_n in floor(v_n)|^c_v) sum_(y in floor({k_(n+1): v_(n+1)})|^c_v) stream(sum_i^n {k_i: v_i} + y) $ Finally, applying @lem:obj-sum to $floor({k_(n+1): v_(n+1)})|^c_v$ proves the induction step. ] We can use this theorem to simplify the evaluation of filters such as the following one. #example[ The evaluation of ${qs(a): (1, 2), (qs(b), qs(c)): 3, qs(d): 4}$ //(with arbitrary context and input) yields $stream(v_0, v_1, v_2, v_3)$, where $ v_0 = {qs(a) |-> 1, qs(b) |-> 3, qs(d) |-> 4},\ v_1 = {qs(a) |-> 1, qs(c) |-> 3, qs(d) |-> 4},\ v_2 = {qs(a) |-> 2, qs(b) |-> 3, qs(d) |-> 4},\ v_3 = {qs(a) |-> 2, qs(c) |-> 3, qs(d) |-> 4}. $ ] = Conclusion We have shown formal syntax and semantics of a large subset of the jq programming language. On the syntax side, we first defined formal syntax (HIR) that closely corresponds to actual jq syntax. We then gave a lowering that reduces HIR to a simpler subset (MIR), in order to simplify the semantics later. We finally showed how a subset of actual jq syntax can be translated into HIR and thus MIR. On the semantics side, we gave formal semantics based on MIR. First, we defined values and basic operations on them. Then, we used this to define the semantics of jq programs, by specifying the outcome of the execution of a jq program. A large part of this was dedicated to the evaluation of updates: In particular, we showed a new approach to evaluate updates. This approach, unlike the approach implemented in jq, does not depend on separating path building and updating, but interweaves them. This allows update operations to cleanly handle multiple output values in cases where this was not possible before. Furthermore, in practice, this avoids creating temporary data to store paths, thus improving performance. 
This approach is also mostly compatible with the original jq behaviour, yet it is unavoidable that it diverges in some corner cases. We hope that our work is useful in several ways: For users of the jq programming language, it provides a succinct reference that precisely documents the language. Our work should also benefit implementers of tools that process jq programs, such as compilers, interpreters, or linters. In particular, this specification should be sufficient to implement the core of a jq compiler or interpreter. Finally, our work enables equational reasoning about jq programs. This makes it possible to prove correctness of jq programs or to implement provably correct optimisations in jq compilers/interpreters. #bibliography("literature.bib")
https://github.com/HKFoggyU/hkust-thesis-typst
https://raw.githubusercontent.com/HKFoggyU/hkust-thesis-typst/main/hkust-thesis/templates/signature-page.typ
typst
LaTeX Project Public License v1.3c
#import "../imports.typ": *
#import "../utils/invisible-heading.typ": invisible-heading

// Renders the thesis signature page: thesis title, author, the standard
// certification statement, and signature lines for every supervisor plus
// the department head.
//
// Parameters:
//   config: global layout configuration; only `config.twoside` is read here,
//           to force the page onto an odd (recto) page in two-sided printing.
//   info:   thesis metadata dictionary; reads `degree`, `title` (array of
//           lines), `author`, `supervisors`, `department-head`, `department`,
//           and `submit-date`.
#let signature-page(
  config: (:),
  info: (:),
) = {
  // Resolve the degree name from the degree code in `info`; only the short
  // form (e.g. "PhD") is used in the certification text below.
  let (degreeFull, degreeShort) = set-degree(info.degree)
  set align(center)
  [
    // Start on a fresh page; in two-sided layouts this must be an odd page.
    #pagebreak(weak: true, to: if config.twoside { "odd" })
    // Invisible heading so this page appears in the PDF outline without
    // printing a visible heading on the page itself.
    #invisible-heading("Signature Page")
    #heading(outlined: false)[#text(size: constants.font-sizes.title)[#info.title.join("\n")]]
    #do-repeat([#linebreak()], 5)
    by\
    #do-repeat([#linebreak()], 1)
    #info.author
    #do-repeat([#linebreak()], 2)
    This is to certify that I have examined the above #degreeShort thesis\
    and have found that it is complete and satisfactory in all respects,\
    and that any and all revisions required by\
    the thesis examination committee have been made.
    #do-repeat([#linebreak()], 4)
    // One signature line per supervisor, labelled "Name, Role".
    #for (person, content) in info.supervisors {
      signature-line()
      [#content.name, #content.role]
      do-repeat([#linebreak()], 3)
    }
    #signature-line()
    #info.department-head.name, #info.department-head.position
    #do-repeat([#linebreak()], 1)
    #info.department
    #info.submit-date.date #info.submit-date.month #info.submit-date.year
  ]
}
https://github.com/Myriad-Dreamin/tinymist
https://raw.githubusercontent.com/Myriad-Dreamin/tinymist/main/crates/tinymist-query/src/fixtures/signature/builtin_with.typ
typst
Apache License 2.0
// Analyzer fixture: `rgb.with(...)` yields a curried function value; the
// `/* ident after */` marker positions the query cursor at the `owo`
// identifier (NOTE(review): marker position may be offset-sensitive —
// confirm against the snapshot before moving these lines).
#let owo = rgb.with(50%, 50%, 50%); #(/* ident after */ owo);
https://github.com/polarkac/MTG-Stories
https://raw.githubusercontent.com/polarkac/MTG-Stories/master/stories/042%20-%20Strixhaven%3A%20School%20of%20Mages/006_Episode%204%3A%20Put%20to%20the%20Test.typ
typst
#import "@local/mtgstory:0.2.0": conf #show: doc => conf( "Episode 4: Put to the Test", set_name: "Strixhaven: School of Mages", story_date: datetime(day: 14, month: 04, year: 2021), author: "<NAME>", doc ) Will startled awake. It took him a moment to realize that he was in his room, that the shadowy figures lurking in the corners were remnants of whatever dream he had risen from. He was still in his uniform, now rumpled. His latest assignment, on the desk in front of him, remained unfinished. Outside, he could see the Arcavios night, a slash of darkness punctuated by the usual odd glows across the campus. There was no sign of Rowan. Her side of the room was still in the same state of disarray it had been in for weeks. He stood up, wincing at a crick in his neck, just as a shout came from the hallway. "—outhern gate!" "How many did she—?" "Is everyone—" In the crowd of students rushing past, Will spotted someone from the Prismari Mage Tower team—<NAME>, the point guard who had impressed Quint with his earth magic. "Hey! What's going on?" Wickel pointed down the hall. "Hey first-year! Follow the crowd—Dean Uvilda is waiting to take you to a designated shelter." "But what's #emph[happening] ?" "The Oriq are here," he said curtly, before turning and running after the crowd of younger students. Will stood there for a moment, stunned, a sick feeling growing in his stomach. Professor Onyx had been right. Outside, Will stumbled into a scene of utter chaos. The crowd, joined by more and more students flowing out of the dorms, had frozen by one end of the courtyard. On the other side, past their horrified, stunned expressions, Will could see an encroaching wall of dark shapes. No—not shapes. Creatures. #figure(image("006_Episode 4: Put to the Test/01.jpg", width: 100%), caption: [Mage Hunters' Onslaught | Art by: <NAME>awan], supplement: none, numbering: none) They skittered across the manicured lawn on narrow, pointed legs, insectile plating covering wine-purple flesh. 
Glowing violet spines ran along their backs and up to eyeless heads, featureless altogether save for a gaping, toothy maw. Terrible screeches rent the air. They sounded hungry. At first, Will only thought his knees were going weak—but the whole ground was shaking. He saw Wickel step out of the crowd, his entire body vibrating with energy, and thrust both hands into the loam beneath him. A semicircle of churning soil rippled out from where he stood, rising up into a wall of dense earth between the creatures and the students. He turned back to the wide-eyed first-years. "Run! I said #emph[run] !" Even as he hurried to comply, Will could see the first of the horrible creatures coming over the earthen wall, scaling it effortlessly. He needed to find his sister. Where was Rowan? #v(0.35em) #line(length: 100%, stroke: rgb(90%, 90%, 90%)) #v(0.35em) Across the campus of Strixhaven, Rowan howled and swung her sword. It bit into a joint between the creature's armor plating, sending a gout of dark blood spurting across her uniform and down onto the overgrown garden surrounding the Witherbloom dormitories. Behind her, Plink was backing away from one of the things, squealing in fear; with a shouted incantation, Auvernine called thorny roots up from the soil to wrap around the creature's legs and drag it down into the earth. "They're everywhere!" shouted Plink, almost stumbling over the creature's buried form. "We're surrounded! Abandon ship! Surrender!" Rowan scanned the field that stretched before Witherbloom college. Her friend was right. The creatures were advancing in an eerily glowing wall of chitin, pushing the students back toward the dorms. "If we just wait for the professors—" started Auvernine. "No. If we just wait, we're going to be overwhelmed. We have to get past them. We have to get out," said Rowan. "And go where?" asked Auvernine desperately. Rowan glanced toward the Biblioplex, her thoughts turning to Will. If she knew her brother, that's where he would be. 
"There," she said, pointing to its vast silhouette in the night. "Oh, #emph[now ] you want to study?" said Plink, stumbling toward her friends, nearly hysterical. It wasn't just Will that drew her there, though. It was in the center of the campus; if the deans and professors would choose anywhere to make a stand, it would be there—and her brother had never stopped going on about all the powerful spells tucked away in those dusty old tomes. #emph[Let's hope you're right, Will.] #v(0.35em) #line(length: 100%, stroke: rgb(90%, 90%, 90%)) #v(0.35em) "<NAME>, watch out!" Liliana spun around at her student's shout just as an Oriq agent sent a hissing coil of energy at her. It was a vicious spell, something meant to suck the life from her—but she was quite experienced in magic such as this. She stopped the spell inches from her outstretched palm and regarded it coolly. Behind her, the crowd of students that had been in her lecture hall moments ago stared, agape and terrified. #emph[He could have hit them] , she thought. #emph[Well. Fair is fair.] With a gesture, Liliana sent it racing back toward the caster, twice as hungry as before. He tried to flinch away, but the ravenous magic devoured him before he had a chance to even scream. <NAME> and Imbraham came to join her, jogging down the path that led to Quandrix College behind another wave of students. "<NAME>," said Imbraham in that high, odd voice of his. "We are being pursued by a very curious foe. I suggest we regroup with the other faculty at—" He was cut off by a scream; a student had fallen behind. "Go!" Imbraham barked. "I'll watch over this group." They were off at once, Kianne and Liliana matching strides. Another scream followed; this time, they could see the student, collapsed on the ground and cowering as an insect-like monster loomed over him. "<NAME>unters," Kianne hissed under her breath. Liliana could see more of them boiling out from the shadows, their pointed legs clicking against the stone cobbles. 
The creature reared back, the segments of its body glowing, and Kianne sent a geometric lance of force piercing through it. Liliana grabbed the terrified student and pushed him behind her. "Get out of here." Something else had caught her attention, though—amid the crawling darkness on all sides, there seemed to be a human figure, a man in a strange red uniform. At least she thought he was human at first glance; there was something wrong with his face, a sharpening and stretching to the cheekbones that reminded her of mandibles. He locked eyes with her, and with eerie coordination, all of the other mage hunters surged toward them. "Who is that?" said Kianne. "I don't know," Liliana said. "But it seems he's controlling these creatures somehow." De<NAME>'s face twisted in horror. "All of them? I've never seen magic like that before." "There's always a spell," muttered Liliana. She extended her hand and black threads of magic shot out from the tips of her fingers, but before they could make contact with him, one of the creatures threw itself in the way. The spell burrowed into its shell, making its chitin crack and crumble to dust. #figure(image("006_Episode 4: Put to the Test/02.jpg", width: 100%), caption: [Defend the Campus | Art by: Izzy], supplement: none, numbering: none) At her side, <NAME> lifted her hands, light shining around her. In seconds, a horde of angular, catlike fractals had assembled. The constructs leapt forward at her direction, colliding with the wave of approaching mage hunters. The man in the red coat faded back into the crowd of churning, spiny bodies, and Liliana was leaning forward to chase him when something stopped her. All this mayhem. An attack all across the campus, with no apparent aim but destruction and chaos. Why? Because, as Liliana realized with building dread, it wasn't an attack—it was a misdirection. #v(0.35em) #line(length: 100%, stroke: rgb(90%, 90%, 90%)) #v(0.35em) Will ran. 
He ran as fast as he could, trying not to think about the horrifying creature behind him and its many legs, or the burning in his lungs as he pushed himself harder, or the wet grass beneath his feet that would be so easy to slip on— #emph[Wait. ] Without stopping, Will stuck his hand down toward the ground and applied a bit of focus. Behind him, the evening dew condensed into hard ice. He turned, looking over his shoulder just in time to see one of the monster's long legs skid off to the side, collapsing underneath it. "Yes!" shouted Will, shortly before running into something spiky and huge. He bounced off the second creature's shell as it whipped one claw at him, nicking his uniform but missing anything important. Falling to the ground, he rolled to one side as another claw buried itself in the earth where his head had been a moment before. Will stuck out his arms blindly, making contact with its armor-plated midsection, and drew the heat from it so fast a crack split through the middle of the shell. The creature fell back, screeching, but by then the other one had gotten up and was scuttling toward him. #figure(image("006_Episode 4: Put to the Test/03.jpg", width: 100%), caption: [Mage Hunter | Art by: <NAME>], supplement: none, numbering: none) Suddenly, a roar filled the air, the sound rolling through the sky. More roars answered it until the ground trembled from the cacophony. The creature leapt away from Will and almost #emph[galloped ] on its many legs, moving fast—but not fast enough. A column of fire shot down from the sky and swept over the ground. All around, Will could hear the shrieks of the invading creatures, could smell the carbon in the air as their shells scorched and popped and blackened. In a moment, they were nothing but ashes, scattered to the wind by the beating of giant wings. Will threw his arms over his head, calling up sheets of ice around himself as another burst of flame ripped through the courtyard. 
It was barely enough to protect him from the scorching heat, but Will couldn't help the whoop of joy that burst out of him. The dragons had come. #v(0.35em) #line(length: 100%, stroke: rgb(90%, 90%, 90%)) #v(0.35em) Rowan turned at the sound of her name, letting her friends rush past her toward the main campus. There, with a sword of ice in his hand and a goofy-looking slash across the front of his uniform, was her brother. "Will!" They ran toward each other and fell into a hug, squeezing each other tightly. When Rowan pulled back, she frowned at the makeshift weapon in his hand. "Where's your sword?" "In our room," Will said between panting breaths. "I came as fast as I could." "Look out!" someone shouted behind them. Rowan barely had time to register the Oriq agent stepping out from behind the hedge; as he stuck out his hand, thorns of lethal blood-red energy lancing toward them, she knocked Will to the ground. There was a gurgling sound, then silence; it took Rowan a moment to realize she had closed her eyes. When she opened them, she saw the Oriq splayed across the ground. Nearby was the familiar stern presence of <NAME>, who whirled on them with those cold violet eyes. "You two. Why aren't you taking shelter?" "We were attacked," they said—almost at the same time. "At the Prismari dorms," said Will. "And the Witherbloom ones," said Rowan. "They were surrounding us—almost like they were trying to keep us in one place." "That's because they were," said Professor Onyx. "This is part of some distraction." "Distracting us from what?" asked Will. "I don't have the answer to that," she said. "Not yet. But I know one thing—the mage hunters aren't just herding in the students. They've formed a perimeter around the Biblioplex." #emph[A perimeter. ] Rowan didn't like the sound of that. A living wall of spines, of those glowing purple feelers, of snapping teeth. "What should we do?" <NAME> turned those violet eyes on her, then. 
"If I were a responsible professor, I'd bring you two somewhere safe. I'd keep you well out of all this." "But you're not going to do that," said Rowan. "Are you?" The corner of the professor's mouth twitched—Rowan almost would have called that a smile. "No. I'm not quite so responsible as that. And I need help." #v(0.35em) #line(length: 100%, stroke: rgb(90%, 90%, 90%)) #v(0.35em) "So this is our way in?" said Will, putting his hand on the circle of stone. It seemed set into one of the rolling hills in the wilder section of the Witherbloom campus. "Yes. It's an old maintenance passage I found when I was a student." The professor set her hand on the door and muttered something under her breath. With a slow grinding that seemed alarmingly loud to Will, the circle of stone parted, retreating into the side of the hill. On the other side was a long dark tunnel. "They let students down here?" he asked. Rowan and Professor Onyx both arched an eyebrow. "No," said Will. "No, I guess not." Rowan summoned a sparking ball of light into her hand and took a few cautious steps down the passage, the professor and Will following close behind. "Is there going to be, um," started Will, "anything waiting for us down there?" "I don't know," said Professor Onyx. "It's possible. Nobody at Strixhaven has used these tunnels in a long time, but I'm hardly the only one who knows about them. I believe Extus has been sending his people through them for months now." "Extus?" "The man responsible for all of this. The leader of the Oriq." Will felt something catch in his throat. "Ah. So the only thing we need to worry about is a bunch of murderous mages wielding dark magic." "Toughen up, Will," said Rowan. "It's nothing we haven't seen before." "Is that so?" Professor Onyx seemed amused. "You two are hardly the most likely heroes in all this. But I suppose I'm not one to talk." What that meant, Will had no idea. 
#v(0.35em) #line(length: 100%, stroke: rgb(90%, 90%, 90%)) #v(0.35em) Extus strode down the curving corridors of the Biblioplex, running one hand along the fine wooden shelves. So much wisdom in those old books—and yet not a drop of it seemed to be helping them now. It was odd to hear the habitual quiet of this place once again, only now the silence was occasionally broken by the screams of a student caught inside. "Extus!" He turned at the sound of his name. One of his agents approached, carrying a heavy book with frayed, yellowing pages. Tavver, if he judged the voice correctly—a younger member, and quite dedicated to the cause. He had already been on several missions deep into the heart of the school. "I found it in the East Wing, just as you said, sir." "Fine work." Extus took the book and wiped off a layer of dust. Gilded letters shone in the low light. "What is it, sir? If you don't mind me asking," said Tavver. "The work of another brilliant mind overlooked and left to rot. They are so quick to throw us away if we don't suit their purpose." He held out a hand, feeling benevolent. "You will be rewarded for everything you've done today." Just as the agent took his hand, Extus spotted the student in Silverquill robes over his shoulder. She was bleeding badly, one arm dangling limp at her side, but she glared at them both with an expression of utter fury. He #emph[felt ] the hate radiating from the spell she was weaving, an orb of perfect darkness, which she whipped straight toward him. Without hesitation, Extus tightened his grip on Tavver's arm and pulled him close, spinning him into the spell's path. The agent's body bowed under the impact, a scream bouncing off the inside of his mask as he went limp and dropped to the floor. The student raised her arms, trying to gather more energy, but she was spent. Extus flung a bolt of crackling energy sailing through the air that hit the student dead-on. As she crumpled, the library went quiet once again. 
He glanced down at the body of his masked agent, now still. Without a second look, Extus continued on. #v(0.35em) #line(length: 100%, stroke: rgb(90%, 90%, 90%)) #v(0.35em) "You found this as a student?" marveled Will, his voice bouncing eerily off the walls of the stone tunnel. "How long ago was that?" asked Rowan. She held their only light, a jumping bit of spark-magic that did odd things to their shadows. "A very long time ago," Professor Onyx said. "It was a very different time, and I was a very different person back then." They emerged into what looked like a cave chamber. Above, the gray stone ceiling vanished into darkness. A chasm separated the ledge where they stood from another tunnel, which he could barely make out in the low light; a wooden bridge spanned the abyss. "Um, is there some other way to cross?" Will asked, eyeing the frayed rope and ancient-looking boards. "You know, I never found out." Professor Onyx stepped lightly on the edge of the bridge. Rowan followed her, stepping with alarming speed over the rotten planks. "Slow down," Will said, his pace steady and slow behind her. "Each minute we waste here is one the Oriq spend hurting people," Rowan said over her shoulder. Each stride forward sent chunks of wood falling into the chasm below. A crack split the air, bouncing off the walls. Stones skittered down as clouds of dust bloomed around them. Rowan took another step and the wood snapped beneath her. Will dove as Rowan fell, locking one hand around her wrist. Knocking more of the boards away, he heaved and pulled, lifting Rowan back through the bridge. They landed in a heap, then crawled the rest of the way. "Thanks," said Rowan, her voice shaky. "You'd do the same for me." "Come on," said Professor Onyx, on the other side. She barely seemed to notice their near-death experience. "Hurry along." "What does he want?" said Will. "Extus, I mean. What is he here for?" "There are any number of things he could be after. 
Tomes of great value, magical artifacts—the Biblioplex is full of things an aspiring megalomaniac might want." "So where are you taking us?" "I'm taking you where I would go, if I wanted to cause the most damage possible." Will only stared as she continued down the tunnel. "We need to keep moving," said Rowan, nudging him forward. #v(0.35em) #line(length: 100%, stroke: rgb(90%, 90%, 90%)) #v(0.35em) Extus rested one hand on the smooth, cool wood of the double doors leading to the Hall of Oracles. They had been locked, but thankfully, the Oriq attack had happened too quickly for any wards to be activated and set in place. With a brief exertion of will, he blasted the doors off their hinges and stepped inside. #figure(image("006_Episode 4: Put to the Test/04.jpg", width: 100%), caption: [Hall of Oracles | Art by: <NAME>], supplement: none, numbering: none) Encircling the room were stern, wizened visages carved in stone—oracles, long dead but not forgotten. Extus thought he noticed a certain scorn in their flinty eyes, as if even from the grave they didn't approve of what he was doing here. As if they didn't think he belonged in their ranks. It didn't matter. They were dead. And when he was done, they would be glad that they were. His gaze shifted to the ceiling, and even with his mask, he had to squint against its light. The Strixhaven Snarl hung in the air, tendrils of energy whipping and snapping around the hall. Mana from the primordial origins of this world, still swirling in a maelstrom of power. Below it sat a series of stone rings, nearly as old as the vortex itself—containment circles, Extus knew. Its light cast the entire room in a soft blue glow, sending shadows dancing across the floor. #emph[Yes] , he thought. #emph[This will do.] Opening the book in his hand, he flipped through the yellowed pages until he found what he was looking for. Footsteps sounded from the corridor as Oriq agents filed into the room. Each of them carried a book or scroll. 
Extus nodded, glad that his mask hid the giddy smile stretched across his face. "Good. Array them as we discussed. It's time." One by one, the agents placed the books and scrolls carefully in front of their leader until the ancient texts formed a semi-circle opened before him. Pausing for only a moment to savor the occasion, Extus began to read. #v(0.35em) #line(length: 100%, stroke: rgb(90%, 90%, 90%)) #v(0.35em) Liliana expected to have to fight her way to the Snarl—there was no way the Oriq would leave their ultimate prize unguarded, after all—but she hadn't expected her charges to be so enthusiastic about this part of their "adventure." She hardly had to kill anyone; at the first sign of one of the masked Oriq, Rowan would zap them with a current of energy that left them twitching on the ground. Even Will was quite useful, forming shells of ice around the fallen Oriq agents so when their muscles stopped seizing, they wouldn't be able to do much more than shiver. When they reached the Hall of Oracles, though, the doors were already ripped apart. Inside, she could make out a group of masked figures silhouetted by the rippling light of the vortex within. At their center, one of them was incanting something from a large heavy tome. She could feel the arcane currents in the air shifting, listening, in a way she had felt too many times before. Powerful dark magic was at work here; even the Kenrith twins seemed to notice something, both of them going very still at her side. "We're too late," said Liliana. "He's already bound himself to the Snarl." "Not yet we're not." Rowan was the first to break the trance that seemed to hold them all, rushing into the room. "Wait!" called Will, running after her before Liliana could stop him. #emph[Fools—they can't face him with that much power at his disposal! ] she thought. Already the masked mages were turning, their hands alight with bright fire and bubbling venom and other crude, vicious spells. 
Rowan screamed with a mix of fury and a frightening delight as lightning crawled over her skin and jumped to a group of Oriq, unbridled power let loose. Smoke curled from under their hoods as they collapsed. #emph[That girl is a force to be reckoned with already] , thought Liliana.#emph[ A few more years, and she'll be truly terrifying.] But Rowan was still too inexperienced to sense the Oriq agent reaching out behind her, fingers crackling with killing power. Liliana concentrated, and time seemed to slow for a moment as she felt, through the arcane energies swirling in the air, the little light of the man's soul. With a savage thrust of will, she pushed it free of his body, which dropped in a heap on the floor. That was when the spell splashed over her. #emph[What?] thought Liliana, head jerking to the origin of the attack. Extus, the man holding that heavy book, had one hand extended. She hadn't sensed the buildup of any offensive magic, though—not the warmth of fire nor the sickly sensation of death magic. What had he hit her with? Suddenly the room seemed to bend and sway underneath her feet. Everything spun uncomfortably; a sense of vertigo arose in her stomach. It was a feeling not unlike planeswalking, though sick and twisted. The last thing she saw was the Kenrith boy—looking not at her, but at his sister. Terrified, though whether it was for her or #emph[of ] her, Liliana couldn't say. Then her vision went black. When she opened her eyes, the light of the Snarl was gone. Liliana blinked as her eyes adjusted to the darkness. She moved to sit up, her hand dragging through dirt and leaves, and looked around, her mind finally clearing enough to recognize the shapes of the forest around her. #emph[Forced translocation magic. ] She'd never been hit by that before. She climbed to her feet. Ahead, she could just make out one of the torches that led the way to Strixhaven. The campus itself was somewhere in the far distance, beyond sight. 
#emph[Try not to die, you two. I'm coming—but it's going to be a long walk.] #v(0.35em) #line(length: 100%, stroke: rgb(90%, 90%, 90%)) #v(0.35em) Rowan stared at the spot where <NAME> had been, then turned to Extus. "What did you do to her?" The masked figure made no reply. With a growl of frustration, Rowan extended a hand toward him. The air split with a roar as a bolt of lightning coursed in his direction, but the Oriq leader only gestured toward her. The lightning simply #emph[stopped] , then dropped from the air, shattering on the ground as if made of glass. He waved casually, as though batting a fly, and Rowan saw the air bend and warp as a wave of force rushed toward her. She closed her eyes and held up her hands—but instead of being torn apart by the spell, she was only peppered with shards of ice as the wall Will had thrown up was shattered. "Rowan, listen to me!" shouted Will, grabbing her shoulder. "We need to synchronize our magic, like we used to." "You said it yourself—I can't control my powers anymore!" spat Rowan. "There's something different now. Our magic is changing." "Yeah," said Will. "You've gotten #emph[stronger] . But I've gotten more controlled. Together, we can do it. There's no other way!" But that wasn't true. Rowan looked back at Extus and the storm of raw magic surging and glowing behind him. The Snarl, Professor Onyx had called it. She could feel the power radiating off of it, more power than any one mage could use. She could take it, draw it out, just as she'd drawn the power from that Prismari student's water elemental. "We can do what he's doing—we can draw on the Snarl. Use the same dirty trick!" "That's too dangerous!" said Will. "It's too much power—you'll kill yourself! You'll destroy all of Strix—" He was interrupted by another wave of force roaring out from where Extus stood. Will threw up another shield of ice, but this time the spell was strong enough to punch through and knock them both across the room. 
#v(0.35em) #line(length: 100%, stroke: rgb(90%, 90%, 90%)) #v(0.35em) Rowan pushed herself into a sitting position, head ringing. Not far away, she could see Will doing the same. Something was pooling at her feet, she realized, soaking through her boots. With alarm, she saw that it was blood. Not her blood, though, and not Will's. Rivulets of the stuff seemed to be spreading out throughout the whole room. She traced their path, her gaze drifting toward the Snarl hanging above Extus. Where it had glowed blue before, now it shone a deep red. A bone-rattling roar shook the hall, sending cracks splintering across the walls and shaking centuries-old dust from the rafters. Another crack ripped through the room and a piece of the ceiling plummeted toward them. Rowan dove toward Will, and they both tumbled away just as the stone crashed into the floor. Another boulder fell, crushing the limp body of an Oriq agent nearby and making Rowan flinch. More blood flowed from the concentric stone circles on the floor in front of Extus, bubbling as if from a fountain. What had started as a trickle grew into a deluge. The sweet iron reek filled her nose. Under the Snarl, Extus spread his arms wide. "Rise, Great One! I call upon you, Blood Avatar! Unleash your wrath upon this unfair world!" From the bubbling fountain of blood in that stone circle, two points began to take shape, stretching and curving into the shape of horns. Something was dragging itself free. Rowan pushed herself backward until she hit the wall. Not horns, but a helmet of ancient bronze. What arose, soaked in gore, was massive and only vaguely humanoid. In each of its four muscular arms, it clutched a cruel weapon, the edges and spikes too many for Rowan to take in altogether. It was a creature of war, that much was clear. A being whose only purpose was to unmake what had stood for centuries. This was Extus's plan all along. This was what they had been trying to stop. 
And now, their failure could mean the death of them all.
https://github.com/C0ffeeCode/typst-dhbw-technik-template
https://raw.githubusercontent.com/C0ffeeCode/typst-dhbw-technik-template/template/README.md
markdown
# Typst template for faculty Technik at DHBW As an example and guide on how to use this template, check out [this PDF document](./Template-Example-guide.pdf). The template can be configured for English or German. To compile your document, you need to take the `thesis.typ` file as the input file. To use the CLI, compile using `typst watch thesis.typ` or `typst compile thesis.typ`. If you intend to use the VS Code plug-in "Typst-LSP", make sure it is either configured to only take the `thesis.typ` as input or not to perform compilation at all (to use the CLI). This is an adapted version of a template by [@satoqz](https://github.com/satoqz/).
https://github.com/typst/packages
https://raw.githubusercontent.com/typst/packages/main/packages/preview/unichar/0.1.0/ucd/block-10600.typ
typst
Apache License 2.0
#let data = ( ("LINEAR A SIGN AB001", "Lo", 0), ("LINEAR A SIGN AB002", "Lo", 0), ("LINEAR A SIGN AB003", "Lo", 0), ("LINEAR A SIGN AB004", "Lo", 0), ("LINEAR A SIGN AB005", "Lo", 0), ("LINEAR A SIGN AB006", "Lo", 0), ("LINEAR A SIGN AB007", "Lo", 0), ("LINEAR A SIGN AB008", "Lo", 0), ("LINEAR A SIGN AB009", "Lo", 0), ("LINEAR A SIGN AB010", "Lo", 0), ("LINEAR A SIGN AB011", "Lo", 0), ("LINEAR A SIGN AB013", "Lo", 0), ("LINEAR A SIGN AB016", "Lo", 0), ("LINEAR A SIGN AB017", "Lo", 0), ("LINEAR A SIGN AB020", "Lo", 0), ("LINEAR A SIGN AB021", "Lo", 0), ("LINEAR A SIGN AB021F", "Lo", 0), ("LINEAR A SIGN AB021M", "Lo", 0), ("LINEAR A SIGN AB022", "Lo", 0), ("LINEAR A SIGN AB022F", "Lo", 0), ("LINEAR A SIGN AB022M", "Lo", 0), ("LINEAR A SIGN AB023", "Lo", 0), ("LINEAR A SIGN AB023M", "Lo", 0), ("LINEAR A SIGN AB024", "Lo", 0), ("LINEAR A SIGN AB026", "Lo", 0), ("LINEAR A SIGN AB027", "Lo", 0), ("LINEAR A SIGN AB028", "Lo", 0), ("LINEAR A SIGN A028B", "Lo", 0), ("LINEAR A SIGN AB029", "Lo", 0), ("LINEAR A SIGN AB030", "Lo", 0), ("LINEAR A SIGN AB031", "Lo", 0), ("LINEAR A SIGN AB034", "Lo", 0), ("LINEAR A SIGN AB037", "Lo", 0), ("LINEAR A SIGN AB038", "Lo", 0), ("LINEAR A SIGN AB039", "Lo", 0), ("LINEAR A SIGN AB040", "Lo", 0), ("LINEAR A SIGN AB041", "Lo", 0), ("LINEAR A SIGN AB044", "Lo", 0), ("LINEAR A SIGN AB045", "Lo", 0), ("LINEAR A SIGN AB046", "Lo", 0), ("LINEAR A SIGN AB047", "Lo", 0), ("LINEAR A SIGN AB048", "Lo", 0), ("LINEAR A SIGN AB049", "Lo", 0), ("LINEAR A SIGN AB050", "Lo", 0), ("LINEAR A SIGN AB051", "Lo", 0), ("LINEAR A SIGN AB053", "Lo", 0), ("LINEAR A SIGN AB054", "Lo", 0), ("LINEAR A SIGN AB055", "Lo", 0), ("LINEAR A SIGN AB056", "Lo", 0), ("LINEAR A SIGN AB057", "Lo", 0), ("LINEAR A SIGN AB058", "Lo", 0), ("LINEAR A SIGN AB059", "Lo", 0), ("LINEAR A SIGN AB060", "Lo", 0), ("LINEAR A SIGN AB061", "Lo", 0), ("LINEAR A SIGN AB065", "Lo", 0), ("LINEAR A SIGN AB066", "Lo", 0), ("LINEAR A SIGN AB067", "Lo", 0), ("LINEAR A SIGN AB069", "Lo", 0), ("LINEAR 
A SIGN AB070", "Lo", 0), ("LINEAR A SIGN AB073", "Lo", 0), ("LINEAR A SIGN AB074", "Lo", 0), ("LINEAR A SIGN AB076", "Lo", 0), ("LINEAR A SIGN AB077", "Lo", 0), ("LINEAR A SIGN AB078", "Lo", 0), ("LINEAR A SIGN AB079", "Lo", 0), ("LINEAR A SIGN AB080", "Lo", 0), ("LINEAR A SIGN AB081", "Lo", 0), ("LINEAR A SIGN AB082", "Lo", 0), ("LINEAR A SIGN AB085", "Lo", 0), ("LINEAR A SIGN AB086", "Lo", 0), ("LINEAR A SIGN AB087", "Lo", 0), ("LINEAR A SIGN A100-102", "Lo", 0), ("LINEAR A SIGN AB118", "Lo", 0), ("LINEAR A SIGN AB120", "Lo", 0), ("LINEAR A SIGN A120B", "Lo", 0), ("LINEAR A SIGN AB122", "Lo", 0), ("LINEAR A SIGN AB123", "Lo", 0), ("LINEAR A SIGN AB131A", "Lo", 0), ("LINEAR A SIGN AB131B", "Lo", 0), ("LINEAR A SIGN A131C", "Lo", 0), ("LINEAR A SIGN AB164", "Lo", 0), ("LINEAR A SIGN AB171", "Lo", 0), ("LINEAR A SIGN AB180", "Lo", 0), ("LINEAR A SIGN AB188", "Lo", 0), ("LINEAR A SIGN AB191", "Lo", 0), ("LINEAR A SIGN A301", "Lo", 0), ("LINEAR A SIGN A302", "Lo", 0), ("LINEAR A SIGN A303", "Lo", 0), ("LINEAR A SIGN A304", "Lo", 0), ("LINEAR A SIGN A305", "Lo", 0), ("LINEAR A SIGN A306", "Lo", 0), ("LINEAR A SIGN A307", "Lo", 0), ("LINEAR A SIGN A308", "Lo", 0), ("LINEAR A SIGN A309A", "Lo", 0), ("LINEAR A SIGN A309B", "Lo", 0), ("LINEAR A SIGN A309C", "Lo", 0), ("LINEAR A SIGN A310", "Lo", 0), ("LINEAR A SIGN A311", "Lo", 0), ("LINEAR A SIGN A312", "Lo", 0), ("LINEAR A SIGN A313A", "Lo", 0), ("LINEAR A SIGN A313B", "Lo", 0), ("LINEAR A SIGN A313C", "Lo", 0), ("LINEAR A SIGN A314", "Lo", 0), ("LINEAR A SIGN A315", "Lo", 0), ("LINEAR A SIGN A316", "Lo", 0), ("LINEAR A SIGN A317", "Lo", 0), ("LINEAR A SIGN A318", "Lo", 0), ("LINEAR A SIGN A319", "Lo", 0), ("LINEAR A SIGN A320", "Lo", 0), ("LINEAR A SIGN A321", "Lo", 0), ("LINEAR A SIGN A322", "Lo", 0), ("LINEAR A SIGN A323", "Lo", 0), ("LINEAR A SIGN A324", "Lo", 0), ("LINEAR A SIGN A325", "Lo", 0), ("LINEAR A SIGN A326", "Lo", 0), ("LINEAR A SIGN A327", "Lo", 0), ("LINEAR A SIGN A328", "Lo", 0), ("LINEAR A SIGN A329", 
"Lo", 0), ("LINEAR A SIGN A330", "Lo", 0), ("LINEAR A SIGN A331", "Lo", 0), ("LINEAR A SIGN A332", "Lo", 0), ("LINEAR A SIGN A333", "Lo", 0), ("LINEAR A SIGN A334", "Lo", 0), ("LINEAR A SIGN A335", "Lo", 0), ("LINEAR A SIGN A336", "Lo", 0), ("LINEAR A SIGN A337", "Lo", 0), ("LINEAR A SIGN A338", "Lo", 0), ("LINEAR A SIGN A339", "Lo", 0), ("LINEAR A SIGN A340", "Lo", 0), ("LINEAR A SIGN A341", "Lo", 0), ("LINEAR A SIGN A342", "Lo", 0), ("LINEAR A SIGN A343", "Lo", 0), ("LINEAR A SIGN A344", "Lo", 0), ("LINEAR A SIGN A345", "Lo", 0), ("LINEAR A SIGN A346", "Lo", 0), ("LINEAR A SIGN A347", "Lo", 0), ("LINEAR A SIGN A348", "Lo", 0), ("LINEAR A SIGN A349", "Lo", 0), ("LINEAR A SIGN A350", "Lo", 0), ("LINEAR A SIGN A351", "Lo", 0), ("LINEAR A SIGN A352", "Lo", 0), ("LINEAR A SIGN A353", "Lo", 0), ("LINEAR A SIGN A354", "Lo", 0), ("LINEAR A SIGN A355", "Lo", 0), ("LINEAR A SIGN A356", "Lo", 0), ("LINEAR A SIGN A357", "Lo", 0), ("LINEAR A SIGN A358", "Lo", 0), ("LINEAR A SIGN A359", "Lo", 0), ("LINEAR A SIGN A360", "Lo", 0), ("LINEAR A SIGN A361", "Lo", 0), ("LINEAR A SIGN A362", "Lo", 0), ("LINEAR A SIGN A363", "Lo", 0), ("LINEAR A SIGN A364", "Lo", 0), ("LINEAR A SIGN A365", "Lo", 0), ("LINEAR A SIGN A366", "Lo", 0), ("LINEAR A SIGN A367", "Lo", 0), ("LINEAR A SIGN A368", "Lo", 0), ("LINEAR A SIGN A369", "Lo", 0), ("LINEAR A SIGN A370", "Lo", 0), ("LINEAR A SIGN A371", "Lo", 0), ("LINEAR A SIGN A400-VAS", "Lo", 0), ("LINEAR A SIGN A401-VAS", "Lo", 0), ("LINEAR A SIGN A402-VAS", "Lo", 0), ("LINEAR A SIGN A403-VAS", "Lo", 0), ("LINEAR A SIGN A404-VAS", "Lo", 0), ("LINEAR A SIGN A405-VAS", "Lo", 0), ("LINEAR A SIGN A406-VAS", "Lo", 0), ("LINEAR A SIGN A407-VAS", "Lo", 0), ("LINEAR A SIGN A408-VAS", "Lo", 0), ("LINEAR A SIGN A409-VAS", "Lo", 0), ("LINEAR A SIGN A410-VAS", "Lo", 0), ("LINEAR A SIGN A411-VAS", "Lo", 0), ("LINEAR A SIGN A412-VAS", "Lo", 0), ("LINEAR A SIGN A413-VAS", "Lo", 0), ("LINEAR A SIGN A414-VAS", "Lo", 0), ("LINEAR A SIGN A415-VAS", "Lo", 0), ("LINEAR A 
SIGN A416-VAS", "Lo", 0), ("LINEAR A SIGN A417-VAS", "Lo", 0), ("LINEAR A SIGN A418-VAS", "Lo", 0), ("LINEAR A SIGN A501", "Lo", 0), ("LINEAR A SIGN A502", "Lo", 0), ("LINEAR A SIGN A503", "Lo", 0), ("LINEAR A SIGN A504", "Lo", 0), ("LINEAR A SIGN A505", "Lo", 0), ("LINEAR A SIGN A506", "Lo", 0), ("LINEAR A SIGN A508", "Lo", 0), ("LINEAR A SIGN A509", "Lo", 0), ("LINEAR A SIGN A510", "Lo", 0), ("LINEAR A SIGN A511", "Lo", 0), ("LINEAR A SIGN A512", "Lo", 0), ("LINEAR A SIGN A513", "Lo", 0), ("LINEAR A SIGN A515", "Lo", 0), ("LINEAR A SIGN A516", "Lo", 0), ("LINEAR A SIGN A520", "Lo", 0), ("LINEAR A SIGN A521", "Lo", 0), ("LINEAR A SIGN A523", "Lo", 0), ("LINEAR A SIGN A524", "Lo", 0), ("LINEAR A SIGN A525", "Lo", 0), ("LINEAR A SIGN A526", "Lo", 0), ("LINEAR A SIGN A527", "Lo", 0), ("LINEAR A SIGN A528", "Lo", 0), ("LINEAR A SIGN A529", "Lo", 0), ("LINEAR A SIGN A530", "Lo", 0), ("LINEAR A SIGN A531", "Lo", 0), ("LINEAR A SIGN A532", "Lo", 0), ("LINEAR A SIGN A534", "Lo", 0), ("LINEAR A SIGN A535", "Lo", 0), ("LINEAR A SIGN A536", "Lo", 0), ("LINEAR A SIGN A537", "Lo", 0), ("LINEAR A SIGN A538", "Lo", 0), ("LINEAR A SIGN A539", "Lo", 0), ("LINEAR A SIGN A540", "Lo", 0), ("LINEAR A SIGN A541", "Lo", 0), ("LINEAR A SIGN A542", "Lo", 0), ("LINEAR A SIGN A545", "Lo", 0), ("LINEAR A SIGN A547", "Lo", 0), ("LINEAR A SIGN A548", "Lo", 0), ("LINEAR A SIGN A549", "Lo", 0), ("LINEAR A SIGN A550", "Lo", 0), ("LINEAR A SIGN A551", "Lo", 0), ("LINEAR A SIGN A552", "Lo", 0), ("LINEAR A SIGN A553", "Lo", 0), ("LINEAR A SIGN A554", "Lo", 0), ("LINEAR A SIGN A555", "Lo", 0), ("LINEAR A SIGN A556", "Lo", 0), ("LINEAR A SIGN A557", "Lo", 0), ("LINEAR A SIGN A559", "Lo", 0), ("LINEAR A SIGN A563", "Lo", 0), ("LINEAR A SIGN A564", "Lo", 0), ("LINEAR A SIGN A565", "Lo", 0), ("LINEAR A SIGN A566", "Lo", 0), ("LINEAR A SIGN A568", "Lo", 0), ("LINEAR A SIGN A569", "Lo", 0), ("LINEAR A SIGN A570", "Lo", 0), ("LINEAR A SIGN A571", "Lo", 0), ("LINEAR A SIGN A572", "Lo", 0), ("LINEAR A SIGN 
A573", "Lo", 0), ("LINEAR A SIGN A574", "Lo", 0), ("LINEAR A SIGN A575", "Lo", 0), ("LINEAR A SIGN A576", "Lo", 0), ("LINEAR A SIGN A577", "Lo", 0), ("LINEAR A SIGN A578", "Lo", 0), ("LINEAR A SIGN A579", "Lo", 0), ("LINEAR A SIGN A580", "Lo", 0), ("LINEAR A SIGN A581", "Lo", 0), ("LINEAR A SIGN A582", "Lo", 0), ("LINEAR A SIGN A583", "Lo", 0), ("LINEAR A SIGN A584", "Lo", 0), ("LINEAR A SIGN A585", "Lo", 0), ("LINEAR A SIGN A586", "Lo", 0), ("LINEAR A SIGN A587", "Lo", 0), ("LINEAR A SIGN A588", "Lo", 0), ("LINEAR A SIGN A589", "Lo", 0), ("LINEAR A SIGN A591", "Lo", 0), ("LINEAR A SIGN A592", "Lo", 0), ("LINEAR A SIGN A594", "Lo", 0), ("LINEAR A SIGN A595", "Lo", 0), ("LINEAR A SIGN A596", "Lo", 0), ("LINEAR A SIGN A598", "Lo", 0), ("LINEAR A SIGN A600", "Lo", 0), ("LINEAR A SIGN A601", "Lo", 0), ("LINEAR A SIGN A602", "Lo", 0), ("LINEAR A SIGN A603", "Lo", 0), ("LINEAR A SIGN A604", "Lo", 0), ("LINEAR A SIGN A606", "Lo", 0), ("LINEAR A SIGN A608", "Lo", 0), ("LINEAR A SIGN A609", "Lo", 0), ("LINEAR A SIGN A610", "Lo", 0), ("LINEAR A SIGN A611", "Lo", 0), ("LINEAR A SIGN A612", "Lo", 0), ("LINEAR A SIGN A613", "Lo", 0), ("LINEAR A SIGN A614", "Lo", 0), ("LINEAR A SIGN A615", "Lo", 0), ("LINEAR A SIGN A616", "Lo", 0), ("LINEAR A SIGN A617", "Lo", 0), ("LINEAR A SIGN A618", "Lo", 0), ("LINEAR A SIGN A619", "Lo", 0), ("LINEAR A SIGN A620", "Lo", 0), ("LINEAR A SIGN A621", "Lo", 0), ("LINEAR A SIGN A622", "Lo", 0), ("LINEAR A SIGN A623", "Lo", 0), ("LINEAR A SIGN A624", "Lo", 0), ("LINEAR A SIGN A626", "Lo", 0), ("LINEAR A SIGN A627", "Lo", 0), ("LINEAR A SIGN A628", "Lo", 0), ("LINEAR A SIGN A629", "Lo", 0), ("LINEAR A SIGN A634", "Lo", 0), ("LINEAR A SIGN A637", "Lo", 0), ("LINEAR A SIGN A638", "Lo", 0), ("LINEAR A SIGN A640", "Lo", 0), ("LINEAR A SIGN A642", "Lo", 0), ("LINEAR A SIGN A643", "Lo", 0), ("LINEAR A SIGN A644", "Lo", 0), ("LINEAR A SIGN A645", "Lo", 0), ("LINEAR A SIGN A646", "Lo", 0), ("LINEAR A SIGN A648", "Lo", 0), ("LINEAR A SIGN A649", "Lo", 0), 
("LINEAR A SIGN A651", "Lo", 0), ("LINEAR A SIGN A652", "Lo", 0), ("LINEAR A SIGN A653", "Lo", 0), ("LINEAR A SIGN A654", "Lo", 0), ("LINEAR A SIGN A655", "Lo", 0), ("LINEAR A SIGN A656", "Lo", 0), ("LINEAR A SIGN A657", "Lo", 0), ("LINEAR A SIGN A658", "Lo", 0), ("LINEAR A SIGN A659", "Lo", 0), ("LINEAR A SIGN A660", "Lo", 0), ("LINEAR A SIGN A661", "Lo", 0), ("LINEAR A SIGN A662", "Lo", 0), ("LINEAR A SIGN A663", "Lo", 0), ("LINEAR A SIGN A664", "Lo", 0), (), (), (), (), (), (), (), (), (), ("LINEAR A SIGN A701 A", "Lo", 0), ("LINEAR A SIGN A702 B", "Lo", 0), ("LINEAR A SIGN A703 D", "Lo", 0), ("LINEAR A SIGN A704 E", "Lo", 0), ("LINEAR A SIGN A705 F", "Lo", 0), ("LINEAR A SIGN A706 H", "Lo", 0), ("LINEAR A SIGN A707 J", "Lo", 0), ("LINEAR A SIGN A708 K", "Lo", 0), ("LINEAR A SIGN A709 L", "Lo", 0), ("LINEAR A SIGN A709-2 L2", "Lo", 0), ("LINEAR A SIGN A709-3 L3", "Lo", 0), ("LINEAR A SIGN A709-4 L4", "Lo", 0), ("LINEAR A SIGN A709-6 L6", "Lo", 0), ("LINEAR A SIGN A710 W", "Lo", 0), ("LINEAR A SIGN A711 X", "Lo", 0), ("LINEAR A SIGN A712 Y", "Lo", 0), ("LINEAR A SIGN A713 OMEGA", "Lo", 0), ("LINEAR A SIGN A714 ABB", "Lo", 0), ("LINEAR A SIGN A715 BB", "Lo", 0), ("LINEAR A SIGN A717 DD", "Lo", 0), ("LINEAR A SIGN A726 EYYY", "Lo", 0), ("LINEAR A SIGN A732 JE", "Lo", 0), (), (), (), (), (), (), (), (), (), (), ("LINEAR A SIGN A800", "Lo", 0), ("LINEAR A SIGN A801", "Lo", 0), ("LINEAR A SIGN A802", "Lo", 0), ("LINEAR A SIGN A803", "Lo", 0), ("LINEAR A SIGN A804", "Lo", 0), ("LINEAR A SIGN A805", "Lo", 0), ("LINEAR A SIGN A806", "Lo", 0), ("LINEAR A SIGN A807", "Lo", 0), )
https://github.com/DashieTM/ost-5semester
https://raw.githubusercontent.com/DashieTM/ost-5semester/main/blockchain/weeks/week6.typ
typst
#import "../../utils.typ": * #section("Decentralized Autonomous Organizations DAO") - *rules defined and executed by smart contracts* - anyone can audit proposals - voting is a right granted by the proposal creator - DAO is governed entirely by it's members - technical upgrades - porject funding/treasury allocations Example for rule: For a proposal to be valid, 50% of all members need to vote, hence if there is a end date, if not 50% have voted, either the proposal is void, or it's extended until 50% have voted -> guarantee thanks to blockchain. #subsection("History") - first DAO was a disaster financially - experiment was a success though - attacker drained 3.6 million eth - reentrancy attack - function called twice by utilizing a default function in solidity - blacklist introduced because of this attack #columns( 2, [ #text(green)[Benefits] - decisions by individuals rather than a central authority - encourages participation - public: everything is transparent and visible - minimum requirement is to join, and internet ofc #colbreak() #text(red)[Downsides] - decisions and voting takes time - currently only tech-savy people participate - bridging blockchain with real world -> i can't transfer you a watermelon with a smart contract - security -> see "hack"... ], ) #align( center, [#image("../../Screenshots/2023_10_23_03_22_41.png", width: 80%)], ) #align( center, [#image("../../Screenshots/2023_10_23_03_26_00.png", width: 100%)], ) #section("Stable Coins") - backed with another asset - fiat currencies - exchange commodities -> gold, silver etc. - crypto-collateralized - crypto asset backed stablecoins - algorithmic stablecoins -> don't work lmao - used for payment -> other coins have high volatility - today you pay 10cardano, tomorrow 20 - fees... #columns( 2, [ Asset-backed: CeFi - Coinbase, Binance, etc. 
need to buy fiat assets - central authority can blacklist addresses -> USDC - Example: - Current supply: 50 USDT, collateral 50 USD - User buys 20 USDT for 20 USD → stablecoin issuer mints 20 USDT, has now 70 - USD in collateral, 70 USDT in circulation - User sells 30 USDT → stablecoin issuer destroys 30 USDT, transfers 30 USD - Problem if assets not liquid → bank-run, not enough liquidity #colbreak() Cryto-backed: DeFi - MakerDAO, based on other stablecoins - If other currencies used → need over-collaterization - Problem: Supply 50 DAI, collateral 1 ETH (1 ETH=50 DAI) - User buys 25 DAI for 0.5 ETH (collateral 1.5 ETH) - Price drops of ETH to 1 ETH = 10 DAI - User sells 15 DAI, gets 1.5 ETH (collateral 0 ETH, but 60 DAI in circulation) - DAI collaterization ratio: 134% ], ) #subsection("Algorithmic Stablecoins") - two currencies, one volatile, one stable - If demand is higher for stable coin, stable coins are minted, volatile coin can be bought back and destroyed. - If demand is lower, then volatile coin needs to be minted and sold to buy the stable coin. Stable coin is then destroyed. - #text( red, )[Problem: for terraUSD, the volatile coin was massively minted and could no longer buy back the stable coin -> in other words, the stable coin no longer had a central bank to keep the price stable.] #subsection("Collateral") The idea is, I give someone 100ETH, which they will hold on to as a form of guarantee, in exchange they give me another coin -> here the stable coin for which I will also have to pay interest. Aka it is a loan, for which I also gave them a guarantee -> instead of a report of what i earn, it's a cautionary downpayment that i get back whenever the loan is paid off. The downpayment however can still be staked, meaning i can still do limited amount of investing with this money, just not sell it. In crypto, usually when the collateral goes below a certain amount compared to the loan, e.g. 
170%, then it can be used to pay back the loan and you receive the rest back. Problems arise when you hold on to this vault for too long and you can't pay back anymore. -> penalty applies when liquidation happens. #align( center, [#image("../../Screenshots/2023_10_23_04_00_10.png", width: 50%)], ) #subsection("Minting") Creation of new blocks in the blockchain -> creating coins on the blockchain can be considered minting -> highly minted means that a lot of coins are created. (This can change the price of the coin) -> relevant for terraUSD problem #section("Proof of Attendance protocol POAP") #align( center, [#image("../../Screenshots/2023_10_23_04_14_49.png", width: 100%)], )
https://github.com/Wh4rp/Presentacion-Typst
https://raw.githubusercontent.com/Wh4rp/Presentacion-Typst/master/ejemplos/7_let.typ
typst
// Render `term` between triple bars, tinted with `color` (blue by default).
// The inner box keeps the decorated term on a single line.
#let faboloso(term, color: blue) = text(fill: color, box[||| #term |||])

Tú eres #faboloso[guapisimo]! Yo soy #faboloso(color: purple)[faboloso]!
https://github.com/lsmenicucci/typst-pkgs
https://raw.githubusercontent.com/lsmenicucci/typst-pkgs/main/calendar/calendar.typ
typst
// Build a datetime from a (possibly partial) date dictionary, filling any
// missing `day` / `month` / `year` field from today's date.
#let defaultDate = (d) => {
  let now = datetime.today()
  let dayArg = d.at("day", default: now.day())
  let monthArg = d.at("month", default: now.month())
  let yearArg = d.at("year", default: now.year())
  return datetime(year: yearArg, month: monthArg, day: dayArg)
}

// Number of days in the month of `dateLike` (a datetime, or a partial date
// dictionary accepted by `defaultDate`).
#let monthLength(dateLike) = {
  import calc: *
  let date = dateLike
  if type(dateLike) != "datetime"{
    date = defaultDate(dateLike)
  }
  let m = date.month()
  let y = date.year()
  if m == 2 {
    // Full Gregorian leap rule: divisible by 4, except century years that
    // are not divisible by 400 (so 2000 is a leap year, 1900 is not).
    if rem(y, 4) == 0 and (rem(y, 100) != 0 or rem(y, 400) == 0) {
      return 29
    }
    return 28
  }
  // Only April, June, September and November have 30 days; every other
  // non-February month has 31. (The previous `rem(m, 2) == 0` test wrongly
  // gave August, October and December 30 days.)
  if m in (4, 6, 9, 11) { return 30 }
  return 31
}

// Weekday header labels per supported language.
#let weekday_names = (
  "pt_br": ("dom", "seg", "ter", "qua", "qui", "sex", "sab"),
  "en": ("sun", "mon", "tue", "wed", "thu", "fri", "sat")
)

// Cell renderers for days inside / outside the displayed month.
#let inMonthDayCell = (content) => text(fill: black)[#content]
#let offMonthDayCell = (content) => text(fill: gray)[#content]

// Draw the week grid for the indicated month.
// - cal (Calendar): calendar data produced by `Calendar(...)`
// - month / year: override the displayed month (both default to today)
// Remaining arguments are forwarded to the final `grid` call.
#let monthWeeks(cal, month: none, year: none, ..args) = {
  import calc: *
  let date = (:)
  if month != none { date.insert("month", month) }
  if year != none { date.insert("year", year) }
  if (type(date) != "datetime"){
    date = defaultDate(date)
  }
  let n = monthLength(date)
  // Previous month, wrapping across the year boundary for January
  // (the old `month - 1` produced an invalid `datetime(month: 0)`).
  let prev = if date.month() == 1 {
    (year: date.year() - 1, month: 12)
  } else {
    (year: date.year(), month: date.month() - 1)
  }
  let prevN = monthLength(prev)
  let thisDate = datetime(year: date.year(), month: date.month(), day: 1)
  let weekStart = thisDate.weekday()
  let thisAnnotations = (:)
  // Group this month's marks by day-of-month (dictionary keys are strings).
  for a in cal.marks {
    if a.date.month() == thisDate.month() and a.date.year() == thisDate.year() {
      let key = str(a.date.day())
      let existing = thisAnnotations.at(key, default: ())
      existing.push(a)
      thisAnnotations.insert(key, existing)
    }
  }
  // Render one day cell: the day number plus one dot per annotation.
  let drawCell = annotations => day => {
    let marks = ()
    for a in annotations.at(str(day), default: ()){
      marks.push(circle(radius: 0.1em, fill: black))
    }
    stack(dir: ttb)[
      #day
      #grid(columns: 3, gutter: 0.1em, ..marks)
    ]
  }
  let calDays = ()
  // Trailing days of the previous month that fall in the first week.
  // NOTE(review): `weekday()` is 1 = Monday .. 7 = Sunday while the header
  // starts on Sunday, so a month starting on Sunday gets a full extra week
  // of leading off-month cells — confirm intended layout before changing.
  calDays = calDays + range(prevN - weekStart + 1, prevN + 1)
    .map(drawCell((:)))
    .map(offMonthDayCell)
  // The days of the displayed month.
  calDays = calDays + range(1, n + 1)
    .map(drawCell(thisAnnotations))
    .map(inMonthDayCell)
  // Leading days of the next month.
  // NOTE(review): `rem(n, 7)` ignores `weekStart`, so the last row is not
  // always padded to a complete week — confirm intended layout.
  calDays = calDays + range(1, rem(n, 7))
    .map(drawCell((:)))
    .map(offMonthDayCell)
  // Header row with localized weekday names.
  let lang = cal.at("lang")
  let header = weekday_names.at(lang)
  let calCells = header.map(t => pad(bottom: 0.6em)[#t]) + calDays
  grid(columns: 7, align: center, inset: 0.5em, ..calCells, ..args)
}

// Annotate a calendar with a point mark on `date`.
// - date: datetime or partial date dictionary
// -> 1-tuple holding a (Calendar) => Calendar updater, consumed by `Calendar`
#let mark(date, label: "", type: "") = {
  ((data) => {
    data.marks.push((date: defaultDate(date), label: label))
    return data
  },)
}

// Annotate a calendar with a date range from `start` to `end`.
// -> 1-tuple holding a (Calendar) => Calendar updater, consumed by `Calendar`
// (Shadows the built-in `range` from here on; `monthWeeks` above still sees
// the built-in because it was defined earlier.)
#let range(start, end, label: "", type: "") = {
  ((data) => {
    data.ranges.push((
      start: defaultDate(start),
      end: defaultDate(end),
      label: label
    ))
    return data
  },)
}

// Create calendar data from annotation sets produced by `mark` / `range`.
// Named arguments: `lang` (default "en"), `default` (default date fields).
// -> Calendar: a dictionary with `marks`, `ranges`, `lang`, `defaultDate`.
#let Calendar(..args) = {
  let data = (marks: (), ranges: ())
  for aSet in args.pos(){
    for a in aSet {
      data = a(data)
    }
  }
  data.lang = args.named().at("lang", default:"en")
  data.defaultDate = args.named().at("default", default: (:))
  return data
}

// FIXME(review): `calendarObj` and `calendarCell` are not defined anywhere in
// this file, so this entry point cannot work as written — confirm the intended
// API (it probably should delegate to `monthWeeks`).
#let calendar = (d, annotations: (:) ) => calendarObj(d, calendarCell)
https://github.com/rabotaem-incorporated/calculus-notes-2course
https://raw.githubusercontent.com/rabotaem-incorporated/calculus-notes-2course/master/sections/04-parametric-and-curves/02-improper-integral.typ
typst
#import "../../utils/core.typ": * == Несобственные интегралы #def(label: "def-improper-integral")[ _Несобственным интегралом_ называется $ integral_a^(+oo) f dif lambda_1 := lim_(b -> +oo) integral_[a, b] f dif lambda_1, $ если предел существует. ] #th(label: "lebesgue-implies-improper")[ Если $integral_[a, +oo) f dif lambda_1 in overline(RR)$, то $integral_a^(+oo) f dif lambda_1 = integral_[a, +oo) f dif lambda_1$. ] #proof[ Пусть $f >= 0$. Докажем существование предела по Гейне: рассмотрим $b_n arrow.tr +oo$. Пусть $f_n := f dot bb(1)_[a, b_n]$. Тогда $0 <= f_1 <= f_2 <= ...$ и $f_n --> f$. Тогда $ integral_[a, +oo) f dif lambda_1 =_"Леви"^rf("levy") lim integral_[a, +oo) f_n dif lambda_1 = lim integral_[a, +oo) f dot bb(1)_[a, b_n] dif lambda_1 = lim integral_[a, b_n] f dif lambda_1 = integral_a^(+oo) f dif lambda_1, $ Если $f$ суммируема, можно вычесть#rf("def-sfn"). ] #example[ $integral_1^(+oo) (sin x)/x dif x$ --- сходящийся несобственный интеграл (мы выясняли это когда-то давно). Но интеграл Лебега $integral_[1, +oo) (sin x)/x dif x$ не определен, так как $integral_[1, +oo] ((sin x)/x)_(plus.minus) dif lambda_1 (x) = +oo$. Доказывается как обычно: побить на кусочки, уменьшить интегралы, оценить снизу, бла бла бла, не буду расписывать. ] #th(label: "improper-times-exp-continious")[ Пусть $f in C[a, +oo]$ непрерывна, $integral_a^(+oo) f(x) dif x$ сходится. Тогда $F(t) := integral_a^(+oo) e^(-t x) f(x) dif x$ непрерывна на $[0, +oo)$. Так как при положительных $t$ множитель $e^(-t x)$ хороший и быстро сходящийся к нулю, всякие свойства, которые могут быть неверны просто для $f$ становятся верны за счет одного сильного множителя. Этот факт аналогичен теореме Абеля для степенных рядов. ] #proof[ Пусть $F_b (t) = integral_a^b e^(-t x) f(x) dif x$. Для любого $b$, это непрерывная функция#rf("parametric-continious-compact"). 
Из сходимости $integral_a^(+oo) f(x) dif x$, у нас существует $c$, такое, что для любых $y$ и $b$: $y > b > c$ верно $abs(integral_b^y f(x) dif x) < eps$ (критерий Коши). Зафиксируем какое-нибудь $b$. Пусть $integral_b^y f(x) dif x = g(y)$. Это первообразная $f$#rf("barrow"). Рассмотрим хвост нашего интеграла начиная с $b$: $ integral_b^(+oo) e^(-t x) f(x) dif x = underbrace(lr(e^(- t x) g(x) |)_(x = b)^(x = +oo), 0) + integral_b^(+oo) g(x) dot t e^(-t x) dif x. $ Оценим последний интеграл: $ abs(integral_b^(+oo) e^(-t x) f(x) dif x) = abs(integral_b^(+oo) t g(x) e^(-t x) dif x) <= t integral_b^(+oo) underbrace(abs(g(x)), < eps) e^(-t x) dif x <= \ <= eps t integral_b^(+oo) e^(-t x) dif x = lr(eps cancel(t) dot (e^(-t x) / (-cancel(t))) |)_(x = b)^(x = +oo) = eps dot e^(-b t) < eps $ при больших $b$. А еще, $ abs(F(t) - F(t_0)) <= underbrace(abs(F_b (t) - F(t)), integral_b^(+oo) ... < eps) + abs(F_b (t_0) - F_b (t)) + underbrace(abs(F(t_0) - F_b (t_0)), integral_b^(+oo) ... < eps) < 2 eps + underbrace(abs(F_b (t) - F_b (t_0)), "при" t "близких к" t_0 \ < eps) < 3 eps. $ ] #example[ Если $f(x) = (sin x) / x$. Пусть $F(t) = integral_0^(+oo) e^(-t x) (sin x)/x dif x$. По теореме#rf("improper-times-exp-continious") $F(t) in C[0, +oo)$. Найдем $F'(t)$#rf("parametric-derivative"): $ F'(t) = integral_0^(+oo) -x e^(-t x) (sin x) / x dif x = - integral_0^(+oo) e^(-t x) sin x dif x. $ У нас есть локальное условие Лебега, так как $e^(-alpha x)$ для некоторого $0 < alpha < t_0$ --- суммируемая мажоранта#rf("parametric-derivative"). 
Можно проинтегрировать: $ integral_0^(+oo) e^(-t x) sin x dif x = integral_0^(+oo) dot e^(-t x) (e^(i x) - e^(-i x))/(2i) dif x = 1/(2i) integral_0^(+oo) (e^(-t x + i x) - e^(-t x - i x)) dif x newline(=) 1/(2i) dot ( lr(e^((-t + i) x)/(-t + i) bar)_(x=0)^(x->+oo) - lr(e^((-t - i) x)/(-t - i) bar)_(x=0)^(x->+oo) ) =^(t > 0) 1/(2i) (1/(-t + i) - 1/(-t - i)) = 1/cancel(2i) dot cancel(-2i)/(t^2 + 1) $ Отсюда $F'(t) = -1/(1 + t^2)$, значит $F(t) = C - op("arctg") t$. Причем $lim_(t -> +oo) F(t) = 0 ==> C = pi / 2$. Отсюда знаем ответ, $F(t) = pi/2 - op("argtg") t$. Но это верно лишь при $t > 0$: мы воспользовались этим, пока брали интеграл. К счастью, $F$ непрерывна на $[0, +oo)$#rf("improper-times-exp-continious"), и можно сделать предельный переход. $F(0) = pi / 2$. Значит, $ integral_0^(+oo) (sin x)/x dif x = pi / 2. $ ]
https://github.com/katamyra/Notes
https://raw.githubusercontent.com/katamyra/Notes/main/Compiled%20School%20Notes/CS3001/CompiledNotes.typ
typst
#import "../../template.typ": * #show: template.with( title: [CS 3001 Notes], description: [CS Ethics Notes Based On Lectures/Lecture Slides], authors: ( ( name: "<NAME>", ), ) ) #include "Modules/Therac25.typ" #include "Modules/Utilitarianism.typ" #include "Modules/Kantianism.typ" #include "Modules/SocialContract.typ" #include "Modules/StakeHolder.typ" #include "Modules/VirtueEthics.typ" #include "Modules/FreedomOfSpeech.typ" #include "Modules/WritingAnArgument.typ" #include "Modules/ProfessionalEthics.typ" #include "Modules/Privacy.typ" #include "Modules/IntellectualProperty.typ" #include "Modules/NetworkedCommunications.typ" #include "Modules/InternalRealism.typ" #include "Modules/BiometricData.typ"
https://github.com/adelhult/typst-hs-test-packages
https://raw.githubusercontent.com/adelhult/typst-hs-test-packages/main/test/counter-examples/space_after_math_mode.typ
typst
MIT License
// Parser counter-example fixture: `nada` discards its argument and always
// returns the string "foo". The call below only exists to exercise parsing.
#let nada(ignore_me) = { "foo" }

// removing space after $ works
// NOTE(review): `x => $x$ + x` puts a math segment directly inside a lambda
// body; per the comment above, the space after the closing `$` is what this
// counter-example is probing in the parser.
#let w = nada(x => $x$ + x)
https://github.com/janlauber/bachelor-thesis
https://raw.githubusercontent.com/janlauber/bachelor-thesis/main/chapters/implementation.typ
typst
Creative Commons Zero v1.0 Universal
= Implementation #show raw.where(block: false): box.with( fill: luma(240), inset: (x: 3pt, y: 0pt), outset: (y: 3pt), radius: 2pt, ) == Development Environment Setup The development of the One-Click Deployment system necessitates a specifically configured environment to support the technologies used. This setup includes a Kubernetes cluster, which is central to deploying and managing containerized applications. Developers need to install Docker to containerize the application, ensuring consistent operation across different environments. The backend development leverages Go, requiring a Go environment setup, while the frontend uses Node.js and SvelteKit #footnote[https://kit.svelte.dev/], necessitating the installation of Node.js and the appropriate npm packages. \ The development environment setup involves: - A Kubernetes cluster either locally via Minikube #footnote[https://minikube.sigs.k8s.io/docs/] or as a Managed Service at Natron Tech AG #footnote[https://natron.ch]. - Docker #footnote[https://docker.com] installation for building and managing containers. - Node.js #footnote[https://nodejs.org/en] and npm #footnote[https://www.npmjs.com/] to handle various frontend dependencies and build processes. - Go #footnote[https://go.dev/] environment for backend development, set to the appropriate version to ensure compatibility with all dependencies and libraries used. These tools and setups form the backbone of the development infrastructure, providing a robust platform for building, testing, and deploying the system components efficiently. Generally, the development environment are described in detail in the corresponding *README* files of the respective repositories. == Core Functionality Implementation === Design Goals The main goal of the One-Click Deployment system is to put *convention over configuration*. This means that the user should not have to deal with the details of Kubernetes resources like Deployments, Services, Ingresses, etc. 
The user should only have to define highly abstracted values like the amount of replicas, the container image, the environment variables, etc. To make it even easier for the user, the system provides a blueprint functionality of these abstracted values for certain use cases. The user can then create a new deployment based on a blueprint and only has to adjust the values which are different from the blueprint. For example, the user can create a blueprint for a Node-RED deployment and then create a new deployment based on this blueprint and only has to adjust minor things like the URL where the Node-RED instance should be available. This way the user can deploy complex applications with only a few clicks. The system also provides a real time monitoring of the deployed application. The user can see the current CPU and memory usage of the pods and can also see the Kubernetes resources generated by the deployment like pods, services, ingresses, etc. The user can also interact with these resources like viewing logs, events, and yaml configurations. The system also provides a rollback functionality. Each time the user changes the deployment configuration a new rollout gets triggered. The user can then see the old rollouts and the current one. The user can also rollback to a previous rollout. This functionality is like a snapshot of the deployment configuration at a specific time. With the implementation of the Kubernetes Operator, the system can by design be easily extended with new resources and functionalities. The system is also designed to be highly scalable and reliable. In the end the CRD (Custom Resource Definition #footnote[https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/]) Rollout is the core abstraction of the system. Then the Kubernetes Operator takes care of the rest. 
To make this level of abstraction more accessible to the user, the system provides a web interface where the user can interact with the system and whose core functionality is to create and manage Rollout resources. With these design goals in mind, the core functionality of the system was implemented. #pagebreak() === Deployment Module (Kubernetes Operator) The Kubernetes Operator within the One-Click Deployment platform acts as a core component, designed to simplify the management of deployments within the Kubernetes ecosystem. It automates the process of deploying, updating, and maintaining containerized applications. Using Custom Resource Definitions (CRDs), the operator allows users to define their applications in a declarative manner. \ The development of this module involved using the Operator SDK #footnote[https://sdk.operatorframework.io/], which provides tools and libraries to build Kubernetes operators in Go. This SDK facilitates the monitoring of resource states within the cluster, handling events such as creation, update, and deletion of resources. In the #emph("controllers") directory of the #emph("one-click-operator repository") #footnote[https://github.com/janlauber/one-click-operator] on GitHub, the core functionality of the operator is implemented. This includes the reconciliation loop, which continuously monitors the state of resources and ensures that the desired state is maintained. The operator interacts with the Kubernetes API to create and manage resources, such as Deployments, Services, and ConfigMaps, based on the user-defined specifications. \ \ *Kubernetes Resources managed by the operator include:* - *ServiceAccount* #footnote[https://kubernetes.io/docs/concepts/security/service-accounts/]: A service account provides an identity for processes that run in a Pod. - *PersistentVolumeClaim (PVC)* #footnote[https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims]: A PVC is a request for storage by a user. 
- *Secret* #footnote[https://kubernetes.io/docs/concepts/configuration/secret/]: A Secret is an object that contains a small amount of sensitive data such as a password, a token, or a key. - *Deployment* #footnote[https://kubernetes.io/docs/concepts/workloads/controllers/deployment/]: A Deployment provides declarative updates to Pods and ReplicaSets. - *Service* #footnote[https://kubernetes.io/docs/concepts/services-networking/service/]: A Service is a networking Layer 3/4 load balancer which exposes the pods within the Kubernetes cluster. - *Ingress* #footnote[https://kubernetes.io/docs/concepts/services-networking/ingress/]: An Ingress is similar to a Reverse Proxy and exposes HTTP and HTTPS routes from outside the cluster to services within the cluster. - *HorizontalPodAutoscaler (HPA)* #footnote[https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/]: An HPA automatically scales the number of pods in a deployment based on observed CPU utilization. - *CronJob* #footnote[https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/]: A CronJob creates Jobs on a repeating schedule. All these resources are managed by the operator based on the user-defined specifications in the Rollout resource explained in the @crd. ==== Rollout Controller The *rollout_controller.go* @OneclickoperatorControllersRollout_controller is the primary controller responsible for managing Rollout resources. 
The following code snippets illustrate the core functionality of the deployment module: #set text(8pt) #show raw.where(block: true): block.with( fill: luma(240), inset: 10pt, radius: 4pt, ) ```go // RolloutReconciler reconciles a Rollout object func (r *RolloutReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { log := log.FromContext(ctx) // Fetch the Rollout instance var rollout oneclickiov1alpha1.Rollout if err := r.Get(ctx, req.NamespacedName, &rollout); err != nil { if errors.IsNotFound(err) { // Object not found log.Info("Rollout resource not found.") return ctrl.Result{}, nil } // Error reading the object - requeue the request. log.Error(err, "Failed to get Rollout.") return ctrl.Result{}, err } // Reconcile ServiceAccount if err := r.reconcileServiceAccount(ctx, &rollout); err != nil { log.Error(err, "Failed to reconcile ServiceAccount.") return ctrl.Result{}, err } // Reconcile PVCs, Secrets, Deployment, Service, Ingress, HPA, CronJobs [...] // Update status of the Rollout if err := r.updateStatus(ctx, &rollout); err != nil { if errors.IsConflict(err) { log.Info("Conflict while updating status. Retrying.") return ctrl.Result{Requeue: true}, nil } log.Error(err, "Failed to update status.") return ctrl.Result{}, err } return ctrl.Result{}, nil } // SetupWithManager sets up the controller with the Manager. func (r *RolloutReconciler) SetupWithManager(mgr ctrl.Manager) error { if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &batchv1.CronJob{}, "metadata.ownerReferences.uid", func(rawObj client.Object) []string { cronJob := rawObj.(*batchv1.CronJob) ownerRefs := cronJob.GetOwnerReferences() ownerUIDs := make([]string, len(ownerRefs)) for i, ownerRef := range ownerRefs { ownerUIDs[i] = string(ownerRef.UID) } return ownerUIDs }); err != nil { return err } return ctrl.NewControllerManagedBy(mgr). For(&oneclickiov1alpha1.Rollout{}). Owns(&appsv1.Deployment{}). Owns(&corev1.Service{}). Owns(&networkingv1.Ingress{}). 
Owns(&corev1.Secret{}). Owns(&corev1.PersistentVolumeClaim{}). Owns(&autoscalingv2.HorizontalPodAutoscaler{}). Owns(&corev1.ServiceAccount{}). Owns(&batchv1.CronJob{}). Complete(r) } ``` #set text(12pt) The reconciliation loop continuously monitors the state of Rollout resources, ensuring that the desired state is maintained. The controller reconciles various resources, such as ServiceAccounts, PVCs, Secrets, Deployments, Cronjobs, Services, Ingress, and HPAs, based on the user-defined specifications. The *SetupWithManager* function sets up the controller with the Manager, instructing the manager to start the controller when the Manager is started. #pagebreak() ==== Rollout Resource Specification <crd> The core abstraction happens in the Rollout specification definition. This yaml structure was designed with our design goals in mind. The user or web interface then only has to interact with this structure to deploy and manage applications. The following yaml snippet illustrates the Rollout resource specification: #set text(8pt) #show raw.where(block: true): block.with( fill: luma(240), inset: 10pt, radius: 4pt, ) ```yaml apiVersion: one-click.dev/v1alpha1 # current version of the CRD kind: Rollout # name of the CRD metadata: name: nginx # name of the Rollout namespace: test # namespace where the Rollout should be created spec: args: ["nginx", "-g", "daemon off;"] # (optional) arguments for the container command: ["nginx"] # (optional) command for the container rolloutStrategy: rollingUpdate # or "recreate" (if not specified then "rollingUpdate" is used) nodeSelector: # (optional) set specific nodes where the pods should be scheduled kubernetes.io/hostname: minikube tolerations: # (optional) set specific tolerations for the pods - key: "storage" operator: "Equal" value: "ssd" effect: "NoSchedule" image: # container image registry: "docker.io" repository: "nginx" tag: "latest" username: "test" password: "<PASSWORD>" securityContext: # (optional) security context for 
the container runAsUser: 1000 runAsGroup: 1000 fsGroup: 1000 allowPrivilegeEscalation: false runAsNonRoot: true readOnlyRootFilesystem: true privileged: false capabilities: drop: - ALL add: - NET_BIND_SERVICE horizontalScale: # the horizontal scaling configuration minReplicas: 1 maxReplicas: 3 targetCPUUtilizationPercentage: 80 resources: # the resource configuration like CPU and memory limits and requests requests: cpu: "100m" memory: "128Mi" limits: cpu: "200m" memory: "256Mi" env: # environment variables for the container - name: "USERNAME" value: "admin" - name: DEBUG value: "true" secrets: # secret environment variables for the container - name: "PASSWORD" value: "<PASSWORD>" - name: "ANOTHER_SECRET" value: "122" volumes: # persistent volumes for the container - name: "data" mountPath: "/data" size: "2Gi" storageClass: "standard" interfaces: # the network interfaces for the container - name: "http" port: 80 - name: "https" port: 443 ingress: ingressClass: "nginx" annotations: nginx.ingress.kubernetes.io/rewrite-target: / nginx.ingress.kubernetes.io/ssl-redirect: "false" rules: - host: "reflex.oneclickapps.dev" path: "/" tls: true tlsSecretName: "wildcard-tls-secret" - host: "reflex.oneclickapps.dev" path: "/test" tls: false cronjobs: # cronjobs for the container - name: some-bash-job suspend: false image: password: "" registry: docker.io repository: library/busybox tag: latest username: "" schedule: "*/1 * * * *" command: ["echo", "hello"] maxRetries: 3 backoffLimit: 2 env: - name: SOME_ENV value: "some-value" resources: limits: cpu: 500m memory: 512Mi requests: cpu: 100m memory: 256Mi serviceAccountName: "nginx" ``` #set text(12pt) Here's a brief overview of the key components: - `apiVersion` and `kind`: These fields specify the version and type of the Kubernetes Custom Resource Definition (CRD). In this case, the CRD is a *Rollout*. - `metadata`: This section includes the name of the Rollout and the namespace where it will be created. 
There are also optional fields for labels and annotations. #footnote[https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata] - `spec`: This is the heart of the Rollout, containing all the configuration details for the deployment. It includes: - `args` and `command`: These optional fields specify the arguments and command for the container. - `rolloutStrategy`: This field determines the strategy for updating the pods. It can be either *rollingUpdate* or *recreate*. - `nodeSelector` and `tolerations`: These optional fields allow you to control where the pods are scheduled and what conditions they can tolerate. - `image`: This section specifies the container image details, including the registry, repository, tag, and credentials. - `securityContext`: This optional section defines the security settings for the container. - `horizontalScale`: This section configures the horizontal scaling of the pods. - `resources`: This section sets the CPU and memory limits and requests for the container. - `env` and `secrets`: These sections define the environment variables and secret environment variables for the container. - `volumes`: This section configures persistent volumes for the container. - `interfaces`: This section sets up the network interfaces for the container. - `cronjobs`: This section allows you to define cronjobs for the container. - `serviceAccountName`: This field specifies the service account associated with the Rollout. The deployment module is a critical component of the One-Click Deployment system, automating the deployment and management of containerized applications within the Kubernetes environment. The operator simplifies complex tasks, streamlining the deployment process and ensuring efficient system operations. 
#pagebreak() == Backend Implementation The backend of the One-Click Deployment system is built using Pocketbase #footnote[https://pocketbase.io], an open-source platform that simplifies backend development and deployment. The backend system handles user authentication, data storage, and interactions with the Kubernetes cluster. The backend codebase is written in Go, leveraging the flexibility and performance of the language to manage the system's core functionalities. Go is also the language of the whole Kubernetes ecosystem, so it was a natural choice for the backend because all the standard libraries and tools are available in Go. The backend interacts with the Kubernetes API to create and manage the logical resources required to deploy the Rollout objects, which then trigger the Kubernetes Operator to handle the deployment process. The backend also manages user authentication, ensuring secure access to the system's functionalities. The backend codebase is structured to handle various API endpoints, each responsible for specific operations, such as user authentication, project creation, and Rollout management. The backend interacts with the frontend through RESTful APIs, processing requests and returning appropriate responses based on the system's state and user input. The following code snippet of the *main.go* @OneclickPocketbaseMain demonstrates the implementation of a backend API endpoint for creating a new project: #set text(8pt) #show raw.where(block: true): block.with( fill: luma(240), inset: 10pt, radius: 4pt, ) ```go [...] func main() { // Initialize the Pocketbase app app := pocketbase.New() [...] // Listen for incommint requests to create a new rollout and trigger the rollout creation process in the Kubernetes cluster app.OnRecordBeforeCreateRequest().Add(func(e *core.RecordCreateEvent) error { switch e.Collection.Name { case "rollouts": return controller.HandleRolloutCreate(e, app) } return nil }) [...] 
} ``` #set text(12pt) The code snippet demonstrates the event handling mechanism in Pocketbase, where the backend listens for incoming requests to create a new Rollout object. Upon receiving the request, the backend triggers the Rollout creation process in the Kubernetes cluster, initiating the deployment of the specified application. The backend implementation is designed to provide a robust and efficient foundation for the One-Click Deployment system, enabling seamless interactions between the frontend, backend, and Kubernetes environment. == User Interface Implementation The user interface of the One-Click Deployment system is developed using Svelte #footnote[https://svelte.dev/], a modern web framework that simplifies frontend development and enhances user experience. The frontend interface serves as the primary interaction point for users, allowing them to define and manage deployment projects easily. The frontend codebase is structured to provide a dynamic and intuitive user experience, with components designed to facilitate project creation, application deployment, and configuration. The frontend interacts with the backend through RESTful APIs with the Pocketbase Javascript SDK #footnote[https://pocketbase.io] @Jssdk2024, enabling seamless communication between the user interface and the backend system. The frontend leverages Tailwind CSS #footnote[https://tailwindcss.com/] for styling and Flowbite-Svelte #footnote[https://flowbite-svelte.com/] for UI components, ensuring a consistent and visually appealing design. With SvelteKit #footnote[https://kit.svelte.dev/] as the frontend framework, the One-Click Deployment system benefits from Svelte's reactivity and SvelteKit's versatility, enabling the development of fast, responsive, and accessible web applications. TypeScript is used to enhance code reliability and maintainability, providing type safety and early error detection during development. 
#pagebreak() === User Interaction The user interface of the One-Click Deployment system features a clean and intuitive design, allowing users to create projects, manage deployments, and configure application settings easily. The following screenshots showcase the key components of the user interface. ==== Projects Overview In One-Click the user can create a new project or manage existing ones. Each project will get a unique *ID* which will be used to identify the project inside the Kubernetes cluster. The project will also get a unique *namespace* in the Kubernetes cluster which has the same name as the project *ID*. #figure( image("../figures/projects-overview.png", width: 100%), caption: "One-Click Deployment System Projects Overview" ) There will be a Kubernetes namespace for each One-Click project with certain labels: #set align(center) #table( columns: 2, [*Label*], [*Description*], [`one-click.dev/displayName`], [The user's display name in Pocketbase.], [`one-click.dev/projectId`], [The unique ID of the project.], [`one-click.dev/userId`], [The user's ID of the project.], [`one-click.dev/username`], [The username of the project.] ) #set align(left) #pagebreak() ==== Deployments Overview In the project overview, the user can see all the deployments in this project. He can also navigate to the Blueprints, create a new Deployment or adjust some project settings. In the listed deployments, he can see the deployment name, status, URL (if configured), the last rollout time, number of replicas, and currently deployed Docker image. #figure( image("../figures/deployments-overview.png", width: 100%), caption: "One-Click Deployment System Deployments Overview" ) \ ==== Blueprints Blueprints are stored configurations of a One-Click CRD (@crd). With these blueprints the user can easily bootstrap new projects with no effort. 
The use cases are for example to predefine some enhanced configurations in his rollout crd yaml file which then get automatically applied when creating a new project out of this blueprint. #figure( image("../figures/blueprints.png", width: 100%), caption: "One-Click Deployment System Blueprints" ) #pagebreak() ==== Deployment Overview When selecting or creating a new deployment within a project, users will land on the deployment overview page, as shown in the following screenshot: #figure( image("../figures/deployment-overview.png", width: 100%), caption: "One-Click Deployment System Streamlit Deployment Overview" ) The deployment overview page displays high level information and stats about the deployment configuration, such as the number of rollouts, instances, interfaces, volumes, environment variables, secret variables and the current docker image. There is also real time CPU and memory usage monitoring. #pagebreak() ==== Deployment Map The map shows real time Kubernetes resources generated by the deployment, such as pods, services, ingresses, and persistent volume claims etc. #figure( image("../figures/map.png", width: 100%), caption: "One-Click Deployment System Map" ) The map feature in a deployment uses svelte-flow #footnote[https://svelteflow.dev/] to graphically show the resources of the current rollout in the selected deployment. Everything gets updated in real time via a websocket endpoint to the backend. #pagebreak() The user can move the components with his mouse, zoom in and out and dig into its `manifests` / `logs` / `events` when clicking on a component. #figure( image("../figures/map-drawer.png", width: 100%), caption: "One-Click Deployment System Map Drawer" ) #pagebreak() ==== Rollouts Each time the user edits and changes something in a deployment a new rollout will get created. This is like a *snapshot* of the CRD configuration. 
This gives the user the power to undo any changes he did to his deployment configuration like changing the port of an interface or updating the container image tag. He can see every rollout in the rollouts table. Through the frontend the user can either delete or hide a rollout snapshot. If he deletes a rollout then it won't pop up on the overview page anymore. If he hides a rollout then it will still be there but not visible on the rollouts table. #figure( image("../figures/rollouts.png", width: 100%), caption: "One-Click Deployment System Rollouts Table" ) #pagebreak() When selecting a previous rollout the user can click on "rollback" and then a diff shows up which diffs the CRD files and shows exactly what will change. This is a powerful feature because the user can see what will change before he actually rolls back to a previous rollout. He can also refer to a previous rollout if he wants to see what the configuration was at that time. #figure( image("../figures/rollouts-diff.png", width: 100%), caption: "One-Click Deployment System Diff" ) #pagebreak() ==== Container Image Under images, the management of the deployment image is possible. Configuration of the registry (e.g., ghcr.io, docker.io), along with specifying username and password for private registries, and the repository/image are supported. Additionally, defining the image tag is an option. For debugging purposes, copying the current rollout ID allows for searching components within the Kubernetes cluster. #figure( image("../figures/image.png", width: 100%), caption: "One-Click Deployment System Container Image" ) *Auto update* To avoid manual updates of the image tag each time a new version is pushed to the registry, the Auto Update feature can be activated. This feature allows for specifying an interval (1m, 5m, 10m), a pattern, and a policy on how the image registry should be checked and updated. 
- *Interval*: The cron ticker, defined as a Pocketbase environment variable, checks the registry at set intervals based on the modulo of the minutes. It is recommended to maintain the default 1-minute tick interval. - *Pattern*: A regex pattern that parses the image tag, following the default semantic versioning format (x.x.x). - *Policy*: This dictates the sorting method, which can be either semantic versioning (semver) or timestamp-based, with the latter requiring the image tag to be in unix timestamp format. The concept and behavior mirror those found in fluxcd, as outlined in their guide to image updates: FluxCD Image Update Guide #footnote[https://fluxcd.io/flux/guides/image-update/] \ \ *Examples* #set align(center) #table( columns: 3, [*Pattern*], [*Policy*], [*Note*], [`^\d+.\d+.\d+$`], [semver], [Default x.x.x semver pattern. e.g. 1.2.0 will get updated to 1.2.1], [`dev-\d+.\d+.\d+$`], [semver], [Custom pattern for *dev* versions. e.g. dev-1.2.0 will get updated to dev-1.2.1], [`.*`], [timestamp], [Any pattern will get updated with a *unix* timestamp.], [`preview-*`], [timestamp], [A pattern with the *preview-* prefix which will get updated with a *unix* timestamp.] ) #set align(left) #pagebreak() ==== Scaling On the scale page, configuration options are available for both horizontal and vertical scaling. Horizontal scaling adjusts the number of instances (replicas), while vertical scaling involves setting CPU and memory requests and limits. #figure( image("../figures/scale.png", width: 100%), caption: "One-Click Deployment System Scaling" ) *Horizontal* \ The number of minimum and maximum replicas can be defined. Autoscaling behavior is governed by the *target* *CPU* usage; if the CPU usage exceeds the target, the number of replicas will increase. Current CPU usage can be monitored on the deployment overview page. \ \ *Vertical* \ Vertical scaling involves setting CPU and memory *requests* and *limits*. 
The request specifies the minimum CPU and memory allocated to the pod, while the limit defines the maximum allowable CPU and memory. Exceeding these limits results in pod *termination* on the *memory* side and *throttling* on the *CPU* side. These measurements are specified in millicores and megabytes, with the limit set at or above the request level. Understanding the application's requirements is crucial for setting appropriate values. If the requirements are unknown, starting with default values and monitoring the system's behavior is advisable. Increases to the limits may be necessary if the pod is frequently terminated due to exceeding these thresholds. ==== Networking Network configuration offers several customization options. An unlimited number of services and ingress interfaces can be configured. Services provide internal Kubernetes connectivity, while ingress interfaces handle external access. To create a new network interface, select the "New Interface" button. The configurable options include: - *name*: Unique identifier within the deployment. - *port*: Corresponds to the application port specified in the Dockerfile. - *ingress* class: Select from available ingress classes within the Kubernetes cluster via a dropdown menu. - *host*: The domain name, such as example.com. - *path*: Access path for the interface (e.g., `/api`, `/`). - *tls*: Toggle to enable TLS and specify the secret name for the TLS certificate. - *tls secret name*: If left unspecified, defaults to the host name. This setting allows for automatic TLS certificate generation using cert-manager annotations #footnote[https://cert-manager.io/docs/usage/ingress/]. #pagebreak() The DNS name corresponds to the Kubernetes Service name in the cluster, which other deployments can use for DNS lookups. The DNS name can be copied using the copy icon. 
Setting the ingress class to none, removing the host and path, and disabling TLS will result in the deletion of the ingress and creation of the service for internal network exposure only. #figure( image("../figures/network.png", width: 100%), caption: "One-Click Deployment System Networking" ) #pagebreak() ==== Volumes For persistent storage, volumes are available to store application data. Multiple volumes can be defined as needed. To add a new volume, click the "New Volume" button. Configurable options for volumes include: - *name*: Unique identifier within the deployment. - *mount path*: Location where the volume is mounted (e.g., `/data`, `/var/lib/mysql`). - *size*: Volume size in GiB (gibibyte). - *storage class*: Select from the available storage classes within the Kubernetes cluster using a dropdown menu. Once created, the size and storage class of a volume cannot be changed. If a change is necessary, a new volume must be created, and the data migrated to it. #figure( image("../figures/volumes.png", width: 100%), caption: "One-Click Deployment System Volumes" ) #pagebreak() ==== Environment Variables and Secrets On the envs & secrets page, users can configure both environment variables and secrets for their application. Environment variables are key-value pairs injected into the container, while secrets represent sensitive data stored as Kubernetes secrets and are base64 encoded. They can function as environment variables or be mounted as files within the container. Content from a `.env` file or a secret file can be directly pasted into the text area provided. Environment variables and secrets are accessible within the container as environment variables going forward. #figure( image("../figures/volumes.png", width: 100%), caption: "One-Click Deployment System Environment Variables" ) #pagebreak() ==== Deployment Settings In the deployment settings the user can change its name or avatar. 
He can also use the advanced editing mode to edit the CRD of the deployment directly. It's also possible to create a new blueprint out of the current deployment configuration. The blueprint can then be used to create new deployments with the same configuration. The user can also delete the deployment. #figure( image("../figures/deployment-settings.png", width: 100%), caption: "One-Click Deployment System Deployment Settings" ) #pagebreak() == Build and Deployment Process Of course the One-Click System itself needs a deployment process. The core components like the Kubernetes Operator, frontend and backend need to be accessible for the user to run it on his own. The operator is deployed in the Kubernetes cluster where the user wants to deploy his applications. It doesn't matter if it's a local Minikube cluster, a managed Kubernetes cluster at Natron Tech AG #footnote[https://natron.ch] or a cluster in a hyperscaler like AWS, Azure or Google Cloud. An example deployment can be found in the *config* directory of the *one-click-operator* #footnote[https://github.com/janlauber/one-click-operator/tree/main/config] repository. The frontend and backend are built into a single Docker container and can get deployed either in a Kubernetes cluster or on a server. It only needs a connection and kubeconfig (for authentication) to the Kubernetes cluster where the operator is running. An example to deploy it inside the Kubernetes cluster can be found in the *deployment* directory of the *one-click* #footnote[https://github.com/janlauber/one-click/tree/main/deployment] repository. == Project Management To manage the development process efficiently, GitHub Projects was used, which provided a straightforward Kanban board for tracking progress. This tool was instrumental in organizing tasks, collecting feature requests, and prioritizing work based on user feedback and identified requirements. 
The use of GitHub Projects facilitated a clear and transparent workflow, enabling future contributors to collaborate effectively and stay aligned on project goals. - *Kanban Board*: The Kanban board in GitHub Projects allowed us to visualize the development process, track the status of tasks, and manage the flow of work. Features and tasks were categorized into columns such as "To Do," "In Progress," "Review," and "Done," providing a clear overview of the project's progress. - *Feature Collection*: Based on user feedback and requirements analysis, features were collected and added to the backlog. This ensured that user needs were continuously integrated into the development cycle, leading to a more user-centered product. - *Prioritization*: Tasks were prioritized according to their importance and impact, ensuring that critical features and fixes were addressed promptly. This approach helped maintain a focus on delivering high-value functionality in each iteration. By leveraging GitHub Projects as a project management tool, it was possible to streamline the development process. Also the use of GitHub Issues and Pull Requests was essential for tracking bugs, feature requests, and code changes, ensuring a structured and organized development process with a clear transparency for all users and contributors.
https://github.com/VisualFP/docs
https://raw.githubusercontent.com/VisualFP/docs/main/SA/design_concept/content/design/design_iteration_1_flo_inspired.typ
typst
= Flo-inspired design <flo-inspired-design> This design proposal is inspired by flo (see @flo). With this design, function elements are distributed on a canvas. Every element (e.g., a variable or function) can be connected to another element with arrows. == Function Parameter Editor Parameters of a function are defined separately from the function body. A dialog, as depicted in @flo_inspired_param_editor, appears next to the editor canvas when opening a function with the editor. #figure( image("../../static/flo_inspired_parameter_editor.png", width: 40%), caption: "Draft of proposed function parameter editor" )<flo_inspired_param_editor> Users can add and name a parameter by clicking the plus sign. By clicking on the minus sign, a parameter can be removed again. The user can drag a parameter from the parameter editor onto the function editor canvas to use a parameter in the function body. == Function Editor To define a function, the user can drag pre-defined functions, block elements, and self-defined functions from a sidebar onto the editor canvas. As described above, the same can be done with function parameters from the parameter editor. To connect a parameter to a function call, the user can create a connector-arrow between them. This is achieved by clicking on the parameter while holding Ctrl and then clicking on the function parameter slot. To visually display currying, every function block has only one or no parameter. If a function has more than one parameter, the function block has dependent blocks for every additional parameter. Such dependent blocks can be recognized by the ":apply" suffix appended to the function's name. The last block of a function is the value returned by the function. This value can be used as a parameter for another function or marked as the function's return value. The types of a function or variable block can be viewed by hovering over it. 
An example of a simple function definition can be seen in @flo_inspired_addition, a visual representation of the addition code scenario defined in @design_eval_code_scenarios. #figure( image("../../static/flo-inspired-addition.png", width: 50%), caption: "Example of flow-inspired function definition for an addition function" )<flo_inspired_addition> Functions can, of course, also be used as parameters themselves. For that, the user can create a connector arrow between a function block and a function parameter slot in the same way as with function parameters. When using functions as parameters, it is possible to leave some function parameters unapplied. Like this, a function parameter can be filled by the function it's used in. An example of that can be seen in @flo_inspired_mapAddFive, a visual representation of the "Map Add 5" code scenario in @design_eval_code_scenarios. The auto-filled parameter of the "(+)" function can be recognized by the fill color of its parameter slot. #figure( image("../../static/flo-inspired-mapAdd5.png", width: 60%), caption: "Example of functions used as parameters for other functions" )<flo_inspired_mapAddFive> Pattern-matching is a handy feature of Haskell. To support that in VisualFP, there is a pre-defined match block with match cases for typical scenarios (e.g. empty list and head-tail pattern). The match block has connector slots for each match case to which the user can connect the definition of the case behavior. Another essential concept in any language is recursion. To create the recursive behavior, the user can drag the function they are defining from the sidebar onto the function editor canvas and use it as any other function. @flo_inspired_product, a visual representation of the product code scenario defined in @design_eval_code_scenarios, shows what a recursive function definition using pattern-matching could look like. 
#figure( image("../../static/flo-inspired-product.png"), caption: "Example of a flow-inspired recursive function" )<flo_inspired_product>
https://github.com/rem3-1415926/Typst_Thesis_Template
https://raw.githubusercontent.com/rem3-1415926/Typst_Thesis_Template/main/appendix/app2.typ
typst
MIT License
#[ #set page( paper: "a3", flipped: true, ) = Appendix Two A schematic would be meaningful here. But I don't have one at the ready, so take this blind text. #lorem(100) #pagebreak() // make sure the A3 page is double sided ]
https://github.com/Clamentos/FabRISC
https://raw.githubusercontent.com/Clamentos/FabRISC/main/src/spec/Section3.typ
typst
Creative Commons Attribution Share Alike 4.0 International
/// #import "Macros.typ": * /// #section( [Low Level Data Types], [This section is dedicated to explain the various proposed low-level data types including integer and floating point. The smallest addressable object in FabRISC is the _byte_, that is, eight consecutive bits. Longer types are constructed from multiple bytes side by side following powers of two: one, two, four or eight bytes in _little-endian_ order. If bigger types are desired, then they can be simulated in software or primitively handled via custom defined extensions.], [FabRISC provides the *Word Length* (`WLEN`) 2 bit ISA parameter, to indicate the natural scalar word length of the processor in bits. The possible values are listed in the table below:], tableWrapper([Scalar word lengths], table( columns: (auto, auto), align: (x, y) => (center, left + horizon).at(x), [#middle([*Code*])], [#middle([*Value*])], [00], [8 bits. ], [01], [16 bits.], [10], [32 bits.], [11], [64 bits.] )), ///. subSection( [Integer Types], [Integers are arguably the most common data types. They can be signed or unsigned and, when they are, 2's complement notation is used and, depending on the length, they can have various names. FabRISC uses the following nomenclature:], tableWrapper([Integer types.], table( columns: (auto, auto), align: (x, y) => (center, left + horizon).at(x), [#middle([*Type*])], [#middle([*Size*])], [Byte ], [8 bits. ], [Short], [16 bits.], [Word ], [32 bits.], [Long ], [64 bits.] )), [Integer types are manipulated by integer instructions which, by default, behave in a modular fashion. Edge cases, such as wraps-around or overflows can happen in particular situations depending if the operation is arithmetic or logical and can raise exceptions. 
The following is the list of edge cases for the integer data types:], pagebreak(), tableWrapper([Integer edge cases.], table( columns: (auto, auto), align: left + horizon, [#middle([*Case*])], [#middle([*Description*])], [Carry over], [This situation arises when the absolute value or modulus of the result is too big to fit in the desired data type. For example, the addition of the two unsigned bytes: `11111111` and `00000001` will result in a carry over: `(1)00000000`.], [Carry under], [This situation arises when the absolute value or modulus of the result is too accurate to fit in the desired data type. For example, the right shift by one of the byte: `00000011` will result in a carry under: `00000001(1)`.], [Overflow], [This situation arises when the signed value of the result is too big to fit in the desired data type. For example, the addition of the two signed bytes: `01111111` and `00000001` will result in an overflow: `10000000`.], [Underflow], [This situation arises when the signed value of the result is too small to fit in the desired data type. For example, the addition of the two signed bytes: `10000000` and `10000001` will result in an underflow: `(1)00000001`.], [Division by zero], [This situation arises when a non-zero value is divided by zero.], [Invalid operation], [This situation arises when an operation is deemed invalid or illegal and does not fall in any other of the previous cases, for example: `0/0`.] )), [Values that serve as pointers can be manipulated as signed 2's complement integers or as unsigned integers, with the latter being preferable when possible. Although the concept of sign doesn't make much sense for addresses, signed arithmetic can still be applied without many problems in these situations. Addition and subtraction will always yield the same exact bit pattern regardless of the interpretation of the operands. 
Multiplication can still produce the same pattern as well but only if the result is `WLEN` long, which means ignoring the upper `WLEN` bits. The only comparisons that do not depend on sign are equality and inequality because they simply amount to checking if each individual bit of one operand is equal or not to the ones in the other operand. Other comparisons, such as less than or greater than are risky if the object is close to or crosses the sign boundary from `0x7FFFFFFF`...`F` to `0x80000000`...`0`. Using unsigned operations will, of course, cause none of the above mentioned issues.] ), ///. subSection( [Floating Point Types], [Floating point data types are encoded with a modified IEEE-754 standard. The custom format includes all the previously mentioned sizes with the following bit patterns:], pagebreak(), tableWrapper([Floating point formats.], table( columns: (auto, auto, auto, auto), align: (left + horizon, center + horizon, center + horizon, center + horizon), [#middle([*Size*])], [#middle([*Mantissa*])], [#middle([*Exponent*])], [#middle([*Sign*])], [8 bits], [7...3], [2...1], [0], [12 bits], [11...4], [3...1], [0], [14 bits], [13...5], [4...1], [0], [16 bits], [15...6], [5...1], [0], [32 bits], [31...9], [8...1], [0], [64 bits], [63...12], [11...1], [0] )), [The proposed encodings are similar in shape and meaning to the IEEE-754 standard, with the main difference being the placement of the sign and exponent, which are located at the beginning instead of the end of the number.], [Floating point types are manipulated via FP instructions. Edge cases such as overflows, underflows can happen in the specific situations dictated by the IEEE-754 standard and can raise exceptions. 
The following is the list of edge cases for the floating point data types:], tableWrapper([Floating point edge cases.], table( columns: (auto, auto), align: left + horizon, [#middle([*Case*])], [#middle([*Description*])], [Overflow], [This situation arises when the value of the result is too big to fit in the desired data type. The result of the operation must be set to positive infinity in this case by convention.], [Underflow], [This situation arises when the value of the result is too small to fit in the desired data type. The result of the operation must be set to negative infinity in this case by convention.] )), [`NaN` values are represented as the IEEE-754 standard dictates without any other modification to their behavior. Performing any arithmetic operation with a signalling `NaN` must be considered an illegal operation and the number must be subsequently transformed into a quiet `NaN`.], [Quiet `NaN` values simply propagate through the various operations as dictated by the IEEE-754 standard. In both signalling and quiet cases, the least significant three bits of the mantissa are the payload and encode the reason. The `NaN` payloads must be ORed when they interact regardless of the operation performed since the result will always be a `NaN`. The following is the list of payload bits:], pagebreak(), tableWrapper([`NaN` payload vector.], table( columns: (auto, auto), align: left + horizon, [#middle([*Bit*])], [#middle([*Description*])], [0], [Division by zero.], [1], [Invalid operation.], [2], [Reserved for future use.] )) ), ///. subSection( [Arithmetic Flags], [The `EXC` module, presented in the previous section, is concerned about triggering exception events when arithmetic edge cases occur. The situations described in the tables above are translated into a series of "ephemeral" flags that are not stored in any kind of flag register and are activated when the corresponding edge case arises, which in turn, will trigger the associated exception. 
It is important to note that these flags have to be implemented if either the `EXC` or `HLPR` modules are implemented, if not, this section can be skipped. The following table shows the proposed list of flags:], tableWrapper([Arithmetic flags.], table( columns: (auto, auto), align: left + horizon, [#middle([*Name*])], [#middle([*Description*])], [`COVR1`], [*Carry Over 1 Flag*: \ Activated if a carry over occurred on the 1st byte. ($"WLEN" = 0, 1, 2, 3$).], [`COVR2`], [*Carry Over 2 Flag*: \ Activated if a carry over occurred on the 2nd byte. ($"WLEN" = 1, 2, 3$).], [`COVR4`], [*Carry Over 4 Flag*: \ Activated if a carry over occurred on the 4th byte. ($"WLEN" = 2, 3$).], [`COVR8`], [*Carry Over 8 Flag*: \ Activated if a carry over occurred on the 8th byte. ($"WLEN" = 3$).], [`CUND` ], [*Carry Under Flag*: \ Activated if a carry under occurred ($"WLEN" = 0, 1, 2, 3$).], [`OVFL1`], [*Overflow 1 Flag*: \ Activated if an overflow occurred on the 1st byte. ($"WLEN" = 0, 1, 2, 3$).], [`OVFL2`], [*Overflow 2 Flag*: \ Activated if an overflow occurred on the 2nd byte. ($"WLEN" = 1, 2, 3$).], [`OVFL4`], [*Overflow 4 Flag*: \ Activated if an overflow occurred on the 4th byte. ($"WLEN" = 2, 3$).], [`OVFL8`], [*Overflow 8 Flag*: \ Activated if an overflow occurred on the 8th byte. ($"WLEN" = 3$).], [`UNFL1`], [*Underflow 1 Flag*: \ Activated if an underflow occurred on the 1st byte. ($"WLEN" = 0, 1, 2, 3$).], [`UNFL2`], [*Underflow 2 Flag*: \ Activated if an underflow occurred on the 2nd byte. ($"WLEN" = 1, 2, 3$).], [`UNFL4`], [*Underflow 4 Flag*: \ Activated if an underflow occurred on the 4th byte. ($"WLEN" = 2, 3$).], [`UNFL8`], [*Underflow 8 Flag*: \ Activated if an underflow occurred on the 8th byte. ($"WLEN" = 3$).], [`DIV0` ], [*Division by Zero Flag*: \ Activated if a division by zero occurred.], [`INVOP`], [*Invalid Operation*: \ Activated if an invalid operation occurred.] )) ), ///. comment([ The low level data types are, more or less, the usual ones. 
Addresses can be interpreted as both signed and unsigned values, though pointer arithmetic is not something that should be heavily relied on because some operations are often deemed "illegal" such as multiplication, division, modulo and bitwise logic in many programming languages. Even if the addresses are always considered signed, the boundary on 64 bit systems can be considered a non issue since the address space is so huge that everything could fit into one of the two partitions. Systems with smaller `WLEN` might encounter some difficulties but some amount of pointer arithmetic can still be done without too much hassle. FabRISC, fortunately, includes unsigned operations and comparisons in the basic modules, which makes this argument moot. I chose to use the little-endian format since it can simplify accesses to portions of a variable without needing to change the address. For example a 64 bit memory location with the content of: `5E 00 00 00 00 00 00 00` can be read at the same address as an 8 bit value: `5E`, 16 bit value: `5E 00`, 32 bit value: `5E 00 00 00` or 64 bit value: `5E 00 00 00 00 00 00 00` which are all the same value. Endianness is mostly a useless debate as the advantages or disadvantages that each type has are often just a tiny rounding error in the grand scheme of things. The reason for this decision is that I simply found the aforementioned property to be interesting to have. The proposed flags might seem weird and unnecessary, however they allow the detection of arithmetic edge cases in a very granular manner. Many ISAs don't have any way of easily detecting overflows and, when present, they either provide instructions that trap or a flag register. In both cases the system will only allow the programmer to check if an overflow occurred at the word length only. FabRISC, not only provides the ability to check at all the standard lengths, but it also distinguishes overflows into two categories depending on the direction. 
This is useful to provide to the programmer a greater control and insight of the underlying system, as well as, enabling better emulation of CPUs with smaller word lengths. Floating point is mostly similar to the IEEE-754 standard but with some rearrangements. The motivation behind the reordering of the sections is mainly to enable better bit manipulation. Thanks to this, the most "important" bits (sign and exponent) of the number can be easily reached with many of the bitwise immediate instructions. The IEEE-754 standard doesn't define the behavior of the so-called "NaN payload" when two `NaN` values interact. I chose to designate the least significant 3 bits as a cause vector for the `NaN` generation, this way, the payloads can be ORed and this information can then be used to understand potential issues in the code. ]) ) #pagebreak() ///
https://github.com/Mc-Zen/tidy
https://raw.githubusercontent.com/Mc-Zen/tidy/main/README.md
markdown
MIT License
# Tidy *Keep it tidy.* [![Typst Package](https://img.shields.io/badge/dynamic/toml?url=https%3A%2F%2Fraw.githubusercontent.com%2FMc-Zen%2Ftidy%2Fmain%2Ftypst.toml&query=%24.package.version&prefix=v&logo=typst&label=package&color=239DAD)](https://typst.app/universe/package/tidy) [![MIT License](https://img.shields.io/badge/license-MIT-blue)](https://github.com/Mc-Zen/tidy/blob/main/LICENSE) [![User Manual](https://img.shields.io/badge/manual-.pdf-purple)][guide] **tidy** is a package that generates documentation directly in [Typst](https://typst.app/) for your Typst modules. It parses docstring comments similar to javadoc and co. and can be used to easily build a beautiful reference section for the parsed module. Within the docstring you may use (almost) any Typst syntax − so markup, equations and even figures are no problem! Features: - **Customizable** output styles. - Automatically [**render code examples**](#example). - **Annotate types** of parameters and return values. - Automatically read off default values for named parameters. - [**Help** feature](#generate-a-help-command-for-your-package) for your package. - [Docstring tests](#docstring-tests). The [guide][guide] fully describes the usage of this module and defines the format for the docstrings. ## Usage Using `tidy` is as simple as writing some docstrings and calling: ```typ #import "@preview/tidy:0.3.0" #let docs = tidy.parse-module(read("my-module.typ")) #tidy.show-module(docs, style: tidy.styles.default) ``` The available predefined styles are currently `tidy.styles.default` and `tidy.styles.minimal`. Custom styles can be added by hand (take a look at the [guide][guide]). ## Example A full example on how to use this module for your own package (maybe even consisting of multiple files) can be found at [examples](https://github.com/Mc-Zen/tidy/tree/main/examples). ```typ /// This function computes the cardinal sine, $sinc(x)=sin(x)/x$. 
/// /// #example(`#sinc(0)`, mode: "markup") /// /// - x (int, float): The argument for the cardinal sine function. /// -> float #let sinc(x) = if x == 0 {1} else {calc.sin(x) / x} ``` **tidy** turns this into: <h3 align="center"> <img alt="Tidy example output" src="docs/images/sincx-docs.svg" style="max-width: 100%; padding: 10px 10px; box-shadow: 1pt 1pt 10pt 0pt #AAAAAA; border-radius: 4pt; box-sizing: border-box; background: white"> </h3> ## Access user-defined functions and images The code in the docstrings is evaluated via `eval()`. In order to access user-defined functions and images, you can make use of the `scope` argument of `tidy.parse-module()`: ```typ #{ import "my-module.typ" let module = tidy.parse-module(read("my-module.typ")) let an-image = image("img.png") tidy.show-module( module, style: tidy.styles.default, scope: (my-module: my-module, img: an-image) ) } ``` The docstrings in `my-module.typ` may now access the image with `#img` and can call any function or variable from `my-module` in the style of `#my-module.my-function()`. This makes rendering examples right in the docstrings as easy as a breeze! ## Generate a help command for your package With **tidy**, you can add a help command to your package that allows users to obtain the documentation of a specific definition or parameter right in the document. This is similar to CLI-style help commands. If you have already written docstrings for your package, it is quite low-effort to add this feature. Once set up, the end-user can use it like this: ```typ // happily coding, but how do I use this one complex function again? #mypackage.help("func") #mypackage.help("func(param1)") // print only parameter description of param1 ``` This will print the documentation of `func` directly into the document — no need to look it up in a manual. Read up in the [guide][guide] for setup instructions. 
## Docstring tests It is possible to add simple docstring tests — assertions that will be run when the documentation is generated. This is useful if you want to keep small tests and documentation in one place. ```typ /// #test( /// `num.my-square(2) == 4`, /// `num.my-square(4) == 16`, /// ) #let my-square(n) = n * n ``` With the short-hand syntax, an unfulfilled assertion will even print the line number of the failed test: ```typ /// >>> my-square(2) == 4 /// >>> my-square(4) == 16 #let my-square(n) = n * n ``` A few test assertion functions are available to improve readability, simplicity, and error messages. Currently, these are `eq(a, b)` for equality tests, `ne(a, b)` for inequality tests and `approx(a, b, eps: 1e-10)` for floating point comparisons. These assertion helper functions are always available within docstring tests (with both `test()` and `>>>` syntax). ## Changelog ### v0.3.0 - New features: - Help feature. - `preamble` option for examples (e.g., to add `import` statements). - more options for `show-module`: `omit-private-definitions`, `omit-private-parameters`, `enable-cross-references`, `local-names` (for configuring language-specific strings). - Improvements: - Allow using `show-example()` as standalone. - Updated type names that changed with Typst 0.8.0, e.g., integer -> int. - Fixes: - allow examples with ratio widths if `scale-preview` is not `auto`. - `show-outline` - explicitly use `raw(lang: none)` for types and function names. ### v0.2.0 - New features: - Add executable examples to docstrings. - Documentation for variables (as well as functions). - Docstring tests. - Rainbow-colored types `color` and `gradient`. - Improvements: - Allow customization of cross-references through `show-reference()`. - Allow customization of spacing between functions through styles. - Allow color customization (especially for the `default` theme). - Fixes: - Empty parameter descriptions are omitted (if the corresponding option is set). 
- Trim newline characters from parameter descriptions. - ⚠️ Breaking changes: - Before, cross-references for functions using the `@@` syntax could omit the function parentheses. Now this is not possible anymore, since such references refer to variables now. - (only concerning custom styles) The style functions `show-outline()`, `show-parameter-list`, and `show-type()` now take `style-args` arguments as well. ### v0.1.0 Initial Release. [guide]: https://github.com/Mc-Zen/tidy/releases/download/v0.3.0/tidy-guide.pdf
https://github.com/LilNick0101/Bachelor-thesis
https://raw.githubusercontent.com/LilNick0101/Bachelor-thesis/main/content/introduction.typ
typst
= Introduzione == Scopo del progetto Il progetto _Smart Offices_ è nato con l'idea di creare una piattaforma di ricerca di luoghi dove poter lavorare in remoto o in _smart working_ dovuta ad una grande diffusione di questa modalità di lavoro in conseguenza al periodo pandemico del COVID-19, rimasta rilevante anche dopo la pandemia dato che molte persone hanno visto un migliore bilancio vita-lavoro e le aziende hanno visto un risparmio nei costi e un aumento nella produttività. Il progetto quindi si propone di creare una piattaforma che permette di trovare luoghi dove poter lavorare in remoto: per esempio bar, ristoranti, biblioteche, ecc. che offrono la possibilità di lavorare in remoto; La piattaforma offre funzionalità di ricerca e filtri per trovare il luogo più adatto alle proprie esigenze. Un utente può inoltre registrarsi e gli utenti registrati, oltre a consultare i luoghi, possono aggiungere nuovi luoghi, lasciare recensioni sui luoghi e salvare i luoghi nei preferiti. Il progetto nell'insieme è suddiviso in tre parti: - Parte *back-end* che si occupa di esporre i servizi per la ricerca luoghi, il caricamento dei luoghi, dei utenti e delle recensioni; fornisce i dati all'applicazione tramite API REST, è stato sviluppato usando servizi e tecnologie _AWS_ ed è scritto in _TypeScript_ @typescript; #figure( image("../resources/images/aws-logo.svg", width: 30%), caption: [Logo di _Amazon Web Services_.] ) <aws-logo> - Parte *mobile* che si occupa di mostrare i dati forniti dal back-end all'utente su dispositivi mobili: questa si divide in due applicazioni, una scritta in _Kotlin_ @kotlin per _Android_ e l'altra in _Swift_ @swift per _iOS_; la parte _iOS_, sviluppata da un altro collega, utilizza il framework di UI _SwiftUI_ mentre la parte Android utilizza il framework di UI _Jetpack Compose_ @compose. #figure( image("../resources/images/compose-icon.png", width: 30%), caption: [Logo di _Jetpack Compose_.] 
) <compose-logo> Il mio progetto di stage si è concentrato sull'applicazione scritta in *Kotlin* per *Android*. Il mio scopo era quello di effettuare le chiamate dal back-end remoto ed implementare le funzionalità dell'applicazione, cioè la ricerca dei luoghi, la visualizzazione dei dettagli di un luogo, il caricamento di un nuovo luogo, l'autenticazione utente, la registrazione di un nuovo account, la pagina del profilo utente, la visualizzazione delle recensioni di un luogo e la creazione di nuove recensioni, il tutto utilizzando le best practices di un'architettura che separa la logica di business dalla logica di presentazione. Il progetto è stato preceduto da un breve periodo di formazione tecnica seguito da una breve demo. Il progetto è stato accompagnato da una metodologia di lavoro agile @agile, con stand-up giornalieri di 15 minuti dove con il tutor aziendale si discuteva del lavoro fatto e del lavoro da fare. == L'azienda #figure( image("../resources/images/logo-zero12.png", width: 30%), caption: [Logo di _Zero12 s.r.l_.] ) <zero12-logo> *Zero12 s.r.l* è una software house che propone prodotti innovativi e servizi di consulenza per la trasformazione digitale, è parte del gruppo _Vargroup_. L'azienda offre principalmente prodotti basati su _Amazon Web Services_, come infrastrutture cloud, software web, mobile e intelligenza artificiale. Altri campi di interesse sono la _Augmented Reality_ e l'_Internet of Things_. L'azienda è in continua crescita e si divide in due sedi, una a Padova e una a Empoli. == Motivazione della scelta La prima volta che ho conosciuto l'azienda è stata durante il primo periodo del corso di ingegneria del software quando il mio gruppo doveva scegliere il capitolato per il progetto, più tardi mi sono presentato all'evento *Stage IT 2023* dove sono andato a conoscenza dei loro progetti. 
Ho scelto questo progetto perché volevo cimentarmi in qualcosa di nuovo ma allo stesso tempo cercavo uno stage legato allo sviluppo mobile o web; quindi, ciò che mi ha spinto a scegliere questo progetto è stata la possibilità di lavorare con tecnologie mobile _Android_ e la possibilità di lavorare con un servizio remoto.
https://github.com/yonatanmgr/university-notes
https://raw.githubusercontent.com/yonatanmgr/university-notes/main/0366-%5BMath%5D/03661101-%5BCalculus%201A%5D/src/lectures/03661101_lecture_6.typ
typst
#import "/0366-[Math]/globals/template.typ": * #show: project.with( title: "חדו״א 1א׳ - שיעור 6", authors: ("<NAME>",), date: "16 בינואר, 2024", ) #set enum(numbering: "(1.א)") == מבחני התכנסות === (משפט) מבחן השורש הכללי תהי סדרה $an >= 0 forall n in NN$ ו-$exists 0 <= alpha < 1$ ו-$exists n_0 in NN$ כך ש-$an ^(1/n) <= alpha forall n > n_0$. אזי $exists liminff(an) = 0$. ==== הוכחה $0 <= an <= alpha^n, forall n > n_0$. מכיוון ש-$alpha^n$ שואפת ל-$0$ וכך גם הסדרה הקבועה $0$, גם $an$ שואפת ל-$0$ (כלל הסנדוויץ׳). #QED ==== שאלה תהי סדרה $an >= 0 forall n in NN$ המקיימת $an ^(1/n)<1 forall n in NN$. האם הסדרה $(an)$ בהכרח מתכנסת? #underline[לא]. למשל, הסדרה: $ an: 1/2, 1/3, 1/2, 1/3 dots $ ואז $an ^(1/n) < 1 forall n in NN$, אך אין גבול $liminff(an)$! === (משפט) מבחן השורש הגבולי תהי סדרה $an >= 0 forall n in NN$ ונניח כי $exists liminff(an^(1/n)) = P$. אזי: + אם $P<1$ #math.arrow.l.double $exists liminff(an) = 0$. + אם $P>1$ #math.arrow.l.double $liminff(an) = +oo$. ==== הוכחה + אם $P<1$, נבחר $0<epsilon<1-P$ $arrl$ $P + epsilon<1$. אז מהגדרת הגבול: $exists n_epsilon in NN: abs(an^(1/n)-P)<epsilon forall n > n_epsilon$. ואז, בפרט, $an^(1/n) < P+epsilon <1$. מהמשפט הקודם ($alpha = P+epsilon$), סיימנו. + אם $P>1$, נבחר $0< epsilon < P-1$ $arrl$ $1< P-epsilon$. מהגדרת הגבול: $exists n_epsilon in NN: abs(an^(1/n)-P)<epsilon forall n > n_epsilon$. בפרט, $1<P-epsilon<an^(1/n) forall n > n_epsilon$. ואז, $1<(P-epsilon)^n<an forall n > n_epsilon$. #QED === (משפט) מבחן המנה הגבולי תהי $an > 0 forall n in NN$ כך ש-$liminff(frac(a_(n+1), an))=L$. + אם $L<1$ $arrl$ $lim an = 0$. + אם $L>1$ $arrl$ $lim a_n = +oo$. === (משפט) מבחן המנה הכללי + נתונה סדרה חיובית $(an)$ ונתונים $n_0 in NN$ ו-$L<1$ כך ש-$a_(n+1)<L a_n forall n > n_0$. אזי $lim a_n =0$. + נתונה סדרה חיובית $(an)$ ונתונים $n_0 in NN$ ו-$L>1$ כך ש-$a_(n+1)>L a_n forall n > n_0$. אזי $lim a_n =+oo$. 
#pagebreak() = סדרות מונוטוניות == הגדרה - סדרה $(an)$ נקראת *מונוטונית עולה* אם $a_(n+1) >= a_n forall n in NN$, ונקראת *מונוטונית יורדת* אם $a_(n+1) <= a_n forall n in NN$. - אם מתקיים $a_(n+1) > a_n forall n in NN$ אז הסדרה *עולה ממש*, ואם $a_(n+1) < a_n forall n in NN$ אז הסדרה *יורדת ממש*. == משפטים === (משפט) תהי $(an)$ סדרה מונוטונית עולה וחסומה מלמעלה. אזי, $exists lim an=sup an$. אם $(an)$ מונוטונית יורדת וחסומה מלמטה, אז $exists lim an = inf an$. ==== הוכחה נוכיח את המקרה הראשון (השני זהה); נסמן $L = sup an$ ונוכיח $lim an = L$. יהי $epsilon > 0$ שרירותי. קיים $n_epsilon in NN$ כך ש- $ overbrace(L-epsilon <, L=sup an) underbrace(a_n_epsilon <=, forall n > n_epsilon) overbrace(a_n <= L, sup an = L) underbrace(< L + epsilon, epsilon>0) $ כלומר הוכחנו: $ forall epsilon >0 exists n_epsilon in NN: L-epsilon < a_n < L+epsilon, forall n > n_epsilon $ #QED === דוגמה נתונה סדרה $an = (1-1/2) dot (1-1/2^2) dot dots dot (1-1/2^n)$. האם היא מתכנסת? ==== פתרון $a_(n+1)=overbracket((1-1/2) dot dots dot (1-1/2^n), an) dot (1-1/2^(n+1)) <= an forall n in NN$ ואז הסדרה מונ׳ יורדת. מצד שני, $0<an forall n in NN$, כלומר הסדרה חסומה מלמטה, ואז לפי המשפט הקודם $exists lim an$. === דוגמה נתונה סדרה המוגדרת ע״י $a_1 = sqrt(6)$ ו-$a_(n+1) = sqrt(6 + a_n) forall n in NN$. צ״ל כי $exists lim an$ ולחשב אותו. ==== פתרון - נוכיח באינדוקציה כי הסדרה עולה (ברור ש-$an >= 0 forall n in NN$). + $a_2 >= a_1$: $a_2 = sqrt(6+a_1)=sqrt(6+sqrt(6))>=sqrt(6)=a_1$. + נתון $a_n >= a_(n-1)$ ואז $a_(n+1) = sqrt(6+an) >= sqrt(6+a_(n-1)) = an$. - נוכיח באינדוקציה כי הסדרה חסומה מלמעלה. + $a_1 <= 3$: $a_1 = sqrt(6) <=3$. + נתון $an <= 3$ ואז $a_(n+1) = sqrt(an + 6) <= 3$. לכן $exists lim an = a$. נחשב את $a$: מהמשפטים הקודמים $lim a_(n+1) = a$ וגם $lim sqrt(6+ an) = sqrt(6 + a)$. מכאן מתקיים $0<= a = sqrt(6+a)$. פתרונות המשוואה הריבועית $a^2 - a - 6 =0$ הם $a_1 = 3, a_2= -2$, כלומר $a=3$. #QED === (טענה) סדרה $(an)$ מונוטונית עולה ולא חסומה מלמעלה מתכנסת ל-$+oo$. 
בדומה, סדרה $(an)$ מונוטונית יורדת ולא חסומה מלמטה מתכנסת ל-$-oo$. #pagebreak() == סדרה השואפת ל-$e$ === (טענה) הסדרה $an = (1+ 1/n)^n$ מונוטונית עולה וחסומה. ==== הוכחה - עליה: $ an &= (1+ 1/n)^n = sum_(j = 0)^n binom(n,j)^(n-j) dot (1/n)^j = sum_(j = 0)^n frac(n!, j!(n-j)!) dot 1/n^j \ &= 1 + 1 + sum_(j = 2)^n frac(cancel((n-j)!)(n-j+1) dot dots dot n,j!cancel((n-j)!) dot (n dot n dot dots dot n)) \ &= 1+1+sum_(j = 2)^n 1/j! (frac(n-j+1, n) dot frac(n-j+2, n) dot dots dot frac(n-1, n) dot cancel(n/n)) \ &= 1+1+sum_(j = 2)^n 1/j! (1-1/n)(1-2/n)dots(1-frac(j-1,n)) \ &<= 1+1+sum_(j = 2)^n 1/j! (1-1/(n+1))(1-2/(n+1))dots(1-frac(j-1,n+1)) \ &<= 1+1+sum_(j = 2)^(n+1) 1/j! (1-1/(n+1))dots(1-frac(j-1,n+1)) = (1+frac(1, n+1))^(n+1) = a_(n+1) $ - חסימות: $ 2 &= a_1 <= a_n = (1+1/n)^n <= 1+1 + sum_(j = 2)^n 1/j! = 1+1+1/2+1/(2 dot 3)+dots + frac(1, 2 dot 3 dot dots dot n) \ & <= 1+ 1 + 1/2 + frac(1, 2 dot 2) + dots + frac(1, underbracket(2 dot 2 dot dots dot 2, "פעמים" n-1)) = 1 + 1 + frac(cancel(1/2)(1-(1/2)^(n-1)),cancel(1-1/2)) = 2+1-frac(1, 2^(n-1)) \ &= 3 -frac(1, 2^(n-1)) < 3 $ #QED === (מסקנה) הסדרה $an$ עולה וחסומה מלמעלה, לכן $exists lim (1+1/n)^n$. מסמנים את הגבול ב-$e$ ($2<=e<=3$). ==== הערה בהמשך הקורס רואים שמתקיים גם $e=liminff(sum_(k=1)^n 1/k!)$. ניתן לחשב $e=2.71828dots$. === (טענה) תהי $x_n-> +oo$ (או $x_n->-oo$). אזי, $liminff((1+1/x_n)^(x_n)) = e$.
https://github.com/typst/packages
https://raw.githubusercontent.com/typst/packages/main/packages/preview/unichar/0.1.0/ucd/block-E0000.typ
typst
Apache License 2.0
// Unicode block "Tags" (U+E0000 – U+E007F), generated data table.
// Entry i describes code point U+E0000 + i: `()` marks an unassigned code
// point; otherwise the tuple appears to be (character name, general
// category, combining class) as in the Unicode Character Database —
// TODO(review): confirm the field meanings against the unichar reader.
#let data = (
  (),
  ("LANGUAGE TAG", "Cf", 0),
  // U+E0002 .. U+E001F are unassigned (30 entries).
  (), (), (), (), (),
  (), (), (), (), (),
  (), (), (), (), (),
  (), (), (), (), (),
  (), (), (), (), (),
  (), (), (), (), (),
  ("TAG SPACE", "Cf", 0),
  ("TAG EXCLAMATION MARK", "Cf", 0),
  ("TAG QUOTATION MARK", "Cf", 0),
  ("TAG NUMBER SIGN", "Cf", 0),
  ("TAG DOLLAR SIGN", "Cf", 0),
  ("TAG PERCENT SIGN", "Cf", 0),
  ("TAG AMPERSAND", "Cf", 0),
  ("TAG APOSTROPHE", "Cf", 0),
  ("TAG LEFT PARENTHESIS", "Cf", 0),
  ("TAG RIGHT PARENTHESIS", "Cf", 0),
  ("TAG ASTERISK", "Cf", 0),
  ("TAG PLUS SIGN", "Cf", 0),
  ("TAG COMMA", "Cf", 0),
  ("TAG HYPHEN-MINUS", "Cf", 0),
  ("TAG FULL STOP", "Cf", 0),
  ("TAG SOLIDUS", "Cf", 0),
  ("TAG DIGIT ZERO", "Cf", 0),
  ("TAG DIGIT ONE", "Cf", 0),
  ("TAG DIGIT TWO", "Cf", 0),
  ("TAG DIGIT THREE", "Cf", 0),
  ("TAG DIGIT FOUR", "Cf", 0),
  ("TAG DIGIT FIVE", "Cf", 0),
  ("TAG DIGIT SIX", "Cf", 0),
  ("TAG DIGIT SEVEN", "Cf", 0),
  ("TAG DIGIT EIGHT", "Cf", 0),
  ("TAG DIGIT NINE", "Cf", 0),
  ("TAG COLON", "Cf", 0),
  ("TAG SEMICOLON", "Cf", 0),
  ("TAG LESS-THAN SIGN", "Cf", 0),
  ("TAG EQUALS SIGN", "Cf", 0),
  ("TAG GREATER-THAN SIGN", "Cf", 0),
  ("TAG QUESTION MARK", "Cf", 0),
  ("TAG COMMERCIAL AT", "Cf", 0),
  ("TAG LATIN CAPITAL LETTER A", "Cf", 0),
  ("TAG LATIN CAPITAL LETTER B", "Cf", 0),
  ("TAG LATIN CAPITAL LETTER C", "Cf", 0),
  ("TAG LATIN CAPITAL LETTER D", "Cf", 0),
  ("TAG LATIN CAPITAL LETTER E", "Cf", 0),
  ("TAG LATIN CAPITAL LETTER F", "Cf", 0),
  ("TAG LATIN CAPITAL LETTER G", "Cf", 0),
  ("TAG LATIN CAPITAL LETTER H", "Cf", 0),
  ("TAG LATIN CAPITAL LETTER I", "Cf", 0),
  ("TAG LATIN CAPITAL LETTER J", "Cf", 0),
  ("TAG LATIN CAPITAL LETTER K", "Cf", 0),
  ("TAG LATIN CAPITAL LETTER L", "Cf", 0),
  ("TAG LATIN CAPITAL LETTER M", "Cf", 0),
  ("TAG LATIN CAPITAL LETTER N", "Cf", 0),
  ("TAG LATIN CAPITAL LETTER O", "Cf", 0),
  ("TAG LATIN CAPITAL LETTER P", "Cf", 0),
  ("TAG LATIN CAPITAL LETTER Q", "Cf", 0),
  ("TAG LATIN CAPITAL LETTER R", "Cf", 0),
  ("TAG LATIN CAPITAL LETTER S", "Cf", 0),
  ("TAG LATIN CAPITAL LETTER T", "Cf", 0),
  ("TAG LATIN CAPITAL LETTER U", "Cf", 0),
  ("TAG LATIN CAPITAL LETTER V", "Cf", 0),
  ("TAG LATIN CAPITAL LETTER W", "Cf", 0),
  ("TAG LATIN CAPITAL LETTER X", "Cf", 0),
  ("TAG LATIN CAPITAL LETTER Y", "Cf", 0),
  ("TAG LATIN CAPITAL LETTER Z", "Cf", 0),
  ("TAG LEFT SQUARE BRACKET", "Cf", 0),
  ("TAG REVERSE SOLIDUS", "Cf", 0),
  ("TAG RIGHT SQUARE BRACKET", "Cf", 0),
  ("TAG CIRCUMFLEX ACCENT", "Cf", 0),
  ("TAG LOW LINE", "Cf", 0),
  ("TAG GRAVE ACCENT", "Cf", 0),
  ("TAG LATIN SMALL LETTER A", "Cf", 0),
  ("TAG LATIN SMALL LETTER B", "Cf", 0),
  ("TAG LATIN SMALL LETTER C", "Cf", 0),
  ("TAG LATIN SMALL LETTER D", "Cf", 0),
  ("TAG LATIN SMALL LETTER E", "Cf", 0),
  ("TAG LATIN SMALL LETTER F", "Cf", 0),
  ("TAG LATIN SMALL LETTER G", "Cf", 0),
  ("TAG LATIN SMALL LETTER H", "Cf", 0),
  ("TAG LATIN SMALL LETTER I", "Cf", 0),
  ("TAG LATIN SMALL LETTER J", "Cf", 0),
  ("TAG LATIN SMALL LETTER K", "Cf", 0),
  ("TAG LATIN SMALL LETTER L", "Cf", 0),
  ("TAG LATIN SMALL LETTER M", "Cf", 0),
  ("TAG LATIN SMALL LETTER N", "Cf", 0),
  ("TAG LATIN SMALL LETTER O", "Cf", 0),
  ("TAG LATIN SMALL LETTER P", "Cf", 0),
  ("TAG LATIN SMALL LETTER Q", "Cf", 0),
  ("TAG LATIN SMALL LETTER R", "Cf", 0),
  ("TAG LATIN SMALL LETTER S", "Cf", 0),
  ("TAG LATIN SMALL LETTER T", "Cf", 0),
  ("TAG LATIN SMALL LETTER U", "Cf", 0),
  ("TAG LATIN SMALL LETTER V", "Cf", 0),
  ("TAG LATIN SMALL LETTER W", "Cf", 0),
  ("TAG LATIN SMALL LETTER X", "Cf", 0),
  ("TAG LATIN SMALL LETTER Y", "Cf", 0),
  ("TAG LATIN SMALL LETTER Z", "Cf", 0),
  ("TAG LEFT CURLY BRACKET", "Cf", 0),
  ("TAG VERTICAL LINE", "Cf", 0),
  ("TAG RIGHT CURLY BRACKET", "Cf", 0),
  ("TAG TILDE", "Cf", 0),
  ("CANCEL TAG", "Cf", 0),
)
https://github.com/k4zuy/Typst-Template-HTW
https://raw.githubusercontent.com/k4zuy/Typst-Template-HTW/main/common/acronyms.typ
typst
// source: https://github.com/typst/typst/issues/659

// Dictionary of known acronyms: short form -> long form.
#let acronyms = (
  NLP: "Natural Language Processing",
)

// State tracking which acronyms have already been introduced in the text.
#let usedAcronyms = state("usedDic", (:))

// Show an acronym. The first use renders "Long Form (SHORT)"; every later
// use renders just "SHORT". An acronym missing from `acronyms` renders a
// red warning box instead.
#let acro(body) = {
  if body not in acronyms {
    return rect(
      fill: red,
      inset: 8pt,
      radius: 4pt,
      [*Warning:\ #body*],
    )
  }
  // Render according to whether this acronym was used before...
  usedAcronyms.display(usedDic => {
    if body in usedDic {
      return body
    }
    return acronyms.at(body) + " (" + body + ")"
  })
  // ...then record it as used, so later occurrences stay short.
  usedAcronyms.update(usedDic => {
    usedDic.insert(body, true)
    return usedDic
  })
}

// NOTE(review): the block below is a commented-out alternative
// implementation (acrostiche-style helpers); kept as-is for reference.

// #import "@preview/acrostiche:0.3.1": *
// #init-acronyms((
//   "NN": ("Neural Network"),
//   "OS": ("Operating System",),
//   "BIOS": ("Basic Input/Output System", "Basic Input/Output Systems"),
// ))

// Dictionary with acronyms
// Acronym is key, value is ("long version", "long version plural ending", "short version plural ending")
// Plural endings are optional, "s" is default
// Values have to be at least ("",) where the comma is important

// #let acronyms = (
//   API: ("Application Programming Interface", "s", "s"),
//   PEBKAC: ("Problem Exists Between Keyboard And Chair", "", ""),
//   acro: ("acronym",)
// )

// // The state which tracks the used acronyms
// #let usedAcronyms = state("usedDic", (empty: false,))

// // Check if acronym is in the acronym list
// #let acronym_exists(ac) = {
//   return acronyms.keys().contains(ac)
// }

// // The acronym itself
// #let acronym_short(ac) = {
//   return ac
// }

// // The acronyms meaning
// #let acronym_long(ac) = {
//   return acronyms.at(ac).at(0)
// }

// // The acronym in its plural
// #let acronym_short_plural(ac) = {
//   if (acronyms.at(ac).len() > 2) {
//     return acronym_short(ac) + acronyms.at(ac).at(2)
//   }
//   return acronym_short(ac) + "s"
// }

// // The acronyms meaning in plural
// #let acronym_long_plural(ac) = {
//   if (acronyms.at(ac).len() > 1) {
//     return acronym_long(ac) + acronyms.at(ac).at(1)
//   }
//   return acronym_long(ac) + "s"
// }

// // The acronyms meaning with its short form appended
// // Used to introduce an acronym
// #let acronym_full(ac) = {
//   return acronym_long(ac) + " (" + acronym_short(ac) + ")"
// }

// // The acronyms meaning with its short form appended, both in plural
// // Used to introduce an acronym
// #let acronym_full_plural(ac) = {
//   return acronym_long_plural(ac) + " (" + acronym_short_plural(ac) + ")"
// }

// // Error message to alert for undeclared acronym usage
// #let acronym_error(ac) = {
//   if(acronyms.keys().contains(ac) == false) {
//     return rect(
//       fill: red,
//       inset: 8pt,
//       radius: 4pt,
//       [*Error*: Acronym *#ac* not found!],
//     )
//   }
// }

// // To mark an acronym as used in text
// // Following usages of ac or acp will not provide the long version anymore
// #let declare_acronym_used(ac) = {
//   usedAcronyms.update(usedDic => {
//     usedDic.insert(ac, true)
//     return usedDic
//   })
// }

// // The function which either shows the acronym or the full text for it
// #let ac(ac) = {
//   if (not acronym_exists(ac)) {
//     return acronym_error(ac)
//   }
//   usedAcronyms.display(usedDic => {
//     if(usedDic.keys().contains(ac)) {
//       return acronym_short(ac)
//     }
//     return acronym_full(ac)
//   });
//   declare_acronym_used(ac)
// }

// // The function which either shows the acronym or the full text for it in plural
// #let acp(ac) = {
//   if (not acronym_exists(ac)) {
//     return acronym_error(ac)
//   }
//   usedAcronyms.display(usedDic => {
//     if(usedDic.keys().contains(ac)) {
//       return acronym_short_plural(ac)
//     }
//     return acronym_full_plural(ac)
//   });
//   declare_acronym_used(ac)
// }

// // The acronym itself
// #let acs(ac) = {
//   if (not acronym_exists(ac)) {
//     return acronym_error(ac)
//   }
//   usedAcronyms.display(usedDic => {
//     return acronym_short(ac)
//   });
//   declare_acronym_used(ac)
// }

// // The acronym itself in plural
// #let acsp(ac) = {
//   if (not acronym_exists(ac)) {
//     return acronym_error(ac)
//   }
//   usedAcronyms.display(usedDic => {
//     return acronym_short_plural(ac)
//   });
//   declare_acronym_used(ac)
// }

// // The acronyms meaning
// #let acl(ac) = {
//   if (not acronym_exists(ac)) {
//     return acronym_error(ac)
//   }
//   usedAcronyms.display(usedDic => {
//     return acronym_long(ac)
//   });
//   declare_acronym_used(ac)
// }

// // The acronyms meaning in plural
// #let aclp(ac) = {
//   if (not acronym_exists(ac)) {
//     return acronym_error(ac)
//   }
//   usedAcronyms.display(usedDic => {
//     return acronym_long_plural(ac)
//   });
//   declare_acronym_used(ac)
// }

// // Print the acronym register/listing
// // Only used acronyms will be printed
// #let print_acronym_listing(outline_name) = {
//   locate(loc => if (usedAcronyms.final(loc).len() > 1) {
//     [
//       // Acronym register title
//       #v(1em)
//       #heading(level: 1, numbering: none, outline_name)
//       #v(0.5em)
//       // Acronym register content
//       #text(size: 1em, locate(loc => usedAcronyms.final(loc)
//         .pairs()
//         .filter(x => x.last())
//         .map(pair => pair.first())
//         .sorted()
//         .map(key => grid(
//           columns: (1fr, auto),
//           gutter: 1em,
//           strong(key), acronyms.at(key).at(0)
//         )).join()))
//     ]
//   })
// }
https://github.com/dogeystamp/typst-templates
https://raw.githubusercontent.com/dogeystamp/typst-templates/master/problems.typ
typst
The Unlicense
// templates for compsci problem documents outside of contests #import "main.typ": gen_preamble, doc_template, mono_font, lref, source_code, status #let template( title: none, authors: none, problem_url: none, stat: "incomplete", body ) = { doc_template(title: title, { gen_preamble( title: title, authors: authors, prefix: status(stat: stat), suffix: { if (problem_url != none) { [#link(problem_url)] } } ) body }) }
https://github.com/EpicEricEE/typst-marge
https://raw.githubusercontent.com/EpicEricEE/typst-marge/main/src/validate.typ
typst
MIT License
/// Construct an error message for invalid parameters. #let invalid(name, expected, provided) = { let repr = el => { if type(el) == type { repr(el) } else { "`" + repr(el) + "`" } } let invalid-type = false let expected = if type(expected) == array { invalid-type = type(expected.at(0, default: none)) == type expected.map(repr).join(", ", last: " or ") } else { invalid-type = type(expected) == type repr(expected) } if " " not in name { name = "`" + name + "`" } "invalid " + name + ": " "expected " + expected + ", " "got " + if invalid-type { repr(type(provided)) } else { repr(provided) } } /// Validate the `side` parameter of the `sidenote` function. #let validate-side(side) = { let expected = ( "outside", "inside", "start", "end", "left", "right", start, end, left, right, auto ) assert(side in expected, message: invalid("side", expected, side)) } /// Validate the `dy` parameter of the `sidenote` function. #let validate-dy(dy) = { let expected = (length, ratio, relative) assert(type(dy) in expected, message: invalid("dy", expected, dy)) } /// Validate the `padding` parameter of the `sidenote` function. 
#let validate-padding(padding) = {
  let allowed-types = (length, dictionary)
  assert(
    type(padding) in allowed-types,
    message: invalid("padding", allowed-types, padding),
  )
  // A plain length needs no further checks.
  if type(padding) != dictionary { return }

  // Exactly one of the three key families may be used in the dictionary.
  let families = (
    ("inside", "outside"),
    ("start", "end"),
    ("left", "right"),
  )
  let used = families.filter(pair => pair.any(key => key in padding))
  assert(
    used.len() == 1,
    message: {
      "invalid `padding`: "
      "either use `start`/`end`, `left`/`right` or `inside`/`outside`"
    },
  )

  let allowed-keys = ("inside", "outside", "start", "end", "left", "right")
  for key in padding.keys() {
    assert(
      key in allowed-keys,
      message: invalid("`padding` key", allowed-keys, key),
    )
  }
  for value in padding.values() {
    assert(type(value) == length, message: invalid("padding", length, value))
  }
}

/// Validate the `gap` parameter of the `sidenote` function.
#let validate-gap(gap) = {
  let allowed = (length, type(none))
  assert(type(gap) in allowed, message: invalid("gap", allowed, gap))
}

/// Validate the `numbering` parameter of the `sidenote` function.
#let validate-numbering(numbering) = {
  let allowed = (str, function, type(none))
  assert(
    type(numbering) in allowed,
    message: invalid("numbering", allowed, numbering),
  )
}

/// Validate the `counter` parameter of the `sidenote` function.
#let validate-counter(counter_) = {
  assert(
    type(counter_) == counter,
    message: invalid("counter", counter, counter_),
  )
}

/// Validate the `format` parameter of the `sidenote` function.
#let validate-format(format) = {
  assert(type(format) == function, message: invalid("format", function, format))
}

/// Validate all parameters of the `sidenote` function.
#let validate(..parameters) = {
  parameters = parameters.named()

  // Every known key must be present, and nothing else may be passed.
  let known = (
    "side", "dy", "padding", "gap", "numbering", "counter", "format", "body",
  )
  for key in known {
    assert(key in parameters, message: "missing parameter: " + key)
  }
  for key in parameters.keys() {
    assert(key in known, message: "unexpected parameter: " + key)
  }

  // Delegate to the per-parameter validators (`body` is accepted as-is).
  validate-side(parameters.at("side"))
  validate-dy(parameters.at("dy"))
  validate-padding(parameters.at("padding"))
  validate-gap(parameters.at("gap"))
  validate-numbering(parameters.at("numbering"))
  validate-counter(parameters.at("counter"))
  validate-format(parameters.at("format"))
}
https://github.com/cyx2015s/PhyLabReportTemplateTypst
https://raw.githubusercontent.com/cyx2015s/PhyLabReportTemplateTypst/main/README.md
markdown
# PhyLabReportTemplateTypst

普通物理实验Ⅱ的混合实验报告Typst模板

## 介绍

对普通物理实验Ⅱ的混合实验报告Word模板的Typst复刻,供希望使用Typst写报告的同学使用。

## 使用

将模板文件放置在任意能够引用到的位置,此处以放置在同一文件夹下为例

```
PhyReport/
├── main.typ
└── phylab.typ
```

在`main.typ`中输入:

```typst
#import "phylab.typ": phylab

#show: phylab.with(
  name: "这是一个实验名称",
  instructor: "指导教师姓名",
  class: "混合2301",
  author: "张三",
  author-id: "1234567890",
  date: datetime(year: 2025, month: 1, day: 1),
  week: "第114周",
  am-pm: "上午",
)

// 此处填写正文,如:

= 实验综述
(自述实验背景和原理,不超过 300 字,20 分)

= 实验内容
(数据处理、结果与误差分析、实验结论,50 分)

= 实验拓展
(解答‘拓展题’,一般不超过 1000 字,30 分)

= 参考文献
(标注引用并自拟二级标题)
```

亦可以直接更改`phylab.typ`中`phylab`函数参数中部分较为固定的内容,如姓名、学号、班级等等。
https://github.com/AOx0/expo-nosql
https://raw.githubusercontent.com/AOx0/expo-nosql/main/README.md
markdown
MIT License
# Slides in Typst This is a template for creating slides in [Typst](https://typst.app/). [![Book badge](https://img.shields.io/badge/docs-book-green)](https://andreaskroepelin.github.io/typst-slides/book) ![GitHub](https://img.shields.io/github/license/andreasKroepelin/typst-slides) ![GitHub release (latest by date)](https://img.shields.io/github/v/release/andreasKroepelin/typst-slides) [![Demo badge](https://img.shields.io/badge/demo-pdf-blue)](https://github.com/andreasKroepelin/typst-slides/releases/latest/download/demo.pdf) ## Quickstart ```typ #import "slides.typ": * #show: slides.with( authors: "Names of author(s)", short-authors: "Shorter author for slide footer", title: "Title of the presentation", subtitle: "Subtitle of the presentation", short-title: "Shorter title for slide footer", date: "March 2023", ) #set text(font: "Inria Sans", size: 25pt) #slide(theme-variant: "title slide") #new-section("My section name") #slide(title: "A boring static slide")[ Some boring static text. #lorem(20) ] #slide[ A fancy dynamic slide without a title. #uncover("2-")[This appears later!] ] #slide(theme-variant: "wake up")[ Focus! ] #new-section("Conclusion") #slide(title: "Take home message")[ Read the book! Try it out! Create themes! ] ``` This code produces these PDF pages: ![title slide](assets/simple.png) As you can see, creating slides is as simple as using the `#slide` function. You can also use different [themes](https://andreaskroepelin.github.io/typst-slides/book/theme-gallery/index.html) (contributions welcome if you happen to [create your own](https://andreaskroepelin.github.io/typst-slides/book/themes.html#create-your-own-theme)!) For dynamic content, the template also provides [a convenient API for complex overlays](https://andreaskroepelin.github.io/typst-slides/book/dynamic.html). 
Visit the [book](https://andreaskroepelin.github.io/typst-slides/book) for more details or take a look at the [demo PDF](https://github.com/andreasKroepelin/typst-slides/releases/latest/download/demo.pdf) where you can see the features of this template in action. **⚠ This template is in active development. While I try to make sure that the `main`-branch always is in a usable state, there are no compatibility guarantees!**
https://github.com/TypstApp-team/typst
https://raw.githubusercontent.com/TypstApp-team/typst/master/tests/typ/bugs/line-align.typ
typst
Apache License 2.0
// Test right-aligning a line and a rectangle. --- #align(right, line(length: 30%)) #align(right, rect())