Dataset columns: repo (string), file (string), language (2 classes), license (16 classes), content (string).
https://github.com/sthenic/technogram
https://raw.githubusercontent.com/sthenic/technogram/main/src/raw-links.typ
typst
MIT License
/* Return content where any markers in the `text` have been replaced with link objects. If no markers exist, `default` is returned. */ #let _markers-to-links(raw-text, default) = { let matches = raw-text.matches(regex("jdztDE(\w+)zRVeVY")) if matches.len() > 0 { let content = [] let index = 0 for match in matches { if index < match.start { content += raw-text.slice(index, match.start) } let link-text = match.text .replace("jdztDE", "") .replace("IbXRuT", "::") .replace("zRVeVY", "") let split = link-text.split("::") if split.len() > 1 { content += link(label(link-text), text(hyphenate: true, split.at(1))) } else { content += link(label(link-text), text(hyphenate: true, link-text)) } index = match.end } /* Copy any remainder */ if index < raw-text.len() { content += raw-text.slice(index) } content } else { /* TODO: Use text(hyphenate: true) here too? Could be nice. */ default } } /* Hook into `raw` to replace special matching text with custom markers. */ #let format-raw(it) = { if it.at("label", default: none) == <technogram-modified-raw> { it } else { /* Check for identifiers and scoped parameters with a matching link in the document. These get marked with a set of random letters to preserve the identifier through the syntax highlighting stage (mostly affects `::`). We have to be careful to only consider unique replacements because the same term may occur multiple times within the text. Moreover, we have to do this in two phases since the first part of a scoped link is a linkable object on its own but, when followed by `::`, should be considered together with the next part. */ let text-with-markers = it.text let seen = () for match in text-with-markers.matches(regex("\w+")) { if ( it.text.at(match.end, default: none) != ":" and query(label(match.text)).len() > 0 and match.text not in seen ) { text-with-markers = text-with-markers .replace(match.text, "jdztDE" + match.text + "zRVeVY") seen.push(match.text) } } for match in text-with-markers.matches(regex("\w+::\w+")) { if query(label(match.text)).len() > 0 and match.text not in seen { text-with-markers = text-with-markers .replace(match.text, "jdztDE" + match.text.replace("::", "IbXRuT") + "zRVeVY") seen.push(match.text) } } [#raw( text-with-markers, block: it.block, lang: it.lang, align: it.align, tab-size: it.tab-size, )<technogram-modified-raw>] } } /* Hook into `raw.line` to replace custom markers with links. */ #let format-raw-line(it) = { /* TODO: Only do this for lang c/cpp? */ if it.at("label", default: none) == <technogram-modified-raw-line> { it } else { let content = [] if it.body.has("children") { /* Array of content */ for c in it.body.children { if c.has("child") { content += _markers-to-links(c.child.text, c) } else if c.has("text") { content += _markers-to-links(c.text, c) } else { content += c } } } else if it.body.has("text") { /* Simple text */ content += _markers-to-links(it.body.text, it.body) } else { content += it.body } [#raw.line( it.number, it.count, it.text, content, )<technogram-modified-raw-line>] } }
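A minimal usage sketch (an assumption, not shown in this file): the comments describe `format-raw` and `format-raw-line` as hooks into `raw` and `raw.line`, so a document using them would presumably install them as show rules, along the lines of:

```typst
// Hypothetical wiring of the hooks above; the import path is illustrative.
#import "raw-links.typ": format-raw, format-raw-line

// Route every raw block through the marker pass, then turn the markers
// back into links as each highlighted line is laid out.
#show raw: format-raw
#show raw.line: format-raw-line
```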
https://github.com/essmehdi/ensias-report-template
https://raw.githubusercontent.com/essmehdi/ensias-report-template/master/lib.typ
typst
MIT License
#let IMAGE_BOX_MAX_WIDTH = 120pt #let IMAGE_BOX_MAX_HEIGHT = 50pt #let project(title: "", subtitle: none, school-logo: none, company-logo: none, authors: (), mentors: (), jury: (), branch: none, academic-year: none, french: false, footer-text: "ENSIAS", body) = { // Set the document's basic properties. set document(author: authors, title: title) set page( numbering: "1", number-align: center, footer: locate(loc => { // Omit page number on the first page let page-number = counter(page).at(loc).at(0); if page-number > 1 { line(length: 100%, stroke: 0.5pt) v(-2pt) text(size: 12pt, weight: "regular")[ #footer-text #h(1fr) #page-number #h(1fr) #academic-year ] } }) ) let dict = json("resources/i18n/en.json") let lang = "en" if french { dict = json("resources/i18n/fr.json") lang = "fr" } set text(font: "Linux Libertine", lang: lang, size: 13pt) set heading(numbering: "1.1") show heading: it => { if it.level == 1 and it.numbering != none { pagebreak() v(40pt) text(size: 30pt)[#dict.chapter #counter(heading).display() #linebreak() #it.body ] v(60pt) } else { v(5pt) [#it] v(12pt) } } block[ #box(height: IMAGE_BOX_MAX_HEIGHT, width: IMAGE_BOX_MAX_WIDTH)[ #align(left + horizon)[ #company-logo ] ] #h(1fr) #box(height: IMAGE_BOX_MAX_HEIGHT, width: IMAGE_BOX_MAX_WIDTH)[ #align(right + horizon)[ #if school-logo == none { image("images/ENSIAS.svg") } else { school-logo } ] ] ] // Title box align(center + horizon)[ #if subtitle != none { text(size: 14pt, tracking: 2pt)[ #smallcaps[ #subtitle ] ] } #line(length: 100%, stroke: 0.5pt) #text(size: 20pt, weight: "bold")[#title] #line(length: 100%, stroke: 0.5pt) ] // Credits box() h(1fr) grid( columns: (auto, 1fr, auto), [ // Authors #if authors.len() > 0 { [ #text(weight: "bold")[ #if authors.len() > 1 { dict.author_plural } else { dict.author } #linebreak() ] #for author in authors { [#author #linebreak()] } ] } ], [ // Mentor #if mentors != none and mentors.len() > 0 { align(right)[ #text(weight: "bold")[ #if mentors.len() > 1 { dict.mentor_plural } else { dict.mentor } #linebreak() ] #for mentor in mentors { mentor linebreak() } ] } // Jury #if jury != none and jury.len() > 0 { align(right)[ *#dict.jury* #linebreak() #for prof in jury { [#prof #linebreak()] } ] } ] ) align(center + bottom)[ #if branch != none { branch linebreak() } #if academic-year != none { [#dict.academic_year: #academic-year] } ] pagebreak() // Table of contents. outline(depth: 3, indent: true) pagebreak() // Table of figures. outline( title: dict.figures_table, target: figure.where(kind: image) ) pagebreak() outline( title: dict.tables_table, target: figure.where(kind: table) ) pagebreak() // Main body. body }
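A hedged usage sketch of this template (the argument values are invented for illustration; the parameter names come from the `project` signature above, and the repo's `resources/i18n` JSON files are assumed to be in place):

```typst
#import "lib.typ": project

#show: project.with(
  title: "My Report",
  subtitle: "Internship report",   // optional, may be none
  authors: ("Jane Doe",),
  mentors: ("John Smith",),
  jury: ("Prof. X",),
  branch: "Software Engineering",
  academic-year: "2023-2024",
  french: false,                   // true switches strings to fr.json
)

= Introduction
Body text goes here; level-1 headings get chapter-style pages.
```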
https://github.com/adrianvillanueva997/cv
https://raw.githubusercontent.com/adrianvillanueva997/cv/main/modules_en/skills.typ
typst
// Imports #import "@preview/brilliant-cv:2.0.2": cvSection, cvSkill, hBar #let metadata = toml("../metadata.toml") #let cvSection = cvSection.with(metadata: metadata) #cvSection("Skills") #cvSkill( type: [Langs], info: [English, Spanish], ) #cvSkill( type: [Prog Langs], info: [Python, Java, Go, Rust, NodeJS, C, HTML/CSS/JavaScript], ) #cvSkill( type: [DBs], info: [SQL Server, MySQL, PostgreSQL, MongoDB, AWS Athena, Cassandra, Redis], ) #cvSkill( type: [DevOps], info: [Docker, Kubernetes, Terraform, Terragrunt, ArgoCD, Git, CI/CD, GitHub Actions, Prometheus, Grafana], ) #cvSkill( type: [Data Eng], info: [PySpark, Pandas, Airflow, Databricks, DBT, Kafka, FastAPI], ) #cvSkill( type: [Cloud], info: [AWS, GCP, Azure], ) #cvSkill( type: [Interests], info: [Hiking, Reading, Traveling, Photography, Cooking, Music, Programming, Gaming, Gardening, Cats], )
https://github.com/barddust/Kuafu
https://raw.githubusercontent.com/barddust/Kuafu/main/src/Analysis/natural.typ
typst
#import "/mathenv.typ": * = Natural Numbers Now we are about to rebuild the whole numerical system. To begin, we have to forget the most of we knowledge about natural numbers. However, we still need some cardinally basic concepts and experience about natural numbers, just to judge if the system is of use. In theory, we can construct any kind of mathematical system. We can claim that one plus one equals, say 3, then what we need to do is changing the rules accordingly, like $1+1+1=4$, $1+1+1+1=5$, and so on. In this case, there is no position for $2$ anymore, since $2$ means $1+!$ before, now replaced by $3$. One disputing question during my high school is where the natural numbers start from. There are two viewpoints, either from 1 or 0. It is some kind of tradition? In fact, no matter where the naturals starts, the rules of naturals still hold. We may talk later. By the thought of deductive reasoning, we need to begin with some axioms, which are true in theory and not needed to be proved. Here come the Peano axioms. == Convention Before the formal reasoning, we would like to give special notations for some sets. - $NN$, the set of natural numbers - $ZZ$, the set of integers - $QQ$, the set of rational numbers - $RR$, the set of real numbers - $CC$, the set of complex numbers Some of these may not be used, like $CC$, the others will show up in future. == The Peano Axioms #axiom[ $0$ is a natural number; i.e., $ 0 in NN $ ] #remark[ $0$ is the beginning of the natural numbers. ] #axiom[ If $n$ is a natural number, then its successor, denoted as $n'$, is also a natural number; i.e., $ forall n (n in NN => n' in NN) $ ] #remark[ This axiom give two information: (1) Every natural numbers has one successor; (2) Successors are both natural numbers, too. So what are successors exactly? Basically, it is the result of adding one, intuitively the later one. Say $3$, its successor is $4$. Successors implies a relation of reasoning. And we can tell the beginning of natural numbers but not the end, that is why we use successor not a predecessor, the former one. $0$ has no such former one. Some books might use other notations, like $n++$, like the post-increment in computer language such as C. Another notation is like $S(n)$, since successor is a single-valued function. ] #axiom[ $0$ is not the successor of any natural number; i.e., $ forall n in NN (n' != 0) $ ] #axiom[ Different natural numbers have different successors; i.e., $ forall n,m in N (n != m => n' != m') $ ] #axiom(name: "Principle of mathematical induction")[ Let $P(n)$ be a property or proposition pertaining to a natural number $n$. If the following two statements are both true: + $P(0)$; + If $P(k)$ is true for any natural number $k$, then $P(n')$ is also true, then we have that $P(n)$ is true for every natural numbers. The description by logic is like: $ (P(0) and forall k in NN(P(k) => P(k'))) => forall n in NN(P(n)) $ ] #remark[ This axiom describes a series of events, for example, nuclear fission, dominoes. If one thing happens, then the next thing must happen. This tells the continuity of events. And $P(0)$, that is the first thing, happens just lighting up the fire, then $P(1)$ happens, furthermore, $P(3)$, and so do the others. ] == Addition #definition(name: "Addition of natural numbers")[ Let $n,m$ be natural numbers. We define: - $0 + m := m$; - $n' + m := (n+m)'$. 
] #lemma[ Let $n,m$ be natural numbers, - $n + 0 = n$ - $n + m' = (n+m)'$ ] #proof[ #pf(num:1)[ To prove by induction, we need to show that: (1) the base case, that is $P(0)$, is true; and (2) the continuity holds, i.e., for any given $n$, supposing $P(n)$ is true, $P(n')$ is also true. ] So, consider the case when $n=0$: we have $0_1 + 0_2 = 0_2$, where we mark the two zeros to trace where the $0$ on the right side comes from. We assume that $n + 0 = n$ is true. Now let us see whether $n' + 0 = n'$ is true. $ n' + 0 = (n+0)' = n' $ That is exactly what we want, and hence the proof is done. #pf(num:2)[ By Axiom 2.5, we can produce the proof of a proposition pertaining to a single natural number $n$. For the proposition to be proved above, we simply read it as a claim "for every natural number $m$", so that the statement concerns only one variable natural number $n$. We use induction on $n$, and the proof consequently holds for all natural numbers $m$. ] When $n=0$, we have $0 + m' = m'$. Assume that $k + m' = (k+m)'$ is true, and consider the case when $n = k'$. Note that we just use a different symbol here; it does not affect the proof at all. $ k' + m' = (k + m')' = ((k + m)')' = (k' + m)' $ The induction ends. ] #proposition(name: "Rules of addition")[ Let $n,m,l$ be natural numbers. Then $ n+m = m+n\ (n+m)+l = n + (m+l)\ n = m <=> n + l = m + l $ ] #proof[ #pf(num:1)[ Consider the case when $n=0$: then $0 + m = m = m + 0$ by the definition and Lemma 2.3.2. ] Assume that $k + m = m + k$ is true, and consider the case when $n=k'$. $ k' + m = (k+m)' = (m+k)' = m + k' $ by the definition and Lemma 2.3.2. The induction ends. #pf(num:2)[Consider the case when $n=0$: then $(0 + m) + l = m + l = 0 + (m + l)$ by Lemma 2.3.2.] Assume that $(k + m) + l = k + (m + l)$ is true, and consider the case when $n=k'$. $ (k' + m) + l &= (k + m)' + l \ &=((k+m) + l)' \ &=(k + (m + l))'\ &=k' + (m+l) $ The induction ends. #pf(num:3)[We use induction on $l$. ] Now we show that $(n = m) => (n + l = m + l)$; the proof of the converse direction is similar. Consider the case when $l=0$: then $n + 0 = n = m = m + 0$. Assume that $(n = m) => (n + k = m + k)$ is true, and consider the case when $l=k'$. $ n + k' &= (n + k)' = (m + k)' = m + k' $ The induction ends. ] == Order of Natural Numbers Here we have to define the order of natural numbers first, for some proofs in the following sections. In higher-level number systems, we could define addition and multiplication at the beginning and then the ordering, so that we can state the rules connecting the ordering with addition and multiplication together. #definition(name: "Ordering of natural numbers")[ Let $n,m$ be natural numbers. - We say that $n$ is _greater than or equal to_ $m$, and write $n >= m$ or $m <= n$, iff $exists a in NN ( n = m + a)$; - We say that $n$ is (strictly) _greater than_ $m$, and write $n > m$ or $m < n$, iff $exists a in NN ( n = m + a and n != m)$. ] #example[ We say $8 >= 5$; that means there exists a natural number $a$, $3$ in this case, such that $8 = 5 + 3$. For $8 > 5$, one more condition is needed: $8 != 5$. ] #remark[ "$m$ is greater than $n$", written as $m > n$ or $n < m$, is also read "$n$ is less than $m$". Therefore, all rules pertaining to $<$ or $<=$ are also satisfied for $>$ and $>=$. ] #proposition(name: "A practical definition")[ $(n > m) <=> exists e in NN (n = m + e and e != 0)$ ] #proof[ These two definitions both claim that there exists some natural number $e$ such that $n = m + e$. 
The difference lies in the conditions $n != m$ and $e != 0$, whose equivalence is exactly what we should prove. #noin[_Proof for_ $n != m => e != 0$.] It is obvious by contradiction. Suppose that $e = 0$; then $n = m + e = m + 0 = m$, which is a contradiction. #noin[_Proof for_ $e != 0 => n != m$.] It is also obvious by contradiction. Suppose that $n = m$; then $n = m = m + e => e = 0$, which is a contradiction. In summary, we have that $n != m <=> e != 0$ whenever $n = m + e$ for some $e in NN$, and the proof ends. ] #remark[ For convenience, we sometimes write $NNN$ for the set of natural numbers without zero. Hence the above proposition can be expressed by $ (a > b) <=> exists e in NNN (a = b + e) $ ] #lemma[ $forall a,b in NN (a + b = 0 <=> (a = 0 and b = 0))$. ] #proof[ For contradiction, we assume that there exist $a,b in NN$ such that $a + b = 0$ and $a != 0 or b != 0$. #noin[ *Case 1*, one of $a$ and $b$ equals $0$, say $b = 0$. Then $0 = a + b = a + 0 = a$, a contradiction. ] #noin[ *Case 2*, neither $a$ nor $b$ equals $0$. Since $a != 0$, there exists a natural number $c$ such that $c' = a$. Then we have ] $ 0 = a + b = c' + b = (c + b)' $ This is a contradiction, since $0$ is not the successor of any natural number. In both cases we can deduce contradictions, and hence the original proposition is true. ] #proposition[ Let $a,b,c$ be natural numbers. Then + (Reflexive) $a >= a$. + (Transitive) $(a >= b) and (b >= c) => a >= c$. + (Anti-symmetric) $(a >= b) and (b >= a) => a = b$. + (Addition preserves order) $(a >= b) <=> (a + c >= b + c)$. + $a < b <=> a' <= b$ ] #proof[ #pf(num:1)[We prove by contradiction: assume the negation of $a >= a$, i.e., by definition, $forall e in NN ( a != a + e )$.] Consider $e = 0$: we have $ a + e = a + 0 = a$. This is a contradiction, and hence $a >= a$ is true. #noin[ #pf(num:2)[By definition,] ] $ a >= b &<=> exists e_1 in NN (a = b + e_1) \ b >= c &<=> exists e_2 in NN (b = c + e_2) $ and hence $ a = b + e_1 = c + e_2 + e_1 = c + (e_1 + e_2) $ Let $e = e_1 + e_2$; then $a = c + e$. By definition again, we have $a >= c$. #noin[ #pf(num:3)[For contradiction, we assume that $a >= b and b >= a and a != b$.] ] By definition, we have $ a >= b &<=> exists e_1 in NN ( a = b + e_1)\ b >= a &<=> exists e_2 in NN ( b = a + e_2) $ and hence $ a + 0 = a = b + e_1 = a + e_2 + e_1 $ By the cancellation law, we have $e_1 + e_2 = 0$. Then by Lemma 2.4.5 we have $e_1 = e_2 = 0$, i.e., $a = b$. #noin[ #pf(num:4)[By definition, there exists a natural number $e$ such that $a = b + e$. Then] ] $ a = b + e <=> a + c = b + e +c = (b + c) + e <=> a + c >= b + c $ #pf(num:5)[Proof of sufficiency.] For $a < b$, by the lemma we have $exists e_1 in NN (b = a + e_1) and e_1 != 0$. To prove by contradiction, we assume that $not(a' <= b)$, i.e., $forall e_2 in NN (b != a' + e_2)$. $ b = a + e_1 != a' + e_2 = (a + e_2)' = a + e_2' $ and hence $e_1 != e_2'$ for all $e_2 in NN$. This is a contradiction, since every non-zero natural number is the successor of some natural number. ] #lemma[ 1. $a > b => a' > b$; 2. $a = b => a' > b$; 3. $a < b => a' <= b$. ] #proof[ #pf(num:1)[ $a > b$ means $exists e in NNN ( a = b + e)$. Then $a' = (b + e)' = b + e'$. Since $e$ is positive, $e'$ is also positive; otherwise $e' = 0$, but $0$ is not the successor of any natural number. ] Therefore $a' = b + e'$ with $e' in NNN$, i.e., $a' > b$. #pf(num:2)[] $ a' = b' = (b + 0)' = b + 0' $ In other words, we have $a' = b + 0'$ with $0' in NNN$, i.e., $a' > b$. #pf(num:3)[ $a < b$ means $exists e in NNN (b = a + e)$. 
Since $e != 0$, there exists another natural number $f$ such that $f' = e$. Then ] $ b = a + f' = a' + f $ and hence $b >= a'$ by definition. ] #proposition(name: "Trichotomy of order for natural numbers")[ Let $a, b$ be natural numbers. Then exactly one of the following statements is true: (1) $a < b$; (2) $a = b$; (3) $a > b$. ] #proof[ We can combine these three statements into two: $a < b$, and $a >= b$. The steps of the proof are: + Exactly one of $a < b$ and $a >= b$ happens, which means: - At least one of them happens. This part of the proof is done by induction; - At most one of them happens, i.e., $(a < b) and (a >= b)$ is false. + If $a >= b$ happens, then exactly one of $a > b$ and $a = b$ happens: - At least one of them happens. - At most one of them happens. #noin[_Step 1_: at least one of $a < b$, $a = b$ and $a > b$ happens.] Fix $b$; we induct on $a$. When $a = 0$, we have $0 <= b$ for all $b$. Another induction on $b$ shows this claim. Suppose the proposition holds when $a = k$. There are three cases: (1) if $k > b$, then we have $k' > b$; (2) if $k = b$, then we have $k' > b$; (3) if $k < b$, then we have $k' <= b$. These statements have been proven in the former lemma. Therefore, at least one of $a < b$, $a = b$ and $a > b$ happens, by induction. #noin[ _Step 2_: $(a < b) and (a >= b)$ is false. ] $ (a < b) &<=> exists e_1 in NN (b = a + e_1) and e_1 != 0\ (a >= b) &<=> exists e_2 in NN (a = b + e_2) $ and hence $b = b + e_1 + e_2 => e_1 + e_2 = 0 => e_1 = 0 and e_2 = 0$. This is a contradiction, since $e_1 != 0$. #noin[ _Step 3_: if $a >= b$ happens, then $exists e in NN (a = b + e)$. We still need to show that $a > b$ and $a = b$ can be neither both true nor both false. ] Suppose they are both true; this is impossible by the definition of $a > b$, which requires $a != b$. Suppose they are both false; then $a != b$, so with $a = b + e$ this forces $e != 0$, hence $a > b$, which contradicts the assumption that $a > b$ is false. In summary, exactly one of $a > b$, $a = b$, and $a < b$ happens. ] == Multiplication #definition(name: "Multiplication of natural numbers")[ Let $n,m$ be natural numbers. We define: - $0 times m := 0$ - $n' times m := (n times m) + m$ ] #lemma[ Let $n,m$ be natural numbers, - $n times 0 = 0$ - $n times m' = (n times m) + n$ ] #proof[ #pf(num:1)[When $n=0$, $0 times 0 = 0$.] Suppose we have $k times 0 = 0$ when $n = k$; consider $ k' times 0 &= (k times 0) + 0\ &= 0 + 0\ &= 0 $ #pf(num:2)[We use induction on $n$. When $n = 0$, $0 times m' = 0$ by the definition.] Suppose we have $k times m' = (k times m) + k$ when $n = k$; consider $ k' times m' &= (k times m') + m'\ &= (k times m) + k + m'\ &= (k times m) + (k + m')\ &= (k times m) + (k +m)'\ &= (k times m) + k' + m\ &= (k times m) + m + k'\ &= (k' times m) + k' $ That is what we want. ] #remark[ The proof looks a little tedious; we may skip some of these steps most of the time. The reason we need such preciseness is to make the reasoning sensible and acceptable at every moment, and that is what we shall gain from Analysis. ] #remark[ For convenience, we sometimes omit the sign $times$ between two letters, or between a letter and a number. For instance, $a times b$ is usually written as $a b$, and $3 times c$ is written as $3c$. In other cases, we may also use a dot $dot.c$ to represent the multiplication; for example, $x times y$ is equivalent to $x dot.c y$. ] #lemma[ Let $a,b$ be natural numbers. If $a b = 0$, then $a=0 or b=0$. ] #proof[ To prove by contradiction, we assume that $a != 0$ and $b != 0$. 
Then there exists a natural number $e$ such that $e' = b$. $ a b = a dot.c e' = a e + a = 0 $ We obtain $a e = 0$ and $a = 0$, which contradicts the hypothesis $a != 0$. ] #proposition(name: "Rules of multiplication")[ Let $n,m,l$ be natural numbers. Then $ n times m = m times n\ (n + m) times l = n times l + m times l\ (n times m) times l = n times (m times l)\ n = m => n times l = m times l\ (l != 0 and n times l = m times l) => n = m $ ] #proof[ #pf(num:1)[Fix $m$, and use induction on $n$.] Let $n = 0$: we have $0 times m = 0 = m times 0$. Suppose $k times m = m times k$ is true; consider $ k' times m &= (k times m) + m\ &= (m times k) + m\ &= m times k' $ #pf(num:2)[We use induction on $l$.] Let $l = 0$: then $(n + m) times 0 = 0$ and $n times 0 + m times 0 = 0 + 0 = 0$. Therefore $(n+m) times 0 = n times 0 + m times 0$. Suppose $(n + m) times l = n times l + m times l$ is true; consider $ (n + m) times l' &= (n + m) times l + (n + m)\ &= (n times l) + (m times l) + n + m\ &= ((n times l) + n) + ((m times l) + m)\ &= (n times l') + (m times l') $ #pf(num:3)[We use induction on $n$.] Let $n = 0$: then $(0 times m) times l = 0 times l = 0$ and $ 0 times (m times l) = 0$, i.e., $(0 times m) times l = 0 times (m times l)$. Suppose $(k times m) times l = k times (m times l)$ is true; consider $ (k' times m) times l &= ((k times m) + m) times l\ &= (k times m) times l + m times l\ &= k times (m times l) + m times l\ &= k' times (m times l) $ The proofs are done. #pf(num:4)[Given $n = m$, we use induction on $l$.] When $l = 0$, $n times 0 = 0 = m times 0$, so the proposition is true. When $l = k$, suppose that $n times k = m times k$ is true. Consider $l = k'$; then $ n times k' &= n times k + n\ &= m times k + m\ &= m times k' $ The induction ends. #pf(num: 5)[Given $l != 0$ and $n times l = m times l $, we use induction on $l$.] Note that in the former parts we used induction from index $0$, as stated by Peano's Axiom 5, while here the index starts from $1$. Actually, induction can start from any natural number; the proposition is then asserted only from the chosen index onward. When $l = 1$: $n times 1 = n$ and $m times 1 = m$, so $n times 1 = m times 1$ gives $n = m$, and the proposition is true. When $l = k$, suppose that if $n times k = m times k$ then $n = m$. Consider $l = k'$: we need to show that $n times k' = m times k' => n = m$. To produce a contradiction, assume that $n != m$; in particular, $n > m$ without loss of generality. There exists $e in NNN$ such that $n = m + e$. $ n times k' &= m times k'\ (m + e) times k' &= m times k'\ m times k' + e times k' &= m times k'\ e times k' &= 0\ $ By the lemma, we have $e = 0$ or $k' = 0$. However, neither of them can happen, since $e in NNN$ and $0$ is not the successor of any natural number. The induction ends. ] #proposition(name: "Multiplication preserves order")[ Let $a,b,c$ be natural numbers, and $c != 0$. If $a < b$, then $a c < b c$. ] #proof[ By definition, there exists a non-zero natural number $e$ such that $b = a + e$. Then $ b c &= (a + e)c\ &=a c + e c $ Since $e != 0$ and $c != 0$, we have $e c != 0$, and hence $ a c < b c$. ] #proposition(name: "Successor is actually increment")[ $forall n in NN (n' = n + 1)$. ] #proof[ By the definition of multiplication, we have $n' dot m = n dot m + m$. Just let $m=1$; the left side is $ n' dot 1 = n' dot 0' = n' dot 0 + n' = 0 + n' = n' $ and the right side is $ n dot 1 + 1 = n dot 0' + 1 = n dot 0 + n + 1 = 0 + n + 1 = n + 1 $ and hence $n' = n + 1$. 
] #remark[ Of course, another way to prove this proposition is to use induction. ] == Exponentiation for natural numbers #definition[ Let $m$ be a natural number. We define: - $m^0 := 1$; - $m^(n') := m^n times m$. ] #proposition[ Let $m,n,l$ be natural numbers, - $m^(n) dot.c m^(l) = m^(n + l)$; - $(m^(n))^(l) = m^(n l)$. ] #proof[ For both propositions, we use induction on $l$. #noin[ #pf(num:1)[Let $l = 0$: we have] ] $ m^n dot m^0 &= m^n dot 1\ &= m^n dot 0'\ &= m^n dot 0 + m^n\ &= m^n\ &= m^(n+0) $ Suppose $m^n dot m^k = m^(n+k)$ is true; then $ m^n dot m^(k') &= m^n dot (m^k dot m)\ &= (m^n dot m^k) dot m\ &= m^(n+k) dot m\ &= m^((n+k)')\ &= m^(n+k') $ #pf(num:2)[Let $l = 0$: we have $(m^n)^0 = 1$ and $m^(n dot 0) = m^0 = 1$, i.e., $(m^n)^0 = m^(n dot 0)$.] Suppose $(m^n)^k = m^(n k)$ is true; then $ (m^n)^(k') &= (m^n)^k dot m^n\ &= m^(n k) dot m^n\ &= m^(n k + n)\ &= m^(n(k+1))\ &= m^(n dot k') $ The proofs are done. ]
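The definitions and the first lemma above translate almost verbatim into a proof assistant; here is a minimal Lean 4 sketch (the names `N`, `add`, `add_zero` are ours, not from the text) of the addition definition and the induction proof of $n + 0 = n$:

```lean
-- Peano naturals as in the axioms above: zero, plus a successor for every n.
inductive N where
  | zero : N
  | succ : N → N

-- Addition exactly as defined in the text:
--   0 + m := m,   n' + m := (n + m)'
def add : N → N → N
  | N.zero,   m => m
  | N.succ n, m => N.succ (add n m)

-- Lemma: n + 0 = n, by induction on n, mirroring the proof in the text:
-- the base case is the definition's first clause; the step uses the hypothesis.
theorem add_zero (n : N) : add n N.zero = n := by
  induction n with
  | zero => rfl
  | succ k ih => simp [add, ih]
```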
https://github.com/piepert/grape-suite
https://raw.githubusercontent.com/piepert/grape-suite/main/src/seminar-paper.typ
typst
MIT License
#import "colors.typ" as colors: * #import "todo.typ": todo, list-todos, hide-todos #import "elements.typ": * #let project( title: none, subtitle: none, submit-to: "Submitted to", submit-by: "Submitted by", university: "UNIVERSITY", faculty: "FACULTY", institute: "INSTITUTE", seminar: "SEMINAR", semester: "SEMESTER", docent: "DOCENT", author: "AUTHOR", student-number: none, email: "EMAIL", address: "ADDRESS", title-page-part: none, title-page-part-submit-date: none, title-page-part-submit-to: none, title-page-part-submit-by: none, sentence-supplement: "Example", date: datetime.today(), date-format: (date) => date.display("[day].[month].[year]"), header: none, header-right: none, header-middle: none, header-left: none, footer: none, footer-right: none, footer-middle: none, footer-left: none, show-outline: true, show-todolist: true, show-declaration-of-independent-work: true, page-margins: none, fontsize: 11pt, body ) = { let ifnn-line(e) = if e != none [#e \ ] set text(font: "Atkinson Hyperlegible", size: fontsize) // show math.equation: set text(font: "Fira Math") show math.equation: set text(font: "STIX Two Math") set par(justify: true) set enum(indent: 1em) set list(indent: 1em) show link: underline show link: set text(fill: purple) show heading: it => context { let num-style = it.numbering if num-style == none { return it } let num = text(weight: "thin", numbering(num-style, ..counter(heading).at(here()))+[ \u{200b}]) let x-offset = -1 * measure(num).width pad(left: x-offset, par(hanging-indent: -1 * x-offset, text(fill: purple.lighten(25%), num) + [] + text(fill: purple, it.body))) } // title page [ #set text(size: 1.25em, hyphenate: false) #set par(justify: false) #v(0.9fr) #text(size: 2.5em, fill: purple, strong(title)) \ #if subtitle != none { v(0em) text(size: 1.5em, fill: purple.lighten(25%), subtitle) } #if title-page-part == none [ #if title-page-part-submit-date == none { ifnn-line(semester) ifnn-line(date-format(date)) } else { title-page-part-submit-date } #if title-page-part-submit-to == none { ifnn-line(text(size: 0.6em, upper(strong(submit-to)))) ifnn-line(university) ifnn-line(faculty) ifnn-line(institute) ifnn-line(seminar) ifnn-line(docent) } else { title-page-part-submit-to } #if title-page-part-submit-by == none { ifnn-line(text(size: 0.6em, upper(strong(submit-by)))) ifnn-line(author + if student-number != none [ (#student-number)]) ifnn-line(email) ifnn-line(address) } else { title-page-part-submit-by } ] else { title-page-part } #v(0.1fr) ] // page setup let ufi = () if university != none { ufi.push(university) } if faculty != none { ufi.push(faculty) } if institute != none { ufi.push(institute) } set page( margin: if page-margins != none {page-margins} else { (top: 2.5cm, bottom: 2.5cm, right: 4cm) }, header: if header != none {header} else [ #set text(size: 0.75em) #table(columns: (1fr, auto, 1fr), align: bottom, stroke: none, inset: 0pt, if header-left != none {header-left} else [ #title ], align(center, if header-middle != none {header-middle} else []), if header-right != none {header-right} else [ #show: align.with(top + right) #author, #date-format(date) ]) ] + v(-0.5em) + line(length: 100%, stroke: purple), ) state("grape-suite-element-sentence-supplement").update(sentence-supplement) show: sentence-logic // outline if show-outline or show-todolist { pad(x: 2em, { if show-outline { outline(indent: true) v(1fr) } if show-todolist { list-todos() } }) pagebreak(weak: true) } // main body setup set page( background: context 
state("grape-suite-seminar-paper-sidenotes", ()) .final() .map(e => context { if here().page() == e.loc.at(0) { place(top + right, align(left, par(justify: false, text(fill: purple, size: 0.75em, hyphenate: false, pad(x: 0.5cm, block(width: 3cm, strong(e.body)))))), dy: e.loc.at(1).y) } else { } }).join[], footer: if footer != none {footer} else { set text(size: 0.75em) line(length: 100%, stroke: purple) v(-0.5em) table(columns: (1fr, auto, 1fr), align: top, stroke: none, inset: 0pt, if footer-left != none {footer-left}, align(center, context { str(counter(page).display()) [ \/ ] str(counter("grape-suite-last-page").final().first()) }), if footer-left != none {footer-left} ) } ) set heading(numbering: "1.") counter(page).update(1) body // backup page count, because last page should not be counted context counter("grape-suite-last-page").update(counter(page).at(here())) // declaration of independent work if show-declaration-of-independent-work { pagebreak(weak: true) set page(footer: []) heading(outlined: false, numbering: none, [Selbstständigkeitserklärung]) [Hiermit versichere ich, dass ich die vorliegende schriftliche Hausarbeit (Seminararbeit, Belegarbeit) selbstständig verfasst und keine anderen als die von mir angegebenen Quellen und Hilfsmittel benutzt habe. Die Stellen der Arbeit, die anderen Werken wörtlich oder sinngemäß entnommen sind, wurden in jedem Fall unter Angabe der Quellen (einschließlich des World Wide Web und anderer elektronischer Text- und Datensammlungen) kenntlich gemacht. Dies gilt auch für beigegebene Zeichnungen, bildliche Darstellungen, Skizzen und dergleichen. Ich versichere weiter, dass die Arbeit in gleicher oder ähnlicher Fassung noch nicht Bestandteil einer Prüfungsleistung oder einer schriftlichen Hausarbeit (Seminararbeit, Belegarbeit) war. Mir ist bewusst, dass jedes Zuwiderhandeln als Täuschungsversuch zu gelten hat, aufgrund dessen das Seminar oder die Übung als nicht bestanden bewertet und die Anerkennung der Hausarbeit als Leistungsnachweis/Modulprüfung (Scheinvergabe) ausgeschlossen wird. Ich bin mir weiter darüber im Klaren, dass das zuständige Lehrerprüfungsamt/Studienbüro über den Betrugsversuch informiert werden kann und Plagiate rechtlich als Straftatbestand gewertet werden.] v(1cm) table(columns: (auto, auto, auto, auto), stroke: white, inset: 0cm, strong([Ort:]) + h(0.5cm), repeat("."+hide("'")), h(0.5cm) + strong([Unterschrift:]) + h(0.5cm), repeat("."+hide("'")), v(0.75cm) + strong([Datum:]) + h(0.5cm), v(0.75cm) + repeat("."+hide("'")),) } } #let sidenote(body) = context { let pos = here() state("grape-suite-seminar-paper-sidenotes", ()).update(k => { k.push((loc: (pos.page(), pos.position()), body: body)) return k }) }
https://github.com/IdoWinter/UnitedDumplingsLegislatureArchive
https://raw.githubusercontent.com/IdoWinter/UnitedDumplingsLegislatureArchive/main/legislature/book_of_laws.typ
typst
MIT License
#import "templates.typ": * #outline(title: "Book of laws", depth: 2, fill: line(length: 100%), indent: 1em) #pagebreak() #include "constitution/israeli-laws-adoption.typ" #pagebreak() #include "constitution/direct-elections-pm.typ"
https://github.com/iseri27/tagbar-typst
https://raw.githubusercontent.com/iseri27/tagbar-typst/master/README_zh.md
markdown
MIT License
# tagbar-typst ## Introduction tagbar-typst generates ctags for typst files. This plugin is a fork of [lvht/tagbar-markdown](https://github.com/lvht/tagbar-markdown.git). ~~It really just renames a few variables~~. ## Screenshot ![](./screenshot/screenshot.png) ## Installation - [vim-plug] ```viml Plug 'majutsushi/tagbar' Plug 'Corona09/tagbar-typst' ``` Make sure **php** is installed and `bin/typst_ctags` is executable. Enjoy :) [vim-plug]: https://github.com/junegunn/vim-plug [dein.vim]: https://github.com/Shougo/dein.vim ~~Hastily written~~ Typst highlighting plugin: [Corona/nvim-typst](https://github.com/Corona/nvim-typst.git)
https://github.com/chamik/gympl-skripta
https://raw.githubusercontent.com/chamik/gympl-skripta/main/cj-dila/12-krysar.typ
typst
Creative Commons Attribution Share Alike 4.0 International
#import "/helper.typ": dilo #dilo("Krysař", "krysar", "<NAME>", "", [přelom 20. a 21.~st; neoklasicismus], "Česko", "~1920", "epika", "novela") #columns(2, gutter: 1em)[ *Téma*\ Krysař se za jejich lhostejnost mstí občanům města Hammeln. Volné zpracování středovéké saské pověsti. *Motivy*\ maloměšťanství, láska, smrt, zneužívání postavení hodnostářů, biblické *Časoprostor*\ městeč<NAME>, doba neurčita (pravděpodobně 13. st. podle pověsti) *Postavy* \ _Krysař_ -- bezejmenný, komplikovaný, tajemný, obávaný, podivín \ _Agnes_ -- pohledná milenka Krysaře \ _<NAME>_ -- milenec _Agnes_ \ _Strumm_ a _Frosh_ -- krejčí a truhlář, radní co nesplní svůj slib _Krysaři_ \ _<NAME>_ -- chudý rybář, zpomalený *Kompozice* -- kapitoly *Vypravěč* -- er-forma *Jazykové prostředky*\ citové zabarvení, symboly, personifikace, řečnické otázky, krátké holé věty *Obsah*\ // zdroj: https://www.cesky-jazyk.cz/ctenarsky-denik/viktor-dyk/krysar-rozbor.html Krysař přichází do města v době, kdy je sužováno přemnoženými krysami, a nabídne představitelům města, že je za sto rýnských hlodavců zbaví. O místní obyvatele se nezajímá, dokud se nezamiluje do dívky jménem Agnes. Později dochází k hádce krysaře s konšely, neboť mu město nechce vyplatit slíbenou sumu. Dále krysař zjistí, že Agnes čeká dítě se svým milencem Kristiánem. Poté se potká se služebníkem ďábla (faustovský motiv), který ho nabádá, aby použil kouzlo své píšťaly proti lidem. Krysař odmítá, a přestože má s místními nevyřízené účty, rozhodne se město opustit. Jeho cit k Agnes ho však táhne zpět. Po návratu se dovídá, že se Agnes po jeho odchodu utopila v řece. Nemá proto již důvod město Hammeln chránit -- zahraje na svou píšťalu a všechny obyvatele odvede do propasti na hoře Koppel, která podle pověsti vede do země sedmihradské, pozemského ráje. Sám je po chvíli následuje. Jediný, kdo se vábení píšťaly vzepře, je prostoduchý rybář <NAME>, který zaslechne nářek bezmocného nemluvněte a rozhodne se mu pomoci. *Literárně historický kontext*\ Původním názvem _Pravdivý příběh_, vyšel poprvé v časopise Lumír (1911). Autor patřil do skupiny buřičů. Buřičská literatura se na přelomu 19. a 20. století vymezovala proti hospodářskému růstu a změnám ve společnosti. *Směr* \ Neoklasicismus -- z období romantismu. Nevyzdvyhuje city, ale intelekt. Děj má spád. Nadčasové jevy, Typicky povídky a novely. _<NAME>_, _<NAME>_. ] #pagebreak() *Ukázka* A krysař zapískal na svou píšťalu. Nebyl to však tenký, tlumený tón, kterým vábil myši. Zvuk zněl plně a mocně; srdce při něm prudce zabušilo, krok se bezděčně zrychlil a probouzelo se – tiše, oh, tiše! – vše, co dřímalo v hlubokém nitru. Ale jak rychle šla tato píseň ze sna do života a z života do smrti! Jak tragické a velké vypětí! Jak jímavý a nezapomenutelný hlas! Krysař pískal. Šílená slyšela zvuk píšťaly. Její smích zchladl na rtech a mizel; a náhle – poslední ozvěna jejího smíchu se ještě vracela – vypukla v pláč. Slzy jí kanuly z očí. Bylo jí, jako by Agnes brala za ruku a říkala: „Pojď!“ Šla za krysařem. Krysař vyšel na ulici. Mocně a silně zněla jeho píšťala. Kdo ji slyšel, propadl krysařově svodu. Ustal od své práce a šel. Nedořekl slova a šel. Pradleny praly na dvoře Erhardtova domu. Byly mladé a svěží a hovořily o svých milencích. Ale krysař hvízdal. Zapomněly na bělostné prádlo i na chtivé milence a šly za krysařem. Ve Froschově dílně pracovali truhláři. Dělali lože a chválili krásu svých milých. Ale krysařova píšťala jim nedovolila jejich práci i hovor dokončit. Šli za krysařem. 
The flower-seller and matchmaker Elsbeth was just coaxing the honest Susa Tölsch, an orphan in service to councillor Lambert. She jingled gold ducats, intoxicating ear and eye. But just as Susa was yielding to the temptations of the old procuress, who painted for her a comfortable living and a happy fate, the Piper's song rang out. And Elsbeth and Susa followed the Piper. The workers in Strumm's workshop were cutting precious cloth. But they heard the sound of the Piper's pipe, left the precious fabrics and the talk of distant seas and great harbours, and went where the Piper's song sounded, alluring and mournful at once. They passed the tavern "At the Thirsty Man". <NAME> stood in the doorway in his low cap, always ready to bow and smile. Black Líza stood behind his back, showing and hiding herself, always ready to smile and to love. The Piper's pipe carried off Röger and Black Líza as a torrent carries away the bank. The Piper's pipe awakened old dreams and old sorrows. A debauchee bowed his head; to the sound of the pipe he saw his long-dead mother stroking the brow of a curly-haired boy who had long since ceased to be. To the sound of the pipe he saw his own future: an empty, wretched, shameful old age! The most shameless harlot in town -- Dora -- remembered a blue-eyed boy whom she had once loved well. She remembered the death of the betrayed one. And she remembered all those who came afterwards. She remembered the contemptuous glances of those who slink away before dawn. She remembered the wrinkles she had seen in the mirror that morning. #pagebreak()
https://github.com/rabotaem-incorporated/algebra-conspect-1course
https://raw.githubusercontent.com/rabotaem-incorporated/algebra-conspect-1course/master/sections/04-linear-algebra/01-matrices.typ
typst
Other
#import "../../utils/core.typ": * == Матрицы #def[ $R$ --- кольцо, $m, n in NN$ Матрица $m times n$ над кольцом $R$ --- прямоугольная таблица $A = display(mat( a_(1 1), a_(1 2), ..., a_(1 n); a_(2 1), a_(2 2), ..., a_(2 n); dots.v, dots.v, dots.down, dots.v; a_(m 1), a_(m 2), ..., a_(m n); ))$, где $a_(i j) in R$ Есть краткая запись $A = (a_(i j))_(i = 1, ..., m\ j = 1, ..., n) = (a_(i j))$ ] #denote[ Множество матриц $m times n$ над кольцом $R$ обозначается как $M_(m, n) (R)$ Так же обозначают, как: $R^(m times n)$, $M(m, n, R)$, $M_(m times n) (R)$ ] Пусть $A, B in M_(m, n) (R)$ --- матрицы. $A = (a_(i j))$, $B = (b_(i j))$ Их суммой называется матрица $C = (c_(i j))$, где $c_(i j) = a_(i j) + b_(i j)$. Пусть $A = (a_(i j)) in M_(m, n) (R)$, $B = (b_(i j)) in M_(n, p) (R)$ Их произведением называется матрица $C = (c_(i j)) in M_(m, p) (R)$, где $c_(i j) = sum_(k = 1)^n a_(i k) b_(k j)$ Пусть $c in R$, $A in M_(m, n) (R)$ Тогда $c dot A = (c dot a_(i j)) in M_(m, n) (R)$ #notice[ По умолчанию $R$ --- коммутативное кольцо ] #def[ Транспонированная матрица $A = (a_(i j)) in M_(m, n) (R)$ --- матрица $B = (b_(i j)) in M_(n, m) (R)$, где $b_(i j) = a_(j i)$ Обозначается как $A^T$ ] #example[ $display(mat( 2, 0, -3; 1, 5, 4; ))^T = display(mat( 2, 1; 0, 5; -3, 4; ))$ ] #def[ Матрица $A = (a_(i j)) in M_(m, n) (R)$ --- квадратная, если $m = n$ Обозначается как $A in M_(n) (R)$ ] #th(name: [Свойства операций над матрицами])[ + $A + (B + C) = (A + B) + C$ + $0 = (0)$, тогда $A + 0 = 0 + A = A$ + Для любой $A$ есть $-A$, такая что $A + (-A) = (-A) + A = 0$ + $A + B = B + A$ + #[ $(A B)C = A(B C)$, нужно чтобы $A in M_(m, n) (R)$, $B in M_(n, p) (R)$, $C in M_(p, q)(R)$ Обе матрицы принадлежат $M_(m, q) (R)$ ] + $A (B + C) = A B + A C$ + $(B + C) A = B A + C A$ + $(lambda + mu) A = lambda A + mu A, space lambda, mu in R$ + $lambda(A + B) = lambda A + lambda B, space lambda in R$ + $(lambda A) B = lambda (A B) = A(lambda B), space lambda in R$ + $(lambda mu) A = lambda (mu A), space lambda, mu in R$ + $(A + B)^T = A^T + B^T$ + $(A B)^T = B^T A^T$ ] #def[ Пусть $n in NN$. Единичной матрицой порядка $n$ называется: $E_n = display(mat( 1, 0, ..., 0; 0, 1, ..., 0; dots.v, dots.v, dots.down, dots.v; 0, 0, ..., 1; )) in M_(n) (R)$ Как кратко обозначить: $E_n = (delta_(i j))$, где $delta_(i j) = display(cases( 1\, & i = j, 0\, & i eq.not j ))$ --- символ Кронекера ] #pr[ Пусть $A in M_(m, n) (R)$. Тогда $E_m A = A E_n = A$ ] #proof[ $E_m A = (b_(i j)), space A = (a_(i j))$ $b_(i j) = limits(sum)_(k = 1)^m delta_(i k) a_(k j) = a_(i j)$ То есть $E_m A = A$ $E_n A^T = A^T ==> (E_n A^T)^T = (A^T)^T ==> (A^T)^T E_n^T = (A^T)^T ==> A E_n = A$ ] #follow[ $M_(n) (R)$ --- кольцо, где $E_n$ --- нейтральный элемент по умножению Называют кольцом квадратных матриц порядка $n$. ] #notice[ Кольцо не обязательно коммутативное при $n >= 2$ $A = display(mat( 0, 1; 0, 0; )) dot display(mat( 0, 0; 1, 0; )) = display(mat( 1, 0; 0, 0; ))$ $B = display(mat( 0, 0; 1, 0; )) dot display(mat( 0, 1; 0, 0; )) = display(mat( 0, 0; 0, 1; ))$ $A eq.not B$ ] #notice[ $M_1 (R) iso R$ ] #def[ $GL_n (R) = M_n (R)^* = \{A in M_n(R) divides exists B in M_n(R), space A B = B A = E_n\}$ Такая $B$ единственная и называется обратной к $A$, обозначается $A^(-1)$ ] #pr[ + $E_n in GL_n (R), space E_n^(-1) = E_n$ + $A_1, ..., A_k in GL_n (R) ==> limits(product)_(i = 1)^k A_i in GL_n (R), space (A_1 ... A_k)^(-1) = A_k^(-1) ... 
A_1^(-1)$ + $A in GL_n (R) ==> A^T in GL_n (R), space (A^T)^(-1) = (A^(-1))^T$ ] #proof[ + $E_n E_n = E_n E_n = E_n$ + #[ $(A_1 ... A_k)(A_k^(-1) ... A_1^(-1)) = A_1 ... A_(k-1) (A_k A_k^(-1)) ... A_1^(-1) = A_1 ... A_(k - 1) A_(k - 1)^(-1) ... A_1^(-1) = A_1 A_1^(-1) = E_n$ $(A_k^(-1) ... A_1^(-1))(A_1 ... A_k) = ... = A_k^(-1) A_k = E_n$ ] + #[ $(A^T dot (A^(-1))^T) = (A^(-1) dot A)^T = E_n^T = E_n$ $((A^(-1))^T dot A^T) = (A dot A^(-1))^T = E_n^T = E_n$ ] ] #def[ A matrix unit is a matrix whose entries are all zero except for a single entry, which equals one. It is denoted $e_(i j)$. ] #notice[ $A = (a_(i j)) = limits(sum)_(i, j) a_(i j) e_(i j)$ ]
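As a concrete instance of the product formula $c_(i j) = sum_k a_(i k) b_(k j)$, the non-commutativity example above works out entrywise like this (a worked check, written in LaTeX for brevity):

```latex
% (row i of A) dot (column j of B) gives c_{ij}:
\begin{pmatrix} 0 & 1 \\ 0 & 0 \end{pmatrix}
\begin{pmatrix} 0 & 0 \\ 1 & 0 \end{pmatrix}
=
\begin{pmatrix} 0\cdot 0 + 1\cdot 1 & 0\cdot 0 + 1\cdot 0 \\
                0\cdot 0 + 0\cdot 1 & 0\cdot 0 + 0\cdot 0 \end{pmatrix}
=
\begin{pmatrix} 1 & 0 \\ 0 & 0 \end{pmatrix}
```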
https://github.com/JWangL5/CAU-ThesisTemplate-Typst
https://raw.githubusercontent.com/JWangL5/CAU-ThesisTemplate-Typst/master/ref/acronyms.typ
typst
MIT License
// https://github.com/typst/typst/issues/659 #let acronyms = json("../template/acronyms.json") // The state which tracks the used acronyms #let usedAcronyms = state("usedDic", (:)) // The function which either shows the acronym or the full text for it #let acro(body) = { if(acronyms.keys().contains(body) == false) { return highlight(fill: red, [*Warning: #body*],) } usedAcronyms.display(usedDic => { return eval(acronyms.at(body).at(1), mode: "markup") }); usedAcronyms.update(usedDic => { usedDic.insert(body, true) return usedDic }) }
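A hedged usage sketch: `acro` looks its argument up as a key of `acronyms.json` and `eval`s the entry at index 1 as markup, so a compatible file and call would presumably look like this (the DNA entry is invented for illustration):

```typst
// ../template/acronyms.json (hypothetical contents):
// { "DNA": ["DNA", "deoxyribonucleic acid (DNA)"] }

#acro("DNA")  // renders the markup stored at index 1 of the entry
#acro("XYZ")  // unknown key: rendered as a red "Warning: XYZ" highlight
```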
https://github.com/Myriad-Dreamin/typst.ts
https://raw.githubusercontent.com/Myriad-Dreamin/typst.ts/main/fuzzers/corpora/math/accent_04.typ
typst
Apache License 2.0
#import "/contrib/templates/std-tests/preset.typ": * #show: test-page // Test wide base. $arrow("ABC" + d), tilde(sum)$
https://github.com/Hao-Yuan-He/resume_typst
https://raw.githubusercontent.com/Hao-Yuan-He/resume_typst/main/main.typ
typst
#import "lib.typ": * // Put your personal information here, replacing mine #let name = "<NAME> (何浩源)" #let location = "Nanjing, China" #let email = "<EMAIL>" #let github = "github.com/stuxf" #let linkedin = "linkedin.com/in/stuxf" #let phone = "Phone number" #let personal-site = "hao-yuan-he.github.io" #let img = "prof_pic.png" #show: resume.with( author: name, // All the lines below are optional. // For example, if you want to to hide your phone number: // feel free to comment those lines out and they will not show. location: location, email: email, // github: github, // linkedin: linkedin, phone: phone, img:img, personal-site: personal-site, accent-color: "#26428b", ) /* * Lines that start with == are formatted into section headings * You can use the specific formatting functions if needed * The following formatting functions are listed below * #edu(dates: "", degree: "", gpa: "", institution: "", location: "") * #work(company: "", dates: "", location: "", title: "") * #project(dates: "", name: "", role: "", url: "") * #extracurriculars(activity: "", dates: "") * There are also the following generic functions that don't apply any formatting * #generic-two-by-two(top-left: "", top-right: "", bottom-left: "", bottom-right: "") * #generic-one-by-two(left: "", right: "") */ == Education #edu( institution: "School of Computer Science, Nanjing Tech University", location: "Nanjing, China", dates: dates-helper(start-date: "2017", end-date: "2021"), degree: "B.Sc. degree of Computer Science.", ) #edu( institution: "School of Artificial Intelligence, Nanjing University", location: "Nanjing, China", dates: dates-helper(start-date: "2021", end-date: "2024"), degree: "M.Sc. student at LAMDA group. Advised by Prof. <NAME>.", ) #edu( institution: "School of Artificial Intelligence, Nanjing University", location: "Nanjing, China", dates: dates-helper(start-date: "2024", end-date: "Now"), degree: "Ph.D. student at LAMDA group. Advised by Prof. <NAME>.", ) == Work Experience #work( title: "Research Intern, AI Lab, Tencent", location: "Shenzhen, China", des: "I was working on the game AI of King of Honor (王者荣耀).", dates: dates-helper(start-date: "2023.05", end-date: "2023.07"), ) == Research My research interests lie in the field of machine learning, with a particular focus on weakly supervised learning and abductive learning. // show me by underline #show "H.-<NAME>": name => box[ #underline(name) ] #bibliography("works.bib", title: none, full: true) == Academic Service #work( title: "Volunteer", des: "MLA’2023, IJCLR’2024", dates: dates-helper(start-date: "2023", end-date: "Now"), ) #work( title: "Conference Reviewer / PC Member", des: "ACML’2023, IJCLR’2024, MATH-AI@NeurIPS’2024, ICLR’2025", dates: dates-helper(start-date: "2023", end-date: "Now"), ) #work( title: "Journal Reviewer", des: "Knowledge and Information Systems", dates: dates-helper(start-date: "2023", end-date: "Now"), ) == Awards #award( name:"Outstanding Graduate Student", des:"Nanjing University", dates:"2023" ) #award( name: "Winner Award", des: "International Algorithm Case Competition, Track of Data Selection and Label Correction", dates: "2022", ) #award( name: "First Price of Academic Scholarship", des:"Top 10%, Nanjing University", dates:"2022, 2023" ) #award( name: "National Encouragement Scholarship", des:"JiangSu Education Department", dates:"2019", ) #award( name:"The First Price of The Chinese Mathematics Competitions", des:"Chinese Mathematical Society", dates:"2018, 2020", )
https://github.com/ilsubyeega/circuits-dalaby
https://raw.githubusercontent.com/ilsubyeega/circuits-dalaby/master/Type%201/2/35.typ
typst
#set enum(numbering: "(a)") #import "@preview/cetz:0.2.2": * #import "../common.typ": answer 2.35 다음 회로의 단자 $(a, b)$ 의 오른쪽 회로를 단순화하여 $R_(e q)$를 구하고, 전압원에 의해 공급되는 전력을 구하라. 모든 저항값의 단위는 $ohm$ 이다. #answer[ 직렬, 병렬 합치기를 통하여 $R_(e q) = 5 ohm$ 이다. $P = V I = V^2/R = 25^2 / 5 = 125 W$. ]
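The answer compresses two standard reduction rules; a spelled-out version of the arithmetic follows (a sketch: the actual resistor topology is in the omitted figure, and $V = 25$ V is implied by the $25^2$ above):

```latex
% series and parallel reduction rules used to reach R_eq:
R_{\text{series}} = R_1 + R_2, \qquad
R_{\text{parallel}} = \frac{R_1 R_2}{R_1 + R_2}
% power delivered by the source, with V = 25\,\mathrm{V},\ R_{eq} = 5\,\Omega:
P = \frac{V^2}{R_{eq}} = \frac{25^2}{5} = 125\,\mathrm{W}
```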
https://github.com/crd2333/crd2333.github.io
https://raw.githubusercontent.com/crd2333/crd2333.github.io/main/src/docs/Reading/Reconstruction/NeRF改进工作.typ
typst
#import "/src/components/TypstTemplate/lib.typ": * #show: project.with( title: "NeRF 改进工作", lang: "zh", ) #let bo = $bold(o)$ #let bxi = $bold(xi)$ #let bnu = $bold(nu)$ #let bK = $bold(K)$ #let bI = $bold(I)$ #let bT = $bold(T)$ #let bP = $bold(P)$ #let batch_size = math.text("batch_size") #let br = math.bold("r") #let cV = math.cal("V") #info()[ - 参考 + #link("https://github.com/awesome-NeRF/awesome-NeRF")[github.com/awesome-NeRF/] + #link("https://zhuanlan.zhihu.com/p/614008188")[NeRF 系列工作个人总结] + #link("https://zhuanlan.zhihu.com/p/618913937")[NeRF Baking 系列工作个人总结] + #link("https://zhuanlan.zhihu.com/p/586939873")[新视角图像生成:讨论基于NeRF的泛化方法] + #link("https://zhuanlan.zhihu.com/p/567653339")[神经体绘制:NeRF及其以外方法] + #link("https://mp.weixin.qq.com/s?__biz=MzU2OTgxNDgxNQ==&mid=2247488880&idx=1&sn=c1eedd7a2f9ec49a4d5d9d786fb76330")[【NeRF大总结】基于NeRF的三维视觉年度进展报告--清华大学刘烨斌] ] = NeRF 进一步的论文(略读) - 时间按 Arxiv 提交时间排序 - Generalization + GRAF: Generative Radiance Fields for 3D-Aware Image Synthesis(2020.7) + GIRAFFE: Representing Scenes as Compositional Generative Neural Feature Fields(2020.11) - Multiscale + NeRF++: Analyzing and Improving Neural Radiance Fields(2020.10) + Mip-NeRF: A Multiscale Representation for Anti-Aliasing Neural Radiance Fields(2021.3) + Mip-NeRF 360: Unbounded Anti-Aliased Neural Radiance Fields(2021.11) - Faster Training & Inference + NSVF: Neural Sparse Voxel Fields(2020.7) + AutoInt: Automatic Integration for Fast Neural Volume Rendering(2020.10) + FastNeRF: High-Fidelity Neural Rendering at 200FPS(2021.3) + PlenOctrees for Real-time Rendering of Neural Radiance Fields(2021.3) + KiloNeRF: Speeding up Neural Radiance Fields with Thousands of Tiny MLPs(2021.3) + Direct Voxel Grid Optimization: Super-fast Convergence for Radiance Fields Reconstruction(2021.11) + Plenoxels: Radiance Fields without Neural Networks(2021.11) + InstantNGP: Instant Neural Graphics Primitives with a Multiresolution Hash Encoding(2022.1) + TensoRF: Tensorial Radiance Fields(2022.3) + MobileNeRF: Exploiting the Polygon Rasterization Pipeline for Efficient Neural Field Rendering on Mobile Architectures(2022.7) - Representation Enhancement + VolSDF: Volume rendering of neural implicit surfaces(2021.6) + NeuS: Learning neural implicit surfaces by volume rendering for multi-view reconstruction(2021.6) == Generalization - 参考 + #link("https://zhuanlan.zhihu.com/p/388136772")[NeRF 与 GAN 碰撞出的火花 —— 从 CVPR 2021 最佳论文:GIRAFFE 读起(一)] + #link("https://zhuanlan.zhihu.com/p/384521486")[从NeRF -> GRAF -> GIRAFFE,2021 CVPR Best Paper 诞生记] #grid( columns: (70%, 30%), fig("/public/assets/Reading/Reconstruction/Improved_NeRF/2024-10-16-14-46-37.png"), [ #fig("/public/assets/Reading/Reconstruction/Improved_NeRF/2024-10-16-15-11-14.png") #fig("/public/assets/Reading/Reconstruction/Improved_NeRF/2024-10-16-15-11-31.png") ] ) - 方法总览如上 - GRAF 分成 Generator 和 Discriminator 两个部分 - Generator 部分将相机矩阵 $bK$(固定),相机 pose $bxi$ ,采样模板 $bnu$ 和形状/外观编码 $bz_s in RR^m \/ bz_a in RR^n$ 作为输入预测一个图像 $bI$ 的一个 patch $P'$ - 其中每个 Ray 由 $bK$, $bxi$, $bnu=((u,v), s)$ 三个输入决定,$nu$ 表示采样点的 2D 位置和步幅。每个 Ray 上采样点的方法同 NeRF - Conditional Radiance Field 是 Generator 唯一可学习的部分 - Discriminator 对预测合成的 patch $P'$ 和用 $bnu$ 从真实图片采样得到真实 patch $P$ 进行判断 - 训练阶段,GRAF 使用稀疏的 $K * K$ 个像素点 2D 采样模板进行优化,损失函数如下;测试阶段,预测出目标图片的每个像素的颜色值 $ V(th, phi) = EE_(bz_s wave p_s, bz_a wave p_a, bxi wave p_bxi, bnu wave p_bnu) [f(D_phi (G_th (bz_s, bz_a, bxi, bnu)))] + EE_(bI wave p_cal(D), bnu wave p_bnu) [f(- D_phi (Ga (bI, bnu))) - la norm(na D_phi (Ga(bI, bnu)))] $ - $bxi$, $bnu$, $bI$, $bz_s \/ bz_a$ 
are all randomly sampled from their distributions - The claimed generalization is actually rather limited: it can only model objects within one category (say, all cars, or all chairs), with slightly different shapes and colors on that basis. This generalization comes partly from the GAN itself and partly from the introduced shape/appearance codes - The pose sampling is quite puzzling: how can the Generator be guaranteed to generate a reasonable image for an absurd pose? - The reason is probably the GAN: although the Discriminator's judgment is less effective and direct than NeRF's pixel-level supervision, it forces the Generator to learn to generate realistic images for different poses. Conversely, pixel alignment makes it easy to pair a wrong pose with an image - It still feels rather strange, though... #hline() - Although GRAF already achieves controllable, high-resolution 3D-aware image generation, it is limited to single-object scenes, and its performance on more complex real-world image generation is unsatisfactory - GIRAFFE proposes representing scenes as compositional neural feature fields, able to separate one or more objects from the background, as well as each individual object's shape and appearance #fig("/public/assets/Reading/Reconstruction/Improved_NeRF/2024-10-16-15-55-39.png", width: 70%) #grid( columns: (70%, 30%), fig("/public/assets/Reading/Reconstruction/Improved_NeRF/2024-10-16-15-44-22.png"), fig("/public/assets/Reading/Reconstruction/Improved_NeRF/2024-10-16-15-46-07.png") ) - Method overview above - Overall it is very similar to GRAF - The Generator picks an image $I$ and takes the camera pose $bxi$, $N$ shape/appearance codes $bz_s^i, bz_a^i$, and affine transforms $bT_i$ as input + Ray casting and point sampling yield the rays indexed by $j$, with sample points indexed by $i$ on each ray, i.e., the whole point cloud + This is replicated $N$ times; each copy is mapped by its affine transform into one of the $N$ objects we want to express, then fed into the network to obtain an implicit representation for each object, each ray $j$, and each sample point $i$ + A Scene Composition operation merges these at the object level + The result then goes through the volume rendering formula to obtain a 2D feature map + Finally, a 2D Neural Rendering module converts it into an RGB image - The Discriminator simply judges the input image against the predicted image; nothing special here - In the figure, orange rectangles are learnable parts and blue ones are not - The affine transform $bT = {bs, bt, bold(R)}$ - allows us to decompose different objects out of the scene, and to control the pose, shape, and appearance of each object individually. Concretely, the operator that places an object is $ k(bx) = bold(R) dot mat(s_1,,;,s_2,;,,s_3) dot bx + t $ - i.e., the Generative Neural Feature Field can be expressed as $ (si, bf) = h_th (ga(k^(-1) (bx)), ga(k^(-1) (bd)), bz_s, bz_a) $ - Scene Compositions - The scene has $N-1$ objects and $1$ background (whose affine transform is always fixed). The implicit representations $(si_(i j), bf_(i j))$ of these $N$ entities' point clouds differ because of the affine transforms and the shape/appearance codes. This naturally raises the question of how to combine them. The paper gives a simple operator $C$: combine the densities and take the density-weighted mean of the features, which also guarantees that gradients propagate to every entity during backpropagation: $ C(bx, bd) = (si, frac(1, sum_(i=1)^N si_i) sum_(i=1)^N si_i bf_i) $ - The 2D Neural Rendering $pi^"neural"_th$ looks rather complex, but its purpose is simple - Training is performed on the raw image collection; at test time, the camera pose, object poses, and object shapes and appearances can all be controlled simultaneously to produce 2D images. Moreover, GIRAFFE can synthesize objects beyond those in the training images - Reviewing the scene representation formulas (implicit representations) from NeRF to GRAF to GIRAFFE: - NeRF: $ f_th : (ga(bx),ga(bd)) arrow.r.bar (si, bc) ~~~~~ RR^(L_x) times RR^(L_d) -> RR^+ times RR^3 $ - GRAF: $ g_th : (ga(bx),ga(bd), bz_s, bz_a) arrow.r.bar (si, bc) ~~~~~ RR^(L_x) times RR^(L_d) times RR^m times RR^n -> RR^+ times RR^3 $ - GIRAFFE: $ h_th : (ga(bx),ga(bd), bz_s, bz_a) arrow.r.bar (si, bf) ~~~~~ RR^(L_x) times RR^(L_d) times RR^m times RR^n -> RR^+ times RR^(M_f) $ == Multiscale === NeRF++ - Concretely, this paper first discusses the shape-radiance ambiguity phenomenon and analyzes why NeRF succeeds in avoiding it - Without regularization, degenerate solutions should appear: different shapes can all perform well during training, but the results degrade markedly at test time. Yet NeRF avoids this - The authors give two reasons (see #link("https://zhuanlan.zhihu.com/p/458166170")[a partial reading of NeRF++: why is NeRF so strong?]): + When the predicted geometry is far from the true scene, its surface light field becomes very complex, while the surface light field of the correct geometry is generally smooth (e.g. 
Lambertian case); the network's limited ability to represent high-frequency surface light fields forces it to learn the correct geometry + NeRF's particular MLP architecture treats the position $bx$ and the direction $bd$ asymmetrically: the Fourier features of the latter (the $L_bd$ in the positional encoding) contain only low-frequency components and enter the network late. That is, for a fixed $bx$, the radiance field $c(bx, bd)$ has limited expressiveness with respect to $bd$ - Next, NeRF++ introduces a nonlinear parameterization of the whole space, addressing NeRF modeling of unbounded 3D scenes - Problem: for 360-degree captures, NeRF assumes the whole scene can be packed into one bounded volume. For large-scale scenes, either we fit a small part of the scene into the volume and sample it in detail, entirely failing to capture background elements, or we fit the whole scene into the volume and lack detail everywhere due to the finite sampling resolution - The idea is to split the ray $br = bo + t bd $ into two parts with a sphere of radius $t'$ and evaluate them with different NeRFs (different MLPs) $ C(br) = underbrace(int_(t=0)^t' si(bo + t bd) dot bc(bo + t bd, bd) dot e^(- int_(s=0)^t si(bo + s bd) dif s) dif t, (i)) \ + underbrace(e^(- int_(s=0)^t' si(bo + t bd) dif s), (i i)) dot underbrace(int_(t=t')^infty si(bo + t bd) dot bc(bo + t bd, bd) dot e^(- int_(s=t')^t si(bo + s bd) dif s) dif t, (i i i)) $ - Terms (i) and (ii) are computed in Euclidean space, and (iii) in an inverted-sphere space: a 3D point in the outer region, $(x,y,z), r=sqrt(x^2+y^2+z^2)$, can be reparameterized as $(x', y', z', 1/r)$. All four numbers of this quadruple are bounded, which improves numerical stability (but after transforming back to the original space, isn't the sampling rate still insufficient? I did not get this) === Mip-NeRF - See #link("https://blog.csdn.net/weixin_44292547/article/details/126315515")[NeRF study notes (4): Mip-NeRF innovations explained] and #link("https://blog.csdn.net/i_head_no_back/article/details/129419735")[Must-read NeRF: a Mip-NeRF summary with derivations] - The mip in Mip-NeRF is the same mip as in CG's mipmap, meaning "a small space holding many things". It aims to solve the blurring and aliasing of the original NeRF caused by the differing resolutions of far and near content. The main innovations are threefold + Mip-NeRF renders with anti-aliased conical frustums, i.e., cone tracing - When the viewing distance or direction changes, NeRF's ray-based sampling is insensitive to such changes; the sampling rate cannot keep up with the rate of variation. Sampling with cones explicitly models the volume change of each sampled conical frustum, resolving this ambiguity - For any pixel, a cone is cast from the camera center $bo$ along the direction $bd$ through the pixel center. Let $dot(r)$ be the radius of the cone at the image plane $bo + bd$; the set of positions $bx$ lying in the frustum between $[t_0, t_1]$ can be expressed (characterizing the axial and radial parts separately) as $ F(bx,bo,bd,dot(r),t_0,t_1) = 1{(t_0 < frac(bd^T (bx-bo), norm(d)^2) < t_1) and (frac(bd^T (bx-bo), norm(d)^2 norm(bx-bo)) > frac(1, sqrt(1+(dot(r)\/norm(d))^2)))} $ - With cone-based sampling, the integral in the original positional encoding has no closed-form solution and cannot be computed efficiently, so a multivariate Gaussian is used to approximate the frustum. Because each frustum cross-section is circular and the frustum is symmetric about the cone axis, given $bo, bd$ the Gaussian is fully described by $3$ values: $mu_t$ (mean distance along the ray), $si_t$ (variance along the ray direction), and $si_r$ (variance perpendicular to the ray), giving the multivariate Gaussian $ mu_t = t_mu + frac(2 t_mu t_de^2, 3 t_mu^2 + t_de^2), si_t^2 = frac(t_de^2, 3) - frac(4 t_de^4 (12 t_mu^2 - t_de^2), 15(3 t_mu^2 + t_de^2)^2), si_r^2 = dot(r)^2 (frac(t_mu^2, 4) + frac(5 t_de^2, 12) - frac(4 t_de^4, 15(3 t_mu^2 + t_de^2))) $ where $t_mu = (t_0 + t_1)\/2, t_de = (t_1 - t_0)\/2$. We then transfer it to world coordinates: $ mu = bo + mu_t bd, Si = si_t^2 frac(bd bd^T, norm(bd)^2) + si_r^2 (bI - frac(bd bd^T, norm(bd)^2)) $ + Mip-NeRF proposes a new positional encoding, IPE (Integrated Positional Encoding) - First, the positional encoding is rewritten in matrix form (Fourier features) $ P = mat(1,0,0,2,0,0,,2^(L-1),0,0,;0,1,0,0,2,0,...,0,2^(L-1),0,;0,0,1,0,0,2,,0,0,2^(L-1))^T ga(bx) = vec(sin(bP bx), cos(bP bx)) $ - IPE is the expected value of the positional encoding under the Gaussian $ ga(mu, Si) = EE_(bx wave cal(N)(bP mu, bP Si bP^T)) [ga(bx)] = vec(sin(bP mu) compose exp(- 1/2 "diag"(bP Si bP^T)), cos(bP mu) compose exp(- 1/2 "diag"(bP Si bP^T))) $ - This is a bit hard to grasp; one can still treat it as a ray on which many Gaussian ellipsoids are sampled between $t_n wave t_f$, and the subsequent rendering still proceeds ray by ray - Advantages of IPE + It smoothly encodes the size and shape of a volume and accounts for the sampling frequency of the points (farther points should contribute less information, a principle that PE violates, causing aliasing), reducing the influence of aliased PE features + The shrinkage of IPE's high-frequency dimensions frees it from the hyperparameter L + Mip-NeRF uses a single multiscale MLP - NeRF uses hierarchical sampling with fine and coarse models. In NeRF this is necessary because its PE property means its MLP can only learn a scene model at a single scale - But Mip-NeRF's Gaussian positional encoding automatically weakens high-frequency features when the sampling rate is low (when IPE features are widely spaced), mitigating aliasing. This sampling design is inherently suited to the multiscale setting, so the two MLPs can be merged into one - Drawbacks + IPE is more expensive to compute than PE, though the single MLP makes up for it + Mip-NeRF builds the relation between the multi-view images and the target object more effectively and accurately than NeRF, which also means that camera pose errors more easily confuse Mip-NeRF, causing more severe distortion + Likewise, when the captures contain noise such as motion blur or exposure variation, Mip-NeRF is easily affected (only with high-quality images and accurate camera poses does Mip-NeRF achieve really good results) === Mip-NeRF 360 - Builds on Mip-NeRF with three innovations + Non-linear scene parameterization - Proposes a Kalman-filter-like warp that maps the Gaussians of the unbounded region beyond a certain distance into a non-Euclidean space 
=== Mip-NeRF 360

- Builds on Mip-NeRF with three contributions
  + Non-linear scene parameterization
    - A Kalman-filter-like warp maps the Gaussians of the unbounded region beyond a certain distance into a non-Euclidean space
    #fig("/public/assets/Reading/Reconstruction/Improved_NeRF/2024-10-17-11-23-01.png", width: 80%)
    $ "contract"(bx) = cases(bx & norm(bx) <= 1, (2-1/norm(bx)) (bx/norm(bx)) ~~~ & norm(bx) > 1) $
    - The function maps coordinates into the ball of radius $2$ (orange); points within radius $1$ (blue) are unaffected. It is designed so that rays cast from a camera at the scene origin have equidistant sample spacing in the orange region
  + Online distillation
    - A small proposal MLP and a large NeRF MLP. The former predicts only the weights $bw$ (the ones from the volume-rendering equation) and is evaluated with many samples for repeated resampling; the latter actually predicts the implicit representation and is evaluated only once
    - It feels like a more decoupled, refined version of vanilla NeRF's hierarchical volume sampling
  + A novel distortion-based regularizer
    - A conventionally trained NeRF shows two kinds of blur
      + floaters: small, disconnected regions of dense volume that render like blurry clouds (the ray's $w$ distribution is multi-modal)
      + background collapse: distant surfaces wrongly modeled as semi-transparent clouds of dense content near the camera (the ray's $w$ distribution has no salient peak)
    - The proposed regularizer raises single peaks and suppresses multi-modality: among the normalized weights along a ray, the salient ones are made more salient and the crowd of insignificant ones is pushed down
  - The final loss combines NeRF's $cL_"recon"$, the distillation loss $cL_"prop"$, and the regularizer $cL_"dist"$

== Fast Train & Inference

=== AutoInt

- Proposes an automatic integration framework that learns to evaluate definite integrals, about $10$× faster than vanilla NeRF at only a slight cost in accuracy

#fig("/public/assets/Reading/Reconstruction/Improved_NeRF/2024-10-17-20-32-04.png", width: 80%)

- Normally we would learn network parameters $Phi_th$ to fit $f(dot)$; by the fundamental theorem of calculus,
$ Phi_th (bx) = int frac(pa Phi_th, pa x_i) (bx) dif x_i = int Psi_th^i (bx) dif x_i $
- So if we first build an integral network, derive the grad network from it, and take "the gradient with respect to each input" as the learning target, requiring it to match the function being fitted, then the integral network's outputs directly give definite integrals
$ int_ba^bb f(bx) dif x_i = Phi_th (bb) - Phi_th (ba) $
- In NeRF this machinery is applied to the volume-rendering integral: the integral network directly predicts the final color
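A toy PyTorch sketch of this trick in 1-D, my own hypothetical setup (the paper constructs the grad network explicitly so it shares parameters with the integral network; here plain autodiff stands in for that):

```python
import torch

# Integral network Phi: R -> R (toy 1-D stand-in for the ray integral).
phi = torch.nn.Sequential(
    torch.nn.Linear(1, 64), torch.nn.SiLU(),
    torch.nn.Linear(64, 64), torch.nn.SiLU(),
    torch.nn.Linear(64, 1),
)
opt = torch.optim.Adam(phi.parameters(), lr=1e-3)
f = torch.sin  # the integrand we want definite integrals of

for _ in range(2000):
    t = (torch.rand(256, 1) * 6.28).requires_grad_(True)
    # Grad network Psi = dPhi/dt, obtained here by autodiff; train it to fit f.
    psi = torch.autograd.grad(phi(t).sum(), t, create_graph=True)[0]
    loss = ((psi - f(t)) ** 2).mean()
    opt.zero_grad()
    loss.backward()
    opt.step()

# A definite integral is now just two forward passes through Phi:
a, b = torch.zeros(1, 1), torch.full((1, 1), 3.14159)
print((phi(b) - phi(a)).item())  # should approach 2 = integral of sin over [0, pi]
```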
=== NSVF

- NSVF tackles NeRF's slow rendering from the sampling side
  - Volume rendering has to consider many samples along each ray, yet for rigid bodies the vast majority of non-surface samples are meaningless
  - NSVF therefore maintains a sparse grid that guides sampling along rays, skipping unnecessary samples

#fig("/public/assets/Reading/Reconstruction/Improved_NeRF/2024-10-18-14-35-20.png", width: 80%)

- In fact NeRF's heavy computation has two sources: unnecessary samples, and every sample passing through one large MLP
  - NSVF solves the former but not the latter, which is where FastNeRF, Plenoxels, and DVGO later improve
- Still, several techniques NSVF proposed were widely adopted afterwards
  + *Early termination*: do not walk all samples along the ray; stop once the transparency $T$ has decayed far enough
  + *Empty-space skipping*: for an AABB (axis-aligned bounding box) grid, another higher-resolution AABB grid can mark which regions are empty so they can be skipped
  + Grid representation and grid *self-pruning*, widely used in later grid-based work

=== FastNeRF

- The core is a graphics-inspired factorization that allows
  + compactly caching a deep radiance map at every position in space
  + efficiently querying that map with the ray direction to estimate the rendered pixel values
- $3000$× faster than vanilla NeRF, and an order of magnitude faster than the others (pre-CVPR 2021)

#fig("/public/assets/Reading/Reconstruction/Improved_NeRF/2024-10-17-20-55-09.png", width: 80%)

- Problem: NeRF invokes the network once per sample per pixel
- For real-time rendering it relies on caching, trading memory for time: uniformly sample $k$ values along each of the $(x,y,z)$ dimensions and $l$ values along each of $(th, phi)$
  - Positions and directions are cached separately, taking the complexity $O(k^3 l^2) -> O(k^3 (1 + 3 D) + l^2 D)$ and avoiding the memory explosion
- The upper MLP $F_"pos"$ depends only on position and outputs $D$ vectors $(u_i,v_i,w_i)$; the lower MLP $F_"dir"$ depends only on direction and outputs the weights of those $D$ vectors. An inner product of the two yields $R G B$ and $si$; the input splits from $5$ dimensions into $3 + 2$. Empty-space skipping and early termination are also used for speed
- On first reading this is hard to accept: after a pose change, rendering samples new points in a floating-point world, so how can previously cached results possibly be reused?
  - In practice deep learning runs at low precision; the paper uses float16, roughly $4$ significant digits after the decimal point. The direction angle $th in [0, 2 pi]$, for instance, takes at most a few tens of thousands of distinct values. The Implementation section also mentions downsampling the whole bounding box when the NeRF scene is too large
  - In other words, the scene's points and directions are not that sparse and can be densely discretized. One may view this as a form of voxelization; inference is offline, replacing network evaluation with direct lookups, using nearest-neighbour interpolation for $F_"pos"$ and trilinear sampling for $F_"dir"$ to snap inputs onto the cache keys

#note()[
  This voxelization is widely used in concurrent and later work. In graphics the idea is known as "baking": quantities that need heavy computation, such as color, normals, and lighting, are precomputed, stored in some form, and simply loaded later. Applied to NeRF, results that would otherwise require the large MLP are computed ahead of time and frozen into voxel grids, octrees, hash tables, and so on. Much of the work introduced next follows this line
]

=== PlenOctrees

#fig("/public/assets/Reading/Reconstruction/Improved_NeRF/2024-10-18-15-28-32.png")

- Three contributions
  - Modify NeRF's radiance field to be based on spherical harmonics (the MLP predicts the SH coefficients), dubbed NeRF-SH. This is arguably more physical, and predicting coefficients is a simpler and faster learning task
  - A sparsity prior acting like weight decay, with $la$ a hyperparameter and $K$ the number of samples in space
  $ L_"sparsity" = 1/K sum_(i=1)^K |1 - exp(- la si_k)| $
  - Extract the trained NeRF-SH into a voxel grid, then compress it into an octree-based structure
    + Evaluation: uniformly grid the bounding box and predict $si$ at the grid points. The grid auto-scales to the scene (the bounding box is adjusted so that the retained grid points all have $si$ above a threshold $ta_a$)
    + Filtering: for each grid point, compute the maximum rendering weight $w$ over all training views, then filter out unoccupied points with a threshold $ta_w$ (points that are interior from every viewpoint). Each remaining point becomes a voxel center and a leaf used to build the octree over the voxel grid (every leaf is either a voxel or empty)
    + Sampling: for each retained leaf voxel, randomly sample 256 interior points, predict their $si$ and SH coefficients, and update the voxel's values (anti-aliasing)
  - The result can then be fine-tuned (tree structure fixed, values optimized)
- Assessment
  + Building such an octree takes about $15$ min, but afterwards inference queries the octree directly instead of running the network, which is far faster
  + Rendering quality is close to vanilla NeRF, even slightly better
  + Its biggest drawback is storage: nearly 2 GB for a single object (even with the octree reduction)

=== KiloNeRF

#fig("/public/assets/Reading/Reconstruction/Improved_NeRF/2024-10-18-19-21-42.png", width: 70%)

- The idea is very direct: voxelize space and give each voxel its own tiny MLP
- A three-stage strategy:
  + train a vanilla NeRF
  + distill the vanilla NeRF's point-wise predictions into the tiny MLPs
  + fine-tune the tiny MLPs, again with empty-space skipping and early termination for speed
- Assessment
  - The idea is intuitive; the challenge is the implementation. The authors wrote a CUDA GEMM so that samples pass through different tiny MLPs in parallel
  - Computation drops markedly and rendering speeds up by roughly three orders of magnitude
  - Compared with other approaches it is less flexible, and harder to convert or deploy

=== Plenoxels

#fig("/public/assets/Reading/Reconstruction/Improved_NeRF/2024-10-19-11-59-05.png", width: 90%)

- Same authors as PlenOctrees
- They observe that the main benefit of baking is actually freezing the geometry rather than the radiance (PlenOctrees' fine-tuning never changes the octree structure)
- Personally I also find train-then-bake not quite end-to-end: could we train on the baked representation from the start? Plenoxels is exactly that attempt
  - An octree is not a representation suited to shape optimization, so the baked representation returns to a sparse voxel grid
- Plenoxels is a fully explicit method, with no MLP anywhere
  - Build a voxel grid for the scene. Following coarse-to-fine, the grid is upsampled at a certain training stage (each voxel splits into eight, resolution $256^3 -> 512^3$, with new vertex parameters obtained by trilinear interpolation); the grid is also pruned by $w$ or $si$, keeping only non-empty voxels
  - Parameters live only at grid vertices (the $si$ and SH coefficients carried over from PlenOctrees). To render a ray, sample points along it and trilinearly interpolate by position (whereas PlenOctrees queries the octree for the cell containing the sample) to obtain each sample's parameters, then evaluate the volume-rendering integral
- Learning vertex parameters directly has a catch: across neighboring cells the parameters are not as continuous as a neural network's output, causing artifacts (imagine some cells tuned carefully to satisfy the training views while others are fudged). Plenoxels therefore adds a smoothness prior, a TV loss that smooths neighboring values
$ cL_"TV" = frac(1, |cV|) sum_(bv in cV, d in cD) sqrt(De_x^2(bv, d) + De_y^2(bv, d) + De_z^2(bv, d)) $
- Plenoxels also handles unbounded scenes, much as NeRF++ does: a sparse grid for the interior and multi-sphere images (MSI) for the exterior, with interpolation between shells (the outer shells are no longer view-dependent; they behave like textures)
- Assessment: in practice Plenoxels may not be that convenient. Fully explicit methods are not plug-and-play and are hard to fuse into other approaches; and explicit designs easily fall into local optima (vertex features are relatively isolated), producing tearing artifacts. The community therefore leans toward hybrid schemes, discrete feature storage combined with a small MLP, to trade off storage, speed, and quality
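For reference, a minimal PyTorch sketch of this TV term over a dense grid (my own simplification; the official implementation operates on sparse voxels with custom CUDA kernels):

```python
import torch

def tv_loss(grid):
    """Total-variation prior over vertex parameters; grid: (D, H, W, C)."""
    base = grid[:-1, :-1, :-1]
    dx = grid[1:, :-1, :-1] - base   # finite difference along x
    dy = grid[:-1, 1:, :-1] - base   # finite difference along y
    dz = grid[:-1, :-1, 1:] - base   # finite difference along z
    return torch.sqrt(dx**2 + dy**2 + dz**2 + 1e-12).mean()
```

Plenoxels applies this with separate weights to the density channel and the SH-coefficient channels.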
=== DVGO

#fig("/public/assets/Reading/Reconstruction/Improved_NeRF/2024-10-19-19-42-38.png")

- DVGO is arguably the first hybrid-NeRF work; unluckily its thunder was stolen by Instant-NGP
- Main contributions
  + Replaces the encoding with features stored in a grid (the same in nature as Instant-NGP's hash encoding; see the analysis below)
  + Applies a softplus after trilinear interpolation, so vertex values can range more widely, strengthening the grid's ability to fit high-frequency signals
  + Trains in two stages
    - First a coarse geometry search learns a rough coarse density grid $V^(("density")(si))$ and coarse color grid $V^(("rgb")(c))$, used both to densify sampling and to accelerate via skipping (pruning empty space)
    - Then upsample and fine-tune, introducing an MLP for color prediction that combines grid features with network learning (hybrid)
- In one sentence: with training strategies such as low-density grid initialization and post-interpolation activation, DVGO directly optimizes a voxel-based NeRF density field and color feature field, converging within minutes

=== InstantNGP

#fig("/public/assets/Reading/Reconstruction/Improved_NeRF/2024-10-19-13-31-01.png")

- Instant-NGP also uses voxel grids, which is not new in itself. What it adds is a hash table to speed up lookups and a multi-resolution blend that achieves the effect of earlier coarse-to-fine schemes. More importantly, the engineering value of its accompanying CUDA code is extremely high; that is what gives it its extreme speed (the few-second previews are only a rough first look and somewhat of a gimmick; reaching the paper's metrics still takes a few minutes, though that is already very, very much faster than the original method), cementing its fame and eclipsing both Plenoxels and DVGO
- Compare the evolution of encodings so far
  + NeRF and Mip-NeRF use PE and IPE respectively; neither has learnable parameters, they only inject high-frequency and scale information
  + Dense parametric encodings: some grid-based methods bring in the baking idea, greatly accelerating rendering via lookup and interpolation, with learnable parameters increasing expressiveness. The downside is cubic storage (only $approx 2.5%$ of the volume is actual surface) and a sometimes overly smooth fit; overall, wasteful
  + Sparse parametric encodings: some methods therefore train coarse-to-fine (though periodically updating the sparse structure may complicate training); others propose octrees and sparse grids, but their pruning more or less requires surface information, limiting applicability
- Instant-NGP's hash table is, at heart, also a learnable parametric encoding, not very different from a grid, just faster to query. The structure makes it convenient to cap the parameter count via $T$, and stacking multiple hash tables realizes multiscale more naturally. Hash tables are also more cache-friendly than tree structures
- Quoting an interpretation
#q[
  Returning to my earlier reading of NeRF's positional encoding: I see PE as introducing a set of orthogonal basis functions along the three Euclidean axes, with the MLP predicting their coefficients. The same view fits Instant-NGP and hybrid methods such as DVGO below: *the values stored in their grids can be seen as discrete point samples of some basis function*. Higher resolution means a higher sampling rate, so high-resolution grids can hold discrete samples of high-frequency basis functions, while low-resolution grids hold those of low-frequency ones. Unlike a fixed orthogonal basis, these grid-induced bases are hardly ever orthogonal, but they are learnable and optimizable, which further lightens the MLP's burden and speeds up convergence. *The key point of multi-resolution is that it introduces a suitable inductive bias so that grids of different resolutions capture information of the corresponding frequencies*
]
- The concrete procedure
  - The overall pipeline still resembles NeRF, but positions are encoded with hash tables (directions still use PE) and the MLP predicting density features shrinks. Both MLPs and the hash tables are optimized during training
  + Partition space into grids at several resolutions (the figure shows the 2D case for clarity); every grid vertex has quantized coordinates
  + Build and initialize $M$ hash tables, all of size $T$, one per resolution, each entry storing a feature of dimension $F$. A hash function maps each vertex coordinate to an index into its table
  + For an input point $bx$, find its 8 nearest grid vertices at each resolution and fetch their entries through the hash function; trilinearly interpolate to get the point's feature, concatenate the features across resolutions, and feed them to the MLP
- Multi-resolution
  - The hyperparameter $T$ controls the hash-table size; for good results it is usually set much smaller than the finest grid but larger than the coarsest (e.g. $16^3 < 64^3 < 512^3$), so hash collisions clearly occur at high resolutions
  - Since the vast majority of the grid (non-surface) is irrelevant, when a vertex in an irrelevant region collides with a vertex near the object surface, gradient backpropagation naturally makes the table entry attend more to the density of the surface region. In other words, pruning is realized adaptively through the MLP's attention (or, put differently, *voxel compression*)
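A minimal NumPy sketch of a single level of this lookup (my own illustration; the real implementation is fused CUDA and trains the table entries jointly with the MLPs): the hash XORs the coordinates multiplied by the primes from the paper, and the 8 fetched entries are blended trilinearly.

```python
import numpy as np

PRIMES = np.array([1, 2654435761, 805459861], dtype=np.uint64)  # from the paper

def hash_index(ijk, T):
    # ijk: (..., 3) integer vertex coords -> index into a table of size T
    h = np.zeros(ijk.shape[:-1], dtype=np.uint64)
    for d in range(3):
        h ^= ijk[..., d].astype(np.uint64) * PRIMES[d]
    return h % np.uint64(T)

def encode_level(x, table, res):
    """x: (N, 3) points in [0, 1); table: (T, F) learnable entries; res: grid resolution."""
    xg = x * res
    i0 = np.floor(xg).astype(np.int64)               # lower corner of each cell
    f = xg - i0                                      # fractional position in the cell
    corners = np.stack(np.meshgrid([0, 1], [0, 1], [0, 1], indexing="ij"),
                       -1).reshape(8, 3)             # the 8 cell corners
    idx = hash_index(i0[:, None, :] + corners[None], table.shape[0])   # (N, 8)
    w = np.prod(np.where(corners[None] == 1, f[:, None, :], 1 - f[:, None, :]), -1)
    return (w[..., None] * table[idx]).sum(axis=1)   # (N, F) trilinear blend
```

Instant-NGP runs $M$ such levels with geometrically growing `res` and concatenates the per-level features before the tiny MLP; at coarse levels whose vertex count does not exceed $T$, the mapping can be made collision-free.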
=== TensoRF

#fig("/public/assets/Reading/Reconstruction/Improved_NeRF/2024-10-19-22-32-47.png")

- Uses tensor decomposition to factor the 4D tensor into several low-rank tensor components, representing much with little. The paper uses CP and VM decompositions; other tensor decompositions could of course be tried
- In essence the voxel grid is factored into low-dimensional planar grids, dropping storage from cubic to quadratic
- Not examined in detail; similar ideas appear in EG3D (Efficient Geometry-aware 3D Generative Adversarial Networks, CVPR 2022), MeRF (Memory-Efficient Radiance Fields for Real-time View Synthesis in Unbounded Scenes, SIGGRAPH 2023), and others

=== MobileNeRF

- Suppose we designed a NeRF in which
  + the positions and number of samples per ray are known in advance (and far fewer than in vanilla NeRF)
  + each sample's positional feature vector is stored ahead of time (grid, baked), so only the small final part of the NeRF MLP (call it the Decoder) runs at inference
  + for every pixel of the frame being rendered, this computation runs in parallel on the GPU through the graphics rendering pipeline
- Such a NeRF would obviously cut computation drastically compared with the original. MobileNeRF is exactly this design, to the point of running in real time on mobile devices
- To achieve this, training is split into three stages:
  + Initialize a grid mesh and, with differentiable rendering, learn the scene geometry and a radiance field with continuous opacity $al$
  #fig("/public/assets/Reading/Reconstruction/Improved_NeRF/2024-10-20-00-36-16.png")
    - Initialize a 3D grid mesh (generate a voxel grid, place one point in each cell as a vertex, connect each group of 4 neighbors into a mesh face; the whole is called the grid mesh) and three MLPs predicting the opacity $al$, the spatial feature vector $f_k$ (the NeRF Encoder), and the color $c$ (the NeRF Decoder)
    - Rays are computed from the camera pose and pixel coordinates; the intersections of rays with the grid mesh serve as samples (no more random sampling as in vanilla NeRF), and weighted color blending gives the loss
    - MobileNeRF adopts the differentiable-rendering idea: vertex positions are trainable parameters pushed around by the loss, with regularization keeping each vertex's "range of motion" inside its cell
    - Borrowing from Instant-NGP, another $P times P times P times 1$ grid mesh is created to rule out irrelevant regions and speed up training
    - Sparsity and smoothness losses are introduced
  + Binarize $al$
    - In a rendering engine, handling semi-transparent meshes costs more than fully transparent or fully opaque ones, so opacity is binarized while training continues
    - For stability, stage two renders both the final image with binarized opacity, $hat(C)(r)$, and the image with continuous opacity, $C(r)$, and backpropagates the sum of both losses
    - Finally, as the loss nears convergence, the other parameters are frozen and only $cal(F), cal(H)$ are fine-tuned
  + Prune the grid mesh and save it as OBJ, bake the feature vectors into texture images, and save the Decoder's network weights
    - Faces the training images can never "see" are deleted (over $95%$ of the grid is removed), and the rest is saved as OBJ
    - Each (quadrilateral) face is assigned a texture patch of resolution $K times K$; since face vertex coordinates are known, it is easy to compute the spatial position of each texel and fetch the corresponding feature, completing the bake of the feature texture (i.e. the Encoder's output)
- The rest is fairly simple; the key innovations are the various optimizations here and the idea of stuffing the Decoder into a shader so that existing techniques of the classic graphics pipeline can be reused
- Strengths:
  + Real-time neural rendering on mobile devices for the first time
  + With OBJ, textures, and a neural shader, many classic graphics optimizations apply directly, e.g. LOD for large scenes, nine-grid tile streaming, and so on
- Weaknesses:
  + A single sample stands in for the whole ray path; expressing complex optics such as translucency or specular highlights requires high mesh accuracy and accurate material models, and MobileNeRF is not good at either
  + Representing all of space with a fixed-resolution grid causes two problems: insufficient detail resolution, and overly fragmented meshes with too many vertices on large flat regions
  + To cut the final OBJ vertex count, stage three deletes faces entirely invisible in the training images. This requires the captured images to cover nearly every camera angle needed at render time, or large holes appear in the rendered frames. The deletion also costs the model "generalization": switching between neighboring camera angles can produce abrupt frame changes
  + Inference is fast but training is slow: about 24 hours on 8 A100 GPUs

== Representation Enhancement

- Motivation
  + Surface-rendering methods attend only to the intersection with the surface, while volume-rendering methods take many samples along the ray, so the latter supervise the implicit field more sensibly and comprehensively
    - Put differently, volume rendering lets the deformation reach "deeper": it also supervises regions far from the current surface, whereas surface rendering very easily falls into local optima near the current surface
  + But NeRF-style implicit representations have their own difficulty: the end goal is usually to render rigid bodies, and extracting high-quality surfaces from them is hard because the representation lacks sufficient surface constraints
  + Implicit surface fields are superior for representing geometry but hard to train by NeRF-style ray marching; with a naive conversion of the implicit surface function into a density function, the surface position estimated by the ray integral lands slightly closer than the true surface
- For example, VolSDF (Volume Rendering of Neural Implicit Surfaces, NeurIPS 2021) and NeuS (Learning Neural Implicit Surfaces by Volume Rendering for Multi-view Reconstruction, NeurIPS 2021) use an SDF to guide sample generation; the derivations are formula-heavy, so I will not go into detail
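To give a flavor of that conversion without the full derivation, here is a minimal NumPy sketch of the Laplace-CDF transform VolSDF uses to turn a signed distance into a density ($al$ and $be$ are learnable in the paper; the values below are my own placeholders):

```python
import numpy as np

def laplace_cdf(s, beta):
    # CDF of a zero-mean Laplace distribution with scale beta.
    return np.where(s <= 0, 0.5 * np.exp(s / beta), 1.0 - 0.5 * np.exp(-s / beta))

def sdf_to_density(sdf, alpha=1.0, beta=0.1):
    # VolSDF: sigma(x) = alpha * Psi_beta(-d(x)).
    # Density saturates at alpha inside the surface (sdf < 0) and decays
    # smoothly outside; a smaller beta concentrates it near the zero level set.
    return alpha * laplace_cdf(-sdf, beta)
```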
https://github.com/jasmerri/tsumo
https://raw.githubusercontent.com/jasmerri/tsumo/main/src/image-sets.typ
typst
MIT License
#import "./tile.typ": ids, tiles, types, variants #let _prefixes = ( types.character: "Man", types.bamboo: "Sou", types.dot: "Pin", types.dragon: "", types.wind: "", types.other: "", ) #let _suit-names = ( ids.numbered.one: "1", ids.numbered.two: "2", ids.numbered.three: "3", ids.numbered.four: "4", ids.numbered.five: "5", ids.numbered.six: "6", ids.numbered.seven: "7", ids.numbered.eight: "8", ids.numbered.nine: "9", ) #let _dragon-names = ( ids.dragon.white: "Haku", ids.dragon.green: "Hatsu", ids.dragon.red: "Chun", ) #let _wind-names = ( ids.wind.east: "Ton", ids.wind.south: "Nan", ids.wind.west: "Shaa", ids.wind.north: "Pei", ) #let _other-names = ( ids.other.back: "Back", ids.other.question: "Blank", ids.other.blank: "Front", ) #let _names = ( types.character: _suit-names, types.bamboo: _suit-names, types.dot: _suit-names, types.dragon: _dragon-names, types.wind: _wind-names, types.other: _other-names, ) #let _variant-names = ( variants.akadora: "Dora", ) #let _base = "./assets/Tiles/Riichi/" #let _resolve-image-path(tile, ..args) = { let path = _base if tile == tiles.other.nothing { return none } if tile.type not in _prefixes { panic("invalid riichi tile type: " + tile.type) } path += _prefixes.at(tile.type) let n = _names.at(tile.type) if tile.which not in n { panic("invalid riichi tile: " + tile.type + "/" + tile.which + "/" + tile.variant) } path += n.at(tile.which) if tile.variant != none { if tile.variant not in _variant-names { panic("invalid riichi tile variant: " + tile.variant) } if tile.variant == variants.akadora and ( (tile.type != types.character and tile.type != types.bamboo and tile.type != types.dot) or tile.which != ids.numbered.five ) { panic("invalid riichi tile for akadora: " + tile.which) } path += "-" + _variant-names.at(tile.variant) } image(path + ".svg", ..args) } // The default image resolver. Uses Riichi tiles from ./assets/Tiles/Riichi. #let riichi = ( // Return an image from a tile tuple. // The default drawer assumes this exists. resolve-tile: _resolve-image-path, // Image for the background of a face-up tile. // Note that this is used for calculation of tile sizes. // If your set of tiles doesn't have a background, this function should return blank content of the correct size. // The default drawer assumes this exists. front: (..args) => image(_base + "Front.svg", ..args), // Image for the background of a face-down tile. // The default drawer assumes this exists. back: (..args) => image(_base + "Back.svg", ..args), )
https://github.com/HernandoR/lz-brilliant-cv
https://raw.githubusercontent.com/HernandoR/lz-brilliant-cv/main/modules/projects.typ
typst
Apache License 2.0
#import "../brilliant-CV/template.typ": *

#cvSection("Projects & Associations")

#cvEntry(..languageSwitch((
  "en": (
    title: [Personal Server | DevOps],
    society: [Personal Project],
    date: [08/2021 - Present],
    location: [Shanghai, China],
    description: list(
      [Deployed multiple virtual machines hosted on a Proxmox Virtual Environment.],
      [Constructed an automatic TV-show tracking system with docker-compose.],
      [Migrated the services to Kubernetes, and used TLS to secure access.],
      [Deployed a CDN service on AWS.]
    )
  ),
  "zh": (
    title: [个人服务器 | DevOps],
    society: [个人项目],
    date: [08/2021 - 至今],
    location: [中国上海],
    description: list(
      [在Proxmox虚拟环境上部署了多个虚拟机。],
      [通过docker-compose构建了一个自动的电视追剧系统。],
      [将服务迁移到了Kubernetes上, 并使用TLS保持安全访问],
      [在AWS上部署了CDN服务。]
    )
  )
)))

#cvEntry(..languageSwitch((
  "en": (
    title: [OpenWrt Firmware | Opensource Project],
    society: [Personal Project],
    date: [06/2023 - 10/2023],
    location: [Shanghai, China],
    description: list(
      [Improved the OpenWrt firmware by injecting extra drivers.],
      [Customized the firmware build process for required packages.],
      [Automated the build process with CI/CD on GitHub.]
    )
  ),
  "zh": (
    title: [OpenWrt 固件 | 开源项目],
    society: [个人项目],
    date: [06/2023 - 10/2023],
    location: [中国上海],
    description: list(
      [通过注入额外的驱动程序来改进OpenWrt固件。],
      [为所需的软件包定制固件构建过程。],
      [通过GitHub的CI/CD自动化构建过程]
    )
  )
)))

#cvEntry(..languageSwitch((
  "en": (
    title: [Vesuvius Challenge - Ink Detection],
    society: [Kaggle ML Competition],
    date: [03/2023 - 06/2023],
    location: [Singapore],
    description: list(
      [Constructed a PyTorch training template.],
      [Performed AWS SageMaker auto-training.],
      [Trained multiple segmentation models, e.g., UNet, FPNet, SegNet.],
      [Evaluated the Segment Anything Model (SAM) from Meta.]
    )
  ),
  "zh": (
    title: [Vesuvius Challenge - Ink Detection ],
    society: [Kaggle 机器学习竞赛],
    date: [03/2023 - 06/2023],
    location: [新加坡],
    description: list(
      [构建了一个PyTorch训练模板。],
      [执行了AWS SageMaker自动训练。],
      [训练了多个图像分割模型,例如UNet、FPENet、SegNet],
      [从Meta评估了Segment Anything Model (SAM)。]
    )
  )
)))

#cvEntry(..languageSwitch((
  "en": (
    title: [Image Matching Challenge 2023 | Kaggle Competition],
    society: [Kaggle ML Competition],
    date: [02/2023 - 06/2023],
    location: [Singapore],
    description: list(
      [Extracted global image features using EfficientNet-B6/B7.],
      [Matched feature points using SuperPoint+SuperGlue and DKM models.],
      [Fed the matching relations into COLMAP to obtain the final 3D spatial positions and pose estimates.],
      [Achieved a silver medal (top 10%).]
    )
  ),
  "zh": (
    title: [Image Matching Challenge 2023 | Kaggle Competition],
    society: [Kaggle 机器学习竞赛],
    date: [02/2023 - 06/2023],
    location: [新加坡],
    description: list(
      [使用EfficientNet-B6/B7对图像全局特征进行提取],
      [利用SuperPoint+SuperGlue和DKM模型匹配特征点],
      [将匹配关系输入colmap获取最终的3D空间位置与姿态估计],
      [获得了银奖(前10%)。]
    )
  )
)))

#cvEntry(..languageSwitch((
  "en": (
    title: [ORB-SLAM2 Based Distributed SLAM],
    society: [Undergraduate Thesis],
    date: [02/2022 - 06/2022],
    location: [Wuhan, China],
    description: list(
      [Modified ORB-SLAM2 to make it suitable for a multi-agent environment.],
      [Separated the front end and back end of ORB-SLAM2 to offload computing tasks to different nodes.],
      [Used ROS to conduct multi-agent joint modeling experiments on the KITTI dataset.]
    )
  ),
  "zh": (
    title: [基于ORB与ROS的分布式SLAM],
    society: [本科毕业设计],
    date: [02/2022 - 06/2022],
    location: [中国武汉],
    description: list(
      [改造ORB SLAM 使其适用于多节点(multi-agent)环境],
      [将ORB SLAM前后端分离, 以卸载计算任务至不同节点],
      [使用ROS在KITTI 数据集上进行多节点联合建模实验]
    )
  )
)))

#cvEntry(..languageSwitch((
  "en": (
    title: [Design of an FDM 3D Printer Based on Machine Vision Feedback],
    society: [National Innovation Training Program],
    date: [06/2020 - 06/2021],
    location: [Shanghai, China],
    description: list(
      [Implemented colour control algorithms.],
      [Integrated drive, control, and power management circuits.],
      [Implemented the motor control system based on an STM32 chip.],
      [Received First Prize in the national extracurricular academic competition "The Challenge Cup".]
    )
  ),
  "zh": (
    title: [基于机器视觉反馈的FDM 3D打印机],
    society: [国家级创新训练计划],
    date: [06/2020 - 06/2021],
    location: [中国上海],
    description: list(
      [实现了颜色控制算法。],
      [集成了驱动、控制和电源管理电路。],
      [基于STM32芯片实现了控制系统。],
      [在全国课外学术竞赛“挑战杯”中获得了二等奖]
    )
  )
)))

#cvEntry(..languageSwitch((
  "en": (
    title: [Research on Quantification and Prediction of Music Evolution],
    society: [Interdisciplinary Contest in Modeling, World-wide],
    date: [04/2021],
    location: [Wuhan, China],
    description: list(
      [Developed multiple models to quantify musical influence.],
      [Applied an RNN to analyze the development of "music's impact on the network" as a time series.],
      [Received the Meritorious Winner prize.]
    )
  ),
  "zh": (
    title: [研究音乐演变的量化和预测],
    society: [ICM 数学竞赛,全球],
    date: [04/2021],
    location: [中国武汉],
    description: list(
      [开发了多个模型来量化音乐影响力。],
      [应用RNN神经网络分析时间序列中“音乐对网络的影响”的发展。],
      [获得了Meritorious Winner]
    )
  )
)))

#cvEntry(..languageSwitch((
  "en": (
    title: [Research on Prediction and Placement Strategies for the EDP Population],
    society: [Interdisciplinary Contest in Modeling, World-wide],
    date: [04/2020],
    location: [Wuhan, China],
    description: list(
      [Constructed a sea-level-rise model to quantitatively predict the global mean sea level and the scale of EDPs.],
      [Proposed solutions to relocate EDPs through cooperative game theory.],
      [Received the Finalist prize.]
    )
  ),
  "zh": (
    title: [EDP人群数量预测和安置方案研究],
    society: [ICM 数学竞赛, 全球],
    date: [04/2020],
    location: [中国武汉],
    description: list(
      [构建了与人类活动相关的海平面上升模型,以定量预测全球平均海平面高度和环境流离失所者的规模。],
      [提出了通过合作博弈理论重新安置EDP的解决方案。],
      [获得了Finalist]
    )
  )
)))
https://github.com/jgm/typst-hs
https://raw.githubusercontent.com/jgm/typst-hs/main/test/typ/compute/foundations-03.typ
typst
Other
// Test panic. // Error: 7-12 panicked with: 123 #panic(123)
https://github.com/Jollywatt/typst-fletcher
https://raw.githubusercontent.com/Jollywatt/typst-fletcher/master/tests/mark-gallery/test.typ
typst
MIT License
#set page(width: 15cm, height: auto, margin: 1em) #import "/src/exports.typ" as fletcher: diagram, node, edge #context table( columns: (1fr,)*6, stroke: none, ..fletcher.MARKS.get().pairs().map(((k, v)) => [ #set align(center) #raw(k) \ #diagram(spacing: 18mm, edge(stroke: 1pt, marks: (v, v))) ]), )
https://github.com/GYPpro/Java-coures-report
https://raw.githubusercontent.com/GYPpro/Java-coures-report/main/.VSCodeCounter/2023-12-15_05-15-05/diff.md
markdown
# Diff Summary Date : 2023-12-15 05:15:05 Directory d:\\Desktop\\Document\\Coding\\JAVA\\Rep\\Java-coures-report Total : 62 files, 4962 codes, 13 comments, 1260 blanks, all 6235 lines [Summary](results.md) / [Details](details.md) / Diff Summary / [Diff Details](diff-details.md) ## Languages | language | files | code | comment | blank | total | | :--- | ---: | ---: | ---: | ---: | ---: | | Typst | 14 | 4,836 | 63 | 1,253 | 6,152 | | C++ | 1 | 102 | 12 | 10 | 124 | | Java | 47 | 24 | -62 | -3 | -41 | ## Directories | path | files | code | comment | blank | total | | :--- | ---: | ---: | ---: | ---: | ---: | | . | 62 | 4,962 | 13 | 1,260 | 6,235 | | Report | 14 | 4,836 | 63 | 1,253 | 6,152 | | rubbish | 1 | 3 | 0 | 2 | 5 | | sis10 | 7 | 823 | 49 | 114 | 986 | | sis2 | 1 | 0 | -15 | -1 | -16 | | sis4 | 1 | 0 | 0 | 1 | 1 | | sis6 | 3 | -22 | 4 | -9 | -27 | | sis7 | 5 | -137 | 7 | -16 | -146 | | sis8 | 13 | -174 | -17 | -85 | -276 | | sis9 | 17 | -367 | -78 | 1 | -444 | [Summary](results.md) / [Details](details.md) / Diff Summary / [Diff Details](diff-details.md)
https://github.com/MingLLuo/typst-course-description
https://raw.githubusercontent.com/MingLLuo/typst-course-description/main/utils.typ
typst
#let Chinese = 0
#let English = 1
// #let Mixed = 2 // unused, under construction

#let mode = 1 // Chinese, English, Mixed

// translate function, with context
#let translate(zh: [], en: []) = {
  if mode == Chinese {
    zh
  } else if mode == English {
    en
  } else {
    zh + " / " + en
  }
}

// Example:
// zh: 2021 学年 秋季
// en: 2021 Fall
#let term(year, zhTerm, enTerm) = {
  translate(
    zh: [#year 学年 #zhTerm],
    en: [#year #enTerm],
  )
}

// Example:
// zh: 2024 年 12 月 1 日
// en: 2024/12/1
// Note: takes positional arguments, matching the call in FrontPage below.
#let translate-date(year, month, day) = {
  translate(
    zh: [#year 年 #month 月 #day 日],
    en: [#year/#month/#day],
  )
}

// https://github.com/werifu/HUST-typst-template.git
#let info_value(body) = {
  box(
    width: 100%,
    inset: 2pt,
    stroke: (
      bottom: 1pt + black,
    ),
    text(
      size: 16pt,
      bottom-edge: "descender",
    )[
      #body
    ],
  )
}

#let info_key(body) = {
  box(
    width: 100%,
    inset: 2pt,
    stroke: none,
    text(
      size: 16pt,
      body,
      weight: "semibold",
    ),
  )
}

#let FrontPage(
  // appliedSchool: "",
  // appliedMajor: "",
  name: "",
  graduatedFrom: "",
  graduatedMajor: "",
  term: "",
  graduatedLogoLink: "",
  date: "",
) = {
  let displayDate = if date == "" {
    translate-date(2024, 12, 1)
  } else {
    date
  }

  v(40pt)
  align(center)[
    #set text(size: 30pt, weight: "bold")
    #translate(zh: [课程描述], en: [Course Descriptions])
    #v(20pt)
    #image(
      graduatedLogoLink,
      width: 6cm,
    )
  ]
  v(80pt)

  // Info
  align(center)[
    #grid(
      columns: (auto, 300pt),
      rows: (50pt, 50pt),
      gutter: 3pt,
      // info_key(translate(zh: [申请学校], en: [Applied School])), info_value(appliedSchool),
      // info_key(translate(zh: [申请专业], en: [Applied Major])), info_value(appliedMajor),
      info_key(translate(zh: [姓名], en: [Name])), info_value(name),
      info_key(translate(zh: [毕业院校], en: [Graduated From])),
      info_value(graduatedFrom),
      info_key(translate(zh: [毕业专业], en: [Graduated Major])),
      info_value(graduatedMajor),
      info_key(translate(zh: [时段], en: [Term])), info_value(term),
      info_key(translate(zh: [填写日期], en: [Date])),
      info_value(displayDate),
    )
    #pagebreak()
  ]
}
https://github.com/Jollywatt/typst-fletcher
https://raw.githubusercontent.com/Jollywatt/typst-fletcher/master/tests/diagram-inline/test.typ
typst
MIT License
#set page(width: auto, height: auto, margin: 1em) #import "/src/exports.typ" as fletcher: diagram, node, edge #for (i, a) in ("->", "=>", "==>").enumerate() [ Diagram #diagram( node-inset: 2.5pt, label-sep: 1pt + i*1pt, node((0, -i), $A$), edge((0, -i), (1, -i), text(0.6em, $f$), a), node((1, -i), $B$), ) and equation #($A -> B$, $A => B$, $A arrow.triple B$).at(i). \ ] The formula is #diagram($#[Hello] edge(->) & #[World]$)!
https://github.com/Isaac-Fate/booxtyp
https://raw.githubusercontent.com/Isaac-Fate/booxtyp/master/src/theorems/example.typ
typst
Apache License 2.0
#import "new-plain-template.typ": new-plain-template #import "../counters.typ": example-counter #let example = new-plain-template("Example", template-counter: example-counter)
https://github.com/HarryLuoo/sp24
https://raw.githubusercontent.com/HarryLuoo/sp24/main/math321/hw12.typ
typst
= HW 12, <NAME>, <EMAIL>
= 1
Recall that the Cauchy-Riemann equations: $partial_x u = partial_y v, quad partial_y u = -partial_x v$ are a necessary condition for a complex function to be holomorphic.

let $f(z) = (x+i y)^3$. Expanding, we have:
$ f(x,y) &= x^3 + 3 i x^2 y - 3 x y^2 - i y^3 \
  & = (x^3 - 3x y^2) + i(3 x^2 y - y^3) \
  & => cases(u(x,y) = x^3 - 3x y^2 , v(x,y) = 3 x^2 y - y^3) $
Check if the Cauchy-Riemann equations hold:
$ partial_x u = 3x^2-3y^2, quad partial_y u = -6x y\
  partial_x v = 6 x y, quad partial_y v = 3 x^2 - 3 y^2 \
  => partial_x u = partial_y v, quad partial_y u = -partial_x v $
Thus the function satisfies the Cauchy-Riemann equations and is holomorphic.

= 2
$ f(x,y) &= e^(x)(cos y + i sin y) \
  & = e^(x)cos y + i e^x sin y \
  & => cases(u(x,y) = e^x cos y, v(x,y) = e^x sin y) $

$ => cases(partial_x u = e^x cos y quad partial_y u = -e^x sin y , partial_y v = e^x cos y quad partial_x v = e^x sin y)\
  => partial_x u = partial_y v, quad partial_y u = -partial_x v $
The Cauchy-Riemann equations hold at any point $z in bb(C)$.

= 3
let $z = x+i y$. the function becomes
$ f(x,y) &= 1/(x+i y) = (x- i y)/((x + i y)(x - i y)) =(x- i y)/(x^2 + y^2) \
  & = x/(x^2 + y^2) - i y/(x^2 + y^2) \
  & => cases(u(x,y) = x/(x^2 + y^2), v(x,y) = -y/(x^2 + y^2)) $

$ cases(
    partial_x u = (-x^2 + y^2)/(x^2 + y^2)^2 quad partial_y u = -(2 x y)/(x^2 + y^2)^2 ,
    partial_x v = (2x y)/((x^2+y^2)^2) quad partial_y v = (y^2-x^2)/(x^2 + y^2)^2
  )\
  => partial_x u = partial_y v, quad partial_y u = -partial_x v $
It is clear that the four partial derivatives exist and are continuous wherever $f$ is defined, i.e. away from the origin. The Cauchy-Riemann equations hold at any point $z in bb(C) without {0}$.
#rect(inset: 8pt)[
  Thus the function is holomorphic on $bb(C) without {0}$.
]

= 4
we propose the following parametrization of the contour C: $z = e^(i t), dif z = i e^(i t) dif t, "for" t in (0,2pi) $
The integration becomes:
$ integral.cont_(C) z^(-n) dif z = integral_(0)^(2pi) (e^(i t))^(-n) i e ^( i t) dif t = integral_(0)^(2pi) i e^(i(1-n)t) dif t $
Noticing $ integral e^(i N t) dif t = 1/(i N) e^(i N t)$ if $N eq.not 0$, the integral becomes, for $n eq.not 1$ (when $n = 1$ the integrand is the constant $i$ and the integral equals $2 pi i$),
$
  #rect(inset: 8pt)[ $ display( i [1/(i(1-n)) e^(i(1-n)t)]_(0)^(2pi) = 0 )$ ]
$

= 5
#image("assets/2024-04-23-18-25-35.png")
- Solution:
We parametrize the unit circle as $z = e^(i t), dif z = i e^(i t) dif t, "for" t "from" pi/2 "to" 0$
The integration becomes
$ integral_(pi/2)^(0) (e^(3 i t)+ e^(e^(i t) )) i e^(i t) dif t &= integral_(0)^(pi/2) - i e^(4 i t) dif t + integral_(0 )^(pi/2) -i e^(e^(i t) + i t) dif t \
  & = lr(-i [(e^(4 i t) )/(4 i)]_0^(pi/2)) - i( lr(-i e^(e^(i t) )mid(|)_0 ^(pi/2) ) ) \
  & = e - e^(display(e^(pi slash 2 space i)) ) - 1/4 e^(2 pi i) + 1/4 \
  #rect(inset: 8pt)[ $ display( & = e - e^(i) )$ ]
$
https://github.com/denizenging/site
https://raw.githubusercontent.com/denizenging/site/master/page/search/index.en.typ
typst
#import "@local/pub-page:0.0.0": * #show: template( title: "Search", layout: "search", outputs: ("html", "json"), menu: (5, "search"), )
https://github.com/liuxu89/typstbook
https://raw.githubusercontent.com/liuxu89/typstbook/main/book.typ
typst
#import "@preview/shiroa:0.1.0": * #show: book #book-meta( title: "Liuxu's wiki", summary: [ #prefix-chapter("sample-page.typ")[Hello, typst] = 毛泽东选集 - #chapter("src/1-mao/别了司徒雷登.typ")[别了,司徒雷登] ] ) // re-export page template #import "/templates/page.typ": project #let book-page = project
https://github.com/Otto-AA/definitely-not-tuw-thesis
https://raw.githubusercontent.com/Otto-AA/definitely-not-tuw-thesis/main/src/styles/front-matter.typ
typst
MIT No Attribution
#import "utils/state.typ": is-back-matter #let front-matter-styles = rest => { set page(numbering: "i") counter(page).update(1) counter(heading).update(0) set heading(numbering: none) rest }
https://github.com/MariusLD/2K24-Internship-Sopra-Steria
https://raw.githubusercontent.com/MariusLD/2K24-Internship-Sopra-Steria/master/Internship_Report.typ
typst
#set text(
  size: 12pt
)

#set par(
  justify: true
)

#set heading(numbering: "I.1")

#let title = [
  SI Formation - Internship Report
]

#let year = [
  Year 2023/2024
]

#table(
  columns: (22%, 22%, 80pt ,24%, 22%),
  inset: 8pt,
  align: horizon,
  image("logos/esir.png"),
  image("logos/Logo-Universite-de-Rennes-1.png"),[],[],
  image("logos/sopra_steria.png", width: 50%),
  stroke: white
)

#table(
  columns : (50%, 50%),
  inset : 8pt,
  align : horizon,
  [*<NAME>* \ IT - Information Systems \ 2024 \ \ *<NAME>* \ Professor-Researcher],
  [*<NAME>* \ C<NAME>, n°2, 08011 Barcelona \ info_es$@$<EMAIL> \ \ *<NAME>* \ Senior Full-Stack Engineer]
)

#v(100pt)

#align(center, text(30pt)[#title])

#v(300pt)

#align(left, text(17pt)[#year])

#pagebreak()

#align(left, text(17pt)[*Acknowledgments*])

#v(40pt)

I would like to open this report by extending my most sincere thanks to everyone who supported me during these six months of internship.

First of all, I wish to express my deep gratitude to <NAME>, who was far more than an internship tutor. Throughout this experience she proved to be a true mentor. Thanks to her relevant and insightful advice, I was able to build up my skills very quickly and make steady progress.

I would also like to thank <NAME>, whose attentive ear and availability were invaluable in answering my questions. He also gave me the opportunity to explore tasks that cut across the project, which considerably enriched my knowledge.

Finally, I wish to express my appreciation to the whole team: the developers, with whom discussions were always enriching and who were ready to share their knowledge during our sessions together, and the business analysts, who were always available to explain the functional aspects of the project in detail. This collaboration and overall good atmosphere contributed greatly to making this internship not only formative but also enjoyable, allowing me to work on a captivating subject under optimal conditions.

#pagebreak()

#set page( numbering : "1", number-align : right)

#align(left, text(17pt)[*Abstract* (en)])

#v(10pt)

The goal of this internship was to develop an innovative solution to address the digitization gap in training management within the French Customs Administration. Given the crucial importance of continuous training for customs officers, it was imperative to replace outdated paper-based processes with a tailored digital platform. This project aims to create a website that centralizes all information related to customs officer training. This ambitious project emphasizes security and accessibility for all users. An earlier initiative, known as OPHELIE, had already been partially implemented. However, this initial effort, which dates back to the pre-Covid era, experienced significant delays and was ultimately abandoned by the customs administration. Over time, the need for such a solution resurfaced, and the requirements evolved, rendering the OPHELIE project obsolete. In this context of renewal, <NAME> proposed to revive and complete the project, now under the name SI Formation.

#v(10pt)

#align(left, text(17pt)[*Résumé* (fr)])

#v(10pt)

The goal of this internship was to develop an innovative solution to fill the digitization gap in training management within the French customs administration.
Indeed, given the crucial importance of continuous training for customs officers, it was imperative to replace paper-based processes with a suitable digital platform. This project aims to create a web application that centralizes all information related to customs officers' training. This ambitious project emphasizes security and accessibility for all users. An earlier initiative, known as OPHELIE, had already been partially put in place. However, that initial request, dating back to the pre-Covid period, saw its progress slow considerably before being abandoned by the customs administration. Over time, the need for such a solution resurfaced and the requirements evolved, rendering the OPHELIE project obsolete. It is in this context of renewal that <NAME> proposed to take the project over and see it through, now under the name SI Formation.

#pagebreak()

#outline(
  title: text(17pt)[*Contents* #v(10pt)],
  indent: auto)

#pagebreak()

#set page(header: [
  #set text(11pt)
  #set align(left)
  _SI Formation_ #h(1fr) *_Introduction_*
])

= Introduction
#v(10pt)

My internship took place in the context of the French customs administration. As a reminder, customs is a state administration regulating trade, tasked with facilitating and securing the flow of goods. Customs is an administration capable of monitoring, surveillance, and intervention on land but also in maritime territory, including protection against marine pollution and control of fishing activities.

Customs operates in a global context of growing trade flows driven by the globalization of economic circuits and the exchange of goods linked to travelers' movements. This globalization has come with an economic, cultural, and political opening of the various geographic areas. Globalization, very positive in many respects, nevertheless carries risks, which is why it must be regulated with heightened vigilance. The main points of vigilance are:

- The diversity of economic partners: these exchanges take place between countries with different levels of development, consumer protection, environmental concern, and tax regimes. The growth of trade can bring a surge in fraud; organized crime perpetually renews its methods, increasing the volume of prohibited goods in certain flows such as narcotics, smuggling and counterfeiting, cigarette trafficking, money laundering, and arms and ammunition trafficking.
- The emergence of new threats endangering the sustainability and protection of our environment, of natural species and habitats, and of our cultural heritage. These threats take the form of illegal trafficking of endangered animal and plant species (which is also the second leading cause of their disappearance) and illegal waste transfers (hospital waste, chemicals, heavy metals). Cultural heritage, for its part, is subject to regulations protecting works of art, collectors' items, and antiques.

In short, customs faces numerous threats that require rigorous supervision to stop illegal activities. These threats, often hard to foresee, must be anticipated as well as possible.
To that end, it is important that customs officers be properly trained, prepared for any scenario, informed about the different types of trafficking, and ready to apply strict and rigorous verification protocols.

#pagebreak()

#set page(header: [
  #set text(11pt)
  #set align(left)
  _SI Formation_ #h(1fr) *_Sopra Steria_*
])

= Sopra Steria
#v(10pt)

Moving on to the group in which I worked: Sopra Steria is a French digital services company (ESN, "entreprise de services du numérique") and a consulting firm for the digital transformation of companies and organizations. It was born from the merger of the SOciété de PRogrammation et d'Analyses (Sopra) and Steria (Société d'étude et de réalisation en informatique et automatisme).

<NAME> is a European leader in digital transformation and ranks among the top five European players in the digital services sector, with 56,000 employees across 30 countries, mostly in France (48%), the United Kingdom (19%), elsewhere in Europe (31%), and the rest of the world (2%). The company operates in many domains, including financial services, the public sector, telecommunications, media, entertainment, aeronautics and space, defense, security, energy, utilities, transport, retail, and many more. @sopra

Sopra Steria thus stands out as a major player on the international stage, whose entities may be called on to collaborate closely on large-scale projects. This collaborative dynamic was palpable in the team I had the chance to join. The team was split in two, geographically speaking: one part was based in Nantes, while the other, to which I belonged, was in Barcelona.

At Sopra Steria, teams and projects are organized in a well-defined tree structure. At its base are several centers of expertise, each specialized in a particular domain. I had the opportunity to work in the Customs division, a key domain within the company. This division is itself subdivided into several spheres, each grouping specific projects and applications. The SI Formation project, in which I participated, belongs to the Taxation sphere.

#pagebreak()

#set page(header: [
  #set text(11pt)
  #set align(left)
  _SI Formation_ #h(1fr) *_Analysis_*
])

= Analysis
#v(10pt)

== Context
#v(5pt)

The project is called SI Formation. Its goal is to provide French customs officers with an alternative to paper, letting them set up new training courses more easily. As mentioned in the introduction, the customs domain is very complex, requiring deep knowledge of numerous protocols in order to react effectively to any situation.

The need to go digital to ease team management and training organization resurfaced this year, although it had been identified some time ago. Indeed, a first project, named CLAF, already existed, but it ran on very old technologies and was too hard to maintain. A first attempt at a successor, named OPHELIE, had emerged before 2020. For OPHELIE, development teams had already been mobilized and the project had well and truly begun.
However, the arrival of the Covid-19 pandemic, which forced people into lockdown, had an unprecedented impact on the activity of French companies, causing an overall production slowdown. The OPHELIE project was therefore quickly suspended until the end of that period. When normal activity resumed, the project could restart, but a new constraint appeared: client needs had changed radically in the meantime. The specifications had to be entirely revised, which led to the project being abandoned to avoid further complications. This is how SI Formation was born, a different version of OPHELIE answering the newly expressed needs.

== State of the Art
#v(5pt)

Now that the basics of the problem have been laid out, an initial phase of reflection was carried out to succeed in the mission. There is no shortage of web development technologies ("the whole process of creating websites or applications" @devweb) these days. It is a constantly evolving field, and nowadays, to guarantee speed and quality, developers have no choice but to turn to frameworks.

"Frame" plus "work": frameworks are genuine toolboxes and constitute a rigorous methodology for developers. Indeed, these tools are generally a good answer to the critical points of a development phase: project structure, standards, security, and CRUD (the acronym for Create, Read, Update, Delete, the four basic operations for data persistence, in particular storing information in a database @crud).

Frameworks are software development libraries offering a prebuilt structure, with self-contained components ready to be reused. Modern frameworks are often open source, meaning they are maintained by a community of passionate, specialized developers who work together to improve the code and continuously add new features to adapt to developers' evolving needs. @framework2

Ultimately, the points to remember about the importance of using frameworks on large-scale projects are:

- Speed: a working base already exists; a good developer does not create unnecessary work for themselves and knows how to use existing tools to their advantage.
- Architecture: frameworks are optimized to guarantee clean, functional code, which avoids slowing the application down.
- Productivity: on a project involving a large team, frameworks provide a common development base that defines development strategies everyone must follow. Building skills on this kind of technology makes it easier to move between projects without retraining; in theory, only the business logic remains to be absorbed.
- Community: this aspect, mentioned earlier, is essential. Working with such tools means benefiting from the support of a vast community able to answer questions through documentation and forums, helping fix bugs or solve more or less well-known programming problems. Once again, a significant time saver.
@framework2

A survey by Stack Overflow, the best-known exchange forum in the programming world, revealed that more than 55% of developers use a framework in their daily work. This percentage may be explained by the fact that some consider frameworks a brake on creativity; moreover, companies tend to develop their own private in-house tools. @framework

As a reminder, a web project is built around two distinct but interdependent sections: the frontend and the backend.

#figure(
  image("ressources/webapp.png", width: 80%),
  caption: [
    Architecture of a web application @frontback.
  ],
)

The term frontend refers to the visible part of an application, the one users manipulate directly, such as menus, buttons, and the visual elements of the page. On the other side is the server part. The backend of an application listens for instructions and performs various kinds of processing. When a user interacts with the frontend, the interaction sends a request to the backend in HTTP format. The backend processes the request and returns a response. The components we communicate with are:

- Database servers, to fetch or modify data.
- Microservices that execute a subset of the requested tasks.
- Third-party APIs to perform auxiliary functions.

Among the most popular frontend frameworks @popframeworks are:

- React: developed by Facebook, React is an open-source framework focused on building dynamic, reactive user interfaces through a component-based architecture. Its "declarative" approach simplifies development, especially for complex interfaces. It is also widely used in mobile development with React Native, enabling substantial code reuse between web and mobile platforms.
- Vue.js: Vue is a progressive JavaScript framework, designed to be adopted incrementally in a project. Its simplicity and reactivity make it a popular choice, especially for single-page applications. Vue stands out with its virtual DOM ("The Document Object Model (DOM) is a programming API for HTML and XML documents. It defines the logical structure of documents and the way a document is accessed and manipulated" @dom) and its simple syntax, easing the learning curve. It is ideal for projects where modularity and performance are essential.
- Angular: Angular, developed by Google, is a complete, robust framework written mainly in TypeScript. It stands out with its organized structure and two-way data binding, which automatically synchronizes data between the model and the view. Angular also offers powerful tools such as the Angular CLI for scaffolding and testing, as well as advanced dependency management through dependency injection. These characteristics make it a preferred choice for enterprise applications requiring large scale and a maintainable architecture.
- Node.js: although Node.js is usually considered a backend runtime environment, it can be used on the frontend side for specific tasks. Its ability to handle many simultaneous requests thanks to an event-driven architecture makes it suitable for applications demanding high performance and strong scalability.

Why Angular for this project?
The choice of Angular for this project is explained by several factors specific to the application's needs:

- Complexity and scale: the project requires managing many complex features and intensive interaction with the backend. Angular, with its modular structure and built-in tooling, makes it possible to develop a robust, maintainable architecture while guaranteeing a clear separation of concerns.
- TypeScript and maintainability: Angular is designed around TypeScript, a superset of JavaScript that adds static typing. This improves code maintainability, helps catch errors early, and makes development more structured, which is essential for large teams or long-running projects.
- Two-way data binding: Angular's two-way data binding simplifies data handling on the frontend, particularly useful in applications where data changes frequently and must be synchronized with the user interface in real time.
- Development tools: the Angular CLI speeds up development by offering commands to generate code (module, pipe, resolver, service, guard, interface, component...), run tests, and deploy the application, which proved particularly beneficial for the project.
- Testing and reliability: Angular is renowned for its built-in testing capabilities, enabling reliable applications. For a project like ours where reliability is critical (and to avoid flaky scenarios), these features are essential.

So, although other frameworks such as React or Vue have clear advantages, Angular emerged as the most suitable choice to meet this project's specific requirements in terms of complexity, performance, and maintainability.

On the backend, the choice of framework is crucial in determining how an application will process requests, manage databases, and interact with third-party services. Among the popular backend frameworks we find:

- Django: Django is an open-source web framework written in Python, renowned for its simplicity and built-in features. It offers a powerful ORM (Object-Relational Mapping), an auto-generated admin interface, and follows the Model-Template-View (MTV) architecture. Its security capabilities and robust authentication system make it a preferred choice for complex platforms such as Instagram.
- Ruby on Rails: also known as Rails, this open-source Ruby framework follows the Model-View-Controller (MVC) architecture. It is appreciated for its "Convention over Configuration" philosophy, which simplifies development by reducing the number of decisions about code structure. Rails is used by giants such as GitHub and Airbnb because of its ability to speed up development through preset conventions and its object-relational mapping system, Active Record.

More generally, there is a wide range of frameworks. In 2023, Statista, a statistics website, compiled a histogram of the most used web frameworks.

#figure(
  image("ressources/statista.jpg", width: 70%),
  caption: [
    Most used web frameworks among developers worldwide, as of 2024 @statista.
  ],
)

It is worth noting that a framework near the bottom of this chart is not necessarily a bad framework; every framework has its specialty.
For example, Symfony is still a widely used framework for PHP, and Svelte in JavaScript does away with the Document Object Model @dom, a virtual, tree-shaped logical structure defining how a page is accessed and manipulated, allowing it to reach high performance in certain use cases. A framework is made to answer a need; ease of learning and integration, features, an active community, lightness, and updates are the main criteria that drive its popularity. Reluctance to switch frameworks can also come from the burden of having to retrain.

Spring VS Spring Boot @spring

For our part, the choice was made to go with Spring Boot for the backend side of the project. Before mentioning Spring Boot, we must step back to the birth of Spring in 2002. Spring is, above all, a lightweight open-source framework that lets Java EE 7 developers build simple, reliable, and scalable enterprise applications.

Java is one of the most widespread programming languages. Suited to many kinds of applications, its popularity stems from its many strengths @java:

- A multithreaded language, allowing several tasks to run in parallel for shorter response times, better performance, and lower maintenance costs in applications.
- Memory management: although this point is controversial in part of the community, memory management remains a complex task for developers, yet it is key to performance. Vanilla Java handles it automatically. During processing, objects must be created, virtual entities that give shape to our data. Once an object becomes obsolete, it is considered garbage that Java sends to a more or less extensible "heap", freeing space for future objects.
- Scalability: if developers want to scale an application vertically or horizontally, Java makes it easier thanks to a community that has been active for decades.
- Cross-platform development: the changes needed to adapt an application from one operating system to another are minor, reducing development effort.
- Its security aspect is also widely touted, with features such as sandbox-style isolation, cryptography, exception handling, and access control.

Back to Spring: it stands out for its ability to manage various business objects, making web application development easier than with classic Java frameworks and APIs such as JDBC, JSP, and Java Servlet. Spring uses modern techniques such as aspect-oriented programming (which separates technical code from business code), POJOs (Plain Old Java Objects, classic Java objects without a framework overlay), and dependency injection for enterprise application development.

To be more precise, Spring is not a single framework but a collection of frameworks, comprising Spring AOP, Spring ORM, Spring Web Flow, and Spring Web MVC. As developers, we can choose which modules we want to use, since they are independent of one another. With the little brother introduced, here is the big brother: Spring Boot.
Spring Boot, built on the classic Spring framework, simplifies its use while offering all of its features. This microservice-oriented framework makes it possible to quickly build production-ready applications thanks to automatic configuration; it suffices to use the right configurations to benefit from specific features.

Spring Boot is particularly effective for developing REST APIs. REST APIs are APIs, a contract between an information consumer (issuing a request) and an information provider (answering the request), that respect REST principles such as @apirest:

- Statelessness: the API stores no session state on the server side.
- Caching: responses can be cached, eliminating some client-server interactions.

The remaining question is: why choose Spring Boot rather than Spring (which was long used and still is)?

Although Spring solves many problems, we have reached a point where applications tend to evolve toward microservice architectures, that is, splitting an application into several small autonomous web services with their own features. These microservices need specific tools to be developed quickly. Traditional Spring applications require a lot of configuration, whether in XML, Java, or annotations, which slows development. For example, to use one of Spring's modules such as Spring MVC, several elements must be configured: the ComponentScan annotation, the dispatcher servlet, the view resolver, and web jars, everything the module needs to run properly.

Spring Boot simplifies this process with autoconfiguration, which examines the available frameworks and the configuration supplied by developers or already present. For example, if it finds Hibernate on the classpath (a parameter given to the Java virtual machine indicating where classes and packages live so they can be executed), it consults the supplied configuration, or the one already in place in the application, and automatically configures the data source and the in-memory database, as well as the dispatcher servlet (the component that delegates HTTP requests). Thanks to Spring Boot, a starter project is created with all the default XML configuration and dependencies, greatly easing development.

In conclusion, although other frameworks like Django or Rails have their own advantages, Spring Boot emerged as the ideal choice for our project because of its compatibility with the Java ecosystem, its ability to simplify microservice development, its performance, its security management, and its proven robustness in enterprise environments.

== Methodology
#v(5pt)

To carry out such a project while managing a team split in two, efficient processes must be put in place, and this is something Sopra Steria takes very seriously. A week on the SI Formation team unfolds as follows: every day except Wednesday, a team meeting called V0, or more commonly the daily, is scheduled. On Wednesday this meeting is replaced by the V1, also known as the weekly.
The V0 meetings have a very simple goal: reviewing the tasks completed the day before and those planned for the day ahead. Besides letting everyone step back and get an overall view of the project's progress, these meetings are also crucial for those in charge of tracking the project. They help detect tasks that take longer than expected, or critical points that need special attention, which are then handled outside the V0 meetings. When working on their tasks, developers must assign themselves tickets on the Jira platform, which "_provides planning and tracking tools that let teams manage dependencies, functional requirements, and stakeholders from the very start of the project_" @jira. The technical leads (RTs) and the project manager have dashboards to continuously track these tickets, their assignment, and their progress. Tickets are generally organized with a description containing a summary of the task and the screens to develop, with a reference to the specifications. The specifications, written by the business analysts (BAs), come out of the workshops held with the client. They detail the client's needs, the various screens to develop along with mockups, the description of those mockups, and specific business rules to follow. These rules may cover user access to certain parts of the application, the availability of page elements, the outcome of interactions with those elements, and so on. Tickets also contain sub-tickets, more specific than the main ticket. These sub-tickets define the order in which tasks should be carried out and the time allotted to each of them. This time is crucial and is discussed during the V0 meetings. After detailing the tickets assigned to them, developers must also update their RAE (Reste À Effectuer, the remaining-work estimate) so that it can be reported in the tracking file. The RAE represents the estimated time left to finish a task. When a task starts, a time estimate is recorded, based on the sizing done by the RTs when the quote was drawn up. The estimated time is shown on every ticket, and the remaining time is displayed below it. Initially, when the task is created, the estimated time equals the remaining time. This is the figure the developer must monitor at all times. Every day, developers must log eight hours on their ongoing tasks, then compare those hours with the remaining time. If the development is far from finished, the remaining time is revised upward; conversely, if the task takes less time than expected, it is adjusted downward. The V1 meetings, for their part, focus on the overall progress of the project. Each side (BAs and developers) reports on its own progress. The BAs often give updates on various topics, such as the delivery of specifications for the next phase or feedback from workshops. The developers focus on whether development progress is consistent with the deadlines. In particular, they highlight the burndown, a common practice in agile approaches.
This is a chart showing the amount of work completed and the amount remaining over a defined period. It is updated daily to estimate whether the work can be finished within the allotted time. A table showing code quality, source code test coverage, and the code duplication rate (to be kept as low as possible) is also presented. At the end of the V1, a "mood of the week" round begins. Everyone in turn names one positive and one negative point from the past week. Participants can voice their concerns or share good news. The meeting ends with a light-hearted game that helps the team get to know each other's personalities better.

The pre-delivery phases are crucial for the project, and V0 meetings often take precedence over V1s. We favor close tracking to make sure the phase's goals are achievable. Sometimes time runs out, in which case the short-term goals are reassessed and the delivery happens in two steps: a first delivery with reservations, where we explain to the client what could not be completed, followed by a second delivery with the missing elements. The post-delivery V1 is special because it deals with preparing the next phase and presenting the upcoming strategies.

#figure(
  image("ressources/V1.png", width: 100%),
  caption: [
    Team V1 at the end of phase 2 / start of phase 3.
  ],
)

During these meetings, the schedule is displayed, taking into account the corrective versions of each phase:

#figure(
  image("ressources/planning.png", width: 120%),
  caption: [
    Workload plan.
  ],
)

SI Formation is a fairly long project, yet it was planned over a rather short period. Any delay accumulated in one phase spills over into the following ones. That is why, when a new phase is due to start, the development team is split: some keep fixing the previous phase's version while others start the current phase on schedule. These demanding deadlines notably pushed the team to put in several hours of overtime to meet the phase 1 milestones. The upside of this first phase is that it taught us a lot organizationally, which helped us avoid repeating the same mistakes in the following phases. These adjustments had a significant impact on developers, putting the emphasis on following the processes. The goal was to strengthen the time devoted to designing and validating each task. The processes follow this outline:

#figure(
  image("ressources/workflowdev1.png", width: 100%),
  caption: [
    Developer task workflow - Part 1
  ],
)
#figure(
  image("ressources/workflowdev2.png", width: 100%),
  caption: [
    Developer task workflow - Part 2
  ],
)

To detail the various steps:
- Step 1: This step was described earlier. The developer takes note of the task at hand, understanding its ins and outs.
- Step 2: This crucial step, called functional design, consists of checking what we have actually understood of our task against the specifications. If misunderstandings surface, it is up to the BA to correct them.
- Step 3: This step, called technical design, goes hand in hand with functional design.
The aim here is to discuss with an RT whether the task is feasible with the resources available at that point. Since the RTs have an overall view of ongoing and completed tasks, consulting them avoids developing duplicate elements and thus wasting time. Unfortunately, this step was often neglected during phases 1 and 2 due to lack of time. That led to several technical problems later on, which pushed us to reinforce this step in phase 3.
- Step 4: This is the task implementation phase. At this stage, it is crucial to keep reassessing the time allotted to the task.
- Step 5: Using WAVE. "_WAVE is a free web accessibility evaluation tool designed to identify ways to make a web page more accessible to people with disabilities_" @wave. WAVE highlights information relevant to an accessibility review with embedded icons:
- Red errors indicate issues that will affect some users with disabilities and represent non-conformities with the WCAG (Web Content Accessibility Guidelines). Clicking an icon in the "Details" panel highlights the corresponding spot on the web page and opens a tooltip explaining the error.
- Contrast errors concern text that does not meet the WCAG contrast requirements. For example, white text on a blue background with insufficient contrast will be flagged.
- Yellow alerts point to page elements that may cause accessibility problems. The evaluator must judge the potential impact of these alerts.
- Green icons indicate features that will improve accessibility if implemented correctly, such as descriptive alternative text for images.
- Blue structural elements show headings, unordered lists, and regions or landmarks identified on the page. They help organize content in a logical, accessible way.
- Purple icons show the use of ARIA (Accessible Rich Internet Applications) attributes, which provide important accessibility information by labeling elements but must be used carefully so as not to harm accessibility.

WAVE is easy to use, since it is an extension that can be installed in any browser and can analyze any page. Beyond the globally recognized WCAG guidelines, French government websites follow the RGAA standard (Référentiel Général d'Amélioration de l'Accessibilité). Why is this point emphasized so much? Digital accessibility means that websites, technologies, and tools are designed and developed so that people with disabilities can use them. It is essential to follow the core accessibility principles (perceivable, operable, understandable, robust) on the following levels:
- Auditory: it must be possible to read what is said in a video or an emitted sound.
- Visual: it must be possible to resize text, pay attention to contrast, and define a reading language.
- Motor: features must be operable with the keyboard and not only with the mouse, extra time must be allowed to perform actions, and links at the top of the page must speed up navigation through quick access (and, for more advanced cases, voice commands, eye tracking, and so on).
- Cognitive: the Easy to Read and Understand principle (FALC in French) must be respected, features must not require interpretation, and a specific font can be used for people with dyslexia (for example the OpenDyslexic font, with serifs and increased line spacing).

To comply with accessibility principles, we mostly rely on the French State design system (DSFR). It has been "_mandatory for all State websites and mobile applications since July 2023; it speeds up the creation and updating of sites and guarantees the overall consistency of the ecosystem for the benefit of its users_" @dsfr. This system provides numerous components, colors, emoticons, and so on. These elements comply with, or make it possible to comply with, the RGAA standard, but it is the developer's duty to make sure no WAVE error is reported, and it is up to a BA to verify that accessibility is properly implemented and that the DSFR is consistent with the mockups validated with the client.
- Step 6: Having the task validated by a BA is an essential step that can save a lot of time, as we will see later. It simply consists of checking that the developed feature or screen matches the specs. Several scenarios must be considered: a nominal case and the alternative scenarios. This validation also includes non-regression tests, ensuring that the added feature does not compromise the behavior of another one.
- Step 7: This is the final step. All development so far has happened on a branch parallel to the main branch, the one holding the current version; the branches must now be combined. To do so, we open a Merge Request (MR), which presents the changes brought by our branch before they are integrated into the main branch. When we create this request, a CI/CD pipeline runs in the background. It starts by building the application, then launches the test suites. These tests, on the backend as well as the frontend, are the ones written earlier. Since adding a new feature can impact previous developments, existing tests must be adapted (not deleted) to reflect the new behavior. In addition, the developer must write new tests to verify the feature. Indeed, one of the pipeline's steps runs Sonar. SonarQube is "_a code quality assurance tool that collects and analyzes source code to provide quality reports on a project_" @sonar. It analyzes the source code from different angles and breaks it down layer by layer, from the module level down to the class level. At each level, it produces metric values and statistics that reveal problematic areas or design flaws requiring improvement. It is thus a tool that scans the new code to check test coverage.
Code coverage below 80% is deemed insufficient, and this also applies to the project's overall coverage. In parallel, duplicated code is checked: a duplication rate above 5% in new code is considered too high, in which case the development has to be revisited to factor out parts of the code. Sonar also identifies "code smells" (poor design practices) and requires them to be fixed. After all these checks, Sonar approves the code and the pipeline is considered successful. Once validation is done, the Merge Request's status moves from "In progress" to "To review". The technical leads (RTs) must then perform an additional manual check, going through every line of code to spot potential development errors. If no further fixes are needed, the MR is merged into the current main version (which depends on the ongoing phase), and these new features become available to the rest of the development team, potentially unblocking the development of other features. Once a batch of MRs has been merged and new features or bug fixes (also called "defects") are available, a delivery takes place. This is not a delivery to the client but a deployment to the qualification environment (qualif). This environment lets the BAs run their test campaigns to verify that everything matches the specifications. The BAs' tests are thorough and cover the whole application; they exercise it as if it were being used by the client. When anomalies are found, they open new tickets on Jira, tagged "Defect". Depending on the priority of these anomalies (which can sometimes be blocking), developers hurry to fix the issues. With every new version, non-regression tests are also run to make sure previously delivered features have not been altered. This test phase generally consists of re-running all tests that previously passed.

As for the tests written by developers, we mainly adopted a unit-testing strategy. Unit tests are "white box" tests, that is, tests in which we examine the internal behavior of an individual unit of code, such as an isolated method or function. "Black box" tests, by contrast, focus solely on the visible actions and external behavior of the application, without considering its inner workings. Although black box tests may seem essential because they verify the application as a whole, unit tests play a crucial role in the thorough, precise verification of individual code components. In practice, unit tests provide detailed coverage of the various parts of the code, which can prevent problems that black box tests might miss (a minimal example follows the figure below).

#figure(
  image("ressources/pyramidetest.png", width: 80%),
  caption: [
    Test pyramid @pyramid.
  ],
)

The test pyramid is a model that tames the complexity of software testing by organizing tests into an efficient hierarchical structure.
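To ground the white-box idea, here is a minimal unit test in the JUnit 5 style. The class, rate, and rule are hypothetical assumptions for illustration, not the project's actual code:

```java
import static org.junit.jupiter.api.Assertions.assertEquals;
import org.junit.jupiter.api.Test;

// Hypothetical unit under test: computes a trainer's pay for a session.
class PayCalculator {
    static final double HALF_DAY_RATE = 120.0; // illustrative rate, not a real pay scale

    double payFor(int halfDays) {
        if (halfDays < 0) {
            throw new IllegalArgumentException("negative duration");
        }
        return halfDays * HALF_DAY_RATE;
    }
}

class PayCalculatorTest {
    @Test
    void paysProportionallyToHalfDays() {
        // White-box style: one isolated unit is verified, not the whole application.
        assertEquals(360.0, new PayCalculator().payFor(3));
    }
}
```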
Overall, unit tests aim to identify and quickly fix bugs in small parts of the code. Integration tests seek to guarantee that the different components of the system work correctly together. Finally, end-to-end tests validate that the application as a whole works properly, from start to finish, under real usage conditions. The question now is: why did we settle for unit tests only in our development? The time constraint weighed heavily on the project. By focusing on unit tests, which run fast and are easy to maintain, we benefited from shorter, more agile development cycles. Frequent, automated unit tests help maintain a high standard of code quality throughout the development life cycle. Black box tests, on the other hand, are more complex to set up but fully verify that the application works end to end. In a context where time is limited, it is often better to favor a test pyramid with a solid base, that is, many unit tests, rather than spending a lot of time on black box tests and ending up with a fragile base. Going forward, the goal is to complete this test pyramid by adding the tests needed to reach full coverage, all the way up to the pyramidion.

#pagebreak()
#set page(header: [
  #set text(11pt)
  #set align(left)
  _SI Formation_
  #h(1fr)
  *_The Application_*
])

= The Application
#v(10pt)
== Proposed Solution
#v(5pt)
SI Formation has to absorb every business notion related to customs and reproduce the workflow around customs training, and this is how the proposed solution took shape naturally. The resulting application must let managers create training sheets. On these sheets they can enter various pieces of information, such as the training's title, the associated nomenclatures (labels describing the type of training), the directorate responsible for the training, and so on. From a training description sheet, a manager can create sessions. Sessions are the practical instances of a training. A session spans a given period, in which each day is split into vacations (generally half-days). Each vacation is assigned to a trainer, paid or not, in charge of that training. Customs agents can then search for training courses that meet their needs. From a training sheet, they can browse the published sessions they may apply to. Agents submit a registration request through a form where they must provide information such as their motivation and the required documents. This is when the application workflow begins.

#figure(
  image("ressources/workflowinscription.png", width: 100%),
  caption: [
    Registration workflow.
  ],
)

When a registration request is submitted, an e-mail is sent to the agent's N+1 to inform them that a new application needs processing. When the N+1 logs into SI Formation, they can see these applications on the home page. They can then issue an opinion on each of them, favorable or unfavorable (with justification), and confirm their choices.
At that point, the N+2 also receives an e-mail informing them that an application needs processing. The N+2 generally has more applications to handle and, on their home page, gets an overview of the sessions with applications to review. They can then issue their opinion in turn. For a national training, another step is involved: the FP manager must give their opinion and draw up a ranking based on priorities. Since training courses have a limited number of seats, if many applications come in, not everyone will be able to attend. Note that at each step, the actor in charge can consult the opinions and comments from the previous steps. Once all opinions have been given, the session managers step in. They access their dedicated tool for each session. For a national training these are the school managers, while for a local training they are the FP managers. They then have a dashboard giving them an overview of the workflow's progress for all the session's applications. Although they can consult the details of the opinions (whether from the N+2 or the FP manager, depending on the context), they are the ones responsible for approving or rejecting registration requests. Once a request is approved, or at any time in an application's life cycle, the agent can ask to cancel their application in case of a setback. If a cancellation request is made, the workflow starts over for that specific request. Once the cancellation is processed, the manager concerned can likewise approve or reject it. Once the application is approved, it is considered final. The manager then sends an e-mail and a PDF document definitively confirming the agent's participation in the training. From the moment the registration deadline has passed, the session itself enters a new cycle. Before discussing it, however, another notion must be considered: the Tir/TPCI sessions (shooting sessions). These are quite particular and would almost have required a separate application to manage, so independent are they from the rest of the application. Customs officers regularly need to train in shooting exercises, whether to use new weapon models or to improve their weapon-handling skills. These sessions are subject to numerous regulations, and the business logic underlying them is very strict. First of all, specific training courses exist for these sessions, and they are confidential. This means that only invited individuals can take part, and it is impossible to find this type of séance through SI Formation's search tool. These séances give rise to the notion of a "completed séance" ("séance réalisée"), which maintains a history for future statistics. For each completed séance, a number of metrics are collected, such as the participants, the dates, the number of rounds fired per agent, the trainer in charge of supervision, and so on.

#figure(
  image("ressources/workflowsession.png", width: 100%),
  caption: [
    Session workflow.
  ],
)

Two important notions are introduced in this new workflow: settlement statements (états liquidatifs) and clearance (apurement).
First, the diagram shows a period during which settlement statements can be signed. These statements certify that one or more trainers present during a session or a séance can be paid. For a user to access the settlement statement management screen, two prerequisites must be met: the session must be published, or the Tir/TPCI séance must be finished, and the end date must have passed in the preceding months. Signing settlement statements is subject to various constraints, such as making sure no annual ceiling has been exceeded and that the pay scales have been respected. Once this period is over, the session can be cleared. A session can only be cleared if attendance has been recorded and the settlement statements signed. Tir/TPCI séances are not subject to clearance, since they are entered after the fact. Once a session is up for clearance, neither the trainers, nor the registered agents, nor the periods, nor the vacations can be modified. The session is then permanently frozen. Once the séance is over, the pay is computed and the session is considered finished. The chart also shows a new status for sessions, assigned to those that are blocked. A session is blocked when the pay ceiling is exceeded for at least one trainer. It stays in that state as long as the problem is not solved; otherwise it is cancelled if it theoretically passes the start of its realization. Alongside this come the FIFs (individual training sheets) that every agent holds. This is a summary sheet allowing an agent to consult information such as:
- trainings attended (those where the agent is present on at least one vacation of the session);
- trainings requested but not attended (those with an absence, a registration rejected by the manager, a registration cancelled by the agent, or a session cancelled altogether);
- trainings delivered (those for which the agent led at least one vacation);
- their habilitations and qualifications.

Statistics are a key point of the application: no data will ever be deleted. A past session, a cancelled registration, the documents supplied during a registration, everything is designed so that the life cycles described above (those of applications and of sessions) can be traced back.

== Implementation
#v(5pt)
Building and implementing the application mostly consisted of realizing the solution designed, assessed, and budgeted during the workshops with the client. One important point is the project's technical architecture. The chosen architecture is the hexagonal architecture, which fits Spring Boot perfectly.

#figure(
  image("ressources/archihexagonale.png", width: 80%),
  caption: [
    Hexagonal architecture @hexagonale.
  ],
)

The hexagonal architecture, or "_ports and adapters_" architecture, is an approach that structures an application by clearly separating the business logic from the other parts of the system, such as the user interfaces (user side) and infrastructure components like databases or external services (server side). A sketch follows.
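To make the ports-and-adapters vocabulary concrete, here is a minimal, hypothetical Java sketch; the names are illustrative assumptions, not the project's actual code:

```java
// Domain core: plain Java, no framework imports.
interface SessionRepositoryPort {          // a "port" the core depends on
    TrainingSession load(long id);
}

class ClearSessionUseCase {                // business logic at the center
    private final SessionRepositoryPort sessions;

    ClearSessionUseCase(SessionRepositoryPort sessions) {
        this.sessions = sessions;
    }

    void clear(long id) {
        // Domain rule: a cleared session is permanently frozen.
        sessions.load(id).freeze();
    }
}

class TrainingSession {
    private boolean frozen;

    void freeze() {
        frozen = true;
    }
}

// Server-side "adapter": plugs a concrete technology into the port.
class InMemorySessionRepository implements SessionRepositoryPort {
    @Override
    public TrainingSession load(long id) {
        return new TrainingSession(); // a real adapter would delegate to Hibernate/JPA
    }
}
```

Note that the dependency points inward: the use case only knows the port interface, never the concrete adapter.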
This architecture is particularly beneficial for a Spring Boot project, as it eases the development, testing, and maintenance of the application by isolating business concerns from technical implementation details @hexagonale. By isolating the business logic at the core of the application, we as developers can test its behavior automatically and reliably, independently of the other components, and get fast, precise feedback when the tests run. Moreover, by using interfaces, the "_ports_", to interact with external systems and user interfaces, the hexagonal architecture guarantees that dependencies point toward the business logic, not the other way around. Issues encountered in one part of the system, such as a database change or a user interface modification, thus have no direct impact on the business core @hexagonale. This modular structure makes the application more flexible and easier to maintain, because the required adaptations, the "_adapters_", can be modified or replaced without affecting the rest of the system. In the context of a Spring Boot project, where scalability and automated testing are crucial, the hexagonal architecture proves particularly effective at keeping the application robust while allowing fast, safe iterations.

Next, regarding data management and the database, we opted for Hibernate. One of the major challenges with databases is query management. Hibernate is a very popular ORM (Object-Relational Mapping) framework in the Java community. It eases data manipulation by letting developers map application objects to the tables of a relational database (sketched below). CRUD operations (defined earlier) can thus be performed on these objects without writing SQL by hand, which considerably simplifies development @hibernate. Why did we specifically choose Hibernate? First, because we use Java, and Hibernate builds on the JPA (Java Persistence API) specification, which defines a standard for persisting Java objects. JPA specifies how Java objects should be stored, retrieved, and managed in a relational database. However, JPA has no concrete implementation of its own; it is only a specification. That is where Hibernate comes in, offering a solution for data persistence along with its own specialized query language, the Hibernate Query Language (HQL). It is also worth noting that Hibernate supports lazy loading, meaning data is loaded only when it is actually needed. This feature is particularly useful in Spring Boot applications to optimize resource usage and improve overall performance. Hibernate is easy to configure and offers many other performance-oriented advantages, essential in a large-scale application, such as caching and automatic transaction management. Basic operations are already handled by Hibernate, which simplifies development @hibernate.
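As an illustration of the mapping just described, here is a minimal sketch of a Hibernate-managed JPA entity with a lazily loaded association. The entity names and fields are hypothetical, not the project's real model:

```java
import jakarta.persistence.*; // javax.persistence on older Java EE stacks
import java.util.List;

// Hypothetical mapping: each instance represents one row of a "session" table.
@Entity
@Table(name = "session")
public class Session {

    @Id
    @GeneratedValue(strategy = GenerationType.IDENTITY)
    private Long id;

    private String label;

    // Lazy loading: the vacations are only fetched when first accessed.
    @OneToMany(mappedBy = "session", fetch = FetchType.LAZY)
    private List<Vacation> vacations;

    public List<Vacation> getVacations() {
        return vacations;
    }
}

@Entity
class Vacation {
    @Id
    @GeneratedValue(strategy = GenerationType.IDENTITY)
    private Long id;

    @ManyToOne
    private Session session;
}
```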
Thanks to Hibernate, on the database side all that remains is to write SQL scripts to create our tables and insert reference data. Everything is interpreted correctly once we implement the Java entities. A Java entity @entity is simply an object used to represent a table in a relational database; each instance of an entity corresponds to a row of that table. It is the link between the data stored in the database and the data usable within the application. For the more cross-cutting, industrialization-oriented tasks, we also had to look into Harbor and Rancher. Harbor is an essential tool for deploying our application, as it provides a security layer with vulnerability detection as well as container image management @harbor. A Docker image is stored there and describes the entire setup of our application with all the services it relies on. This image is named %VERSION%-SNAPSHOT for a deployment to the qualif environment, and SNAPSHOT is removed for a deployment to MOA (the customs environment) to signal a stable end-of-phase version. Alongside it, we use Rancher, an orchestration platform that simplifies the management and deployment of applications across server clusters. It organizes containers into services and logical sets called stacks, giving applications a clear structure @rancher. By integrating Harbor with Rancher, we improve the security and efficiency of our containerized application's deployments, guaranteeing the use of reliable, compliant images while easing its integration into continuous deployment processes. Everything is automated through a CI/CD pipeline to eliminate redundant operations. The external services called by our application include Rush and InterRH, two customs services that provide essential information, such as an agent's registration number, the department they belong to, and their rights in the application (local or national manager, administrator, agent without rights). Based on these rights, access controls (guards) were put in place to restrict access to certain management pages for agents without specific rights. We also retrieve personal information such as the agent's first and last name, e-mail address, and line managers (these data were mocked for testing on our environments, meaning we created fictitious agents to avoid accessing overly confidential information).

== Results
#v(5pt)
The end of my internship coincided with the middle of the project's phase 3. The end of phase 2 was marked by difficulties, notably an unstable application due to the many regressions caused by integrating new features too hastily, and the first delivery of v2 with reservations (some features postponed) was not convincing. However, from the start of phase 3, we quickly turned the situation around by delivering a far more functional v2.
One of the main difficulties on the SI Formation project was the tight deadlines, which imposed a fast development pace and made code quality hard to maintain. At the start of each new phase we still had to focus on the previous phase's tasks, accumulating delay over time. It is in this context that I was tasked with working alone on the settlement statements topic for several weeks during phase 3. The subject was particularly complex because of design and data modeling problems that had not been identified early enough. Getting development started on this part was difficult: despite validated specifications, they had to be revised late in the process through DEV/BA meetings and direct discussions with the client. Nevertheless, being responsible for such a crucial part pushed me to adopt a methodical development strategy. It also led me to be particularly rigorous in my understanding of the subject, making sure to clarify every point of doubt, which helped identify the issues at hand and ultimately made my development fast and effective. A key moment in the transition between phase 2 and phase 3 was the phase 2 REX (retour d'expérience, or retrospective). A REX gathers the team to look back on the past weeks. This sharing moment revolves around five axes:
- Start doing: actions to undertake that have not yet been put in place.
- Stop doing: actions to stop, whether they were put in place deliberately or not.
- More of: actions already in place that could be exploited or improved further.
- Less of: ongoing actions that should be reduced.
- Keep doing: good practices to maintain.

Each team member expresses their opinion on these points for 15 minutes in the form of tickets. Then, in a round-table discussion, the ideas are elaborated by the tickets' authors, and a voting phase brings forward the tickets that need the most attention. Once the priority tickets are chosen, the team thinks about solutions to fix the critical problems, improve the important but under-exploited points, and eliminate bad practices. This REX is essential because it lets the team agree on new guidelines so as not to repeat the same mistakes in the next phase. Overall, the project was a resounding success. We received direct congratulations from the deputy director of operations at <NAME>, attesting to the quality of the work delivered. Still, it is worth reflecting on areas of improvement for a project of this scale with such short deadlines. From a technical standpoint, the technologies used were perfectly suited, the project structure was ideal, and the monitoring platforms were simple to use and reliable, in terms of both security and load management. Communication, however, was a weak point. Exchanging during a sprint is not easy, and changes within the team did not help.
New developers were not always able to absorb the practices discussed during the DEV meetings, sometimes preferring to apply the methods of their previous projects, particularly when they came from outside Sopra Steria. Moreover, the DEV meetings, supposed to be weekly, were not always held as planned during rush phases, even though they are crucial points of exchange. Communication between the BAs (business analysts) and the developers is also essential. However, the fact that many discussion threads happened in parallel led to specifications lacking clarity or containing gray areas, when developers could have contributed a valuable technical perspective. Although the functional design meeting with a BA at the start of each task partially mitigated this problem, the specifications were often written by a single person, which did not always guarantee a complete view of the application's various workflows. This limitation sometimes led to misunderstandings that steered development in the wrong direction. During the test phases these inconsistencies required revisions, sometimes causing rework. Reworking an already developed feature is particularly problematic in a rush context, because it means doubling the work time needed to develop one and the same feature. A few concrete examples:
- A pop-in, developed according to the specifications and validated during demos with the BAs, eventually turned out to need conversion into a separate page. This transition required fresh thinking to avoid regressions.
- A pop-in meant to add nomenclatures to a training sheet during its creation lacked clear business rules, which led to diverging interpretations of its behavior. These rules had to be revised later, requiring further adjustments.

The lack of forward planning between phases was also a problem. For example, evolving the tool to register new trainees to a session on the fly during phase 2 required reworking the tool developed in phase 1, because the behaviors were very different, instead of it being a simple evolution. Beyond the specifications, mistakes were also made by the developers: misreading the specifications, insufficiently thorough functional and technical design, lack of visibility over other developments (we sometimes worked on the same feature, or fixed the same anomaly described in two different tickets, without knowing it), and a poorly optimized development strategy leading to frequent blockages. Tasks were often too dependent on one another, preventing full progress. This situation was partially corrected from phase 2 onward thanks to improved use of draw.io, which allowed us to design a dependency diagram between the various tasks and better define priorities.

#figure(
  image("ressources/drawio.png", width: 100%),
  caption: [
    Dependency diagram of the settlement statements.
  ],
)

This experience showed me how fundamental effective communication is to a project's success.
#pagebreak()
#set page(header: [
  #set text(11pt)
  #set align(left)
  _SI Formation_
  #h(1fr)
  *_Evaluation and Conclusion_*
])

= Evaluation and Conclusion
#v(10pt)
Regarding the evaluation of my internship, a satisfaction sheet was drawn up against several criteria, such as general aptitudes, working methods, behavior, and integration. For the criterion "knowing how to work within a team", which includes interacting, sharing information, and cooperating with every team member to reach collective or individual goals, it was noted that I integrated well into the team, with active participation in the V0 and V1 meetings and effective, clear reporting. As for the "ability to know and understand how SI Formation works", it was mentioned that I quickly acquired skills on the project. Today, I am described as an autonomous profile capable of carrying out developments efficiently. I am resourceful enough to handle the topics assigned to me with an effective level of support; a single explanation is enough for me to understand the need. One identified area of improvement is the need for an overall view of the topics in progress within the team, which is considered normal for a junior in a professional context, as discussed during the evaluation. Regarding "carrying out simple and medium developments in compliance with the SFDs and best practices (technical complexity, solution finding, design, new technologies, and integration, with growing autonomy)", the evaluator noted that my results far exceeded expectations. The comments highlight that I deliver quality developments, checking with my RTs and BAs throughout the process that expectations are met. My RTs are confident in my ability to handle development tasks, and I managed to take on complex developments, which justifies a very favorable assessment of my technical level and understanding of needs. One suggested area of improvement is to tackle industrialization topics such as installing the application on Kubernetes environments, which I sought to improve following this evaluation. Finally, regarding "the ability to evaluate my RAE and proactively report any difficulties encountered", it was noted that my reporting level on Jira is correct, that I know how to evaluate my RAE, and that I systematically report overruns to my RTs. From my point of view, I am more than satisfied with this experience. I come out of it having grown, with strengthened and improved technical knowledge. I saw my ability to integrate quickly into a dynamic project from its early days. I had the opportunity to make great personal connections in a professional setting, to learn from the knowledge shared by my RTs and other experienced developers, and to enrich my professional experience. Moreover, my efforts having been noticed, I was offered the opportunity to continue my journey at <NAME> after my internship, an offer I accepted with enthusiasm, so much did I enjoy the experience.
#pagebreak()
#set page(header: [
  #set text(11pt)
  #set align(left)
  _SI Formation_
  #h(1fr)
  *_Bibliography_*
])

= Bibliography
#v(10pt)
#bibliography("bibliography.yml")

#pagebreak()
#set page(header: [
  #set text(11pt)
  #set align(left)
  _SI Formation_
  #h(1fr)
  *_Appendix_*
])

= Appendix
#v(10pt)
== Glossary
#table(columns: (33%, auto), stroke: black, inset: 8pt, fill: (col, row) => if (row == 0 or row == 9) { gray } else { white },
text(12pt)[*Term*], text(12pt)[*Definition*],
[_RT_], [ - Technical lead (Responsable Technique): supervises and mentors the team of developers ],
[_BA_], [ - Business Analyst: studies and proposes solutions to a client's business needs and translates the chosen solution for the technical side ],
[_Specs_], [ - The specifications are a developer's development manual. Every page of the application is described there, along with all the accompanying business and persistence rules. ],
[_V0 / V1_], [ - Respectively, the daily and the weekly meetings ],
[_REX_], [ - Retrospective (retour d'expérience) ],
[_Qualif environment_], [ - Environment allowing the BA team to test the application ],
[_MOA environment_], [ - Environment allowing customs to test the application ],
[_MR_], [ - Merge Request, a request to merge a branch carrying a new feature or a fix into the application's main branch ],
[_Business-domain vocabulary_], [],
[_Training_], [ - Represented as a sheet, a topic addressed as part of the upskilling of customs agents ],
[_Session_], [ - Instance of a training, its concrete occurrence in the real world ],
[_Period_], [ - Time interval during which a session takes place; a session may be repeated, hence several periods ],
[_Vacation_], [ - Half-day of training within a period ],
[_Séance_], [ - Training specialized in Tir/TPCI (shooting) ],
[_Settlement statement (état liquidatif)_], [ - Control phase after the registration phase of a session closes and all participating agents and trainers assigned to vacations have been convened. A settlement statement validates a trainer's pay by means of a signature ],
[_Clearance (apurement)_], [ - Definitive finalization of a session ],
[_FIF_], [ - Individual training sheet (fiche individuelle de formation), a per-agent sheet summarizing their participation in séances or sessions ],
[_Trainer_], [ - Person who leads a session on at least one vacation ],
[_Manager_], [ - Person who approves registration or cancellation requests for participating in a séance or session ])
#v(5pt)
https://github.com/Myriad-Dreamin/typst.ts
https://raw.githubusercontent.com/Myriad-Dreamin/typst.ts/main/fuzzers/corpora/math/spacing_05.typ
typst
Apache License 2.0
#import "/contrib/templates/std-tests/preset.typ": *
#show: test-page

// Test weak spacing
$integral f(x) dif x$,
// Not weak
$integral f(x) thin dif x$,
// Both are weak, collide
$integral f(x) #h(0.166em, weak: true)dif x$
https://github.com/chubetho/Bachelor_Thesis
https://raw.githubusercontent.com/chubetho/Bachelor_Thesis/main/chapters/experimental.typ
typst
#import "@preview/glossarium:0.4.1": gls, glspl = Experiment <section_experiment> In this chapter, a detailed experiment will be conducted, simulating the entire development cycle of a web application using micro frontend architecture across several stages: planning, setup, implementation, build, deployment, testing, as well as @ci and @cd. Each stage will be closely monitored to gather comprehensive insights into this architecture. Additionally, a single-page application version of the same application, derived from the micro frontends version, will be implemented to compare performance metrics and evaluate the overall system behavior of both approaches in the next chapter. The primary goal is to provide a deep understanding of the micro frontend approach, highlighting its potential benefits and drawbacks. Furthermore, optimizations for enhancing the developer experience will be discussed after the development cycle. Note that the code examples presented in this chapter may differ from the actual code. For accurate and precise code, refer to the GitHub repository at @nguyen_DKLB_2024. == Planning Stage <section_planning_stage> In this initial stage of the experiment, the current state of the backend for the #gls("dklb", long: true) application will be briefly reviewed. Following this, each step in the decision framework outlined in the @section_decision_framework will be applied. These steps will collectively provide valuable insights for the upcoming setup stage. === Backend Fortunately, the backend architecture of the @dklb is already organized as microservices, following #gls("ddd", long: true) principles. For instance, the games domain in this context is divided into multiple core subdomains based on specific games. The `/lotto6aus49/**` path interacts exclusively with microservices dedicated to the Lotto game, while the `/eurojackpot/**` path engages with APIs associated with the Eurojackpot game. These distinct separations emphasize that each core subdomain focuses on the unique functionalities within the overall application. However, to ensure the experiment is comprehensive, a mocked server that simulates the backend will be set up to handle requests from the micro frontends. While this server could be built using a web framework in any programming language, a JavaScript-based framework will be used to minimize additional setup efforts and maintain consistency with the frontend technologies. === Horizontal-Split The principles of @ddd offer a strong foundation for implementing a vertical-split strategy for micro frontends. In this approach, each micro frontend aligns with a specific subdomain, such as a particular game, effectively mirroring the microservices architecture established on the backend. However, to increase the system's flexibility, a horizontal-split strategy will be adopted instead. While the majority of micro frontends will continue to follow a vertical-split, focusing on specific game subdomains, certain functionalities or cross-cutting concerns will be shared across multiple micro frontends. For example, although the homepage and Lotto game are developed as separate micro frontends, the component responsible for displaying Lotto quotes from the Lotto micro frontend can be exposed and reused within the homepage micro frontend. This adoption of a horizontal-split strategy ensures that the architecture remains adaptable and responsive to diverse requirements. 
=== Module Federation

Module Federation has been chosen for its significant potential in the development process. It supports both horizontal and vertical splitting of the application, as well as client-side and server-side composition, providing the flexibility needed to accommodate future requirements with ease. Furthermore, the use of a single frontend framework across the entire application partially eliminates the dependency management challenges typically encountered with Module Federation, making it an even more advantageous choice.

Client-side composition is selected in combination with Module Federation because the potential teams for the @dklb rewrite project at MULTA MEDIO are already familiar with @spa development. Additionally, if the application later requires enhanced @seo, faster load times, or improved performance, transitioning from client-side to server-side composition will be straightforward, as Module Federation already supports this capability. Further details regarding client-side routing will be discussed during the implementation stage.

For enabling communication between micro frontends, any of the methods outlined in @fundamental_communication are viable. They can easily be opted in or out as needed, without requiring upfront planning.

== Setup Stage

The setup stage will concentrate on selecting the appropriate tools and technologies required for the new frontend architecture. This involves choosing frameworks and libraries, setting up deployment environments, and defining the project structure.

=== Tools for Development

- Vue.js: It is a progressive JavaScript framework used for building user interfaces. It is highly adaptable, supporting the creation of both simple and complex applications through its reactive and component-based architecture. This design facilitates the development of a modular and scalable frontend @_VueJS_.

- Tailwind CSS: It is a utility-first CSS framework designed to facilitate the rapid development of custom user interfaces. By utilizing utility classes, it enables developers to style elements efficiently, minimizing the need for extensive custom CSS and preventing CSS class collisions, particularly in the context of micro frontend architecture. This methodology results in cleaner, more maintainable code, aligning well with Vue.js's modular structure @_TailwindCSS_.

- Vite: It is a modern build tool that significantly enhances the development experience. It provides a fast and efficient setup, offering features like instant server start, hot module replacement, and optimized build processes. This tool integrates seamlessly with Vue.js and Tailwind CSS, improving development speed and efficiency, making it an ideal choice for modern web projects @_Vite_. There is also a Vite plugin that enables the Module Federation feature for Vite @_OriginjsVitepluginfederation_2024.

- ElysiaJS: It is a web framework that enables developers to set up routes for handling different HTTP requests, making it ideal for building Restful APIs. With its robust set of features, ElysiaJS allows for the efficient and maintainable development of applications, effectively mimicking a backend server to serve API endpoints in this experiment @_Elysia_. However, since this is primarily for simulation purposes, other web frameworks like Express.js or Fastify could also be used effectively.
=== Tools for Deployment

- Docker: It is a platform that enables developers to package applications into containers, ensuring consistency across different environments. These containers encapsulate all the necessary components, such as code, runtime, libraries, and settings, making deployment and scaling straightforward and reliable @_Docker_.

- Nginx: It is a high-performance web server and reverse proxy server known for its speed, stability, and low resource consumption. Nginx is widely used for serving static content, load balancing, and as a reverse proxy for distributing traffic across multiple servers. It is particularly favored for its ability to handle a large number of concurrent connections efficiently @_Nginx_.

- GitHub Actions: It is a powerful automation tool integrated with GitHub repositories. It allows developers to create workflows for continuous integration and continuous deployment. With GitHub Actions, the deployment process can be automated, including linting, running tests, building Docker images, and deploying, ensuring a consistent and efficient pipeline from development to production @_GitHubActions_.

=== Tools for Testing

- Vitest: It is a highly efficient testing framework built on top of Vite, designed to facilitate the writing and execution of unit tests. By utilizing Vitest, developers can ensure that each component behaves as intended, helping to maintain the overall reliability of the software @_Vitest_.

- Playwright: It is an essential tool for end-to-end testing, addressing aspects of application quality that go beyond what unit tests with Vitest can cover. It allows comprehensive testing of the entire application, simulating real-world user interactions across different browsers. Playwright helps to identify issues that might only arise when the entire system is in use, making it an important tool for maintaining the overall quality and stability of a web application @_Playwright_.

=== Monorepo Strategy

In software development, there are two repository strategies: monorepo and polyrepo. A monorepo architecture stores the code for multiple projects in a single repository; for example, one repository might contain three folders, one for a web app project, one for a mobile app project, and one for a server app project. In contrast, a polyrepo approach uses a separate repository for each project @henderson_Monorepovspolyrepo_2024.

In this experiment, a monorepo strategy will be employed. This approach involves storing all micro frontends, a UI library, toolings, and a server application in a single repository, simplifying the setup stage, especially within the scope of this experiment.

Aligning with the monorepo strategy, the project structure is designed to ensure clarity and scalability. The structure is organized into several key directories, each serving a specific purpose. Below is an overview of the project structure:

#grid(
  columns: (1.03fr, 2fr),
  gutter: 10pt,
  [
    #figure(caption: "Project structure in the experiment.")[
      ```
      .
      ├── apps
      │   ├── home
      │   ├── lotto
      │   ├── shell
      │   └── ...
      │
      ├── packages
      │   ├── ui
      │   ├── mfe-config
      │   └── ...
      │
      ├── server
      ├── e2e
      ├── tools
      │   ├── tailwindcss
      │   └── ...
      │
      └── ...
      ```
    ]<figure_project_structure>
  ],
  [
    - apps: This directory contains the host, known as the shell application (`shell`), which integrates and manages the remotes, referred to as micro frontends, such as `home` and `lotto`. Each remote, along with the host, is developed and maintained within its own subdirectory.
    - packages: This directory stores shared logic and resources, such as the `ui` library or `mfe-config`, an overview configuration file for all applications. These shared packages ensure consistent styling and functionality throughout the project.
    - server: This folder houses the server application, which acts as a simulated backend, processing requests from the micro frontends.
    - e2e: This directory contains the end-to-end tests for the application, which are an important part of the continuous integration pipeline.
    - tools: This directory holds base configurations for development dependencies such as Tailwind CSS.
  ],
)

=== App Configurations
- For the host application (`shell`)

In the Vite configuration shown in @figure_vite_config_shell, the `@originjs/vite-plugin-federation` plugin is used to establish the host application running on port `8000`. This host is configured with two remote applications, `home_app` and `lotto_app`, which operate on ports `8001` and `8002`, respectively. The configuration also includes the `shared: ['vue']` option, ensuring that the Vue package is shared between the host and the remote applications.

#figure(caption: "Vite configuration for the host application.")[
```ts
// apps/shell/vite.config.ts
import federation from '@originjs/vite-plugin-federation'

export default defineConfig({
  plugins: [
    federation({
      remotes: {
        'home_app': 'http://localhost:8001/assets/remoteEntry.js',
        'lotto_app': 'http://localhost:8002/assets/remoteEntry.js'
      },
      shared: ['vue'],
    }),
  ],
  server: { port: 8000 }
})
```
] <figure_vite_config_shell>

#pagebreak()

- For the remote applications (`home` and `lotto`)

As outlined in the configuration for the host application (@figure_vite_config_shell), the `home_app` is configured to run on port `8001`, while the `lotto_app` is set to run on port `8002`. Both remote applications also use the `@originjs/vite-plugin-federation` plugin to expose their respective `App` components from their source directories (@figure_vite_config_home_and_lotto). These `App` components can later be imported and displayed, for example, using `import HomeApp from 'home_app/App'`.

#figure(caption: "Vite configuration for the home and lotto applications.")[
```ts
// apps/home/vite.config.ts
import federation from '@originjs/vite-plugin-federation'

export default defineConfig({
  plugins: [
    federation({
      name: 'home_app',
      exposes: { './App': './src/App.vue' },
      shared: ['vue'],
    }),
  ],
  server: { port: 8001 }
})
```

```ts
// apps/lotto/vite.config.ts
import federation from '@originjs/vite-plugin-federation'

export default defineConfig({
  plugins: [
    federation({
      name: 'lotto_app',
      exposes: { './App': './src/App.vue' },
      shared: ['vue'],
    }),
  ],
  server: { port: 8002 }
})
```
] <figure_vite_config_home_and_lotto>

#pagebreak()

- For the UI library

To prevent a single point of failure that could bring down the entire application if the UI library fails, the UI will be bundled within each micro frontend at compile time, rather than being deployed as a separate micro frontend. This approach ensures that all essential base elements are packaged together, thereby simplifying the management of multiple deployments and minimizing the risk of version inconsistencies. As illustrated in @figure_vite_config_ui, the UI library will be built using the ES module format. Additionally, the Vue package is excluded from the build, as it is already present in both the host and remote applications.

#figure(caption: "Vite configuration for the UI library.")[
```ts
// packages/ui/vite.config.ts
export default defineConfig({
  build: {
    lib: {
      entry: 'src/index.ts',
      fileName: 'index',
      formats: ['es'],
    },
    rollupOptions: {
      external: ['vue'],
      output: { globals: { vue: 'Vue' } },
    }
  }
})
```
] <figure_vite_config_ui>
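The `entry: 'src/index.ts'` option above points at the library's entry module. As a rough sketch (the file contents are assumed rather than taken from the project), such an entry simply re-exports each public component so that consumers import from a single path:

```ts
// packages/ui/src/index.ts -- hypothetical entry re-exporting public components
export { default as UiButton } from './UiButton/UiButton.vue'
// Further components would be re-exported here as the library grows.
```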
#figure(caption: "Vite configuration for UI library")[ ```ts // apps/host/vite.config.ts export default defineConfig({ build: { lib: { entry: 'src/index.ts', fileName: 'index', formats: ['es'], }, rollupOptions: { external: ['vue'], output: { globals: { vue: 'Vue' } }, } } }) ``` ] <figure_vite_config_ui> #pagebreak() === Tailwind CSS To ensure consistent styling across all applications, a base Tailwind CSS configuration has been established, as illustrated at the top of @figure_tailwind. This setup allows Tailwind CSS to scan all Typescript and Vue files to generate the required styles. The `preflight` option, responsible for generating reset CSS rules, is enabled exclusively in the shell application, which helps minimize the amount of CSS that users need to download. Furthermore, a specific set of colors has been defined to maintain a uniform color scheme across the applications. At the bottom of @figure_tailwind is the `tailwind.config.ts` file for the shell application, which extends this base configuration. #figure(caption: [The default Tailwind CSS configuration, along with its extended version.])[ ```ts // tools/tailwind/index.ts export default { content: ['src/**/*.{vue,ts}'], corePlugins: { preflight: false, }, theme: { colors: { primary: '#d22321', secondary: '#c5c5c5' } } } ``` ```ts // apps/shell/tailwind.config.ts import config from '@dklb/tailwind' export default { content: config.content, corePlugins: { preflight: true, }, presets: [config], } ``` ] <figure_tailwind> == Implementation Stage <section_implementation> Following the selection of the required development tools, this stage focuses on implementing the host application, the two micro frontends (`home` and `lotto`), the UI library, and the server application. Additionally, a routing issue will be identified during this process, and a solution will be developed to address it. A quick note: Vue Router #footnote[https://router.vuejs.org/], the official routing library for Vue.js, is used in this experiment to manage routing. === Host Application The host application should be simple and lightweight, including elements that remain consistent across all pages, such as the navigation menu and footer. For the main content area, the `RouterView` component from Vue Router is utilized as a slot, responsible for loading the appropriate registered component based on the current URL state. #figure(caption: "App component of the host application.")[ ```vue <!-- apps/shell/App.vue --> <template> <TheNav /> <main <RouterView /> </main> <TheFooter /> </template> ``` ] After defining the entry component `App.vue`, all necessary routes will be registered as illustrated in @figure_shell_router. The first route is associated with `home` micro frontend for the path `/`, representing the homepage. The second route, mapped to `lotto` micro frontend, corresponds to the path `/lotto6aus49`, where users can participate in lotto games. If no path matches the specified routes, the user is redirected to an error page, which is handled by the `Error.vue` component. Vue Router supports lazy loading of components using the promise syntax. For example, the syntax `component: () => import('home_app/App')` means that the `App` component of the `home` application is only loaded when the user navigates to the homepage. This optimization reduces the amount of JavaScript that needs to be downloaded initially, improving page load times. 
=== Micro Frontends
The implementation of each micro frontend is straightforward and aligns with the development of a normal single-page application. For instance, the `App.vue` component in the `home` micro frontend might contain a simple heading displaying "Homepage". When a user navigates to the homepage, Vue Router loads this template into the `App` component of the `shell` application, producing the result shown below.

#figure(caption: [The `App` component of the `shell` application after the `home` micro frontend is loaded.])[
```html
<!-- apps/home/App.vue -->
<template>
  <h1>Homepage</h1>
</template>
```

```html
<!-- apps/shell/App.vue -->
<template>
  <TheNav />
  <main>
    <h1>Homepage</h1>
  </main>
  <TheFooter />
</template>
```
]

=== UI Library
The UI library must be minimal, highly extensible, and independent of any specific location within the application, ensuring its effectiveness and usability across various parts of the system. As shown in @figure_button_ui_with_customized, a basic `UiButton` component is implemented as a simple HTML button element with predefined Tailwind CSS classes and no context-specific logic. If the home micro frontend requires a customized button, it can create a wrapper around this component to extend its styles, as demonstrated by the `HomeButton` component.

#figure(caption: [The `UiButton` component and its wrapper `HomeButton`.])[
```vue
<!-- packages/ui/UiButton/UiButton.vue -->
<template>
  <button class="inline-flex text-sm uppercase">
    <slot />
  </button>
</template>
```

```vue
<!-- apps/home/components/HomeButton.vue -->
<template>
  <UiButton class="bg-primary text-white">
    <slot />
  </UiButton>

  <!-- will be rendered as below -->
  <button class="inline-flex text-sm uppercase bg-primary text-white">
    <slot />
  </button>
</template>
```
] <figure_button_ui_with_customized>

=== Server Application
The server application is configured to listen on port `3000` and only accepts requests originating from port `8000`, where the host application is running. It verifies the request's origin, setting `authorized` to true or false based on whether the origin is `localhost:8000`, and configures CORS to permit only this specific origin. This setup creates a security layer that helps prevent unauthorized requests from third parties, including REST clients like Postman or web browsers, ensuring that only the host application can interact with the server application.

#figure(caption: [The configuration for the server application.])[
```ts
const app = new Elysia()
  .derive(({ request }) => {
    const origin = request.headers.get('origin')
    return { authorized: origin === 'http://localhost:8000' }
  })
  .use(
    cors({ origin: /http:\/\/localhost:8000/ }),
  )
  .listen(3000)
```
] <figure_server_config>
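On the client side, a micro frontend would then talk to this API with a plain `fetch` call; when the page is served from port `8000`, the browser attaches the matching `Origin` header automatically. The endpoint below is hypothetical and only illustrates the calling convention:

```ts
// apps/lotto/src/api.ts -- hypothetical client helper for the simulated backend
export async function fetchLatestDraw(): Promise<unknown> {
  // Served from localhost:8000, so the Origin header is http://localhost:8000
  // and the CORS check on the server passes.
  const response = await fetch('http://localhost:3000/api/draws/latest')
  if (!response.ok) throw new Error(`Request failed: ${response.status}`)
  return response.json()
}
```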
=== Routing Problem
The path `/lotto6aus49` alone is insufficient to fully represent the entire subdomain for the Lotto game, as there is still a need for a page to view the results of previous draws.

A proposed solution is to use `/lotto6aus49` as a prefix, remove the existing route, and replace it with two new routes: one for displaying the play field at `/lotto6aus49/normalschein` and another for querying previous results at `/lotto6aus49/quoten`.

#figure(caption: "Router of the host application with the newly added routes.")[
```ts
// apps/shell/router.ts
const router = createRouter({
  routes: [
    // ...
    {
      path: '/lotto6aus49/normalschein',
      component: () => import('lotto_app/Normalschein'),
    },
    {
      path: '/lotto6aus49/quoten',
      component: () => import('lotto_app/Quoten'),
    },
    // ...
  ],
})
```
]

Additionally, the `lotto` micro frontend must expose its corresponding components for these new routes, ensuring that the correct components are available to be loaded and displayed by the host application.

#figure(caption: [Vite configuration for the lotto micro frontend with additional exposed components.])[
```ts
// apps/lotto/vite.config.ts
export default defineConfig({
  plugins: [
    federation({
      name: 'lotto_app',
      exposes: {
        './Normalschein': './src/Normalschein.vue',
        './Quoten': './src/Quoten.vue',
      },
      shared: ['vue']
    }),
  ]
})
```
]

These routes share the prefix `/lotto6aus49`, which suggests that a separate Vue Router instance should ideally be created within the `lotto` micro frontend to manage its nested routes. This approach would allow the host application's router to register only the top-level routes for its remotes, while deeper-level routing would be handled within each micro frontend. However, this approach is not feasible under the current Module Federation setup. In this architecture, only a single instance of Vue is created within the host application, which utilizes the router defined in @figure_shell_router. Consequently, no additional Vue or Vue Router instance exists within the `lotto` micro frontend to manage nested routing independently.

Moreover, if a new route is later required to display instructions for the Lotto game, or if an existing route, such as the one for displaying results, needs to be removed, similar steps must be repeated to achieve the desired outcome. This repetition not only increases the potential for errors but also makes for a poor developer experience. A more automated solution is therefore desirable: a mechanism through which routes can be registered and managed dynamically, without extensive manual input.

#pagebreak()

=== Routing Solution
1. Overview configuration

The initial step in this routing solution is to create an overview configuration for all applications. This overview specifies the directory locations, operating ports, names, and prefixes for each application. This configuration is important not only for defining how each micro frontend is served but also for enabling the host application to access information about its remotes. Henceforth, the term "overview configuration" will refer to this specific configuration.

#figure(caption: "Overview configuration for all applications.")[
```js
// packages/mfe-config/index.js
export default {
  shell: {
    dir: 'shell',
    port: '8000',
  },
  home: {
    dir: 'home',
    port: '8001',
    name: 'home_app',
    prefix: '/',
  },
  lotto: {
    dir: 'lotto',
    port: '8002',
    name: 'lotto_app',
    prefix: '/lotto6aus49',
  },
}
```
] <figure_mfe_config>
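Because the overview configuration already records each remote's name and port, the host need not hard-code remote URLs. The following sketch — an assumption about how such a lookup could work, not code from the project — derives the `remotes` map for the Vite plugin from that file:

```ts
// apps/shell/vite.config.ts -- hypothetical derivation of remotes from mfe-config
import mfeConfig from '@dklb/mfe-config' // package name assumed

const remotes = Object.fromEntries(
  Object.values(mfeConfig)
    .filter((app) => app.name !== undefined) // skip the shell itself
    .map((app) => [app.name, `http://localhost:${app.port}/assets/remoteEntry.js`]),
)
// remotes ≈ { home_app: 'http://localhost:8001/...', lotto_app: 'http://localhost:8002/...' }
```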
2. Automated Components Exposure

Firstly, a list of routes needs to be generated. Drawing inspiration from the file-based routing systems used by popular meta-frameworks like Nuxt.js, a similar approach will be implemented to create this list. Secondly, this list of routes will be converted into a format that the Vite plugin can understand. Finally, a wrapper function will encapsulate these processes and return a value that is passed into the Vite configuration of each application. Additionally, to enhance flexibility, this wrapper function can also accept extended exposes, for cases where components cannot be located at the expected locations, and custom remotes, which enable a horizontal-split method. The minimal code for this implementation is provided below.

#figure(
  caption: [A wrapper function is built on top of the Vite plugin.],
  [
```js
function wrapper(name, _exposes, _remotes){
  const files = getFiles(name)
  const exposes = getExposes(files, _exposes)
  saveExposes()
  const remotes = getRemotes(_remotes)
  return federation({ name, exposes, remotes })
}
```
  ],
) <figure_wrapper_function>

In the context of the `lotto` micro frontend, its folder structure is illustrated on the left of @figure_folder_structure_lotto. After the execution of the wrapper function, a `routes.json` file is temporarily saved on disk and also included in the `exposes` object, which the host application will later access. The content of this file is illustrated on the right of @figure_folder_structure_lotto.

#figure(caption: [The folder structure of the `lotto` micro frontend (left) and the generated `routes.json` file (right).])[
#grid(
  columns: (1fr, 1fr),
  gutter: 10pt,
  [
    ```
    .
    └── apps
        ├── lotto
        │   │
        │   ├── pages
        │   │   ├── normalschein.vue
        │   │   └── quoten.vue
        │   └── ...
        │
        └── ...
    ```
  ],
  [
    ```json
    [
      {
        "path": "/normalschein",
        "component": "Normalschein"
      },
      {
        "path": "/quoten",
        "component": "Quoten"
      }
    ]
    ```
  ],
)
] <figure_folder_structure_lotto>

#pagebreak()

3. Automated Routes Registration

The final step in this routing solution is the automated registration of routes. With the overview configuration established in the first step and the details about the exposed components of each micro frontend obtained in the second step, the host application's router can now iterate through the overview configuration, read the corresponding `routes.json` files, and compile a flat array of all possible routes within the application.

#figure(caption: [Router configuration with automated route registration.])[
```ts
const router = createRouter({ history: createWebHistory(), routes: [] })

for (const config of Object.values(mfeConfig)) {
  for (const route of getRoutes(config)) {
    router.addRoute(route)
  }
}

app.use(router)
```
]

From now on, any change in the directory monitored by the wrapper function automatically triggers the creation of a router with the correct routes, ensuring the proper routing of the application.

== Build Stage
As illustrated in the dependency graph below, the UI library must be built before both the host application and the micro frontends. This sequence is necessary because the UI library is not defined as a separate micro frontend but rather as a dependency that is bundled during the build process. Once the UI library is built, the host and remote applications can be built either sequentially or in parallel, as their dependencies are resolved only at runtime. In contrast, the server application, which has no dependencies, can be built in the usual manner without any special considerations.
#figure(
  image("/assets/build.png", width: 80%),
  caption: "Dependency graph: solid arrows indicate build-time dependencies; dashed arrows indicate runtime dependencies.",
)

== Testing Stage
The testing stage is focused on verifying the functionality and reliability of the application, ensuring that all components operate as expected before deploying to production. In this experiment, two types of testing will be covered: unit testing and end-to-end testing.

=== Unit Testing
Unit testing focuses on the smallest testable parts of the application, such as individual functions or components. Due to their limited scope and complexity, unit tests typically execute quickly. It is advisable to write unit tests for each component and utility function during the development process, particularly for the UI library. Below are two basic unit tests for the `UiButton` component.

#figure(caption: [Two unit tests for the `UiButton` component.])[
```ts
// packages/ui/UiButton/UiButton.test.ts
const slots = { default: () => 'Click me' }

it('should be rendered as a button', () => {
  const component = mount(UiButton, { slots })
  const button = component.find('button')

  expect(button.exists()).toBe(true)
  expect(button.text()).toBe('Click me')
})

it('should be rendered as a link', () => {
  const component = mount(UiButton, { slots, props: { to: '/about' } })
  const anchor = component.find('a')

  expect(anchor.exists()).toBe(true)
  expect(anchor.text()).toBe('Click me')
  expect(anchor.attributes('href')).toBe('/about')
})
```
]

=== End-to-End Testing
End-to-end (E2E) testing is a comprehensive method for evaluating the entire workflow of an application. Unlike unit tests, which focus on isolating components within a simulated environment, E2E testing replicates user interactions in a production-like setting to ensure that the system meets its requirements and functions as expected. Below is a basic E2E test intended to validate the navigation workflow.

#figure(caption: [Simple E2E test to validate the navigation workflow.])[
```ts
// e2e/tests/app.test.ts
test('Navigation', async ({ page }) => {
  await page.goto('http://localhost:8000/')
  await expect(page).toHaveTitle('LOTTO Berlin')

  const playBtn = page.getByRole('link', { name: /Jetzt Spielen/ })
  await expect(playBtn).toBeVisible()
  await playBtn.click()

  const url = page.url()
  expect(url).toBe('http://localhost:8000/lotto6aus49/normalschein')

  const heading = page.getByRole('heading', { name: /Normalschein/ })
  await expect(heading).toBeVisible()
})
```
]

== Deployment Stage
After completing the building and testing stages, this phase shifts its focus to determining the most effective solution for containerizing the application using Docker.

=== Server Container
The Dockerfile of the server application defines a two-stage build process. In the first stage, the application is built by installing dependencies and compiling the code. The second stage uses the same base image and copies the compiled output from the first stage. This approach helps keep the final Docker image small and efficient by including only the essential files needed to run the application. Finally, the Dockerfile exposes the necessary port and specifies the command to start the server application when the container is launched.
#figure(caption: "Dockerfile for the server application.")[ ```Dockerfile # dockers/Dockerfile.server FROM oven/bun:slim AS build WORKDIR /dklb COPY ./server ./ RUN bun install && bun run build FROM oven/bun:slim COPY --from=build /dklb/dist/index.js ./index.js EXPOSE 3000 ENTRYPOINT ["bun", "run", "./index.js"] ``` ] === Containers for Micro Frontends To avoid the problems related to manual management, particularly regarding routing issues during the implementation phase, an automated approach for generating Dockerfiles based on the overview configuration is preferred. This approach involves two steps: first, generating an Nginx configuration file for both the host and remote applications, and second, creating a corresponding Dockerfile for each of these applications. 1. Nginx configurations Firstly, @cors headers must be appended to each nginx configuration of remote applications. This step is essential to guarantee that only requests originating from the host application are permitted and also prevent any CORS-related issues. Secondly, the host's nginx configuration is configured to always attempt to load the `/index.html` file, regardless of the URI requested. Without this configuration, the nginx server may return a "Not found" error for requests that do not explicitly point to existing resources. #figure(caption: [Generation of `nginx.conf` files based on the overview configuration.])[ ```ts // scripts/docker.ts const cors = ` add_header 'Access-Control-Allow-Origin' 'http://localhost:8000'; add_header 'Access-Control-Allow-Methods' 'GET'; add_header 'Access-Control-Allow-Headers' 'Content-Type';` for (const { port, dir } of Object.values(mfeConfig)) { const isShell = dir === 'shell' const path = `.nginx/${dir}.conf` const str = ` server { listen ${port}; server_name localhost_${dir}; ${isShell ? '' : cors} location / { root /usr/share/nginx/html/${dir}; index index.html; ${isShell ? 'try_files $uri $uri/ /index.html;' : ''} }` await write(path, str) } ``` ] 2. Dockerfiles The process of generating Dockerfiles for each micro frontend can be seamlessly integrated into the same loop that creates the Nginx configuration files. The Dockerfile follows a two-stage approach. In the first stage, the necessary files and dependencies are gathered, followed by the build process for the UI library and the specific micro frontend. The second stage sets up the environment for serving the micro frontend. By removing the default configuration, this stage ensures that only custom configurations and assets are used, and it prepares the built assets to be served by the web server on the defined port. Lastly, the command to start the server application by launching the container is specified. #figure(caption: [Generation of the `Dockerfile` files based on the overview configuration.])[ ```ts // scripts/docker.ts const content = ` FROM oven/bun:slim AS build WORKDIR /dklb COPY . . RUN bun install RUN bun run build:ui RUN bun run --cwd apps/${dir} build FROM nginx:alpine WORKDIR /usr/share/nginx/html RUN rm -rf * && rm -f /etc/nginx/conf.d/default.conf COPY .nginx/${dir}.conf /etc/nginx/conf.d COPY --from=build /dklb/apps/${dir}/dist ${dir} EXPOSE ${port} ENTRYPOINT ["nginx", "-g", "daemon off;"]` await write(`dockers/Dockerfile.${dir}`, content) ``` ] === Docker Compose The final step in this deployment section involves using docker-compose, a convenient tool for simplifying the process by allowing to define and run multiple docker containers. 
=== Docker Compose
The final step in this deployment section involves docker-compose, a convenient tool that simplifies the process by allowing multiple Docker containers to be defined and run together.

The server application's service is defined first, specifying the location of its Dockerfile and the port it will run on. Following this, the overview configuration is looped through again to generate service definitions for each micro frontend. These definitions include the service name, the relevant build settings, and the necessary port mappings. The script also ensures that each micro frontend service waits for the server to start, maintaining the correct initialization sequence. Once all configurations are defined, they are written into a `docker-compose.yml` file. This Docker-based strategy enables the @dklb application to be easily deployed on any machine with Docker installed, streamlining the deployment process and ensuring consistency across different environments.

#figure(caption: [Generation of the `docker-compose.yml` file based on the overview configuration.])[
```ts
const contents = [
  `services:
  server:
    build:
      context: .
      dockerfile: dockers/Dockerfile.server
    ports:
      - '3000:3000'`,
]

for (const { port, dir } of Object.values(mfeConfig)) {
  const content = `
  ${dir}:
    build:
      context: .
      dockerfile: dockers/Dockerfile.${dir}
    ports:
      - '${port}:${port}'
    depends_on:
      - server`
  contents.push(content)
}

await write('docker-compose.yml', contents.join('\n'))
```
]

=== Other Containerization Approach
In the containerization approach implemented above, each micro frontend is deployed in its own container. This design, while flexible, results in increased memory usage, as the memory requirements scale with the number of micro frontend containers. An alternative is to run the host application and all micro frontends within a single container. As illustrated in @figure_docker_desktop, the multi-container approach requires around 27 MB of memory, whereas the single-container approach needs only about 9 MB for the entire frontend. This reduction in memory usage can be advantageous in resource-constrained environments.

#show image: it => block(radius: 5pt, clip: true)[#it]
#figure(
  caption: "Memory usage comparison: multi-container vs. single-container approach.",
  image("/assets/docker_idle.png"),
)<figure_docker_desktop>
#show image: it => it

However, the single-container approach has trade-offs. Redeploying a micro frontend in this setup can be more cumbersome, as developers must apply the necessary changes, rebuild the micro frontend, and push the built assets to the directory in the container responsible for that micro frontend. In some scenarios, this might even require taking down the entire container, leading to downtime for the whole application. With the multi-container approach, on the other hand, individual containers can be stopped and restarted independently, allowing updates to specific micro frontends without disrupting the entire system. This independence reduces the operational burden on developers and can minimize application downtime.

Several deployment strategies can optimize the deployment process and address the issues mentioned above. One such strategy is blue-green deployment, which uses two identical production environments, referred to as blue and green. The blue environment handles live traffic, while the green environment remains idle or is used for staging new releases. When a new version is ready, it is deployed to the green environment. After comprehensive testing, traffic is switched to the green environment, allowing for seamless updates. Should any issues arise, traffic can be reverted to the blue environment. This approach ensures minimal downtime during deployments, enhances reliability, and offers quick rollback capabilities @fowler_BlueGreenDeployment_2013.
== CI/CD Stage
In this final stage of the experiment, the process for automating the integration of code changes from all applications and the UI library into the main branch is executed. This automation involves running linting, building, and testing tasks within each application. These steps enable the early detection of issues, ensure compatibility between new code and the existing codebase, and maintain a high standard of code quality. To provide a concrete example, the following section presents a code snippet that defines a pipeline for end-to-end testing, which is triggered when a pull request is made to the main branch. For clarity, the specific commands for each step have been omitted.

#figure(caption: "Pipeline configuration for end-to-end testing.")[
```yml
# .github/workflows/e2e.yml
name: e2e
on:
  pull_request:
    branches:
      - main
jobs:
  e2e:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Repo
      - name: Setup Bun
      - name: Install Dependencies
      - name: Setup Docker
      - name: Build Images
      - name: Run Containers
      - name: Install Playwright
      - name: Wait for Containers
      - name: Run E2E Tests
      - name: Upload Report
      - name: Stop Containers
```
]

After completing the code integration, the focus shifts to automated deployment. Digital Ocean has been selected as the provider for the virtual private server (VPS). Significant effort has already been invested in generating the necessary Docker configuration files during the deployment stage. The following code snippet demonstrates the action file used to manage the deployment process. This workflow is triggered whenever new code is pushed to the main branch and contains three primary steps: configuring SSH keys, installing the command-line interface for Digital Ocean, and establishing access to the VPS. Once access is secured, a sequence of commands is executed: pulling the latest code via Git, bringing down the existing Docker containers, rebuilding them, and bringing them back up.

#figure(caption: "Pipeline configuration for deployment.")[
```yml
# .github/workflows/deploy.yml
# ...
steps:
  - name: Set up SSH
    run: |
      mkdir -p ~/.ssh
      echo "${{ secrets.SSH_PRIVATE_KEY }}" > ~/.ssh/id_rsa

  - name: Install doctl
    uses: digitalocean/action-doctl@v2
    with:
      token: ${{ secrets.DIGITAL_OCEAN_TOKEN }}

  - name: Redeploy on Droplet
    run: |
      doctl compute ssh ${{ env.DROPLET_ID }} --ssh-command "
        cd /root/DKLB
        git pull origin main
        bun run prepare:docker
        docker compose down
        docker compose up --build -d
      "
```
]

#pagebreak()

== Developer Workflow Optimization
To enhance the developer workflow, a scaffold script is implemented to streamline the creation of new micro frontend applications. Initially, a `.templates` directory is established to store the templates for the micro frontend application and the pipeline configuration file. Following this, a command line interface (CLI) is implemented to prompt the developer for the location and prefix of the micro frontend. The corresponding Dockerfile is then generated, and the necessary updates are made to the `docker-compose.yml` file. Finally, the script asks whether dependencies should be installed and whether the build process should be run.

#show image: it => block(radius: 5pt, clip: true)[#it]
#grid(
  columns: (1fr, 1.5fr),
  column-gutter: 10pt,
  figure(
    caption: [Structure of the `.templates` directory.],
    [
      ```
      .templates
      ├── app
      │   ├── src
      │   ├── vite.config.ts
      │   └── ...
      └── workflows
          ├── ci.yml
          └── ...
``` ], ), figure( caption: [The scaffold CLI for the creation of new micro frontend application.], image("/assets/create_app.png"), ), ) #show image: it => it #pagebreak(weak: true)
https://github.com/soul667/typst
https://raw.githubusercontent.com/soul667/typst/main/PPT/typst-slides-fudan/themes/polylux/book/src/dynamic/poor-alternatives.typ
typst
#import "../../../polylux.typ": * #set page(paper: "presentation-16-9") #set text(size: 40pt) #polylux-slide[ #only(1)[Ann] #only(2)[Bob] #only(3)[Christopher] likes #only(1)[chocolate] #only(2)[strawberry] #only(3)[vanilla] ice cream. This sentence is a visual reference. ]
https://github.com/yhtq/Notes
https://raw.githubusercontent.com/yhtq/Notes/main/计算机网络/网络层.typ
typst
#import "../template.typ": * #show: note.with( title: "计算机网络", author: "YHTQ", date: none, logo: none, ) #let chapter2 = [ = 网络层 == 基本功能 将主机端数据经由物理链路,经过路由器交换机等网络设备,交付到目的主机。 == 服务模型 - 面向连接 | 无连接 - 可靠传输 | 不可靠传输 - 服务质量保障 | 基本保障 | 不保障 历史上这些问题都有过很大争议,时至今日最终胜出的是 internet 协议,提供无连接数据报文服务,同时几乎不做任何保障,复杂问题全部交由传输层。\ 一部分是因为网络中路由器等设备要承载巨大的数据量,如果处理逻辑复杂会导致网络设备的成本过高。\ 同时,由于协议简单,在不同协议的网络中也可以实现向下兼容,与其他协议的网络互联。 网络结构往往是一个复杂的图,两点之间可能有极多的路径,每个数据包都有可能走不同的路径,这导致可能出现严重的乱序问题。乱序包在传输层以上会产生极大负担,因此 internet 也需要一定程度上防止乱序的发生。 == 关键功能 + 转发 根据报文头部的地址信息,决定将报文转发到哪个路由器接口。 + 路由 根据网络情况和路由算法、协议,规划数据报文的转发路径。\ 计算方法往往有: - 分布式协作,各个路由器协作计算 - 软件定义网络:新型措施,设计中心化的计算服务器计算路由方式,下发到路由器 随着软件定义网络概念的提出,网络层也被分为数据平面与控制平面两个概念。数据平面指负责数据转发的部分,控制平面指负责路由计算的部分。 === 数据平面 负责执行转发函数,按照转发表和包的目的地决定出口端口。\ 流程: - 链路层解封装,IP 地址校验 - 获取目的 IP 地址,基于最长前缀匹配查找转发表 - 若查找失败,直接丢弃 - 若查找成功,将数据包转发至对应端口: - IP 头部 TTL 减一,重新计算校验和 - 换取转发出接口和下一跳链路层地址 - 重新进行链路层封装,建立连接发送报文 路由器往往有多个输入输出端口,往往各个输入端口是独立进行计算的,可以充分利用每个端口的传输带宽(线速)。\ 由于路由器的工作负载很大,一般转发操作的时间仅在 ns 级,因此一般使用专用芯片进行转发操作。\ ==== 输入端口: 输入端口将数据读进缓冲队列,再以严格的先进先出策略处理每个报文,具体的转发策略有两种: - 基于目的地址的转发:只考虑目的地址,不考虑源地址,计算简单但不能进行复杂的管理 匹配方法:现代路由器往往使用最长前缀匹配规则,也就是根据地址前缀在路由表中查询,找到最长的前缀匹配项。\ 优势: + 速度快,可以处理大量网络流量 + 现代路由器往往利用一种特殊硬件 ternary content addressable memories (TCAMs),可以以极快速度并行完成百万级路由表的前缀匹配 - 基于任意地址的转发:可以根据数据报文中任何字段 排头阻塞:类似于传输层,在数据平面上,也可能出现输入端口被大排头阻塞,后续报文对应的输出端口空闲但得不到利用的情况。另一情况是,两个相同入口,出口的包,后一个包必须等待前一个包完整移动到出口后才能移动。\ ==== 交换结构 将报文从输入端口的缓冲区域传输至输出端口,大致有三种经典交换结构: - 共享内存 最早期的交换结构实现,不需要专用的芯片,只需要输入端口输出端口使用一个共享内存,控制平面的路由处理器控制读写。 问题:内存读写速度远远低于端口线速;必须使用类似中断处理的技术进行管理,延迟太大。现代处理器可能会采用其的一个改进版。 - 共享总线 在硬件上实现输入端口与输出端口的直连,无需处理器干预。\ 输入端口查找完转发表后,给报文加上标签,通过共享总线广播给所有输出端口,输出端口根据标签决定是否接收报文。 问题:总线一次只能广播一个报文,且交换速率受总线带宽的制约。 - 纵横式 Crossbar 使用 $2N$ 条总线连接 $N$ 个输入端口和 $N$ 个输出端口,以方格形式连接。方格的每个节点标识自己是否被占用,只要两条转发路径不相交,就可以同时进行转发。\ 实际使用中,这 $2N$ 条总线已经可以很好地实现全交换,但还可以进一步改进,使用更加复杂的交换结构和更多的总线。 ==== 输出端口 输出端口同样利用缓冲队列,将缓冲队列的数据报文发出。与输入端口不同,输出端口不需要遵循先进先出,可以按照更好的性能目标进行优化。\ 除了先进先出外,还有以下调度机制: + 基于优先级的调度机制: - 根据数据包头判断优先级 - 按照优先队列,总是先发送高优先级报文 - 实际实现时,采用多个队列代表不同优先级而不是二叉树等数据结构,提高芯片性能。 - 问题:优先级难以确定,容易导致公平性问题,低优先级报文可能永远得不到发送 + 轮询调度: 将报文分为多个类,每次轮询所有队列,依次发送 + 加权公平队列: 类似上面两种策略的组合,在轮询中为每个队列设置权值,高优先级的队列会被更多轮询,同时低优先级队列也可以得到处理 RFC 标准中,建议典型的输出缓冲区大小为 $"RTT" times "链路带宽"$,但以现在的技术指标来看数值过大并不合理。实际上往往使用的是 $("RTT" times "链路带宽")/(sqrt(N))$,其中 $N$ 表示当前网络流的数量,也就是相同网络地址的数据报文的数量(有时也会区分端口,有时不做区分)。这是因为往往单个流内的数据会进行拥塞控制等机制,不太可能一次发送过多数据。\ 缓冲区溢出时,选择性地丢弃报文,同样有不同的策略。\ === 控制平面 负责计算转发路径,由路由器上独立的路由选择处理器进行 - 传统控制平面:由每个交换机上的路由算法模块协作完成 路由协议:实际上就是类似在图中最短路算法。路由器往往配备多种路由协议,同时也可设置为不执行任何路由协议,按照管理员的设定进行转发,称为静态路由。\ 路由管理将通过目的地,路由协议和优先级等,将计算路径并下发至转发表。往往不同算法会给出不同的路径,路由器内部将设置优先级,选择优先级最高的路径。 - 中心化控制平面:由中心化的控制器计算路由,路由器上只需要通过控制代理模块与中心控制器通信并安装于路由器即可 事实上,现代路由器往往集成了大量其他功能,包括大量网络层应用层协议栈等等。 == 分片与重组 internet 协议最基础的功能是寻址和分片。 - MTU:最大传输单元,分为链路 MTU 和路径 MTU - 分片策略:分为允许途中分片和不允许途中分片,前者可以在每一跳过程中进行分片,后者必须在传输前感知路径 MTU 按其进行分片 - 重组:将分片的数据包重组,途中重组对网络设备负担过大,往往采用目的端重组的方式。 == 网络层协议:IPV4 - 通过目的 IP 地址和源 IP 地址表明数据报文的目的地和源头 - 允许中途分片,可能发生多次分片,分片后的报文与原始报文具有相同头部,最后一定在目的端才进行重组。设计之初希望兼容不同链路 MTU,链路 MTU 减小时分成更小的片,往往在目的端再进行合并 - 分片利用标识,标志,片偏移三者进行标记,标识用于标记同一数据报文,标志用于标记是否还有后续分片,片偏移用于标记当前分片在原始数据报文中的位置(相对开头的字节数)。 - 设置 TTL 生存周期,减为零时直接丢弃 - 设置头部校验和,防止无效传输(这里的校验和只计算首部,不计算数据部分) - 报文头部标识了上一层协议的类型(TCP或UDP) - 头部总长(在无选项的情况下)为 20 字节,配合 TCP 的 20 字节头部,共计 40 字节 === IP 地址 真正进行数据交换的实际上是网络设备的接口,IP 地址按照接口分配,接口之间以链路层相连。\ IP 地址设计之初,希望所有网络接口都有唯一一个标识它的 IP 地址,但实际使用中使用了大量诸如子网,NAT 等技术,使得网络产生了分级结构,IP 地址在每一级网络中将有不同的含义。\ 全球 Internet 的 IPV4 地址是分级分发的,由 ICANN 分配给区域性因特网注册机构,再分配给 ISP,再由 ISP 分配给用户。这种分配策略被称为无类别域间路由(Classless Interdomain Routing, CIDR)。一个 CIDR 
=== Control Plane
Responsible for computing forwarding paths; carried out by a separate routing processor on the router.
- Traditional control plane: realized cooperatively by the routing-algorithm modules on each device.
  Routing protocols are, in essence, shortest-path algorithms on a graph. Routers usually ship with several routing protocols, and can also be configured to run none at all and forward according to administrator-defined rules, which is called static routing.\
  Route management takes the destination, the routing protocol, priorities and so on, computes the paths, and installs them into the forwarding table. Different algorithms often yield different paths; the router assigns internal priorities and selects the path with the highest priority.
- Centralized control plane: a centralized controller computes the routes; the router only needs a control-agent module that communicates with the central controller and installs the results.

In fact, modern routers integrate a large number of additional functions, including many network-layer and application-layer protocol stacks.

== Fragmentation and Reassembly
The most fundamental functions of the Internet protocol are addressing and fragmentation.
- MTU: the maximum transmission unit, divided into link MTU and path MTU
- Fragmentation policy: either in-flight fragmentation is allowed or it is not; in the former case a packet may be fragmented at every hop, in the latter the sender must discover the path MTU before transmission and fragment accordingly
- Reassembly: putting fragmented packets back together; doing so in flight places too heavy a burden on network devices, so reassembly is usually performed at the destination.

== Network-Layer Protocol: IPv4
- The destination and source IP addresses state where a packet is headed and where it came from
- In-flight fragmentation is allowed and may happen several times; fragments carry the same header as the original packet, and reassembly always happens at the destination. The original design aimed to accommodate differing link MTUs: when the link MTU shrinks, packets are split into smaller fragments, which are merged again at the destination
- Fragments are marked by three fields — identification, flags, and fragment offset. The identification ties fragments of the same datagram together, the flags indicate whether further fragments follow, and the offset records the fragment's position within the original datagram (measured from the start; the field counts units of 8 bytes)
- A TTL (time to live) is set; when it is decremented to zero the packet is dropped
- A header checksum prevents useless transmission (the checksum covers only the header, not the payload)
- The header identifies the upper-layer protocol (TCP or UDP)
- The header is 20 bytes long (without options); together with TCP's 20-byte header, 40 bytes in total

=== IP Addresses
What actually exchanges data are the interfaces of network devices: IP addresses are assigned per interface, and interfaces are connected at the link layer.\
IP addresses were originally meant to identify every network interface uniquely, but in practice techniques such as subnetting and NAT give the network a hierarchical structure, so an IP address means something different at each level.\
IPv4 addresses on the global Internet are distributed hierarchically: ICANN allocates them to regional Internet registries, which allocate them to ISPs, which in turn allocate them to users. This allocation strategy is called Classless Inter-Domain Routing (CIDR). A CIDR address consists of a prefix part and a remainder; each prefix corresponds to a contiguous block of addresses, assigned as a whole to one organization, so the devices inside the organization differ only in the non-prefix part. Routers outside the organization only need to forward packets toward the organization's subnet, without caring about the concrete host number — which greatly reduces forwarding-table complexity.\
A prefix can be subdivided into longer prefixes, corresponding to splitting a subnet into finer subnets. Aggregating small subnets into a larger one is called route aggregation: unless a router has a direct route toward a small subnet (chosen by longest-prefix matching), it need not care about the small subnet's structure and simply forwards packets toward the largest subnet.\
Besides these, some addresses are special: 255.255.255.255 & subnet_mask is reserved as the limited broadcast address, which reaches every device on the subnet but is never forwarded outward by routers, and 0.0.0.0 & subnet_mask is reserved as the "this host" address.\

=== IP Packet Forwarding
In real networks, devices within the same subnet forward by MAC address (in both directions: subnet device to router and router to subnet device), while devices in different subnets forward by IP address.\
Problem: how to find the MAC address behind a given IP address?\
Solution: the ARP protocol:
- ask, by broadcast, for the MAC address corresponding to the target IP address;
- once an answer is found, cache it and use the cached result next time.

ARP spoofing: since ARP is stateless, an attacker may send arbitrary ARP replies; even if a router never issued an ARP request, it will record the reply in its cache, so the router ends up sending packets to the attacker, who can then mount a man-in-the-middle attack.

== Other Data-Plane Protocols
=== DHCP: Handing Out IP Addresses
An organization's block of IP addresses must be handed out to individual devices; doing this manually is clearly inefficient, hence the Dynamic Host Configuration Protocol (DHCP).\
Each subnet needs at least one DHCP server, or a DHCP relay agent that relays to the server responsible for the subnet (often built into modern routers). DHCP proceeds in four steps:
- DHCP Discover: a newly joined device broadcasts a DHCP Discover message on the subnet to request an IP address. The message uses the special destination address 255.255.255.255 and the special source address 0.0.0.0, and carries a transaction ID. The link layer delivers the packet to every node on the subnet.
- DHCP Offer: on receipt, a DHCP server allocates an IP address and broadcasts a DHCP Offer containing the IP address, subnet mask, default DNS server, first-hop router (default gateway), lease time, and so on. The transaction ID is included so the client can recognize the reply.
- DHCP Request: the client may receive offers from several servers at once; it picks one and broadcasts a DHCP Request echoing the chosen offer's configuration parameters.
- DHCP ACK: the server broadcasts a DHCP ACK containing the client's configuration parameters; upon receipt the client configures its network interface, completing the process.

Problem: in mobile networks a device cannot keep a fixed IP address.
- Security issues:
  + DHCP starvation: an attacker floods the server with DHCP Discover messages in a short time, exhausting its pool of IP addresses.
  + Rogue DHCP server: an attacker runs a DHCP server on the network and sends forged DHCP Offers, redirecting client traffic to the attacker's servers.

=== NAT: IP Address Translation
NAT (Network Address Translation) mitigates the shortage of IPv4 addresses. When hosts inside a subnet communicate with the outside, their addresses must be translated for communication to work.\
Private IP addresses are typically:
- 10.\*.\*.\*
- 172.\*.\*.\*
- 192.\*.\*.\*

The junction between the subnet and the public network is the NAT node. An internal device sends its packets to the node, which forwards them to the public network under its own public identity, using port numbers to distinguish internal devices (ports are allocated dynamically on forwarding, and the one-to-one mapping between port and (IP, port) is recorded).\
When a reply arrives, the mapping table is used to send it back to the internal device.\
#remark[][
  For TCP and UDP traffic, NAT implementations may choose to manage the two kinds of traffic separately.
]
Problems/drawbacks:
- it violates the IP architectural model by making routers handle transport-layer protocols
- it breaks the end-to-end principle and protocol layering
- from the outside, misbehavior by individual internal devices cannot be distinguished, which can lead to wrongful blocking
- it prevents IP addresses from being encrypted

=== ICMP
Used to report errors in the state of the network; the classic examples are the ping command and traceroute (which discovers the routers along a path). Error types include:
- 3: destination unreachable — no route in the forwarding table, unrecognized protocol, etc.
- 5: redirect
- 11: time exceeded (TTL expired). Since no standard fixes the initial TTL, operating systems choose different defaults, which can be used to fingerprint the remote OS
- 12: parameter problem

== Control-Plane (Routing) Protocols
To solve concrete path-selection problems, the nodes and links of a network are usually abstracted as a weighted graph. A weight may be 1, the reciprocal of bandwidth, or some other congestion-related quantity.\
The core of route selection is finding the minimum-cost path between given nodes — essentially a shortest-path algorithm.\
The classic shortest-path algorithms:
- Dijkstra's algorithm
- the Bellman-Ford algorithm

In practice one must account for the fact that each router has limited resources (simplicity), routers work independently and asynchronously (asynchrony), the network changes constantly and may even fail (robustness), and no path should consume excessive bandwidth (fairness) — which is why routing algorithms in computer networks remain a topic well worth studying.
+ Does a router know global information?
  - Yes: link-state algorithms (based on Dijkstra)
  - No — each router knows only the costs to its neighbors: distance-vector algorithms (based on Bellman-Ford)
+ Do link weights change dynamically?
  - Static (largely unrealistic)
  - Dynamic

== Bellman-Ford and Distance Vectors
The Bellman-Ford equation:
$ d_x (y) = min_v {c(x, v) + d_v (y)} $<bellman-ford>
The question is how to compute this in a distributed fashion, which involves:
- Distributing the data: the information maintained by the centralized algorithm is spread across the nodes.
  A distance vector means that each node maintains only its own vector
  $ vec(d_x (1), d_x (2), dots.v, d_x (n)) $
  together with the distance vectors of all its neighbors — exactly the information a node needs to apply the Bellman-Ford equation.
- Distributing the computation: the update steps are spread across the nodes.
  Each node repeats:
  + Send: transmit its own distance vector to its neighbors
  + Receive: collect the distance vectors arriving from neighbors
  + Compute: update its own distance vector using @bellman-ford

Properties of the algorithm:
- Asynchronous: nodes need not run in lockstep
- Iterative
- Distributed
- When a link changes, the news propagates quickly outward from the affected node. However, "good news travels fast, bad news travels slowly": if some link cost becomes very large, convergence is unacceptably slow — the count-to-infinity problem.\
  Remedies:
  + Poisoned reverse\
    Count-to-infinity arises because distance vectors do not record the actual shortest paths, so the problem can be avoided by other means of preserving path information. With poisoned reverse, if the first hop of $a$'s shortest path to $c$ is $b$, then $a$ reports $D_a (c) = + infinity$ to $b$ (which is reasonable: $b$'s shortest path to $c$ clearly does not pass through $a$).\
    It still does not fully solve count-to-infinity: severe deterioration on the second or later hops can again cause slow convergence.
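The per-node update of @bellman-ford can be sketched in a few lines (a toy, synchronous version; real routers run it asynchronously and exchange their vectors over the network):

```ts
// One distance-vector update step at node x (illustrative sketch).
const INF = Number.POSITIVE_INFINITY

function updateDistanceVector(
  linkCost: Map<string, number>,                      // c(x, v) per neighbor v
  neighborVectors: Map<string, Map<string, number>>,  // d_v(y) reported by v
  destinations: string[],
): Map<string, number> {
  const d = new Map<string, number>()
  for (const y of destinations) {
    let best = INF
    for (const [v, cost] of linkCost) {
      const dv = neighborVectors.get(v)?.get(y) ?? INF
      best = Math.min(best, cost + dv) // d_x(y) = min_v { c(x,v) + d_v(y) }
    }
    d.set(y, best)
  }
  return d
}
// With poisoned reverse, a neighbor v would report INF here for any
// destination that v currently routes through x.
```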
=== Link-State Algorithms
Require every node to know the complete network topology and link costs.\
Dijkstra: repeatedly add the node with the smallest tentative distance to the set of nodes whose shortest paths are known, updating the tentative distances of the remaining nodes to that set.\
A link-state algorithm has five steps:
+ Discover the neighbors and learn their network addresses
+ Set the cost metric to each neighbor: configured manually, or derived automatically from probed delay or link bandwidth
+ Construct a link-state packet (LSP) containing the information gathered so far
+ Broadcast the LSP, so that every router learns the link state of the whole network.
  - A newly received packet is rebroadcast on all other links — flooding
  - Duplicate packets (the network may contain loops) and stale packets are discarded outright
+ Compute the shortest paths to all other routers from the assembled topology

Comparing the two kinds of algorithms:
- both exchange at most $O(n E)$ messages ($E$ is the number of edges)
- with link state, each node spends $O(n log n)$ to finish its computation; distance-vector algorithms can suffer count-to-infinity
- when the network changes, link-state algorithms are affected only mildly, whereas distance-vector algorithms may produce many wrong results whose propagation is costly
- routers have limited compute and memory, which may not suffice for link-state algorithms; for a long time distance-vector algorithms were mainstream, and link-state algorithms have seen wider use as chips improved
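For comparison, the per-node Dijkstra computation can be sketched as well (an $O(n^2)$ array version for clarity; heap-based variants approach the bound quoted above):

```ts
// Dijkstra over an adjacency map: graph.get(u) holds the costs u -> w.
// Assumes every node appears as a key of `graph`.
function dijkstra(
  graph: Map<string, Map<string, number>>,
  source: string,
): Map<string, number> {
  const dist = new Map<string, number>()
  const done = new Set<string>()
  for (const u of graph.keys()) dist.set(u, Infinity)
  dist.set(source, 0)

  while (done.size < graph.size) {
    // Pick the closest not-yet-finalized node.
    let u: string | undefined
    for (const [k, dk] of dist) {
      if (!done.has(k) && (u === undefined || dk < dist.get(u)!)) u = k
    }
    if (u === undefined || dist.get(u) === Infinity) break
    done.add(u)
    // Relax the edges leaving u.
    for (const [w, c] of graph.get(u) ?? new Map()) {
      if (dist.get(u)! + c < (dist.get(w) ?? Infinity)) dist.set(w, dist.get(u)! + c)
    }
  }
  return dist
}
```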
== Hierarchical Routing
=== Autonomous Systems (AS)
An AS is the network inside a single administrative organization, usually running the same routing algorithm and protocol throughout. Each AS has a globally unique ID, and an AS may contain smaller ASes.\
Routing protocols used inside an AS are called Interior Gateway Protocols (IGP); those used between ASes are called Exterior Gateway Protocols (EGP).\
IGPs can naturally vary. Typical examples include OSPF, Open Shortest Path First (which subdivides the AS into areas, synchronizes only inter-area links between areas, arranges the areas hierarchically with a top-level backbone, forbids non-backbone areas from advertising routes to one another, and corresponds to the link-state algorithm); RIP, the Routing Information Protocol (which corresponds to the distance-vector algorithm, uses hop count as the metric, and limits paths to at most 15 routers to contain count-to-infinity); IS-IS; and so on.\
The EGP, by contrast, must be uniform; it is BGP.\
=== BGP
Inter-AS routing is very hard, chiefly because of enormous scale (millions of routers) and policy (the commercial interests of different ASes are hard to reconcile, and the ASes run different routing protocols internally).\
Hence BGP, the Border Gateway Protocol, the single EGP in operation, whose functions include:
- eBGP: obtain network reachability information from neighboring ASes
- iBGP: propagate reachability information inside the AS

Concretely:
- Routers establish sessions with one another and advertise paths. Each path consists of a prefix (the destination network) plus attributes (the AS path, listing all AS numbers traversed; the next hop, giving the IP address of the next hop for this route). A gateway router may accept or reject a path; once accepted, it is propagated to all internal routers via iBGP. When several paths exist, policy may select among them, with priority from high to low:
  + local preference: an administratively set policy value
  + path length
  + distance to the next hop
  + other additional criteria
  + lowest router ID
- Routers also decide by policy whether to advertise path information to neighboring ASes
- For actual forwarding, the intra-AS path is likewise chosen by policy; typical is the hot-potato strategy, i.e. leave the local AS as early as possible

(Note that all BGP messages are delivered over TCP, so strictly speaking BGP is an application-layer protocol, though its function is an extension of the network layer.)

== Other Routing Functions
=== Broadcast Routing
How can a host send a message to every machine on the network? Knowing every device's IP address is clearly too costly, so modern routers adopt simpler schemes, for example:
+ Flooding:
  a router sends each incoming packet out on every line except the one it arrived on. Networks often contain loops, however, and if several routers all flood, a broadcast storm can result
+ Sequence-numbered flooding: every broadcast packet carries a sequence number, making duplicate broadcast messages easy to reject
+ Reverse-path forwarding: assuming the routing tables have already been computed and thus encode the best path to each network, forward only along those best paths
+ Spanning tree: build a minimum spanning tree and forward along it, making the best use of bandwidth
=== Multicast Routing
A source sends data to a subset of the machines in the network. Implementation steps:
+ determine the group membership
+ decide how to address the group
==== IGMP
The Internet Group Management Protocol lets a machine declare to its router that it is joining a multicast group.
Since many Internet devices today are unicast-only, tunneling is needed to carry multicast data across devices that support only unicast.
=== Anycast Routing
Often we do not need to name a specific target — any response will do; DNS is the classic example — and anycast can be used in such cases.

== Software-Defined Networking
SDN is less a single technology than an idea or philosophy encompassing a large number of concrete techniques.
- Traditional networks are essentially non-programmable: only hardware-defined functions are available, which is rigid
- Traditional routing algorithms must constantly propagate messages to neighbors, and as the number of network devices keeps growing, the load is heavy
- Network traffic tends to have strong regularities; planning traffic paths appropriately can markedly improve transmission efficiency. Classic shortest-path algorithms reach local optima but may squeeze large volumes of traffic onto the same link; computing better paths requires more complex algorithms such as network flow

Advantages of SDN (software-defined networking):
- the data plane exposes development interfaces and is programmable
- the control plane is decided by a centralized controller, enabling better network management from a global viewpoint

Challenges it faces:
- the universities and the standards organization ONF have too little market influence and engineering background
- too many vested interests are involved, and traditional vendors do not buy in

SDN devices:
- SDN switches: expose open protocol interfaces, analogous to microprocessors
- SDN controllers: analogous to a network operating system — they interact with the SDN switches below, provide interfaces to applications above, and are physically realized as distributed systems.
=== The OpenFlow Protocol
- The core idea is an open protocol that models the abstract "match-action" behavior of all kinds of machines in the network. Match + action entries are called a flow table; concretely, a flow table contains:
  - a set of header fields used to match incoming packets in TCAM memory, with $*$ allowed as a wildcard, e.g. $128.119.*.*$; ties among multiple matches are resolved by the flow table's own priorities
  - a set of counters, including how many packets have matched the entry, when it was last updated, and so on
  - a set of actions: rewrite the header, forward the packet, drop the packet, duplicate the packet — e.g. drop, forward(2), send to controller, send to processing pipeline, modify fields
- Limitation: the match rules and actions are fixed and stateless

== Other Network-Layer Technologies
=== IPv6
- Purpose: solve the shortage of IPv4 addresses while clearing up as much historical baggage as possible
- Addresses are 128 bits, conventionally separated by $:$. Since addresses often contain many zeros, "::" may be used to elide a run of zeros (allowed at most once)
- The IP header is fixed at 40 bytes, and in-flight fragmentation is forbidden: an oversized packet simply triggers the corresponding "packet too big" ICMP message. Compared with IPv4, the header omits many fields, including the checksum (left to TCP/UDP at the transport layer), the fragmentation fields, the header length, and so on
- A flow label is added, indicating that the sender requests special handling
- Extension headers: IPv6 allows a variable number of extension headers enabling extra mechanisms, such as fragmentation, source-specified paths, and so on
- Improved versions of DHCP, ICMP, and other protocols
- IPv6 adoption has been very slow, so a series of transition technologies exist, including:
  + Tunneling: suppose two devices use the same network type, but some intermediate segment uses an incompatible one; while crossing that segment, packets are encapsulated according to the segment's network type and decapsulated on leaving it
  + Translation: converting between IPv4 and IPv6 headers. Note that the transport layer (even the application layer) may use IP addresses for checksums and the like, so everything must be translated consistently

=== Network Quality-of-Service Techniques
Although common network practice does not manage quality at the network layer, quality-management techniques are applied in some special networks. Common methods include:
+ Packet scheduling: at the router's output port, decide what gets sent first
+ Traffic engineering: assuming traffic can be estimated in advance, plan sensible paths for it ahead of time
+ Traffic shaping: turn irregular traffic into a regular stream before sending. Typical algorithms include:
  - Leaky bucket: packets enter a bucket that drains at a constant rate (overflow is dropped), which smooths the flow
  - Token bucket: tokens are generated at a uniform rate (and discarded once the bucket is full); a packet consumes tokens in proportion to its size and is dropped outright if tokens are insufficient
+ Integrated services: maintain connection state across the Internet and guarantee service with reserved resources. This requires complex support from every router, so it was only ever designed and essentially never deployed
+ Differentiated services: use the priority marked in the IP header to differentiate service levels. But users on the Internet can hardly perceive other users or the global network state, so abuse is easy — a "tragedy of the commons"

=== Connection Services at the Network Layer
Although the Internet is connectionless at the network layer, several technologies aim to establish logical connections:
+ Virtual circuits: establish a logical — not truly physical — connection between two parties over the Internet. Packet switching was in some sense only an accidental early choice, and virtual circuits were hoped to fix some of its problems; but packet switching was already so widespread that virtual circuits never saw truly wide use.
+ MPLS: Multi-Protocol Label Switching. A protocol sitting between the network and link layers, intended to abstract over the addressing schemes of network-layer protocols by providing uniform virtualized addresses; each router simply forwards by the virtual label.
+ VPN:
  - Origin: many organizations want dedicated networks, but a physically dedicated network is prohibitively expensive; hence the VPN (Virtual Private Network) — a logically isolated network built on top of the Internet.
  - Design principles: security, tunneling and encryption, data authentication, user authentication, firewalls and attack detection
  - Core idea: connect two private networks across the public network with tunnel-like techniques, guaranteeing encryption and tamper resistance between the gateways

=== SR (Segment Routing)
A design combining centralization and distribution, intended to improve on classic SDN's scalability problems, reliability problems (single points of failure are hard to avoid), and performance problems.\
Principle: a source-routing mechanism — the planning of path segments is handed entirely to the end system and specified completely in the packet header.
- The segment information in the packet can be carried via MPLS or SRv6 (in the form of IPv6 header extensions)
]
#chapter2
https://github.com/0xmycf/typst-template
https://raw.githubusercontent.com/0xmycf/typst-template/main/test.typ
typst
#import "./Prelude.typ": applyStyle, citep #show: doc => applyStyle( author: "<NAME>" , title: "Lorem in the Context of Me" , doc ) = Top Header ```hs -- the main function is the entrypoint in the program -- in contrast to java this is low boilerplate -- the type signature can be omitted. main :: IO () main = do -- do notation args <- getArgs <&> T.pack -- imports omitted! let xs = 12345678910 forM_ args (putStrLn . T.unpack) -- printing the arguments if length args == 0 -- could use null here then error "give a filename..." -- here otherMain is some function in scope else otherMain (head args) ``` == Some Lorem Text in other languages === Some Lorem Text in Chinese 上海香港,您好。早上好,这是一段中文的测试文本。 Lorem Ipsum是印刷和排版领域中的虚拟文本。 Lorem Ipsum自15世纪以来一直是行业的标准虚拟文本。 当一个未知的打印机拿走了一种类型的压力,并将其混合成一个类型样本书时, 它不仅存活了五个世纪,而且还跨越了电子排版,其基本思想仍然保持不变。 它在1960年代与Letraset传统工具一起流行, 包括Lorem Ipsum段落包含在最近的桌面出版软件中,例如Aldus PageMaker。 中文的Lorem Ipsum通常是随机生成的,但也可以使用一些实际的文本来代替, 例如《庄子》、《老子》等。这是一段长度不等的中文测试文本。 === Some Lorem Text in Japanese こんにちは、東京です。このテキストは日本語のロレム・イプサムです。 ロレム・イプサムは印刷・出版業界で用いられるダミーテキストです。 このテキストは、文字の大きさや配置などを確認するために使用されます。 ロレム・イプサムは15世紀に欧州で誕生したテキストで、現在も使われ続けています。 このテキストはランダムに生成されたものであり、実際には意味を持たない言葉が並んでいます。 === Some Lorem Text in Korean 안녕하세요, 서울입니다. 이것은 한국어 로렘 입숨 텍스트입니다. 로렘 입숨 텍스트는 인쇄 및 출판 업계에서 사용되는 더미 텍스트입니다. 이 텍스트는 글자 크기 및 배치 등을 확인하기 위해 사용됩니다. 로렘 입숨은 15세기에 유럽에서 탄생한 텍스트이며, 현재까지 사용되고 있습니다. 이 텍스트는 무작위로 생성된 것이며, 실제로는 의미를 가지지 않는 단어들이 나열되어 있습니다. ==== A fourth level header just for testing stuff 上海香港,您好。早上好,这是一段中文的测试文本。 Lorem Ipsum是印刷和排版领域中的虚拟文本。 Lorem Ipsum自15世纪以来一直是行业的标准虚拟文本。 当一个未知的打印机拿走了一种类型的压力,并将其混合成一个类型样本书时, 它不仅存活了五个世纪,而且还跨越了电子排版,其基本思想仍然保持不变。 它在1960年代与Letraset传统工具一起流行, 包括Lorem Ipsum段落包含在最近的桌面出版软件中,例如Aldus PageMaker。 中文的Lorem Ipsum通常是随机生成的,但也可以使用一些实际的文本来代替, In here we cite @citekeyA and here we cite them in text: #citep("citekeyA"). Note that the above citation should be in _apa_ style but isn't (properly)! #pagebreak() = Another Chapter #import sym: * #import math: limits #let d = dot.op #let implies = sym.arrow.r.double In here we display some math. We want to show that if $#forall x #sym.in #RR " : " x #d 0 = 0$. *Proof:*\ $ &x #d 0 &= 0 \ &x #d (0 + 0) &= 0 \ &x #d 0 + x #d 0 &= 0 $ This is true due to $0$ being the additive neutral of $(+)$ and the distributive law. #h(1fr) $qed$ == Testing some numbers! 12345678910 + 0 + 1 + 2 + 3 + 5 + 6 + 7 + 8 + 9 + 10 + -1 + 11 + 1/3
https://github.com/Pablo-Gonzalez-Calderon/showybox-package
https://raw.githubusercontent.com/Pablo-Gonzalez-Calderon/showybox-package/main/lib/pre-rendering.typ
typst
MIT License
/* * ShowyBox - A package for Typst * <NAME> and Showybox Contributors (c) 2023-2024 * * lib/pre-rendering.typ -- The package's file containing all * the internal functions used for pre-rendering some components * to get their dimensions or properties. * * This file is under the MIT license. For more * information see LICENSE on the package's main folder. */ #import "id.typ": * #import "sections.typ": * /* * Function: showy-pre-render-title() * * Description: Pre-renders the title emulating the conditions of * the final container. * * Parameters: * + sbox-props: Showybox properties */ #let showy-pre-render-title(sbox-props, id) = context{ let my-state = state("showybox-" + id, 0pt) if type(sbox-props.width) == ratio { layout(size => { // Get full container's width in a length type let container-width = size.width * sbox-props.width let pre-rendered = block( spacing: 0pt, width: container-width, fill: yellow, inset: (x: 1em), showy-title(sbox-props) ) place( top, hide(pre-rendered) ) let rendered-size = measure(pre-rendered) // Store the height in the state my-state.update(rendered-size.height) }) } else { // Pre-rendering "normally" will be effective let pre-rendered = block( spacing: 0pt, width: sbox-props.width, fill: yellow, inset: (x: 1em), showy-title(sbox-props) ) place( top, hide(pre-rendered) ) context { let rendered-size = measure(pre-rendered) // Store the height in the state my-state.update(rendered-size.height) } } //v(-(my-state.final(loc) + sbox-props.frame.thickness)/2) }
https://github.com/max-niederman/CS250
https://raw.githubusercontent.com/max-niederman/CS250/main/hw/3.typ
typst
#import "../lib.typ": * #show: homework.with(title: "CS 250 Homework #3") = Proof Techniques == 1 #set enum(numbering: "a.") + An oblong rectangle is a geometric figure with four right angles, but not a square. + Zero is not positive, but neither is it negative. + Gunnar is a person with red hair, but neither is he tall nor does he have green eyes. + Philip is a person with red hair, but neither is he tall nor does he have green eyes. == 2 + $-1 | 1$ and $1 | -1$, but $-1 eq.not 1$. + $(-1)^2 = 1 > 0$, but $-1 <= 0$. + $0$ is an even number, but $0^2 + 1 = 1$ is not prime. + $6$ is a positive integer, but $6^3 <= 6!$. == 3 Proof by exhaustion: #table( columns: (auto, auto, auto, auto), $n$, $n^2$, $2^n$, $n^2 >= 2^n$, $2$, $4$, $4$, [True], $3$, $9$, $8$, [True], $4$, $16$, $16$, [True], ) #sym.qed == 4 Let $m$ and $n$ be two even integers, and write them as $2a$ and $2b$ for some integers $a$ and $b$. Then: $ m + n &= 2a + 2b \ &= 2(a + b) $ So, by definition, $m + n$ is even. #sym.qed == 5 Let $m$ and $n$ be two even integers, and assume by contradiction that $m + n$ is odd. By definition, $n = 2b$ and $m + n = 2k + 1$ for some integers $b$ and $k$. But then: $ m + n &= 2k + 1 \ m &= 2k + 1 - n \ &= 2k + 1 - 2b \ &= 2(k - b) + 1 $ So $m$ is odd, which contradicts the assumption that $m$ is even. Therefore, by contradiction, $m + n$ is even. #sym.qed == 6 Let $m$ be an odd integer and $n$ be an even integer, with $m = 2a + 1$ and $n = 2b$ for some integers $a$ and $b$. $ m + n &= (2a + 1) + 2b \ &= 2a + 2b + 1 \ &= 2(a + b) + 1 $ So, by definition, $m + n$ is odd. #sym.qed == 7 Let $m$ be an odd integer and $n$ be an even integer, with $m = 2a + 1$ and $n = 2b$ for some integers $a$ and $b$. $ m - n &= (2a + 1) - 2b \ &= 2a + 1 - 2b \ &= 2(a - b) + 1 $ So, by definition, $m - n$ is odd. #sym.qed == 8 Let $n$ be an even integer, with $n = 2a$ for some integer $a$. $ n^2 &= (2a)^2 \ &= 4a^2 \ n^2 - 1 &= 4a^2 - 1 \ &= 4a^2 - 2 + 2 - 1 \ &= 2(2a^2 - 1) + 1 $ So, by definition, $n^2 - 1$ is odd. #sym.qed == 9 Let $n$ and $n + 1$ be two consecutive integers. === Case 1: $n$ is even. Let $a$ be an integer such that $n = 2a$. Then: $ n(n + 1) &= 2a(n + 1) &= 2(a(n + 1)) $ So, by definition, $n(n + 1)$ is even. === Case 2: $n$ is odd. Let $a$ be an integer such that $n = 2a + 1$. Then: $ n(n + 1) &= (2a + 1)(2a + 1 + 1) \ &= (2a + 1)(2a + 2) \ &= 2((2a + 1)(a + 1)) $ So, by definition, $n(n + 1)$ is even. #sym.qed == 10 Let $n$ be an integer. === Case 1: $n$ is even. Let $a$ be an integer such that $n = 2a$. Then: $ n^2 + n &= (2a)^2 + 2a \ &= 4a^2 + 2a \ &= 2(2a^2 + a) $ So, by definition, $n^2 + n$ is even. === Case 2: $n$ is odd. Let $a$ be an integer such that $n = 2a + 1$. Then: $ n^2 + n &= (2a + 1)^2 + 2a + 1 \ &= 4a^2 + 4a + 1 + 2a + 1 \ &= 4a^2 + 6a + 2 \ &= 2(2a^2 + 3a + 1) $ So, by definition, $n^2 + n$ is even. #sym.qed == 11 Let $n$ be an integer. $ 3(n^2 + 2n + 3) - 2n^2 &= 3n^2 + 6n + 9 - 2n^2 \ &= n^2 + 6n + 9 \ &= (n + 3)^2 $ So, by definition, $3(n^2 + 2n + 3) - 2n^2$ is a perfect square. #sym.qed == 12 Let $x$ be an integer such that $x + 1$ is not positive. Then: $ x + 1 &<= 0 \ x &<= -1 \ x &<= 0 $ So $x$ is not positive. Therefore, by contraposition, if $x$ is positive, then $x + 1$ is positive. #sym.qed == 13 Let $n$, $n + 1$, and $n + 2$ be three consecutive integers. $ n + (n + 1) + (n + 2) &= n + n + n + 1 + 2 \ &= 3n + 3 \ &= 3(n + 1) $ The number $n + 1$ is an integer, so $3$ divides $n(n + 1)(n + 2)$ by definition. 
#sym.qed

== 14

Let $n, m, p in ZZ$ such that $n divides m$ and $m divides p$. Then, by definition, there exist $a, b in ZZ$ such that $m = a n$ and $p = b m$. Therefore,

$
p &= b m \
  &= b (a n) \
  &= (b a) n
$

The number $b a$ is an integer, so $n divides p$ also.

#sym.qed

== 15

Let $n$ be an odd integer with $n = 2a + 1$ for some integer $a$.

=== Case 1: $a$ is even.

Let $b$ be an integer such that $a = 2b$. Then:

$
n^2 &= (2a + 1)^2 \
    &= (2(2b) + 1)^2 \
    &= (4b + 1)^2 \
    &= 16b^2 + 8b + 1 \
    &= 8(2b^2 + b) + 1
$

So there exists an integer $k$ such that $n^2 = 8k + 1$.

=== Case 2: $a$ is odd.

Let $b$ be an integer such that $a = 2b + 1$. Then:

$
n^2 &= (2a + 1)^2 \
    &= (2(2b + 1) + 1)^2 \
    &= (4b + 3)^2 \
    &= 16b^2 + 24b + 9 \
    &= 8(2b^2 + 3b + 1) + 1
$

So there exists an integer $k$ such that $n^2 = 8k + 1$.

#sym.qed

== 16

=== Part 1: $2 divides n => 2 divides.not n^3 + 13$

Let $n$ be an even integer with $n = 2a$ for some integer $a$. Then,

$
n^3 + 13 &= (2a)^3 + 13 \
         &= 8a^3 + 13 \
         &= 2(4a^3 + 6) + 1
$

So $n^3 + 13$ is odd.

=== Part 2: $2 divides.not n^3 + 13 => 2 divides n$

Let $n$ be an integer such that $n^3 + 13$ is odd, with $n^3 + 13 = 2a + 1$ for some integer $a$. Then,

$
n^3 + 13 &= 2a + 1 \
     n^3 &= 2a + 1 - 13 \
         &= 2a - 12 \
         &= 2(a - 6)
$

So $n^3$ is even. Now assume by contradiction that $n$ is odd, so $n = 2b + 1$ for some integer $b$. This implies that

$
n^3 &= (2b + 1)^3 \
    &= 8b^3 + 12b^2 + 6b + 1 \
    &= 2(4b^3 + 6b^2 + 3b) + 1
$

so $n^3$ is odd, contradicting the fact that $n^3$ is even. Therefore, by contradiction, $n$ must be even.

=== Part 3: Equivalence

It follows from parts 1 and 2 that $n$ is even if and only if $n^3 + 13$ is odd.

#sym.qed
https://github.com/typst/packages
https://raw.githubusercontent.com/typst/packages/main/packages/preview/chronos/0.1.0/src/utils.typ
typst
Apache License 2.0
#let get-participants-i(participants) = { let pars-i = (:) for (i, p) in participants.enumerate() { pars-i.insert(p.name, i) } return pars-i } #let get-group-span(participants, group) = { let min-i = participants.len() - 1 let max-i = 0 let pars-i = get-participants-i(participants) for elmt in group.elmts { if elmt.type == "seq" { let i1 = pars-i.at(elmt.p1) let i2 = pars-i.at(elmt.p2) min-i = calc.min(min-i, i1, i2) max-i = calc.max(max-i, i1, i2) } else if elmt.type == "grp" { let (i0, i1) = get-group-span(participants, elmt) min-i = calc.min(min-i, i0) max-i = calc.max(max-i, i1) } else if elmt.type == "sync" { let (i0, i1) = get-group-span(participants, elmt) min-i = calc.min(min-i, i0) max-i = calc.max(max-i, i1) } } return (min-i, max-i) } #let get-style(base-name, mods) = { let style = if base-name == "lifeline" {( fill: white, stroke: black + 1pt )} if mods == auto { return style } if type(mods) == dictionary { return style + mods } panic("Invalid type for parameter mods, expected auto or dictionary, got " + str(type(mods))) } #let fit-canvas(canvas, width: auto) = layout(size => { let m = measure(canvas) let w = m.width let h = m.height let r = if w == 0pt {0} else { if width == auto {1} else if type(width) == length { width / w } else { size.width * width / w } } let new-w = w * r let new-h = h * r r *= 100% box( width: new-w, height: new-h, scale(x: r, y: r, reflow: true, canvas) ) })
https://github.com/SWATEngineering/Docs
https://raw.githubusercontent.com/SWATEngineering/Docs/main/src/2_RTB/PianoDiProgetto/sections/PreventivoSprint/QuintoSprint.typ
typst
MIT License
#import "../../const.typ": Re_cost, Am_cost, An_cost, Ve_cost, Pr_cost, Pt_cost #import "../../functions.typ": prospettoOrario, prospettoEconomico, glossary == Quinto #glossary[sprint] *Inizio*: Venerdì 22/12/2023 *Fine*: Giovedì 28/12/2023 #prospettoOrario(sprintNumber: "5") #prospettoEconomico(sprintNumber: "5")
https://github.com/typst/packages
https://raw.githubusercontent.com/typst/packages/main/packages/preview/fh-joanneum-iit-thesis/1.2.3/template/chapters/glossary.typ
typst
Apache License 2.0
#import "global.typ": * // Not listed in table of contents (the outline) // Not numbered #heading(outlined: false, numbering: none)[ Glossary ] // Add list of terms // Usage within text will then be #gls(<key>) or plurals #glspl(<key>) // Output is sorted by key #print-glossary( ( ( key: "gc", short: "GC", long: "Garbage Collection", desc: [Garbage collection is the common name for the term automatic memory management.], ), ( key: "cow", short: "COW", long: "Copy on Write", desc: [Copy on Write is a memory allocation strategy where arrays are copied if they are to be modified.], ), ( key: "svg", short: "SVG", long: "Scalable Vector Graphics", desc: [A vector image format.], ), ( key: "csv", short: "CSV", long: "Comma-separated Values ", desc: [A human readable, plain text file format using commas to separate the values.], ), ) )
https://github.com/gianzamboni/cancionero
https://raw.githubusercontent.com/gianzamboni/cancionero/main/theme/music.typ
typst
#import "@preview/chordx:0.4.0": * #import "@preview/cetz:0.2.2": * #let chart-chord = chart-chord.with(size: 2em, design: "round") #let chordsData = ( Am: ( fingers: "nn231nnn", tabs: "xo231o"), A: ( fingers: "nn23nnn", tabs: "oo23oo"), E: ( fingers: "n231nnn", tabs: "o231oo"), Dm: ( fingers: "nnn231", tabs: "xxo231"), D7: ( fingers: "nnn213", tabs: "xxo213"), G: ( fingers: "21nn34", tabs: "32oo33") ) #let drawChord(name) = { let data = chordsData.at(name) chart-chord(..data, name) } #let drawCompass = place(left, [ $4$\ $4$ ]) #let tickSeparation = 1.75em #let chartLine(position, width: 1pt) = place(left + top, { line(start: (position, -0.125em), end: (position, 70%), stroke: width) }) #let drawTicks(tickNumber) = { return range(0, tickNumber).map(tick => { let distance = tick*tickSeparation + 1.5em place(left + top, dx: distance, dy: 1.5em, [ #set text(size: 0.75em) #(calc.rem(tick, 4) + 1) ]) }).join() } #let drawCompassDivisions(spaces) = { return range(0, calc.floor(spaces/4)-1).map(division => [ #let xStart = (division + 1)*tickSeparation*4 + tickSeparation*0.5 #place(left + top, { chartLine(xStart) }) ]).join([]) } #let drawFinalBar(spaces) = { let xDistance = spaces*tickSeparation + 1em place(left + top, dx: xDistance, dy: 12.5%, [ #circle(radius: 0.125em, fill: color.black) #v(-0.5em) #circle(radius: 0.125em, fill: color.black) ]) chartLine(xDistance + 0.5em, width: 2pt) chartLine(xDistance + 0.75em, width: 2pt) } #let drawNotes(notes) = { let noteDisplacement = state("noteDisplacement", 1.5) let notesRender = notes.fold((1.25em, []), (value, tuple) => { let newDisplacement = value.at(0) + tuple.at(1)*tickSeparation let newContent = value.at(1) + [ #place(left + top, dx: value.at(0), [ #text(size: 1em, tuple.at(0)) ]) ] return (newDisplacement, newContent) }) return notesRender.at(1) } #let beatDiagram(notes) = { let spaces = notes.map(note => note.at(1)).sum() assert(calc.rem(spaces, 4) == 0) box(height: 3em, width: 100%)[ #drawCompass #drawTicks(spaces) #drawCompassDivisions(spaces) #drawFinalBar(spaces) #drawNotes(notes) ] v(2em) }
https://github.com/typst/packages
https://raw.githubusercontent.com/typst/packages/main/packages/preview/rivet/0.1.0/src/structure.typ
typst
Apache License 2.0
#import "range.typ" as rng #import "util.typ" #let make( name, bits, ranges, start: 0 ) = { return ( name: name, bits: bits, ranges: ranges, start: start ) } #let load(id, data) = { let struct = (id: id) let ranges = (:) for (range-span, range-data) in data.ranges { let (start, end) = rng.parse-span(str(range-span)) ranges.insert( rng.key(start, end), rng.load(start, end, range-data) ) } let ranges2 = (:) for (k, range_) in ranges { if range_.values != none and range_.depends-on != none { let depends-key = rng.key(..range_.depends-on) let depends-range = ranges.at(depends-key) let bits = rng.bits(depends-range) let values = (:) for (v, d) in range_.values { v = util.z-fill(str(int(v)), bits) values.insert(v, d) } range_.values = values } ranges2.insert(k, range_) } return make( id, int(data.bits), ranges2, start: data.at("start", default: 0) ) } #let get-sorted-ranges(struct) = { let ranges = struct.ranges.values() return ranges.sorted(key: r => r.end) }
https://github.com/jgm/typst-hs
https://raw.githubusercontent.com/jgm/typst-hs/main/test/typ/compiler/array-13.typ
typst
Other
// Test the `push` and `pop` methods.
#{
  let tasks = (a: (1, 2, 3), b: (4, 5, 6))
  test(tasks.at("a").pop(), 3)
  tasks.b.push(7)
  test(tasks.a, (1, 2))
  test(tasks.at("b"), (4, 5, 6, 7))
}
https://github.com/Flower101010/Typst_template
https://raw.githubusercontent.com/Flower101010/Typst_template/main/template.typ
typst
#import "@preview/ctheorems:1.1.2": * #let conf( title: [], institute: [], auther: [], class: [], doc, ) = [ #show: thmrules.with(qed-symbol: $square$) #set heading(numbering: "A.1.") #set text(font: "Source Serif Pro", lang: "zh", region: "cn") #set page( header: context { if counter(page).get().first() > 1 [ #text( font: "New Computer Modern", style: "italic", class,) - #text( font: "New Computer Modern", title) #h(1fr) #counter(page).display() #line( length: 100%, start: (0pt, -10.5pt), stroke: 0.4pt, ) ] }, footer: context { if counter(page).get().first() > 1 [ #h(1fr) #text( font: "New Computer Modern", style: "italic", auther, ) #line( length: 100%, start: (0pt, -25pt), stroke: 0.4pt, ) ] else [ #align(center)[#counter(page).display()] ] }, ) #align(center)[ #text( 11pt, weight: "thin", font: "New Computer Modern", style: "normal", smallcaps(institute), ) #line( length: 100%, start: (0pt, -5pt), stroke: 0.4pt, ) #move( dx: 0pt, dy: -9pt, text( 17pt, weight: "black", font: "New Computer Modern", style: "normal", title, ), ) #move( dx: 0pt, dy: -9pt, text( 11pt, weight: "light", font: "New Computer Modern", style: "italic", auther, ), ) #line( length: 100%, start: (0pt, -13pt), stroke: 0.4pt, ) #move( dx: 0pt, dy: -19pt, text( 11pt, weight: "thin", font: "New Computer Modern", style: "normal", class, ), ) ] #doc ] #let proof = thmproof("proof", "Proof") #let problem(item,index: "#") ={ layout(size => [ #let pro = { move( dx: 0pt, dy: -12.5pt, block( fill: orange.lighten(95%), inset: (x: 4pt, y: 4pt), stroke: 1pt + orange , height: 15pt, radius:4pt, "Problem "+ index ))+ move( dx: 0pt, dy: -19pt, item,) } #let orignal = { block( fill: orange.lighten(90%), radius:4pt, inset: (x: 8pt, y: 5pt), width: 100%, height: auto, above: 2.2em, stroke: 1.5pt + gradient.linear(..color.map.flare), breakable: false, pro, ) } #let (height,) = measure( block(width: size.width, orignal), ) #v(10pt) #block( fill: orange.lighten(90%), radius:4pt, inset: (x: 8pt, y: 5pt), width: 100%, height: height - 10pt, above: 2.2em, stroke: 1.5pt + gradient.linear(..color.map.flare), breakable: false, pro, ) ]) } #let theorem = thmbox( "theorem", // identifier "Theorem", // head ) #let lemma = thmbox( "theorem", // identifier - same as that of theorem "Lemma", // head fill: rgb("#efe6ff") ) #let example = thmplain( "example", "Example", ).with(numbering: none) #let solution = thmplain( "theorem", "Solution", ).with(numbering: none)
https://github.com/typst/packages
https://raw.githubusercontent.com/typst/packages/main/packages/preview/unichar/0.1.0/ucd/block-1400.typ
typst
Apache License 2.0
#let data = ( ("CANADIAN SYLLABICS HYPHEN", "Pd", 0), ("CANADIAN SYLLABICS E", "Lo", 0), ("CANADIAN SYLLABICS AAI", "Lo", 0), ("CANADIAN SYLLABICS I", "Lo", 0), ("CANADIAN SYLLABICS II", "Lo", 0), ("CANADIAN SYLLABICS O", "Lo", 0), ("CANADIAN SYLLABICS OO", "Lo", 0), ("CANADIAN SYLLABICS Y-CREE OO", "Lo", 0), ("CANADIAN SYLLABICS CARRIER EE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER I", "Lo", 0), ("CANADIAN SYLLABICS A", "Lo", 0), ("CANADIAN SYLLABICS AA", "Lo", 0), ("CANADIAN SYLLABICS WE", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE WE", "Lo", 0), ("CANADIAN SYLLABICS WI", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE WI", "Lo", 0), ("CANADIAN SYLLABICS WII", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE WII", "Lo", 0), ("CANADIAN SYLLABICS WO", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE WO", "Lo", 0), ("CANADIAN SYLLABICS WOO", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE WOO", "Lo", 0), ("CANADIAN SYLLABICS NASKAPI WOO", "Lo", 0), ("CANADIAN SYLLABICS WA", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE WA", "Lo", 0), ("CANADIAN SYLLABICS WAA", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE WAA", "Lo", 0), ("CANADIAN SYLLABICS NASKAPI WAA", "Lo", 0), ("CANADIAN SYLLABICS AI", "Lo", 0), ("CANADIAN SYLLABICS Y-CREE W", "Lo", 0), ("CANADIAN SYLLABICS GLOTTAL STOP", "Lo", 0), ("CANADIAN SYLLABICS FINAL ACUTE", "Lo", 0), ("CANADIAN SYLLABICS FINAL GRAVE", "Lo", 0), ("CANADIAN SYLLABICS FINAL BOTTOM HALF RING", "Lo", 0), ("CANADIAN SYLLABICS FINAL TOP HALF RING", "Lo", 0), ("CANADIAN SYLLABICS FINAL RIGHT HALF RING", "Lo", 0), ("CANADIAN SYLLABICS FINAL RING", "Lo", 0), ("CANADIAN SYLLABICS FINAL DOUBLE ACUTE", "Lo", 0), ("CANADIAN SYLLABICS FINAL DOUBLE SHORT VERTICAL STROKES", "Lo", 0), ("CANADIAN SYLLABICS FINAL MIDDLE DOT", "Lo", 0), ("CANADIAN SYLLABICS FINAL SHORT HORIZONTAL STROKE", "Lo", 0), ("CANADIAN SYLLABICS FINAL PLUS", "Lo", 0), ("CANADIAN SYLLABICS FINAL DOWN TACK", "Lo", 0), ("CANADIAN SYLLABICS EN", "Lo", 0), ("CANADIAN SYLLABICS IN", "Lo", 0), ("CANADIAN SYLLABICS ON", "Lo", 0), ("CANADIAN SYLLABICS AN", "Lo", 0), ("CANADIAN SYLLABICS PE", "Lo", 0), ("CANADIAN SYLLABICS PAAI", "Lo", 0), ("CANADIAN SYLLABICS PI", "Lo", 0), ("CANADIAN SYLLABICS PII", "Lo", 0), ("CANADIAN SYLLABICS PO", "Lo", 0), ("CANADIAN SYLLABICS POO", "Lo", 0), ("CANADIAN SYLLABICS Y-CREE POO", "Lo", 0), ("CANADIAN SYLLABICS <NAME>", "Lo", 0), ("CANADIAN SYLLABICS <NAME>", "Lo", 0), ("CANADIAN SYLLABICS PA", "Lo", 0), ("CANADIAN SYLLABICS PAA", "Lo", 0), ("CANADIAN SYLLABICS PWE", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE PWE", "Lo", 0), ("CANADIAN SYLLABICS PWI", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE PWI", "Lo", 0), ("CANADIAN SYLLABICS PWII", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE PWII", "Lo", 0), ("CANADIAN SYLLABICS PWO", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE PWO", "Lo", 0), ("CANADIAN SYLLABICS PWOO", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE PWOO", "Lo", 0), ("CANADIAN SYLLABICS PWA", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE PWA", "Lo", 0), ("CANADIAN SYLLABICS PWAA", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE PWAA", "Lo", 0), ("CANADIAN SYLLABICS Y-CREE PWAA", "Lo", 0), ("CANADIAN SYLLABICS P", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE P", "Lo", 0), ("CANADIAN SYLLABICS <NAME>", "Lo", 0), ("CANADIAN SYLLABICS TE", "Lo", 0), ("CANADIAN SYLLABICS TAAI", "Lo", 0), ("CANADIAN SYLLABICS TI", "Lo", 0), ("CANADIAN SYLLABICS TII", "Lo", 0), ("CANADIAN SYLLABICS TO", "Lo", 0), ("CANADIAN SYLLABICS TOO", "Lo", 0), ("CANADIAN SYLLABICS Y-CREE TOO", "Lo", 0), ("CANADIAN SYLLABICS CARRIER DEE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER DI", 
"Lo", 0), ("CANADIAN SYLLABICS TA", "Lo", 0), ("CANADIAN SYLLABICS TAA", "Lo", 0), ("CANADIAN SYLLABICS TWE", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE TWE", "Lo", 0), ("CANADIAN SYLLABICS TWI", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE TWI", "Lo", 0), ("CANADIAN SYLLABICS TWII", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE TWII", "Lo", 0), ("CANADIAN SYLLABICS TWO", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE TWO", "Lo", 0), ("CANADIAN SYLLABICS TWOO", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE TWOO", "Lo", 0), ("CANADIAN SYLLABICS TWA", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE TWA", "Lo", 0), ("CANADIAN SYLLABICS TWAA", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE TWAA", "Lo", 0), ("CANADIAN SYLLABICS NASKAPI TWAA", "Lo", 0), ("CANADIAN SYLLABICS T", "Lo", 0), ("CANADIAN SYLLABICS TTE", "Lo", 0), ("CANADIAN SYLLABICS TTI", "Lo", 0), ("CANADIAN SYLLABICS TTO", "Lo", 0), ("CANADIAN SYLLABICS TTA", "Lo", 0), ("CANADIAN SYLLABICS KE", "Lo", 0), ("CANADIAN SYLLABICS KAAI", "Lo", 0), ("CANADIAN SYLLABICS KI", "Lo", 0), ("CANADIAN SYLLABICS KII", "Lo", 0), ("CANADIAN SYLLABICS KO", "Lo", 0), ("CANADIAN SYLLABICS KOO", "Lo", 0), ("CANADIAN SYLLABICS Y-CREE KOO", "Lo", 0), ("CANADIAN SYLLABICS KA", "Lo", 0), ("CANADIAN SYLLABICS KAA", "Lo", 0), ("CANADIAN SYLLABICS KWE", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE KWE", "Lo", 0), ("CANADIAN SYLLABICS KWI", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE KWI", "Lo", 0), ("CANADIAN SYLLABICS KWII", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE KWII", "Lo", 0), ("CANADIAN SYLLABICS KWO", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE KWO", "Lo", 0), ("CANADIAN SYLLABICS KWOO", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE KWOO", "Lo", 0), ("CANADIAN SYLLABICS KWA", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE KWA", "Lo", 0), ("CANADIAN SYLLABICS KWAA", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE KWAA", "Lo", 0), ("CANADIAN SYLLABICS NASKAPI KWAA", "Lo", 0), ("CANADIAN SYLLABICS K", "Lo", 0), ("CANADIAN SYLLABICS KW", "Lo", 0), ("CANADIAN SYLLABICS SOUTH-SLAVEY KEH", "Lo", 0), ("CANADIAN SYLLABICS SOUTH-SLAVEY KIH", "Lo", 0), ("CANADIAN SYLLABICS SOUTH-SLAVEY KOH", "Lo", 0), ("CANADIAN SYLLABICS SOUTH-SLAVEY KAH", "Lo", 0), ("CANADIAN SYLLABICS CE", "Lo", 0), ("CANADIAN SYLLABICS CAAI", "Lo", 0), ("CANADIAN SYLLABICS CI", "Lo", 0), ("CANADIAN SYLLABICS CII", "Lo", 0), ("CANADIAN SYLLABICS CO", "Lo", 0), ("CANADIAN SYLLABICS COO", "Lo", 0), ("CANADIAN SYLLABICS Y-CREE COO", "Lo", 0), ("CANADIAN SYLLABICS CA", "Lo", 0), ("CANADIAN SYLLABICS CAA", "Lo", 0), ("CANADIAN SYLLABICS CWE", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE CWE", "Lo", 0), ("CANADIAN SYLLABICS CWI", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE CWI", "Lo", 0), ("CANADIAN SYLLABICS CWII", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE CWII", "Lo", 0), ("CANADIAN SYLLABICS CWO", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE CWO", "Lo", 0), ("CANADIAN SYLLABICS CWOO", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE CWOO", "Lo", 0), ("CANADIAN SYLLABICS CWA", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE CWA", "Lo", 0), ("CANADIAN SYLLABICS CWAA", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE CWAA", "Lo", 0), ("CANADIAN SYLLABICS NASKAPI CWAA", "Lo", 0), ("CANADIAN SYLLABICS C", "Lo", 0), ("CANADIAN SYLLABICS SAYISI TH", "Lo", 0), ("CANADIAN SYLLABICS ME", "Lo", 0), ("CANADIAN SYLLABICS MAAI", "Lo", 0), ("CANADIAN SYLLABICS MI", "Lo", 0), ("CANADIAN SYLLABICS MII", "Lo", 0), ("CANADIAN SYLLABICS MO", "Lo", 0), ("CANADIAN SYLLABICS MOO", "Lo", 0), ("CANADIAN SYLLABICS Y-CREE MOO", "Lo", 0), ("CANADIAN SYLLABICS MA", "Lo", 0), ("CANADIAN SYLLABICS MAA", "Lo", 0), 
("CANADIAN SYLLABICS MWE", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE MWE", "Lo", 0), ("CANADIAN SYLLABICS MWI", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE MWI", "Lo", 0), ("CANADIAN SYLLABICS MWII", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE MWII", "Lo", 0), ("CANADIAN SYLLABICS MWO", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE MWO", "Lo", 0), ("CANADIAN SYLLABICS MWOO", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE MWOO", "Lo", 0), ("CANADIAN SYLLABICS MWA", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE MWA", "Lo", 0), ("CANADIAN SYLLABICS MWAA", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE MWAA", "Lo", 0), ("CANADIAN SYLLABICS NASKAPI MWAA", "Lo", 0), ("CANADIAN SYLLABICS M", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE M", "Lo", 0), ("CANADIAN SYLLABICS MH", "Lo", 0), ("CANADIAN SYLLABICS ATHAPASCAN M", "Lo", 0), ("CANADIAN SYLLABICS SAYISI M", "Lo", 0), ("CANADIAN SYLLABICS NE", "Lo", 0), ("CANADIAN SYLLABICS NAAI", "Lo", 0), ("CANADIAN SYLLABICS NI", "Lo", 0), ("CANADIAN SYLLABICS NII", "Lo", 0), ("CANADIAN SYLLABICS NO", "Lo", 0), ("CANADIAN SYLLABICS NOO", "Lo", 0), ("CANADIAN SYLLABICS Y-CREE NOO", "Lo", 0), ("CANADIAN SYLLABICS NA", "Lo", 0), ("CANADIAN SYLLABICS NAA", "Lo", 0), ("CANADIAN SYLLABICS NWE", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE NWE", "Lo", 0), ("CANADIAN SYLLABICS NWA", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE NWA", "Lo", 0), ("CANADIAN SYLLABICS NWAA", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE NWAA", "Lo", 0), ("CANADIAN SYLLABICS NASKAPI NWAA", "Lo", 0), ("CANADIAN SYLLABICS N", "Lo", 0), ("CANADIAN SYLLABICS <NAME>", "Lo", 0), ("CANADIAN SYLLABICS NH", "Lo", 0), ("CANADIAN SYLLABICS LE", "Lo", 0), ("CANADIAN SYLLABICS LAAI", "Lo", 0), ("CANADIAN SYLLABICS LI", "Lo", 0), ("CANADIAN SYLLABICS LII", "Lo", 0), ("CANADIAN SYLLABICS LO", "Lo", 0), ("CANADIAN SYLLABICS LOO", "Lo", 0), ("CANADIAN SYLLABICS Y-CREE LOO", "Lo", 0), ("CANADIAN SYLLABICS LA", "Lo", 0), ("CANADIAN SYLLABICS LAA", "Lo", 0), ("CANADIAN SYLLABICS LWE", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE LWE", "Lo", 0), ("CANADIAN SYLLABICS LWI", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE LWI", "Lo", 0), ("CANADIAN SYLLABICS LWII", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE LWII", "Lo", 0), ("CANADIAN SYLLABICS LWO", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE LWO", "Lo", 0), ("CANADIAN SYLLABICS LWOO", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE LWOO", "Lo", 0), ("CANADIAN SYLLABICS LWA", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE LWA", "Lo", 0), ("CANADIAN SYLLABICS LWAA", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE LWAA", "Lo", 0), ("CANADIAN SYLLABICS L", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE L", "Lo", 0), ("CANADIAN SYLLABICS MEDIAL L", "Lo", 0), ("CANADIAN SYLLABICS SE", "Lo", 0), ("CANADIAN SYLLABICS SAAI", "Lo", 0), ("CANADIAN SYLLABICS SI", "Lo", 0), ("CANADIAN SYLLABICS SII", "Lo", 0), ("CANADIAN SYLLABICS SO", "Lo", 0), ("CANADIAN SYLLABICS SOO", "Lo", 0), ("CANADIAN SYLLABICS Y-CREE SOO", "Lo", 0), ("CANADIAN SYLLABICS SA", "Lo", 0), ("CANADIAN SYLLABICS SAA", "Lo", 0), ("CANADIAN SYLLABICS SWE", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE SWE", "Lo", 0), ("CANADIAN SYLLABICS SWI", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE SWI", "Lo", 0), ("CANADIAN SYLLABICS SWII", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE SWII", "Lo", 0), ("CANADIAN SYLLABICS SWO", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE SWO", "Lo", 0), ("CANADIAN SYLLABICS SWOO", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE SWOO", "Lo", 0), ("CANADIAN SYLLABICS SWA", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE SWA", "Lo", 0), ("CANADIAN SYLLABICS SWAA", "Lo", 0), ("CANADIAN SYLLABICS 
WEST-CREE SWAA", "Lo", 0), ("CANADIAN SYLLABICS NASKAPI SWAA", "Lo", 0), ("CANADIAN SYLLABICS S", "Lo", 0), ("CANADIAN SYLLABICS ATHAPASCAN S", "Lo", 0), ("CANADIAN SYLLABICS SW", "Lo", 0), ("CANADIAN SYLLABICS BLACKFOOT S", "Lo", 0), ("CANADIAN SYLLABICS MOOSE-CREE SK", "Lo", 0), ("CANADIAN SYLLABICS NASKAPI SKW", "Lo", 0), ("CANADIAN SYLLABICS NASKAPI S-W", "Lo", 0), ("CANADIAN SYLLABICS NASKAPI SPWA", "Lo", 0), ("CANADIAN SYLLABICS NASKAPI STWA", "Lo", 0), ("CANADIAN SYLLABICS NASKAPI SKWA", "Lo", 0), ("CANADIAN SYLLABICS NASKAPI SCWA", "Lo", 0), ("CANADIAN SYLLABICS SHE", "Lo", 0), ("CANADIAN SYLLABICS SHI", "Lo", 0), ("CANADIAN SYLLABICS SHII", "Lo", 0), ("CANADIAN SYLLABICS SHO", "Lo", 0), ("CANADIAN SYLLABICS SHOO", "Lo", 0), ("CANADIAN SYLLABICS SHA", "Lo", 0), ("CANADIAN SYLLABICS SHAA", "Lo", 0), ("CANADIAN SYLLABICS SHWE", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE SHWE", "Lo", 0), ("CANADIAN SYLLABICS SHWI", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE SHWI", "Lo", 0), ("CANADIAN SYLLABICS SHWII", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE SHWII", "Lo", 0), ("CANADIAN SYLLABICS SHWO", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE SHWO", "Lo", 0), ("CANADIAN SYLLABICS SHWOO", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE SHWOO", "Lo", 0), ("CANADIAN SYLLABICS SHWA", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE SHWA", "Lo", 0), ("CANADIAN SYLLABICS SHWAA", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE SHWAA", "Lo", 0), ("CANADIAN SYLLABICS SH", "Lo", 0), ("CANADIAN SYLLABICS YE", "Lo", 0), ("CANADIAN SYLLABICS YAAI", "Lo", 0), ("CANADIAN SYLLABICS YI", "Lo", 0), ("CANADIAN SYLLABICS YII", "Lo", 0), ("CANADIAN SYLLABICS YO", "Lo", 0), ("CANADIAN SYLLABICS YOO", "Lo", 0), ("CANADIAN SYLLABICS Y-CREE YOO", "Lo", 0), ("CANADIAN SYLLABICS YA", "Lo", 0), ("CANADIAN SYLLABICS YAA", "Lo", 0), ("CANADIAN SYLLABICS YWE", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE YWE", "Lo", 0), ("CANADIAN SYLLABICS YWI", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE YWI", "Lo", 0), ("CANADIAN SYLLABICS YWII", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE YWII", "Lo", 0), ("CANADIAN SYLLABICS YWO", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE YWO", "Lo", 0), ("CANADIAN SYLLABICS YWOO", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE YWOO", "Lo", 0), ("CANADIAN SYLLABICS YWA", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE YWA", "Lo", 0), ("CANADIAN SYLLABICS YWAA", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE YWAA", "Lo", 0), ("CANADIAN SYLLABICS NASKAPI YWAA", "Lo", 0), ("CANADIAN SYLLABICS Y", "Lo", 0), ("CANADIAN SYLLABICS BIBLE-CREE Y", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE Y", "Lo", 0), ("CANADIAN SYLLABICS SAYISI YI", "Lo", 0), ("CANADIAN SYLLABICS RE", "Lo", 0), ("CANADIAN SYLLABICS R-CREE RE", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE LE", "Lo", 0), ("CANADIAN SYLLABICS RAAI", "Lo", 0), ("CANADIAN SYLLABICS RI", "Lo", 0), ("CANADIAN SYLLABICS RII", "Lo", 0), ("CANADIAN SYLLABICS RO", "Lo", 0), ("CANADIAN SYLLABICS ROO", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE LO", "Lo", 0), ("CANADIAN SYLLABICS RA", "Lo", 0), ("CANADIAN SYLLABICS RAA", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE LA", "Lo", 0), ("CANADIAN SYLLABICS RWAA", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE RWAA", "Lo", 0), ("CANADIAN SYLLABICS R", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE R", "Lo", 0), ("CANADIAN SYLLABICS MEDIAL R", "Lo", 0), ("CANADIAN SYLLABICS FE", "Lo", 0), ("CANADIAN SYLLABICS FAAI", "Lo", 0), ("CANADIAN SYLLABICS FI", "Lo", 0), ("CANADIAN SYLLABICS FII", "Lo", 0), ("CANADIAN SYLLABICS FO", "Lo", 0), ("CANADIAN SYLLABICS FOO", "Lo", 0), ("CANADIAN SYLLABICS FA", "Lo", 0), 
("CANADIAN SYLLABICS FAA", "Lo", 0), ("CANADIAN SYLLABICS FWAA", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE FWAA", "Lo", 0), ("CANADIAN SYLLABICS F", "Lo", 0), ("CANADIAN SYLLABICS THE", "Lo", 0), ("CANADIAN SYLLABICS N-CREE THE", "Lo", 0), ("CANADIAN SYLLABICS THI", "Lo", 0), ("CANADIAN SYLLABICS N-CREE THI", "Lo", 0), ("CANADIAN SYLLABICS THII", "Lo", 0), ("CANADIAN SYLLABICS N-CREE THII", "Lo", 0), ("CANADIAN SYLLABICS THO", "Lo", 0), ("CANADIAN SYLLABICS THOO", "Lo", 0), ("CANADIAN SYLLABICS THA", "Lo", 0), ("CANADIAN SYLLABICS THAA", "Lo", 0), ("CANADIAN SYLLABICS THWAA", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE THWAA", "Lo", 0), ("CANADIAN SYLLABICS TH", "Lo", 0), ("CANADIAN SYLLABICS TTHE", "Lo", 0), ("CANADIAN SYLLABICS TTHI", "Lo", 0), ("CANADIAN SYLLABICS TTHO", "Lo", 0), ("CANADIAN SYLLABICS TTHA", "Lo", 0), ("CANADIAN SYLLABICS TTH", "Lo", 0), ("CANADIAN SYLLABICS TYE", "Lo", 0), ("CANADIAN SYLLABICS TYI", "Lo", 0), ("CANADIAN SYLLABICS TYO", "Lo", 0), ("CANADIAN SYLLABICS TYA", "Lo", 0), ("CANADIAN SYLLABICS NUNAVIK HE", "Lo", 0), ("CANADIAN SYLLABICS NUNAVIK HI", "Lo", 0), ("CANADIAN SYLLABICS NUNAVIK HII", "Lo", 0), ("CANADIAN SYLLABICS NUNAVIK HO", "Lo", 0), ("CANADIAN SYLLABICS NUNAVIK HOO", "Lo", 0), ("CANADIAN SYLLABICS NUNAVIK HA", "Lo", 0), ("CANADIAN SYLLABICS NUNAVIK HAA", "Lo", 0), ("CANADIAN SYLLABICS NUNAVIK H", "Lo", 0), ("CANADIAN SYLLABICS NUNAVUT H", "Lo", 0), ("CANADIAN SYLLABICS HK", "Lo", 0), ("CANADIAN SYLLABICS QAAI", "Lo", 0), ("CANADIAN SYLLABICS QI", "Lo", 0), ("CANADIAN SYLLABICS QII", "Lo", 0), ("CANADIAN SYLLABICS QO", "Lo", 0), ("CANADIAN SYLLABICS QOO", "Lo", 0), ("CANADIAN SYLLABICS QA", "Lo", 0), ("CANADIAN SYLLABICS QAA", "Lo", 0), ("CANADIAN SYLLABICS Q", "Lo", 0), ("CANADIAN SYLLABICS TLHE", "Lo", 0), ("CANADIAN SYLLABICS TLHI", "Lo", 0), ("CANADIAN SYLLABICS TLHO", "Lo", 0), ("CANADIAN SYLLABICS TLHA", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE RE", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE RI", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE RO", "Lo", 0), ("CANADIAN SYLLABICS WEST-CREE RA", "Lo", 0), ("CANADIAN SYLLABICS NGAAI", "Lo", 0), ("CANADIAN SYLLABICS NGI", "Lo", 0), ("CANADIAN SYLLABICS NGII", "Lo", 0), ("CANADIAN SYLLABICS NGO", "Lo", 0), ("CANADIAN SYLLABICS NGOO", "Lo", 0), ("CANADIAN SYLLABICS NGA", "Lo", 0), ("CANADIAN SYLLABICS NGAA", "Lo", 0), ("CANADIAN SYLLABICS NG", "Lo", 0), ("CANADIAN SYLLABICS NNG", "Lo", 0), ("CANADIAN SYLLABICS SAYISI SHE", "Lo", 0), ("CANADIAN SYLLABICS SAYISI SHI", "Lo", 0), ("CANADIAN SYLLABICS SAYISI SHO", "Lo", 0), ("CANADIAN SYLLABICS SAYISI SHA", "Lo", 0), ("CANADIAN SYLLABICS WOODS-CREE THE", "Lo", 0), ("CANADIAN SYLLABICS WOODS-CREE THI", "Lo", 0), ("CANADIAN SYLLABICS WOODS-CREE THO", "Lo", 0), ("CANADIAN SYLLABICS WOODS-CREE THA", "Lo", 0), ("CANADIAN SYLLABICS WOODS-CREE TH", "Lo", 0), ("CANADIAN SYLLABICS LHI", "Lo", 0), ("CANADIAN SYLLABICS LHII", "Lo", 0), ("CANADIAN SYLLABICS LHO", "Lo", 0), ("CANADIAN SYLLABICS LHOO", "Lo", 0), ("CANADIAN SYLLABICS LHA", "Lo", 0), ("CANADIAN SYLLABICS LHAA", "Lo", 0), ("CANADIAN SYLLABICS LH", "Lo", 0), ("CANADIAN SYLLABICS TH-CREE THE", "Lo", 0), ("CANADIAN SYLLABICS TH-CREE THI", "Lo", 0), ("CANADIAN SYLLABICS TH-CREE THII", "Lo", 0), ("CANADIAN SYLLABICS TH-CREE THO", "Lo", 0), ("CANADIAN SYLLABICS TH-CREE THOO", "Lo", 0), ("CANADIAN SYLLABICS TH-CREE THA", "Lo", 0), ("CANADIAN SYLLABICS TH-CREE THAA", "Lo", 0), ("CANADIAN SYLLABICS TH-CREE TH", "Lo", 0), ("CANADIAN SYLLABICS AIVILIK B", "Lo", 0), ("CANADIAN SYLLABICS BLACKFOOT E", "Lo", 0), ("CANADIAN 
SYLLABICS BLACKFOOT I", "Lo", 0), ("CANADIAN SYLLABICS BLACKFOOT O", "Lo", 0), ("CANADIAN SYLLABICS BLACKFOOT A", "Lo", 0), ("CANADIAN SYLLABICS BLACKFOOT WE", "Lo", 0), ("CANADIAN SYLLABICS BLACKFOOT WI", "Lo", 0), ("CANADIAN SYLLABICS BLACKFOOT WO", "Lo", 0), ("CANADIAN SYLLABICS BLACKFOOT WA", "Lo", 0), ("CANADIAN SYLLABICS BLACKFOOT NE", "Lo", 0), ("CANADIAN SYLLABICS BLACKFOOT NI", "Lo", 0), ("CANADIAN SYLLABICS BLACKFOOT NO", "Lo", 0), ("CANADIAN SYLLABICS BLACKFOOT NA", "Lo", 0), ("CANADIAN SYLLABICS BLACKFOOT KE", "Lo", 0), ("CANADIAN SYLLABICS BLACKFOOT KI", "Lo", 0), ("CANADIAN SYLLABICS BLACKFOOT KO", "Lo", 0), ("CANADIAN SYLLABICS BLACKFOOT KA", "Lo", 0), ("CANADIAN SYLLABICS SAYISI HE", "Lo", 0), ("CANADIAN SYLLABICS SAYISI HI", "Lo", 0), ("CANADIAN SYLLABICS SAYISI HO", "Lo", 0), ("CANADIAN SYLLABICS SAYISI HA", "Lo", 0), ("CANADIAN SYLLABICS CARRIER GHU", "Lo", 0), ("CANADIAN SYLLABICS CARRIER GHO", "Lo", 0), ("CANADIAN SYLLABICS CARRIER GHE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER GHEE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER GHI", "Lo", 0), ("CANADIAN SYLLABICS CARRIER GHA", "Lo", 0), ("CANADIAN SYLLABICS CARRIER RU", "Lo", 0), ("CANADIAN SYLLABICS CARRIER RO", "Lo", 0), ("CANADIAN SYLLABICS CARRIER RE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER REE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER RI", "Lo", 0), ("CANADIAN SYLLABICS CARRIER RA", "Lo", 0), ("CANADIAN SYLLABICS CARRIER WU", "Lo", 0), ("CANADIAN SYLLABICS CARRIER WO", "Lo", 0), ("CANADIAN SYLLABICS CARRIER WE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER WEE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER WI", "Lo", 0), ("CANADIAN SYLLABICS CARRIER WA", "Lo", 0), ("CANADIAN SYLLABICS CARRIER HWU", "Lo", 0), ("CANADIAN SYLLABICS CARRIER HWO", "Lo", 0), ("CANADIAN SYLLABICS CARRIER HWE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER HWEE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER HWI", "Lo", 0), ("CANADIAN SYLLABICS CARRIER HWA", "Lo", 0), ("CANADIAN SYLLABICS CARRIER THU", "Lo", 0), ("CANADIAN SYLLABICS CARRIER THO", "Lo", 0), ("CANADIAN SYLLABICS CARRIER THE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER THEE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER THI", "Lo", 0), ("CANADIAN SYLLABICS CARRIER THA", "Lo", 0), ("CANADIAN SYLLABICS CARRIER TTU", "Lo", 0), ("CANADIAN SYLLABICS CARRIER TTO", "Lo", 0), ("CANADIAN SYLLABICS CARRIER TTE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER TTEE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER TTI", "Lo", 0), ("CANADIAN SYLLABICS CARRIER TTA", "Lo", 0), ("CANADIAN SYLLABICS CARRIER PU", "Lo", 0), ("CANADIAN SYLLABICS CARRIER PO", "Lo", 0), ("CANADIAN SYLLABICS CARRIER PE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER PEE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER PI", "Lo", 0), ("CANADIAN SYLLABICS CARRIER PA", "Lo", 0), ("CANADIAN SYLLABICS CARRIER P", "Lo", 0), ("CANADIAN SYLLABICS CARRIER GU", "Lo", 0), ("CANADIAN SYLLABICS CARRIER GO", "Lo", 0), ("CANADIAN SYLLABICS CARRIER GE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER GEE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER GI", "Lo", 0), ("CANADIAN SYLLABICS CARRIER GA", "Lo", 0), ("CANADIAN SYLLABICS CARRIER KHU", "Lo", 0), ("CANADIAN SYLLABICS CARRIER KHO", "Lo", 0), ("CANADIAN SYLLABICS CARRIER KHE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER KHEE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER KHI", "Lo", 0), ("CANADIAN SYLLABICS CARRIER KHA", "Lo", 0), ("CANADIAN SYLLABICS CARRIER KKU", "Lo", 0), ("CANADIAN SYLLABICS CARRIER KKO", "Lo", 0), ("CANADIAN SYLLABICS CARRIER KKE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER KKEE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER KKI", "Lo", 0), ("CANADIAN SYLLABICS 
CARRIER KKA", "Lo", 0), ("CANADIAN SYLLABICS CARRIER KK", "Lo", 0), ("CANADIAN SYLLABICS CARRIER NU", "Lo", 0), ("CANADIAN SYLLABICS CARRIER NO", "Lo", 0), ("CANADIAN SYLLABICS CARRIER NE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER NEE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER NI", "Lo", 0), ("CANADIAN SYLLABICS CARRIER NA", "Lo", 0), ("CANADIAN SYLLABICS CARRIER MU", "Lo", 0), ("CANADIAN SYLLABICS CARRIER MO", "Lo", 0), ("CANADIAN SYLLABICS CARRIER ME", "Lo", 0), ("CANADIAN SYLLABICS CARRIER MEE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER MI", "Lo", 0), ("CANADIAN SYLLABICS CARRIER MA", "Lo", 0), ("CANADIAN SYLLABICS CARRIER YU", "Lo", 0), ("CANADIAN SYLLABICS CARRIER YO", "Lo", 0), ("CANADIAN SYLLABICS CARRIER YE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER YEE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER YI", "Lo", 0), ("CANADIAN SYLLABICS CARRIER YA", "Lo", 0), ("CANADIAN SYLLABICS CARRIER JU", "Lo", 0), ("CANADIAN SYLLABICS SAYISI JU", "Lo", 0), ("CANADIAN SYLLABICS CARRIER JO", "Lo", 0), ("CANADIAN SYLLABICS CARRIER JE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER JEE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER JI", "Lo", 0), ("CANADIAN SYLLABICS SAYISI JI", "Lo", 0), ("<NAME>", "Lo", 0), ("<NAME>", "Lo", 0), ("<NAME>", "Lo", 0), ("<NAME>", "Lo", 0), ("<NAME>", "Lo", 0), ("<NAME>", "Lo", 0), ("<NAME>", "Lo", 0), ("<NAME>", "Lo", 0), ("<NAME>", "Lo", 0), ("<NAME>", "Lo", 0), ("<NAME>", "Lo", 0), ("<NAME>", "Lo", 0), ("<NAME>", "Lo", 0), ("<NAME>", "Lo", 0), ("<NAME>", "Lo", 0), ("<NAME>", "Lo", 0), ("<NAME>", "Lo", 0), ("CAN<NAME>", "Lo", 0), ("<NAME>", "Lo", 0), ("<NAME>", "Lo", 0), ("<NAME>", "Lo", 0), ("<NAME>", "Lo", 0), ("<NAME>", "Lo", 0), ("CAN<NAME>", "Lo", 0), ("CANADIAN SYLLABICS CARRIER LHA", "Lo", 0), ("CANADIAN SYLLABICS CARRIER TLHU", "Lo", 0), ("CANADIAN SYLLABICS CARRIER TLHO", "Lo", 0), ("CANADIAN SYLLABICS CARRIER TLHE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER TLHEE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER TLHI", "Lo", 0), ("CANADIAN SYLLABICS CARRIER TLHA", "Lo", 0), ("CANADIAN SYLLABICS CARRIER TLU", "Lo", 0), ("CANADIAN SYLLABICS CARRIER TLO", "Lo", 0), ("CANADIAN SYLLABICS CARRIER TLE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER TLEE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER TLI", "Lo", 0), ("CANADIAN SYLLABICS CARRIER TLA", "Lo", 0), ("CANADIAN SYLLABICS CARRIER ZU", "Lo", 0), ("CANADIAN SYLLABICS CARRIER ZO", "Lo", 0), ("CANADIAN SYLLABICS CARRIER ZE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER ZEE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER ZI", "Lo", 0), ("CANADIAN SYLLABICS CARRIER ZA", "Lo", 0), ("CANADIAN SYLLABICS CARRIER Z", "Lo", 0), ("CANADIAN SYLLABICS CARRIER INITIAL Z", "Lo", 0), ("CANADIAN SYLLABICS CARRIER DZU", "Lo", 0), ("CANADIAN SYLLABICS CARRIER DZO", "Lo", 0), ("CANADIAN SYLLABICS CARRIER DZE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER DZEE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER DZI", "Lo", 0), ("CANADIAN SYLLABICS CARRIER DZA", "Lo", 0), ("CANADIAN SYLLABICS CARRIER SU", "Lo", 0), ("CANADIAN SYLLABICS CARRIER SO", "Lo", 0), ("CANADIAN SYLLABICS CARRIER SE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER SEE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER SI", "Lo", 0), ("CANADIAN SYLLABICS CARRIER SA", "Lo", 0), ("CANADIAN SYLLABICS CARRIER SHU", "Lo", 0), ("CANADIAN SYLLABICS CARRIER SHO", "Lo", 0), ("CANADIAN SYLLABICS CARRIER SHE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER SHEE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER SHI", "Lo", 0), ("CANADIAN SYLLABICS CARRIER SHA", "Lo", 0), ("CANADIAN SYLLABICS CARRIER SH", "Lo", 0), ("CANADIAN SYLLABICS CARRIER TSU", "Lo", 0), ("CANADIAN SYLLABICS 
CARRIER TSO", "Lo", 0), ("CANADIAN SYLLABICS CARRIER TSE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER TSEE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER TSI", "Lo", 0), ("CANADIAN SYLLABICS CARRIER TSA", "Lo", 0), ("CANADIAN SYLLABICS CARRIER CHU", "Lo", 0), ("CANADIAN SYLLABICS CARRIER CHO", "Lo", 0), ("CANADIAN SYLLABICS CARRIER CHE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER CHEE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER CHI", "Lo", 0), ("CANADIAN SYLLABICS CARRIER CHA", "Lo", 0), ("CANADIAN SYLLABICS CARRIER TTSU", "Lo", 0), ("CANADIAN SYLLABICS CARRIER TTSO", "Lo", 0), ("CANADIAN SYLLABICS CARRIER TTSE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER TTSEE", "Lo", 0), ("CANADIAN SYLLABICS CARRIER TTSI", "Lo", 0), ("CANADIAN SYLLABICS CARRIER TTSA", "Lo", 0), ("CANADIAN SYLLABICS CHI SIGN", "So", 0), ("CANADIAN SYLLABICS FULL STOP", "Po", 0), ("CANADIAN SYLLABICS QAI", "Lo", 0), ("CANADIAN SYLLABICS NGAI", "Lo", 0), ("CANADIAN SYLLABICS NNGI", "Lo", 0), ("CANADIAN SYLLABICS NNGII", "Lo", 0), ("CANADIAN SYLLABICS NNGO", "Lo", 0), ("CANADIAN SYLLABICS NNGOO", "Lo", 0), ("CANADIAN SYLLABICS NNGA", "Lo", 0), ("CANADIAN SYLLABICS NNGAA", "Lo", 0), ("CANADIAN SYLLABICS WOODS-CREE THWEE", "Lo", 0), ("CANADIAN SYLLABICS WOODS-CREE THWI", "Lo", 0), ("CANADIAN SYLLABICS WOODS-CREE THWII", "Lo", 0), ("CANADIAN SYLLABICS WOODS-CREE THWO", "Lo", 0), ("CANADIAN SYLLABICS WOODS-CREE THWOO", "Lo", 0), ("CANADIAN SYLLABICS WOODS-CREE THWA", "Lo", 0), ("CANADIAN SYLLABICS WOODS-CREE THWAA", "Lo", 0), ("CANADIAN SYLLABICS WOODS-CREE FINAL TH", "Lo", 0), ("CANADIAN SYLLABICS BLACKFOOT W", "Lo", 0), )
https://github.com/rem3-1415926/Typst_Thesis_Template
https://raw.githubusercontent.com/rem3-1415926/Typst_Thesis_Template/main/sec/authorship.typ
typst
MIT License
= Declaration of Authorship

*Explanation*

I hereby certify that the thesis I am submitting is entirely my own original work except where otherwise indicated. I am aware of the University's regulations concerning plagiarism, including those regulations concerning disciplinary actions that may result from plagiarism. Any use of the works of any other author, in any form, is properly acknowledged at their point of use.

#v(2cm)

#grid(
  columns: (auto, auto),
  rows: (auto, auto),
  column-gutter: 3em,
  row-gutter: 1em,
  [*Location*], [*Date*],
  [8640 Rapperswil], [#datetime.today().display("[day]. [month repr:long]. [year]")]
)

#v(1cm)

*Signature*

#grid(
  columns: (1fr, 1fr, 1fr),
  rows: 48pt,
  align(bottom + center)[<NAME>],
  align(bottom + center)[<NAME>],
  align(bottom + center)[]
)
https://github.com/frectonz/the-pg-book
https://raw.githubusercontent.com/frectonz/the-pg-book/main/book/163.%20ronco.html.typ
typst
ronco.html

The Ronco Principle

January 2015

No one, VC or angel, has invested in more of the top startups than <NAME>. He knows what happened in every deal in the Valley, half the time because he arranged it.

And yet he's a super nice guy. In fact, nice is not the word. Ronco is good. I know of zero instances in which he has behaved badly. It's hard even to imagine.

When I first came to Silicon Valley I thought "How lucky that someone so powerful is so benevolent." But gradually I realized it wasn't luck. It was by being benevolent that Ronco became so powerful. All the deals he gets to invest in come to him through referrals. Google did. Facebook did. Twitter was a referral from <NAME> himself. And the reason so many people refer deals to him is that he's proven himself to be a good guy.

Good does not mean being a pushover. I would not want to face an angry Ronco. But if Ron's angry at you, it's because you did something wrong. Ron is so old school he's Old Testament. He will smite you in his just wrath, but there's no malice in it.

In almost every domain there are advantages to seeming good. It makes people trust you. But actually being good is an expensive way to seem good. To an amoral person it might seem to be overkill.

In some fields it might be, but apparently not in the startup world. Though plenty of investors are jerks, there is a clear trend among them: the most successful investors are also the most upstanding. [1]

It was not always this way. I would not feel confident saying that about investors twenty years ago.

What changed? The startup world became more transparent and more unpredictable. Both make it harder to seem good without actually being good.

It's obvious why transparency has that effect. When an investor maltreats a founder now, it gets out. Maybe not all the way to the press, but other founders hear about it, and that investor starts to lose deals. [2]

The effect of unpredictability is more subtle. It increases the work of being inconsistent. If you're going to be two-faced, you have to know who you should be nice to and who you can get away with being nasty to. In the startup world, things change so rapidly that you can't tell. The random college kid you talk to today might in a couple years be the CEO of the hottest startup in the Valley. If you can't tell who to be nice to, you have to be nice to everyone. And probably the only people who can manage that are the people who are genuinely good.

In a sufficiently connected and unpredictable world, you can't seem good without being good.

As often happens, Ron discovered how to be the investor of the future by accident. He didn't foresee the future of startup investing, realize it would pay to be upstanding, and force himself to behave that way. It would feel unnatural to him to behave any other way. He was already living in the future.

Fortunately that future is not limited to the startup world. The startup world is more transparent and unpredictable than most, but almost everywhere the trend is in that direction.

Notes

[1] I'm not saying that if you sort investors by benevolence you've also sorted them by returns, but rather that if you do a scatterplot with benevolence on the x axis and returns on the y, you'd see a clear upward trend.

[2] Y Combinator in particular, because it aggregates data from so many startups, has a pretty comprehensive view of investor behavior.

Thanks to <NAME> and <NAME> for reading drafts of this.

Japanese Translation
https://github.com/jpssrocha/templates
https://raw.githubusercontent.com/jpssrocha/templates/main/README.md
markdown
MIT License
# templates

My templates for writing in markup languages (mainly typst nowadays). Feel free to copy them!
https://github.com/WinstonMDP/math
https://raw.githubusercontent.com/WinstonMDP/math/main/exers/9.typ
typst
#import "../cfg.typ": * #show: cfg $ "Prove that" lim_(x -> x') log_a x = log_a x' $ $abs(log_a x' - log_a x) < epsilon <-> abs(log_a x/x') < log_a a^epsilon$ - $0 < a < 1$ $abs(log_a x/x') < log_a a^epsilon <-> a^epsilon < abs(x/x') <-> a^epsilon abs(x') < abs(x)$ - $1 < a$ $abs(log_a x/x') < log_a a^epsilon <-> abs(x/x') < a^epsilon <-> abs(x) < a^epsilon abs(x')$
https://github.com/soul667/typst
https://raw.githubusercontent.com/soul667/typst/main/PPT/MATLAB/touying/docs/docs/start.md
markdown
---
sidebar_position: 2
---

# Getting Started

Before you begin, make sure you have installed the Typst environment. If not, you can use the [Web App](https://typst.app/) or the Typst LSP and Typst Preview plugins for VS Code.

To use Touying, you only need to include the following code in your document:

```typst
#import "@preview/touying:0.2.1": *

#let (init, slide, slides) = utils.methods(s)
#show: init

#show: slides

= Title

== First Slide

Hello, Touying!

#pause

Hello, Typst!
```

![image](https://github.com/touying-typ/touying/assets/34951714/6f15b500-b825-4db1-88ff-34212f43723e)

It's simple. Congratulations on creating your first Touying slide! 🎉

## More Complex Examples

In fact, Touying provides various styles for writing slides. For example, the above example uses first-level and second-level titles to create new slides. However, you can also use the `#slide[..]` format to access more powerful features provided by Touying.

```typst
#import "@preview/touying:0.2.1": *

#let s = (s.methods.enable-transparent-cover)(self: s)
#let (init, slide) = utils.methods(s)
#show: init

// simple animations
#slide[
  a simple #pause *dynamic* #pause slide. #meanwhile meanwhile #pause with pause.
][
  second #pause pause.
]

// complex animations
#slide(setting: body => {
  set text(fill: blue)
  body
}, repeat: 3, self => [
  #let (uncover, only, alternatives) = utils.methods(self)

  in subslide #self.subslide

  test #uncover("2-")[uncover] function

  test #only("2-")[only] function

  #pause

  and paused text.
])

// math equation animations
#slide[
  == Touying Equation

  #touying-equation(`
    f(x) &= pause x^2 + 2x + 1 \
         &= pause (x + 1)^2 \
  `)

  #meanwhile

  Touying equation is very simple.
]

// multiple pages for one slide
#slide[
  == Multiple Pages for One Slide

  #lorem(200)
]

// appendix by freezing last-slide-number
#let s = (s.methods.appendix)(self: s)
#let (slide,) = utils.methods(s)

#slide[
  == Appendix
]
```

![image](https://github.com/touying-typ/touying/assets/34951714/192b13f9-e3fb-4327-864b-fd9084a8ca24)

In addition, Touying provides many built-in themes to easily create beautiful slides. Basically, you just need to add a line at the top of your document:

```
#let s = themes.metropolis.register(s, aspect-ratio: "16-9")
```

This will allow you to use the Metropolis theme. For more detailed tutorials, you can refer to the following chapters.
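For instance, a minimal Metropolis deck could look like the sketch below. It simply combines the registration line with the first example on this page; the exact set of methods a theme exposes (e.g. a dedicated title slide) is not covered here, so treat it as an outline rather than canonical usage.

```typst
#import "@preview/touying:0.2.1": *

// Register the Metropolis theme on the base state `s`.
#let s = themes.metropolis.register(s, aspect-ratio: "16-9")
#let (init, slides) = utils.methods(s)
#show: init
#show: slides

= Title

== First Slide

Hello, Metropolis!
```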
https://github.com/giZoes/justsit-thesis-typst-template
https://raw.githubusercontent.com/giZoes/justsit-thesis-typst-template/main/resources/utils/custom-heading.typ
typst
MIT License
// Display a heading
#let heading-display(it) = {
  if it != none {
    if it.has("numbering") and it.numbering != none {
      numbering(it.numbering, ..counter(heading).at(it.location()))
      [ ]
    }
    it.body
  } else {
    ""
  }
}

// Get the currently active heading; the `prev` argument marks that headings
// from previous pages take priority
#let active-heading(level: 1, prev: true, loc) = {
  // headings on previous pages
  let prev-headings = query(selector(heading.where(level: level)).before(loc), loc)
  // headings on the current page
  let cur-headings = query(selector(heading.where(level: level)).after(loc), loc)
    .filter(it => it.location().page() == loc.page())
  if prev-headings.len() == 0 and cur-headings.len() == 0 {
    return none
  } else {
    if prev {
      if prev-headings.len() != 0 {
        return prev-headings.last()
      } else {
        return cur-headings.first()
      }
    } else {
      if cur-headings.len() != 0 {
        return cur-headings.first()
      } else {
        return prev-headings.last()
      }
    }
  }
}

// Get the first heading on the current page
#let current-heading(level: 1, loc) = {
  // headings on the current page
  let cur-headings = query(selector(heading.where(level: level)).after(loc), loc)
    .filter(it => it.location().page() == loc.page())
  if cur-headings.len() != 0 {
    return cur-headings.first()
  } else {
    return none
  }
}
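// A typical use (hedged sketch, written against the same pre-0.11
// locate/query API this file already uses): show the active chapter
// title in a running page header.
//
//   #set page(header: locate(loc => {
//     let h = active-heading(level: 1, loc)
//     if h != none { emph(heading-display(h)) }
//   }))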
https://github.com/coco33920/.files
https://raw.githubusercontent.com/coco33920/.files/mistress/typst_templates/moderncv.typst/example.typ
typst
#import "moderncv.typ": * #show: project.with( title: "Master Frobnicator", author: "<NAME>", github: "johndoe1337", phone: "+01 234 56 7890", email: "<EMAIL>" ) = Education #cventry( start: (month: "October", year: 2100), end: (month: "October", year: 2101), role: [Frobnication Engineering], place: "University of Central Mars City, M.Sc." )[ #v(1em) _with a grade of 110/110 with honors_ ] #cventry( start: (month: "October", year: 2099), end: (month: "October", year: 2100), role: [Frobnication Science and Engineering], place: "University of Central Mars City, B.Sc." )[ #v(1em) _with a grade of 110/110 with honors_ ] = Work Experience #cventry( start: (month: "December", year: 2101), end: (month: "", year: "Present"), role: [Junior Frobnication Engineer], place: "WeDontWork Inc.", lorem(40) ) = Side Projects #cventry( start: (month: "December", year: 2099), end: (month: "", year: "Present"), role: [Quux Master], place: "MasterQuuxers.mars", lorem(40) ) #cventry( start: (month: "March", year: 2098), end: (month: "August", year: 2099), role: [Full-bar frobnicator], place: "M.O.O.N. Inc", lorem(40) ) = Languages #cvlanguage( language: [Martian], description: [Mother tongue] ) #cvlanguage( language: [Klingon], description: [C64 level], certificate: [Earth Klingon Certificate -- Certificate in Advanced Klingon (CAK64)] ) #pagebreak() = Technical Skills #cvcol[ ==== Programming Languages #grid( columns: (1fr, 1fr, 1fr), row-gutter: 0.5em, [- Java], [- C], [- C++], [- Python], [- Martian], [- English], [- ChatGPT], [- Ancient Greek], [- Legalese] ) ] #cvcol[ ==== Environments - Earth (development and server management) - Wind (development) - Fire (development) ] #cvcol[ ==== Misc Various university-related and personal projects, some available on my GitHub profile. ] = Other #cvcol[ - Best Pizza Cook Central Mars City 2091 Championship Winner - Coffee Conossieur - If You Are Reading This You Are Awesome ] #v(1fr) #align(center)[_(Last updated: February 2102)_]
https://github.com/Lucas-Wye/tech-note
https://raw.githubusercontent.com/Lucas-Wye/tech-note/main/src/Linux.typ
typst
= Linux

- Linux is a free and freely redistributable Unix-like operating system: a POSIX- and Unix-based multi-user, multi-tasking OS with support for multi-threading and multiple CPUs. It runs the major Unix tools, applications, and network protocols, supports both 32-bit and 64-bit hardware, and inherits Unix's network-centric design philosophy, making it a stable multi-user network operating system.

== Control Key
```
[CTRL]U cancel line
[CTRL]C cancel operation
[CTRL]S pause display
[CTRL]Q restart display
[CTRL]A move cursor to start of line
[CTRL]E move cursor to end of line
[CTRL]K delete to end of line
[CTRL]V treat following control character as normal character
[Option]arrow keys move by word
```

== User
```
sudo adduser USERNAME

# grant root (sudo) privileges
sudo usermod -g sudo USERNAME

# change password
passwd

# delete user
sudo userdel -r USERNAME
```

== Information
```
who
who am i
whoami
env
alias
man
```

== File Maintenance
```
# r = 4, w = 2, x = 1
chmod
umask # set in startup files for the account to masks out permissions, umask numbers added to desired permission number equals 7.
chgrp # change the group of the file
chown # change the owner of a file

# check file sizes in the current directory
# (1) list the size of each file, together with the total for the directory
ls -lht
# (2) list the total size of a folder
du -sh PATH

# delete empty lines in a file
cat YOUR_FILE | sed -e '/^$/d'

# conditions
-r return true (1) if it exists and is readable, otherwise return false (0)
-w true if it exists and is writable
-x true if it exists and is executable
-f true if it exists and is a regular file (or for csh, exists and is not a directory)
-d true if it exists and is a directory
-e true if the file exists
-o true if the user owns the file
-z true if the file has zero length (empty)

# exFAT file system support
sudo apt install exfat-utils

# create archives
tar -cvf YOUR_FILE.tar YOUR_FILE # archive only, no compression
tar -zcvf YOUR_FILE.tar.gz YOUR_FILE # gzip compression
tar -jcvf YOUR_FILE.tar.bz2 YOUR_FILE # bzip2 compression
# list contents
tar -tvf YOUR_FILE.tar
tar -ztvf YOUR_FILE.tar.gz
tar -jtvf YOUR_FILE.tar.bz2
# extract
tar -xvf YOUR_FILE.tar
tar -zxvf YOUR_FILE.tar.gz
tar -jxvf YOUR_FILE.tar.bz2
```

== find and search
```
# find files modified within the last 24 hours
find ./ -mtime 0
# find .c files in the current directory and its subdirectories
find . -name "*.c"
# search file contents recursively under the current directory
grep -nHR "STRING" .
# make grep skip binary files
grep --binary-files=without-match
```

== process
```
ps
ps -ef
kill -9 PID
```

== Bash startup order
```
# a login shell reads, in order:
/etc/profile
~/.bash_profile
~/.bash_login
~/.profile

# a non-login shell reads:
/etc/bashrc
~/.bashrc
```

== History
```
history
!598 # run command number 598
sudo !! # run the previous command as root
history | awk '{a[$2]++}END{for(i in a){print a[i] " " i}}' | sort -rn | head # usage statistics
```

== CPU
```
# total cores = number of physical CPUs x cores per physical CPU
# total logical CPUs = physical CPUs x cores per CPU x hyper-threads
# number of physical CPUs
cat /proc/cpuinfo| grep "physical id"| sort| uniq| wc -l
# cores per physical CPU
cat /proc/cpuinfo| grep "cpu cores"| uniq
# number of logical CPUs
cat /proc/cpuinfo| grep "processor"| wc -l
# CPU model
cat /proc/cpuinfo | grep name | cut -f2 -d: | uniq -c
# CPU load averages over the last 1, 5, and 15 minutes
uptime
```

== Memory
```
cat /proc/meminfo
free
```

== Disk
```sh
# disk information
fdisk -l
# disk I/O performance
iostat -x 10
# mount a disk to a directory
sudo mount /dev/sda YOUR_PATH
# show mounted filesystems
df -h
# unmount
sudo umount YOUR_PATH
```

== Fedora: fixing Firefox video playback issues
```
sudo dnf install ffmpeg ffmpeg-libs --allowerasing
```

== Auto-mount Windows partitions at boot
```
show partition info
sudo fdisk -l
show filesystem types
sudo blkid
```
Output:
```
Device         Boot     Start       End   Sectors  Size Id Type
/dev/nvme0n1p1 *         2048   1187839   1185792  579M  7 HPFS/NTFS/exFAT
/dev/nvme0n1p2       1187840 210903039 209715200  100G  7 HPFS/NTFS/exFAT
/dev/nvme0n1p3     210903040 420618239 209715200  100G  7 HPFS/NTFS/exFAT
/dev/nvme0n1p4     420620286 500117503  79497218 37.9G  5 Extended
/dev/nvme0n1p5     420620288 421595135    974848  476M 83 Linux
/dev/nvme0n1p6     421597184 450891775  29294592   14G 83 Linux
/dev/nvme0n1p7     450893824 500117503  49223680 23.5G 83 Linux
```
```
edit the config file
sudo vim /etc/fstab
# for Windows 10 C:/
/dev/nvme0n1p2 /home/usrname/Windows_Disks/C ntfs defaults 0 0

mount the newly added partitions
sudo mount -a
```

== Fonts
```sh
# install Windows fonts
sudo cp [Windows-Fonts] /usr/share/fonts/Windows-Fonts
sudo mkfontscale
sudo mkfontdir
fc-cache

# list Chinese fonts
fc-list :lang=zh-cn
```

== I/O Redirection and Piping
```
# stdin: 0, stdout: 1, stderr: 2
| pipe
> redirect stdout to file
>> append stdout to file (no overwrite)
< redirect stdin from file
tee duplicate stdout
>/dev/null discard stdout
1>FILE_1 2>FILE_2 stdout to FILE_1, stderr to FILE_2
>FILE 2>&1 redirect stdout and stderr to FILE
2>&1 | tee send stderr and stdout to a file while also printing to the screen
```

== Boot into the command line
```sh
sudo systemctl set-default multi-user.target # boot to the command line
sudo systemctl set-default graph... # boot to the GUI
```

== Switch from the command line back to the GUI
```
startx
sudo service gdm3 restart
```

== Ubuntu: put window buttons on the left
```sh
gsettings set org.gnome.desktop.wm.preferences button-layout 'close,minimize,maximize:'
```

== Generate a strong password
```sh
openssl rand -base64 NUMBER
```

== terminal output to clip
- Windows: `clip`
- MacOS: `pbcopy`, `pbpaste`
- Linux: `xsel`

== ssh
```sh
# install the SSH (Secure Shell) server for remote administration
sudo apt install openssh-server

# start the ssh service
/etc/init.d/ssh start
sudo service ssh start
# check that it is running
ps -e | grep ssh

## remote login over SSH
ssh username@IP_ADDR

# copy a file/folder from the remote machine to the local one (scp)
scp -r username@IP_ADDR:/home/username/remotefile.txt .

# set up public-key login
# (1) copy the local public key
cat ~/.ssh/id_rsa.pub
# (2) append the copied key on the remote machine
vim ~/.ssh/authorized_keys
chmod 600 ~/.ssh/authorized_keys
# (3) allow public-key authentication on the remote machine
# (note: `sudo echo ... >> file` does not work, because the redirection is
# performed by the unprivileged shell; use tee instead)
echo "PubkeyAuthentication yes" | sudo tee -a /etc/ssh/sshd_config
# (4) restart the ssh service
sudo systemctl restart sshd.service

# Or
ssh-copy-id -i Public_Key_File Remote_Server

# .ssh/config example
Host {HOSTNAME}
    HostName {IP}
    User {Username}
ssh HOSTNAME

# SSH for data transfer
ssh -qTfnN -D PORT SERVER
```

== More
#link("https://github.com/QSCTech/2020-Autumn-Round-Two/tree/master/problem-set-1")[A good introduction to Linux]
https://github.com/Mc-Zen/quill
https://raw.githubusercontent.com/Mc-Zen/quill/main/src/decorations.typ
typst
MIT License
#import "gates.typ": * // align: "left" (for rstick) or "right" (for lstick) // brace: auto, none, "{", "}", "|", "[", ... #let lrstick(content, n, align, brace, label, pad: 0pt, x: auto, y: auto) = gate( content, x: x, y: y, draw-function: draw-functions.draw-lrstick, size-hint: layout.lrstick-size-hint, box: false, floating: true, multi: if n == 1 { none } else { ( target: none, num-qubits: n, wire-count: 0, label: label, size-all-wires: if n > 1 { none } else { false } )}, data: ( brace: brace, align: align, pad: pad ), label: label ) /// Basic command for labelling a wire at the start. /// - content (content): Label to display, e.g., `$|0〉$`. /// - n (content): How many wires the `lstick` should span. /// - brace (auto, none, str): If `brace` is `auto`, then a default `{` brace /// is shown only if `n > 1`. A brace is always shown when /// explicitly given, e.g., `"}"`, `"["` or `"|"`. No brace is shown for /// `brace: none` /// - pad (length): Adds a padding between the label and the connected wire to the right. /// - label (array, str, content, dictionary): One or more labels to add to the gate. /// See @@gate(). #let lstick( content, n: 1, brace: auto, pad: 0pt, label: none, x: auto, y: auto ) = lrstick(content, n, right, brace, label, pad: pad, x: x, y: y) /// Basic command for labelling a wire at the end. /// - content (content): Label to display, e.g., `$|0〉$`. /// - n (content): How many wires the `rstick` should span. /// - pad (length): Adds a padding between the label and the connected wire to the left. /// - brace (auto, none, str): If `brace` is `auto`, then a default `}` brace /// is shown only if `n > 1`. A brace is always shown when /// explicitly given, e.g., `"}"`, `"["` or `"|"`. No brace is shown for /// `brace: none`. /// - label (array, str, content, dictionary): One or more labels to add to the gate. /// See @@gate(). #let rstick( content, n: 1, brace: auto, pad: 0pt, label: none, x: auto, y: auto ) = lrstick(content, n, left, brace, label, pad: pad, x: x, y: y) /// Create a midstick, i.e., a mid-circuit text. /// - content (content): Label to display, e.g., `$|0〉$`. /// - label (array, str, content, dictionary): One or more labels to add to the gate. #let midstick( content, n: 1, fill: none, label: none, x: auto, y: auto ) = { if n == 1 { gate(content, draw-function: draw-functions.draw-unboxed-gate, label: label, fill: fill, x: x, y: y) } else { mqgate(content, n: n, draw-function: draw-functions.draw-boxed-multigate, label: label, fill: fill, x: x, y: y, stroke: none) } } /// Creates a symbol similar to `\qwbundle` on `quantikz`. Annotates a wire to /// be a bundle of quantum or classical wires. /// - label (int, content): #let nwire(label, x: auto, y: auto) = gate([#label], draw-function: draw-functions.draw-nwire, box: false, x: x, y: y) /// Set current wire mode (0: none, 1 wire: quantum, 2 wires: classical, more /// are possible) and optionally the stroke style. /// /// The wire style is reset for each row. /// /// - wire-count (int): Number of wires to display. /// - stroke (auto, none, stroke): When given, the stroke is applied to the wire. /// Otherwise the current stroke is kept. /// - wire-distance (length): Distance between wires. #let setwire(wire-count, stroke: auto, wire-distance: auto) = ( qc-instr: "setwire", wire-count: wire-count, stroke: stroke, wire-distance: wire-distance ) /// Highlight a group of circuit elements by drawing a rectangular box around /// them. /// /// - wires (int): Number of wires to include. 
/// - steps (int): Number of columns to include. /// - x (auto, int): The starting column of the gategroup. /// - y (auto, int): The starting wire of the gategroup. /// - z (str): The gategroup can be placed `"below"` or `"above"` the circuit. /// - padding (length, dictionary): Padding of rectangle. May be one length /// for all sides or a dictionary with the keys `left`, `right`, `top`, /// `bottom` and `default`. Not all keys need to be specified. The value /// for `default` is used for the omitted sides or `0pt` if no `default` /// is given. /// - stroke (stroke): Stroke for rectangle. /// - fill (color): Fill color for rectangle. /// - radius (length, dictionary): Corner radius for rectangle. /// - label (array, str, content, dictionary): One or more labels to add to the /// group. See @@gate(). #let gategroup( wires, steps, x: auto, y: auto, z: "below", padding: 0pt, stroke: .7pt, fill: none, radius: 0pt, label: none ) = ( qc-instr: "gategroup", wires: wires, steps: steps, x: x, y: y, z: z, padding: process-args.process-padding-arg(padding), style: (fill: fill, stroke: stroke, radius: radius), labels: process-args.process-label-arg(label, default-pos: top) ) /// Slice the circuit vertically, showing a separation line between columns. /// /// - n (int): Number of wires to slice. /// - x (auto, int): The starting column of the slice. /// - y (auto, int): The starting wire of the slice. /// - z (str): The slice can be placed `"below"` or `"above"` the circuit. /// - stroke (stroke): Line style for the slice. /// - label (array, str, content, dictionary): One or more labels to add to the /// slice. See @@gate(). #let slice( n: 0, x: auto, y: auto, z: "below", stroke: (paint: red, thickness: .7pt, dash: "dashed"), label: none ) = ( qc-instr: "slice", wires: n, x: x, y: y, z: z, style: (stroke: stroke), labels: process-args.process-label-arg(label, default-pos: top) ) /// Lower-level interface to the cell coordinates to create an arbitrary /// annotatation by passing a custom function. /// /// This function is passed the coordinates of the specified cell rows /// and columns. /// /// - columns (int, array): Column indices for which to obtain coordinates. /// - rows (int, array): Row indices for which to obtain coordinates. /// - callback (function): Function to call with the obtained coordinates. The /// signature should be with signature `(col-coords, row-coords) => {}`. /// This function is expected to display the content to draw in absolute /// coordinates within the circuit. /// - z (str): The annotation can be placed `"below"` or `"above"` the circuit. #let annotate( columns, rows, callback, z: "below", ) = ( qc-instr: "annotate", rows: rows, x: none, y: none, z: z, columns: columns, callback: callback )
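// A rough usage sketch (hedged: assumes the package's quantum-circuit
// entry point and its gate/ctrl/targ helpers; quill separates rows
// with [\ ]):
//
//   #quantum-circuit(
//     lstick($|0〉$), gate($H$), ctrl(1), rstick($psi$, n: 2, brace: "}"), [\ ],
//     lstick($|0〉$), 1, targ(), 1
//   )
//
// lstick/rstick only label (and optionally brace) the wires; gategroup()
// and slice() from this file annotate regions of an existing circuit.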
https://github.com/Mufanc/hnuslides-typst
https://raw.githubusercontent.com/Mufanc/hnuslides-typst/master/main.typ
typst
#import "/configs.typ" #import "/utils/colors.typ" // - 全局样式 - // // 页面设置 #set page(width: configs.slide.width, height: configs.slide.height) #set page(margin: configs.slide.margin) // 页面背景 #set page(background: box( width: configs.slide.width, height: configs.slide.height, fill: pattern(image("/assets/images/bg-common.png")) )) // 默认字体 #set text(size: 12pt, font: ("Noto Sans SC", "Open Sans", "Noto Color Emoji"), fill: colors.secondary) // 标题样式 #show heading.where(level: 1): set text(size: 1.5em, weight: 300) #show heading.where(level: 2): set text(size: 1.2em, weight: 500) // 段落 #set par(leading: 1.2em, justify: true) // - 导入各种模板 - // #import "templates/cover.typ": cover #import "templates/catalog.typ": catalog #import "templates/title.typ": title #import "templates/slides.typ" #import "templates/end.typ": end // - 幻灯片内容 - // // 封面 #cover("移植自「古风湖大」PPT 模板", "主题汇报", "汇报人:小湖") // 目录 #catalog() // 正文 #title("麓山巍巍", description: "你可以在这里添加一行描述信息") #slides.leading("带样式的大标题")[ - 这是一张带有大标题的幻灯片 - 这是一张带有大标题的幻灯片 == 小标题 #v(0.5em) #lorem(100) ] #slides.normal[ = 也可以用普通标题 #v(1em) - 这是一张普通标题幻灯片 - 这是一张普通标题幻灯片 == 二级标题 === 三级标题 #v(0.5em) #lorem(80) ] #title("湘水泱泱", description: "你可以在这里添加一行描述信息") #title("宏开学府", description: "你可以在这里添加一行描述信息") #title("济济沧沧", description: "你可以在这里添加一行描述信息") // 尾页 #end
https://github.com/barddust/Kuafu
https://raw.githubusercontent.com/barddust/Kuafu/main/src/BeforeMathematics/proof.typ
typst
#import "/mathenv.typ": * = Proofs Here introduces three methods for mathematical proofs: direct proof, proof by contradiction, and mathematical induction. == Direct Proof The basic form of proof is $ A => B $ , which proves the proposition from premises directly. This is the most common method of proving, which can also produce some new useful conclusion. Although, directly proving is also most difficult. It is like you drive to a cross without any indicators there, you may be confused which path is the correct one. In certain cases, it is obvious to get the result, while the most cases, you have to *guess* which one should be the next step. #example[ For a simply proposition $A: x = pi => sin(x/2) + 1 = 2$, the procedure is that $x=pi$ implies $x/2=pi/2$, which implies sin(x/2), furthermore implying $sin(x/2) + 1$. ] == Proof by Contradiction Given a proposition $A: p => q$, we want to show it is always true. It could be very hard to prove it directly, so think about the opposite, which is to prove its negation being false, even for some special case. By @impeq, $p => q$ equals to $not p or q$. Suppose its negation $not A$, i.e., $not (not p or q)$. By De Morgan's laws, we have $ not A: p and not q $ Now that it is clear. To prove a proposition by contradiction, firstly regard the negation of conclusion as a premise. Then reasoning in logic with all premises, including the negation of conclusion. The goal is to *produce a contradiction*, even for some speical case, depending on the quantifier of the conclusion of the proposition. Here comes a well-know proof by contradiction. #proposition[ There is infinite number of prime numbers. ] #proof[ For sake of contradiction, suppose that there is finite number of prime numbers, say $p_1, p_2, dots.c, p_n$. We construct a new number: $ m = p_1p_2 dots.c p_n + 1 $ By assumption, $m$ is not a prime number, and hence there is a prime, say $p_i$, such that $m$ is divisible by $p_i$, i.e., for some integer $q$, we have $ m = p_1p_2 dots.c p_n + 1 = q p_i $ therefore, $ q = p_1p_2 dots.c p_(i-1)p_(i+1) dots.c p_n + 1/p_i $ This is impossible, since $q$ is an integer, while the right-hand side is the sum of an integer and an non-integer. Here comes a contradiction. Thus, the original proposition "There is infinite number of prime numbers" is true. ] == Mathematical Induction Given a proposition about natural numbers, denoted as $P(n)$, we want to prove $P(n)$ is true for all $n in NN$. The mathematical induction consists of two parts: + $P(1)$ is true. In other words, $P(n)$ is true for the first index. + $P(i) => P(i+1)$ for $i in NN$. For any natural numbers, if this proposition is true for $i$, which must be also true for $i+1$. Here gives a example. #proposition[ For any $n in NN$, $ P: 1^2 + 2^2 + dots.c + n^2 = 1/6 n (n+1) (2n+1) $ ] #proof[ Firstly, we need to show $P(1)$ is true. It is obvious that $1^2 = 1/6 times 1 times 2 times 3 = 1$. Then we need to prove the continuity of $P(n)$. Suppose that $P(k)$ is true, let us see what happen if $n=k+1$. $ 1^2 + 2^2 + k^2 + (k+1)^2 &= 1/6 k (k+1) (2k+1) + (k^2 + 2k + 1)\ &= (2k^3 +3k^2 + k + 6k^2 + 12 k + 6)/6\ &= (2k^3 + 9k^2 + 13 k + 6)/6\ &= 1/6 (k+1)(2k^2+7k+6)\ &= 1/6 (k+1)(k+2)(2k+3)\ &= 1/6 (k+1)(k+2)(2(k+1)+1) $ That is what we want. ]
https://github.com/xsro/xsro.github.io
https://raw.githubusercontent.com/xsro/xsro.github.io/zola/typst/nlct/math/diffgeo.typ
typst
#import "common.typ":theorem,definition = Differentiable Manifold == Structure of Manifolds #definition[ Let $(M,cal(T))$ be a second coutable, $T_2$(Hausdorff) topological space. $M$ is called an $n$ dimensional topological manifold if there exists a subset $cal(A)={A_lambda | lambda in Lambda} subset cal(T)$, such that + $union.big_(lambda in Lambda) A_lambda supset M$; + For each $U in cal(A)$ there exists a homeomorphism $phi:U arrow phi(U) subset RR^n$, which is called a coordinate chart, denoted by $(U,phi)$. + Moreover, if for two coordinate charts: $(U,phi)$ and $(V,Psi)$, if $U sect V$ is not empty, then both $Psi circle.small phi^(-1): phi(U sect V) arrow Psi(U sect V)$ and $phi circle.small Psi^(-1):Psi(U sect V) arrow phi(U sect V)$ are $C^r(C^infinity,C^omega)$. such two coordinate charts are said to be consistent. + If a coordinate chart, $W$, is consistent with all charts in $cal(A)$, then $W in cal(A)$. Then $(M,cal(T))$ is called a $C^r$($C^infinity$, analytic, respectively) differentiable manifold. ] #definition[ Let $M$,$N$ be two $C^r$ manifolds with dimensions $m$,$n$ respectively. $F:M arrow N$ is called a $C^r$ mapping, if for each $x in M$ and $y=F(x) in N$ there are coordinate charts $(U,phi)$ about $x$ and $(V,psi)$ about y, such that $ tilde(F)=psi circle.small F circle.small phi^(-1) $ ] == Fiber Bundle == Vector Field == One Parameter Group == Lie Algebra of Vector Fields == Co-tangent Space == Lie Derivatives == Frobenius’ Theory == Lie Series, Chow’s Theorem == Tensor Field == Riemannian Geometry == Symplectic Geometry
https://github.com/typst/packages
https://raw.githubusercontent.com/typst/packages/main/packages/preview/fletcher/0.4.0/README.md
markdown
Apache License 2.0
# Fletcher

[![Manual](https://img.shields.io/badge/docs-manual.pdf-green)](https://github.com/Jollywatt/typst-fletcher/raw/master/docs/manual.pdf) ![Version](https://img.shields.io/badge/dynamic/toml?url=https%3A%2F%2Fgithub.com%2FJollywatt%2Farrow-diagrams%2Fraw%2Fmaster%2Ftypst.toml&query=package.version&label=version) [![Repo](https://img.shields.io/badge/GitHub-repo-blue)](https://github.com/Jollywatt/typst-fletcher)

_**Fletcher** (noun) a maker of arrows_

A [Typst](https://typst.app/) package for drawing diagrams with arrows, built on top of [CeTZ](https://github.com/johannes-wolf/cetz).

<picture> <source media="(prefers-color-scheme: dark)" srcset="https://github.com/Jollywatt/typst-fletcher/raw/master/docs/examples/example-2.svg"> <img alt="logo" width="600" src="https://github.com/Jollywatt/typst-fletcher/raw/master/docs/examples/example-1.svg"> </picture>

```typ
#import "@preview/fletcher:0.4.0" as fletcher: node, edge

#fletcher.diagram(cell-size: 15mm, $
	G edge(f, ->) edge("d", pi, ->>) & im(f) \
	G slash ker(f) edge("ur", tilde(f), "hook-->")
$)

#fletcher.diagram(
	node-fill: rgb("aafa"),
	node-outset: 2pt,
	axes: (ltr, btt),

	node((0,0), `typst`),
	node((1,0), "A"),
	node((2.5,0), "B", stroke: blue + 2pt),
	node((2,1), "C", extrude: (+1, -1)),

	for i in range(3) {
		edge((0,0), (1,0), bend: (i - 1)*25deg)
	},

	edge((1,0), (2,1), "..}>", corner: right),
	edge((1,0), (2.5,0), "-||-|>", bend: 0deg),
)
```

## Todo

- [x] Mathematical arrow styles
- [x] Also allow `&`-delimited equations for specifying nodes
- [ ] Support CeTZ arrowheads
- [ ] Support arbitrary node shapes drawn with CeTZ
- [ ] Allow referring to node coordinates by their content?
- [ ] Support loops connecting a node to itself
- [x] More ergonomic syntax to avoid repeating coordinates?

## Change log

### 0.4.0

- Add ability to specify diagrams in math-mode, using `&` to separate nodes.
- Allow implicit and relative edge coordinates, e.g., `edge("d")` becomes `edge(prev-node, (0, 1))`.
- Add ability to place marks anywhere along an edge. Shorthands now accept an optional middle mark, for example `|->-|` and `hook-/->>`.
- Add "hanging tail" correction to marks on curved edges. Marks now rotate a bit to fit more comfortably along tightly curving arcs.
- Add more arrowheads for the sake of it: `}>`, `<{`, `/`, `\`, `x`, `X`, `*` (solid dot), `@` (solid circle).
- Add `axes` option to `diagram()` to control the direction of each axis in the diagram's coordinate system.
- Add `width`, `height` and `radius` options to `node()` for explicit control over size.
- Add `corner-radius` option to `node()`.
- Add `stroke` option to `edge()` replacing `thickness` and `paint` options.
- Add `edge-stroke` option to `diagram()` replacing `edge-thickness`.

### 0.3.0

- Make round-style arrow heads better approximate the default math font.
- Add solid arrow heads with shorthand `<|-`, `-|>` and double-bar `||-`, `-||`.
- Add an `extrude` option to `node()` which duplicates and extrudes the node's stroke, enabling double stroke effects.

### 0.2.0

- Experimental support for customising arrowheads.
- Add right-angled edges with `edge(..., corner: left/right)`.
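## Quick example

A minimal sketch (ours, not from the upstream README) showing the mid-mark shorthands introduced in 0.4.0:

```typ
#import "@preview/fletcher:0.4.0" as fletcher: node, edge

#fletcher.diagram(
  node((0,0), $A$),
  node((1,0), $B$),
  // bar tail, an arrow mark at the midpoint, bar head
  edge((0,0), (1,0), "|->-|"),
)
```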
https://github.com/typst/packages
https://raw.githubusercontent.com/typst/packages/main/packages/preview/mantys/0.1.1/src/theme.typ
typst
Apache License 2.0
#let page = ( paper: "a4", margin: auto ) #let fonts = ( serif: ("Linux Libertine", "Liberation Serif"), sans: ("Liberation Sans", "Helvetica Neue", "Helvetica"), mono: ("Liberation Mono"), text: ("Linux Libertine", "Liberation Serif"), headings: ("Liberation Sans", "Helvetica Neue", "Helvetica"), code: ("Liberation Mono") ) #let font-sizes = ( text: 12pt, headings: 12pt, // Used as a base size, scaled by heading level code: 9pt ) #let colors = ( primary: eastern, // rgb(31, 158, 173), secondary: teal, // rgb(18, 120, 133), argument: navy, // rgb(0, 29, 87), option: rgb(214, 182, 93), value: rgb(181, 2, 86), command: blue, // rgb(75, 105, 197), comment: gray, // rgb(128, 128, 128), module: rgb("#8c3fb2"), text: rgb(35, 31, 32), muted: luma(210), info: rgb(23, 162, 184), warning: rgb(255, 193, 7), error: rgb(220, 53, 69), success: rgb(40, 167, 69), // Datatypes taken from typst.app dtypes: ( length: rgb(230, 218, 255), integer: rgb(230, 218, 255), float: rgb(230, 218, 255), fraction: rgb(230, 218, 255), ratio: rgb(230, 218, 255), relative: rgb(230, 218, 255), "relative length": rgb(230, 218, 255), angle: rgb(230, 218, 255), "none": rgb(255, 203, 195), "auto": rgb(255, 203, 195), "any": rgb(255, 203, 195), "regular expression": rgb(239, 240, 243), dictionary: rgb(239, 240, 243), array: rgb(239, 240, 243), stroke: rgb(239, 240, 243), location: rgb(239, 240, 243), alignment: rgb(239, 240, 243), "2d alignment": rgb(239, 240, 243), boolean: rgb(255, 236, 193), content: rgb(166, 235, 229), string: rgb(209, 255, 226), function: rgb(249, 223, 255), label: rgb(167, 234, 255), color: gradient.linear(..color.map.spectral, angle:180deg), gradient: gradient.linear(..color.map.spectral, angle:180deg), // color: ( // rgb(133, 221, 244), // rgb(170, 251, 198), // rgb(214, 247, 160), // rgb(255, 243, 124), // rgb(255, 187, 147) // ) ) )
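/* Usage sketch (ours, not part of the package): consuming these theme values
   from another file:
#import "theme.typ"
#set text(font: theme.fonts.text, size: theme.font-sizes.text, fill: theme.colors.text)
#show heading: set text(font: theme.fonts.headings)
*/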
https://github.com/edgarogh/f4f1eb
https://raw.githubusercontent.com/edgarogh/f4f1eb/main/template.typ
typst
#let title_size = 44pt #let project( background: rgb("f4f1eb"), title: "", from_details: none, to_details: none, margin: 2.1cm, vertical_center_level: 2, body ) = { set page(fill: background, margin: margin) set text(font: ("HK Grotesk", "Hanken Grotesk")) let body = [ #set text(size: 11pt, weight: "medium") #show par: set block(spacing: 2em) #body ] let header = { grid( columns: (1fr, auto), [ #set text(size: title_size, weight: "bold") #set par(leading: 0.4em) #title ], align(end, box( inset: (top: 1em), [ #set text(size: 10.2pt, fill: rgb("4d4d4d")) #from_details ] )), ) v(title_size) text(size: 9.2pt, to_details) v(title_size) } layout(size => style(styles => [ #let header_sz = measure( block(width: size.width, header), styles, ); #let body_sz = measure( block(width: size.width, body), styles, ) #let ratio = (header_sz.height + body_sz.height) / size.height #let overflowing = ratio > 1 #if overflowing or vertical_center_level == none { header body } else { // If no overflow of the first page, we do a bit of centering magic for style grid( rows: (auto, 1fr), header, box([ #v(1fr * ratio) #body #v(vertical_center_level * 1fr) ]), ) } ])) }
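/* Usage sketch (ours; all values are placeholders), following the parameters
   defined above:
#import "template.typ": project
#show: project.with(
  title: [A short letter],
  from_details: [Sender Name \ 1 Example Street],
  to_details: [Recipient Name \ 2 Sample Road],
)
#lorem(50)
*/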
https://github.com/typst/packages
https://raw.githubusercontent.com/typst/packages/main/packages/preview/optimal-ovgu-thesis/0.1.0/disclaimer.typ
typst
Apache License 2.0
#import "components.typ": sans-font, variable-pagebreak #let oot-disclaimer( title: "", international-title: "", author: none, city: " ", is-doublesided: none, lang: "en", ) = { let heading = "Statement of authorship of the student" if (lang == "de") { heading = "Selbstständigkeitserklärung" } text(font: sans-font, size: 2em, weight: 700, heading) line(start: (0pt, -1.5em), length: 100%) [Thesis: #title] if (international-title.len() > 0) { [\ (#international-title)] } v(5mm) grid( columns: 2, gutter: 1em, [Name: #author.name], [Surname: #author.surname], [Date of birth: #author.date-of-birth], [Matriculation no.: #author.matriculation-no], ) v(5mm) par( first-line-indent: 0em, )[ I herewith assure that I wrote the present thesis independently, that the thesis has not been partially or fully submitted as graded academic work and that I have used no other means than the ones indicated. I have indicated all parts of the work in which sources are used according to their wording or to their meaning. \ I am aware of the fact that violations of copyright can lead to injunctive relief and claims for damages of the author as well as a penalty by the law enforcement agency. ] v(15mm) let signature-line = (length) => { box(line(length: length, stroke: (dash: "loosely-dotted"))) } grid( columns: 2, gutter: 0.5em, column-gutter: 1fr, city + ", " + signature-line(3cm), " " + signature-line(5cm), ) variable-pagebreak(is-doublesided) }
https://github.com/simon-epfl/notes-ba3-simon
https://raw.githubusercontent.com/simon-epfl/notes-ba3-simon/main/softcon/notes.typ
typst
== Week 1

Call by value first resolves the "values" before calling the function, while call by name first calls the function, gets the results, and then resolves them.

== Week 2

=== Take a function as a parameter:

```scala
def sum(f: Int => Int, a: Int, b: Int): Int =
  if a > b then 0
  else f(a) + sum(f, a + 1, b)
```

=== We can also generate functions using functions:

```scala
def sum(f: Int => Int): (Int, Int) => Int =
  def sumF(a: Int, b: Int): Int =
    if a > b then 0
    else f(a) + sumF(a + 1, b)
  sumF
```

```scala
def sumInts = sum(x => x)
def sumCubes = sum(x => x * x * x)
def sumFactorials = sum(fact)
```

```scala
sumCubes(1, 10) + sumFactorials(10, 20)
```

Generalization: it is the same as creating a function of the first `n-1` parameter lists that returns a function taking the last one:

```scala
def f(ps1)...(psn-1) = (psn ⇒ E)
```

=== Types

```
Type         = SimpleType | FunctionType
FunctionType = SimpleType '=>' Type
             | '( ' [ Types ] ') ' '= > ' Type
SimpleType   = Ident
Types        = Type { ' , ' Type }
```

=== Several ways of writing functions that return functions

```scala
def isGreaterThanBasic(x: Int, y: Int): Boolean = x > y
val isGreaterThanAnon: (Int, Int) => Boolean = (x, y) => x > y
val isGreaterThanCurried: Int => Int => Boolean = x => y => x > y // Same as `x => (y => x > y)`
def isGreaterThanCurriedDef(x: Int)(y: Int): Boolean = x > y
```

#sym.triangle Curried means the function takes its arguments one at a time! (In fact, it returns a new function each time.) This is useful for partial application: fixing the first argument and deferring the application of the second.

== Week 3

#image("classsub.png", width: 80%)
#image("classsub2.png", width: 80%)
#image("extension.png", width: 80%)
#image("uextension.png", width: 80%)
#image("infixmethods.png", width: 80%)

Precedence is determined by the character that starts the operator:

#image("precedencerules.png", width: 80%)
#image("functionmethods.png", width: 80%)

== Types

Contravariance: a more general type parameter may be used where a more specific one is expected. Covariance: a more specific type parameter may be used where a more general one is expected.

```scala
trait Printer[-A] {
  def print(value: A): Unit
}

val animalPrinter: Printer[Animal] = (animal: Animal) => println(s"Printing an animal: $animal")
val dogPrinter: Printer[Dog] = animalPrinter // ok: by contravariance, Printer[Animal] is a subtype of Printer[Dog]
```
https://github.com/davidcarayon/cv-typst
https://raw.githubusercontent.com/davidcarayon/cv-typst/main/cv-eng.typ
typst
#import "@preview/modern-cv:0.3.1": * #show: resume.with( author: ( firstname: "David", lastname: "CARAYON", email: "<EMAIL>", phone: "(+33) 6 64 66 90 60", website: "https://dcarayon.fr", github: "davidcarayon", linkedin: "david-carayon", address: "8 Lotissement L'entrada 33650 CABANAC-ET-VILLAGRAINS", positions: ( "Statistician", "Data Scientist", (fa-icon("r-project", font : "Font Awesome 6 Brands")), (fa-icon("python", font : "Font Awesome 6 Brands")), (fa-database()), (fa-icon("git-alt", font : "Font Awesome 6 Brands")), (fa-icon("github", font : "Font Awesome 6 Brands")), (fa-icon("gitlab", font : "Font Awesome 6 Brands")), ), ), date: datetime.today().display(), language: "en", colored-headers: true, ) #set text( font: "Avenir" ) = Experiences #resume-entry( title: "Statistician", location: "Bordeaux, France", date: "2019 - Present", description: "ETTIS Research Unit", ) #resume-item[ - Managing data pipelines for numerous research projects, ranging from data collection to final reporting, including data management, cleaning, and statistical processing (EDA, modeling, multivariate analysis, machine learning). - Transforming raw data into useful information and interactive solutions through packages, applications, and associated documentation. - Providing methodological support to temporary staff (code reviews, data analysis strategy, statistics) and conducting internal training on best practices (Programming, Open Source, Open Science). ] #resume-entry( title: "Biostatistician", location: "Bordeaux, France", date: "2017-2019", description: "EABX Research Unit (Fixed-term contract)", ) #resume-item[ - Handling large datasets (monitoring the ecological quality of rivers across France over 10 years) and implementing appropriate processing algorithms. - Promoting results through R packages (indicator calculations), interactive maps (Leaflet), and writing technical reports, scientific articles, and presentations at international conferences. ] = Skills #resume-skill-item( "Programming", (strong("R"),strong("Python"), strong("SQL"),"Git","Github Actions", "Gitlab CI/CD", "Docker", "ODK"), // (fa-icon("r-project", font : "Font Awesome 6 Brands"), fa-icon("python", font : "Font Awesome 6 Brands"),fa-database() ,fa-icon("docker", font : "Font Awesome 6 Brands"),fa-icon("git-alt", font : "Font Awesome 6 Brands"), fa-icon("gitlab", font : "Font Awesome 6 Brands"), fa-icon("github", font : "Font Awesome 6 Brands")), ) #resume-skill-item( "Frameworks", (strong("Tidyverse"), strong("Shiny"), "data.table", "gt", "targets","renv","sf"), ) #resume-skill-item( "Software", (strong("VSCode + Quarto"), "Rstudio", "Jupyter", "Zotero", "Office Suite", "QGIS"), ) #resume-skill-item("Languages", (strong("English (bilingual)"), "Spanish (basic)")) = Project Selection #resume-entry( title: "Data.Interventions", location: [#globe-link("https://projet-swym.fr/data.interventions")], date: "2023-Present", description: "ETL and Machine learning for drowning prediction", ) #resume-item[ - Deploying a digital data collection solution from lifeguard interventions in Landes via Open Data Kit (ODK) into a PostgreSQL database (INRAE Datacenter). - Developing a complete ETL pipeline collecting and shaping data in a SQlite database. - Developing an R Shiny application for automated reporting of lifeguard activities. - Training a parallel machine learning model (xGBoost) to predict high-risk days. 
] #resume-entry( title: "IDEATools", location: github-link("davidcarayon/IDEATools"), date: "2019 - Present", description: "Sustainability Indicators for Agricultural Farms (IDEA4)", ) #resume-item[ - Developing the R package making the method operational and the associated web interface (Shiny). ] #resume-entry( title: "Shiny Kubernetes Service (SK8)", location: globe-link("https://sk8.inrae.fr"), date: "2022 - Present", description: "Institutional service for scalable deployment of Shiny applications", ) #resume-item[ - Designing Shiny applications with Gitlab API interactions, handling containers (Docker) via CI/CD. ] = Education and Certifications #resume-entry( title: "Datacamp Certification", location: "Online", date: "2024", description: "Data Scientist with Python and R", ) #resume-entry( title: "University of Bordeaux", location: "Bordeaux", date: "2015-2017", description: "Master in Biodiversity and Environmental Monitoring", ) // #resume-entry( // title: "INU Champollion", // location: "Albi", // date: "2012-2015", // description: "Bachelor in Biology and Environmental Sciences", // )
https://github.com/bcourtel/cv
https://raw.githubusercontent.com/bcourtel/cv/main/cv.fr.typ
typst
MIT License
#import "./modern-cv/lib.typ": * #show: resume.with( author: ( firstname: "Benjamin", lastname: "Courtel", email: "<EMAIL>", phone: "(+33) 06 60 31 15 07", github: "bcourtel", linkedin: "bcourtel", address: "", positions: ("Développeur web front-end et back-end", "PHP / TypeScript"), ), date: datetime.today().display(), accent_color: rgb("#8839ef"), ) #par(justify: false, leading: 0.8em, linebreaks: "optimized")[ Développeur passionné par le web et les nouvelles technologies, j'accorde une attention particulière au respect de la vie privée et à l'accessibilité dans mes projets. J'aime travailler aussi bien du côté front-end que back-end, généralement avec Vue.js et Symfony, respectivement. Je peux aussi vous accompagner dans la modernisation de systèmes existants, en suivant les bonnes pratiques habituelles à ce genre de problématiques, notamment la mise en place de tests automatisés _end-to-end_ sur l'existant afin de se prémunir autant que possible des régressions. ] = Compétences #resume-skill-item( "Programmation", ( strong("PHP"), strong("TypeScript"), strong("Symfony"), strong("Vue.js"), "Laravel", "SCSS" ), ) #resume-skill-item( "Outils", ( strong("Git"), strong("VS Code"), strong("IDEs JetBrains"), strong("Linux"), strong("GitHub Actions"), "GitLab CI", "Windows", "Salesforce", ), ) #resume-skill-item( "Bonnes pratiques", ( strong("Accessibilité"), strong("TDD"), strong("DDD"), "Architecture hexagonale", "Design System", "CI/CD", ), ) #resume-skill-item( "Langues", ("Français (langue maternelle)", "Anglais (bilingue, TOEIC : 990/990)"), ) = Expérience #resume-entry( title: "Freelancing", location: "Télétravail", date: "septembre 2021 - aujourd'hui", description: "Développeur Web full-stack", ) #resume-item[ - Sekost, de novembre à décembre 2023 - Génération de rapports d'analyse en PDF à destination des clients de Sekost - PDF générés à partir de pages HTML présentant les données d'analyse depuis une API GraphQL. 
- forte contrainte sur le poids des PDF générés, qui contenir plusieurs centaines de pages - attention particulière au design final des PDF, en prenant en compte les différences entre un rendu web, screen et print - besoin d'une navigation interne dans le PDF robuste et intuitive - Sekost, d'avril à août 2023 - Interventions ponctuelles afin de d'accompagner l'équipe du projet sur le développement de composants frontend de visualisation de données provenant de l'API de Sekost - Travail en autonomie avec des points réguliers avec le CEO de l'entreprise afin de rebondir rapidement sur les retours des clients de Sekost après leur avoir présenté les composants en question - Theodo, de novembre 2021 à mai 2022 - Amélioration et mise en conformité de l'accessibilité du site e-commerce de Carrefour - passage d'un score de conformité d'environ 35% à plus de 50% (seuil pour la mention « Partiellement accessible ») - Réflexion et début de la mise en place du design system pour le groupe, en collaboration avec l'équipe de designers - COPEEKS, de mai à juin 2021 - Développement de composants Vue.js responsive pour le dashboard client - Mise en place de méthodologies de travail (code review, intégration continue, pair programming) via GitLab et Notion - UI et UX design ] #resume-entry( title: "<NAME>", location: "Rennes (France)", date: "novembre 2019 - mars 2021", description: "Lead Web Developer", ) #resume-item[ - Développement des produits de l'entreprise : PHP7 (Symfony), HTML5, SCSS, JavaScript (Vue.js, jQuery) - Maintenance des serveurs (Debian 10) - DevOps (Azure DevOps, Ansible, Docker) - Gestion de l'équipe de développement (organisation du travail en Scrum) - Définition des besoins clients, découpage et chiffrage ] #resume-entry( title: "KelBillet", location: "Rennes (France)", date: "novembre 2019 - mars 2021", description: "Développeur Web full-stack", ) #resume-item[ - Développement des nouveaux produits de l'entreprise : PHP7 (Laravel), HTML5, SCSS, JavaScript (Vue.js) - Maintenance des anciens produits : PHP5/7, HTML, LESS, JavaScript (jQuery), conteneurisation - DevOps (Azure DevOps, Ansible, Docker) - Interventions sur les serveurs (Debian, RabbitMQ, Cassandra) - Organisation du travail suivant la méthodologie Kanban, découpage en tâches et chiffrage des demandes clients ] #resume-entry( title: "STMicroelectronics", location: "Crolles (France)", date: "octobre 2015 - septembre 2018", description: "Apprenti Ingénieur développement de kits de CAD pour la micro-électronique", ) #resume-item[ - Notions de micro-électronique avancée - Développement en SKILL pour les outils de CAD propres à la micro- électronique - Développement d’outils internes en Python - Méthodologie AGILE - PFE : développement d'un outil interne de gestion et de standardisation d'environnements de développement - Développement Python 3.5, PyQt, Click (environnement isolé grâce à Pipenv) - Interface avec l'outil de gestion de versions (Dassault Systèmes Synchronicity DesignSync) - Intégration avec des scripts (Cadence SKILL, Tcl, Python) et flows existants - Modularité de l'outil - Documentation technique (Sphinx) et utilisateur - Tests unitaires et d'intégration ] #resume-entry( title: "Athlone Institute of Technology", location: "Athlone (Irlande)", date: "avril 2015 - juin 2015", description: "Intern in the Department of Electronics, Computer & Software Engineering", ) #resume-item[ - Programmation sur Arduino et Processing - Création de projets de TP pour les futurs étudiants - Conception de cartes électroniques ] = 
Formation #resume-entry( title: "Institut supérieur d'électronique et du numérique", location: "Brest (France)", date: "2015 - 2018", description: "Diplôme d'ingénieur, Génie logiciel", ) #resume-item[ - Programmation : Java, Android, C++, C, ASM, HTML5 / CSS3 / ES6, PHP7 - Bases de données : MariaDB, Modélisation Merise et UML - Projet de fin d'études : réalisation d'un site de partage de cartes postales, développeur back-end (Laravel 5), API RESTful (documentée par Swagger), intégration à l'API Vision Microsoft ] #resume-entry( title: "IUT de Rennes", location: "Rennes (France)", date: "2013 - 2015", description: "DUT Génie Électrique et Informatique Industrielle, Spécialisation Robotique et Informatique Industrielle", ) #resume-entry( title: "Lycée Fulgence Bienvenüe", location: "Loudéac (France)", date: "2009 - 2012", description: "Baccalauréat scientifique", ) = Bénévolat #resume-entry( title: "Festival Déjanté", description: "Bénévole", location: "Saint-Gouéno (France)", date: "mai 2017 - aujourd'hui", ) #resume-item[ - Installation, désinstallation et nettoyage du site du festival - Tournées de communication, poses d'affiches et mise à disposition de flyers dans différents lieux - Mise en place et exploitation de stands destinés aux festivaliers ] #resume-entry( title: "Festival M'né le Barouf", description: "Bénévole", location: "Saint-Gilles-du-Mené (France)", date: "juin 2015 - aujourd'hui", ) #resume-item[ - Installation, désinstallation et nettoyage du site du festival - Tournées de communication, poses d'affiches et mise à disposition de flyers dans différents lieux - Mise en place et exploitation de stands destinés aux festivaliers ] #resume-entry( title: " Association Mené des Sons", description: "Bénévole, membre actif, responsable communication, co-secrétaire", location: "Merdrignac (France)", date: "juin 2014 - janvier 2021", ) #resume-item[ - Organisation du festival Uvas Pasas - Réalisation du teaser 2015 - Réalisation du site Web - Programmateur - Montage et démontage du site du festival - Gestion du merchandising - Organisation de divers événements dans l'année (soirée bénévole, tremplin, pique-nique électronique, etc.) ] #resume-entry( title: "Association des Festivités Vernoises", description: "Bénévole", location: "Vern-sur-Seiche (France)", date: "juin 2019 - aujourd'hui", ) #resume-item[ - Installation, désinstallation et nettoyage du site du festival - Mise en place et exploitation de stands destinés aux festivaliers - Changements de plateau ] #resume-entry( title: "Festival Mythos", description: "Bénévole", location: "Rennes (France)", date: "avril 2019", ) #resume-item[ - Régie buvettes ] #resume-entry( title: "Association des Jeunes du Pays de Merdrignac (AJPM) ", description: "Bénévole, membre actif, responsable ménage", location: "Merdrignac (France)", date: "juin 2012 - janvier 2018", ) #resume-item[ - Organisation de divers événements durant l'année (repas, tournois de belote, journée d'intégration, etc.) - Animation du camping du festival Uvas Pasas (concerts, jeux, buvette, repas, petit déjeuner, nettoyage, etc.) 
- Réalisation d'affiches pour les événements - Rénovation d'une caravane - Installation de scènes de concerts - Organisation de la fête de la musique de Merdrignac (groupes, tenue de la buvette, installation et désinstallation du matériel) ] = Projets #resume-entry( title: "homelab", date: "juillet 2020 - aujourd'hui", description: "Maintenance d'un cloud personnel", ) #resume-item[ - D'abord simplement sur un VPS Scaleway, puis sous Kubernetes managé par Scaleway, et enfin déployé sur un cluster Kubernetes déployé sur des machines bare metal directement chez moi - Suivi des bonnes pratiques GitOps, déploiement assuré par Fluxcd, suivi des mises à jour avec Mend Renovate, domaines automatiquement synchronisés via External-DNS et certificats automatiquement renouvelés avec cert-manager et Traefik - Monitoring avec Grafana, backups offsite journaliers automatiques et testés régulièrement ] #resume-entry( title: "Untch", date: "janvier 2017 - mai 2020", description: "Développeur front-end (Vue.js) et Android (Kotlin)", )
https://github.com/7sDream/fonts-and-layout-zhCN
https://raw.githubusercontent.com/7sDream/fonts-and-layout-zhCN/master/notice.typ
typst
Other
#import "/template/template.typ": web-page-template #import "/template/heading.typ": chapter #import "/template/util.typ" #import "/template/components.typ": note #show: web-page-template #show: doc => if util.is-web-target() { doc } #chapter[ 在线阅读说明 ] 本书是 <NAME> 编写的《#link("https://simoncozens.github.io/fonts-and-layout/")[Fonts and Layout for Global Scripts]》的中文翻译版,是一本关于字体设计、Unicode和计算机中复杂文本处理的免费书籍。 中文版使用 #link("https://typst.app/")[Typst] 作为排版系统,借助 #link("https://github.com/Myriad-Dreamin/typst-book")[typst-book] 生成在线阅读版本。 此方案其实是*在您的浏览器中*,使用 WASM 实时运行 Typst 编译器,将源码编译为 SVG 并显示。 这样的好处有: - 即使没有安装需要的字体也能显示各种文字 - 保证显示和排版效果和 PDF 基本一致 - 所有正确绘制的图示(不包括外部图片)在明亮/黑暗模式下会智能地使用合适的颜色 缺点有: - 您的浏览器需要支持 WASM - 在页面加载时可能会使用较高的计算资源 - 由于按页编译的特性,所有交叉引用需要手工指定目标页面,可能会存在错误 如果您在阅读时遇到任何问题(额,那么你也有可能没法看见这句话),可以#link("https://github.com/7sDream/fonts-and-layout-zhCN/issues/new")[提交 Issue]。 也可以选择#link("https://github.com/7sDream/fonts-and-layout-zhCN/releases/latest")[下载PDF],离线阅读。
https://github.com/AsiSkarp/grotesk-cv
https://raw.githubusercontent.com/AsiSkarp/grotesk-cv/main/src/template/content/personal.typ
typst
The Unlicense
#let meta = toml("../info.toml") #import "@preview/fontawesome:0.4.0": * #let icon = meta.section.icon.personal #let language = meta.personal.language #let include-icon = meta.personal.include_icons = #if include-icon [#fa-icon(icon) #h(5pt)] #if language == "en" [Personality] else if language == "es" [Personalidad] #v(5pt) #if language == "en" [ - Analytic thinking - Quality conscious - Good communicator - Independent - Team player - Preemptive - Eager to learn ] else if language == "es" [ - Pensamiento analítico - Consciente de la calidad - Buen comunicador - Independiente - Jugador de equipo - Preventivo - Ansioso por aprender ]
https://github.com/han0126/MCM-test
https://raw.githubusercontent.com/han0126/MCM-test/main/2024校赛typst/main.typ
typst
#import "template/template.typ": * #import "template/template.typ": template as CUMCM #show: CUMCM.with( abstract: [ 在大学时代,积极参与学科竞赛对大学生的发展具有多重意义。首先,竞赛是一种全面发展和自我实现的重要途径,可以提升学生的学业成绩和人格魅力。其次,通过竞赛,学生能够在学术上和专业领域取得优异成绩,为未来的发展奠定坚实的基础。最后,对于大学生而言,慎重地选择参加何种竞赛也是十分重要的,因为不同竞赛会给予不同的锻炼和机会,需要根据自身情况和发展目标进行选择。因此,对大学生各种竞赛赛事的评估和分析,对于他们的成长和发展具有重要意义。这需要综合考虑学生的兴趣爱好、专业特长、未来规划等因素,为其提供合适的竞赛机会,促进其全面发展和综合素质的提升。 针对第一问,由于原始数据样本数据太多造成计算难度较大,因此先系统选取了14个数据样本进行分析。选取赛事参加总人数、赛事举办届数、CNIK数量、ALEXA国内排名以及主办方权威性五个指标进行分析评价。使用*秩和比综合评价法*进行评估得到结论对选取的14个竞赛赛事进行了三等级的分级评价,分别为A级(3个)、B级(9个)、C级(2个)。 针对第二问,首先对于给出的数据进行大范围的筛选,去除可能性较低进入正式目录的赛事,而后采用了*熵值法*进行总体评价分析,重新确立几个指标后进行进行评估,对数据进行处理与分析,用公式对极小型数据、中间型数据进行不同方式的处理使其成为极大型指标,正向处理后的指标对其进行标准化则得到处理后的数据。紧接着使用公式计算信息熵并得到权重,最终得出顺序排列的12个赛事排序,根据结果得到最有可能进入正式目录的赛事。 对于第三问,考虑分别探究三个因素对参赛人数的影响,考虑到三个因素不是简单的线性关系,所以先分别探讨三个因素与参赛人数的关系,建立回归方程。分别根据不同的限制条件建立三个具体的预测回归模型(具体模型公式见后文)。考虑到本文第一问从三个指标分别分析对参加人数的影响,构建的是线性回归模型和有理函数模型。第二问为发现三个指标之间的相互作用与内在关联,现重新构建模型,以寻找各因素间的相互影响和共同对因变量投入的贡献程度,所以选择构建*多项式非线性回归模型*。由于函数式较为复杂,故采用*粒子群算法*求函数最大值,最终得出结论,当每周理论培训时长为290分钟,实践培训时长为260分钟,投入699千元时,将有最多的参与人数,即有45.216%的人参加培训。最后第三小问需根据前文所述提出建议,具体建议见后文详述。 ], title: "关于大学生学科竞赛赛事的评价分析与问题解决", keywords: ("秩和比综合评价法", "熵值法", "多项式非线性回归模型", "粒子群算法") ) #include "chapter/chapter1.typ" #include "chapter/chapter2.typ" #include "chapter/chapter3.typ" #include "chapter/chapter4.typ" #include "chapter/chapter5.typ" #include "chapter/chapter6.typ" = 参考文献 #bibliography("refs.bib", title: none, style: "gb-7714-2015-numeric") #include "chapter/appendix.typ"
https://github.com/JamesWilmot/cheatsheets
https://raw.githubusercontent.com/JamesWilmot/cheatsheets/main/README.md
markdown
# cheatsheets ```bash typst init @preview/cram-snap tmux ```
https://github.com/jgm/typst-hs
https://raw.githubusercontent.com/jgm/typst-hs/main/test/typ/regression/issue43.typ
typst
Other
<foo:bar> #let a = "a" #let w = x => $x$ + x #let x = `hello` #let a = (a:1) #let b = (:b:2) #let c = ( : c : 3) #let d = (:..a) #let e = ( : ..b) #let f = (:)
https://github.com/sebaseb98/clean-math-thesis
https://raw.githubusercontent.com/sebaseb98/clean-math-thesis/main/chapter/appendix.typ
typst
MIT License
#set heading(numbering: none) // Heading numbering = Appendix<chap:Appendix> #counter(heading).update(1) #set heading(numbering: "A.1", supplement: [Appendix]) // Defines Appendix numbering == Notation<sec:notation> #table( columns: 2, column-gutter: 3em, stroke: none, [$C_0$], [functions with compact support], [$overline(RR)$], [extended real numbers $RR union {oo}$], ) #v(1.5cm, weak: true) == Abbreviations<sec:abbreviations> #table( columns: 2, column-gutter: 1.55em, stroke: none, [iff], [if and only if], [s.t.], [such that], [w.r.t.], [with respect to], [w.l.o.g], [without loss of generality], )
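/* A note on the counter interplay above (our reading, not from the source):
   the unnumbered `= Appendix` heading still steps the heading counter, so
   `#counter(heading).update(1)` resets it to 1 and the level-2 sections
   render as "A.1", "A.2". A hypothetical second appendix chapter would step
   the counter to 2 and number as "B":
= Additional Material
== More Notation // would render as "B.1"
*/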
https://github.com/piepert/philodidaktik-hro-phf-ifp
https://raw.githubusercontent.com/piepert/philodidaktik-hro-phf-ifp/main/src/parts/ephid/grundpositionen/selbstverstaendnis.typ
typst
Other
#import "/src/template.typ": * == #ix("Selbstverständnis der Philosophie") #orange-list-with-body()[#ix(strong[esoterisches Selbstverständnis], "Selbstverständnis, esoterisch") #h(1fr) #ix("Heraklit"), #ix("Nietzsche", "<NAME>.")][ Das #ix("esoterische Selbstverständnis", "Selbstverständnis, esoterisch") der Philosophie versteht sie als von dem Alltag getrennt, die Lebenswelt ist eine sinnlose oder sogar schädliche Ablenkung. Das gemeine Volk aus Nicht-Philosophen wird abgewertet und der Philosoph bleibt für sich allein. Diese Art der Philosophie richtet sich nur an Gleichgesinnte und Fachleute. Die Philosophie ist ein einsames Projekt der Selbsterkenntnis. ][#ix(strong[pädagogisch-politisches Selbstverständnis der Philosophie], "Selbstverständnis, pädagogisch-politisch") #h(1fr) #ix("Platon"), #ix("Fichte", "Fichte, <NAME>.")][ Das #ix("pädagogisch-politische Selbstverständnis", "Selbstverständnis, pädagogisch-politisch") der Philosophie stellt die Philosophie als Hilfe dar, die sich um Erziehung und die Gesellschaft sorgt. Sie wird charakterisiert als periagogisch#en[periagogisch: das Umwenden der Seele zur Wahrheit], evolutionär#en[evolutionär: #todo[Was bedeutet "evolutionär" im pädagogisch-politischen Selbstverständnis?]] und pädagogisch#en[pädagogisch: mit den Mitteln der Philosophie, die richtige Methode zur Erziehung finden. Siehe dazu Platons "Politeia".]. Die Philosophie hat den Auftrag, Erziehung, Staat und Gesellschaft zum Wohl des Ganzen zu gestalten. Es ist ungerecht, sich dieser Aufgabe zu entziehen. #colbreak() ][#ix(strong[situativ-pragmatisches Selbstverständnis der Philosophie], "Selbstverständnis, situativ-pragmatisch") #h(1fr) #ix("Protagoras"), #ix("Hume", "<NAME>")][ Das #ix("situativ-pragmatische Selbstverständnis", "Selbstverständnis, situativ-pragmatisch") der Philosophie spricht ihr eine beratende, therapeutische Funktion zu. Sie wird als Möglichkeit benutzt, den Alltag in bestimmten Lebensbereichen zu beeinflussen und steht daher direkt mit ihm in Verbindung. Es geht um eine grundlegende Vermittlung bestimmter Fertigkeiten oder Fähigkeiten. Im Gegensatz zu anderen Selbstverständnissen, ist die Philosophie nicht das Mittel, die Wahrheit zu erreichen und lehnt daher jeglichen Anspruch auf diese ab. ]
https://github.com/SkytAsul/fletchart
https://raw.githubusercontent.com/SkytAsul/fletchart/main/src/elements.typ
typst
#import "internals.typ": internal-element, internal-link #import "utils.typ": cetz-rect-double #import "deps.typ": fletcher #import fletcher.shapes: diamond, pill, rect, parallelogram #let decision = (name: "decision", node-options: (shape: diamond.with(fit: 0), fill: color.orange.desaturate(50%))) #let beginning = (name: "beginning", node-options: (shape: pill, fill: color.red.desaturate(50%))) #let end = (name: "end", node-options: (shape: pill, fill: color.red.desaturate(50%))) #let process = (name: "process", node-options: (shape: rect, fill: color.aqua.desaturate(50%))) #let predefined-process = (name: "predefined-process", node-options: (shape: cetz-rect-double, fill: color.blue.desaturate(70%))) #let input-output = (name: "input-output", node-options: (shape: parallelogram, fill: color.purple.desaturate(60%))) #let resolve-style(base, overrides) = { let result = base for (key, value) in overrides.pairs() { result.insert(key, value) } return result } #let element(id, content, links, type, node-options) = { internal-element( id, content, links, options => { let style = type.node-options if type.name in options.elements-style-override { style = resolve-style(style, options.elements-style-override.at(type.name)) } style = resolve-style(style, node-options) return style } ) } /// Creates a condition with as many choices as needed. /// - id (str): unique ID of the element /// - content (content): content displayed in the decision block /// - args (args): choices of this condition /// and additional named options to pass to the `node` function from fletcher. /// /// The options defined here will take precedence over the default options /// of the the decision type and the overrides defined in /// #the-param[fc-declarative][elements-style-override]. #let condition(id, content, ..args) = { element( id, pad(1em, content), args.pos().map(choice => internal-link(choice.destination, choice.label)), decision, args.named() ) } /// Creates a single choice with a destination and an optional label. #let choice(destination, label: none) = { ( destination: destination, label: label ) } /// Creates an action with a type and an optional destination. /// /// - id (str): unique ID of the element /// - content (content): content displayed in the action block /// - destination (str, element): next element (optional) /// - type (beginning, end, process, predefined-process, input-output, dict): type of the action /// - node-options (args): additional named options to pass to the `node` function from fletcher. /// /// The options defined here will take precedence over the default options /// of the #the-param[action][type] and the overrides defined in /// #the-param[fc-declarative][elements-style-override]. #let action(id, content, destination: none, type: process, ..node-options) = { assert(node-options.pos().len() == 0, message: "Cannot pass non-named node options.") element( id, content, if destination == none {()} else {(internal-link(destination, none),)}, type, node-options.named() ) } #let raw-element(id, content, links: (), ..node-options) = { assert(node-options.pos().len() == 0, message: "Cannot pass non-named node options.") internal-element( id, content, links, _ => node-options.named() ) } #let set-links(element, links) = { let obj = element.value obj.links = links return metadata(obj) }
https://github.com/ohmycloud/computer-science-notes
https://raw.githubusercontent.com/ohmycloud/computer-science-notes/main/Misc/miniserve.typ
typst
#show link: underline

= Setting Up a Local / LAN File-Sharing Service

1. Download #link("https://github.com/svenstaro/miniserve")[miniserve] into the /usr/local/bin directory.

2. Create a system service.

Create a new file named miniserve.service under /etc/systemd/system with the following content:

```sh
[Unit]
Description=miniserve
After=network-online.target
Wants=network-online.target systemd-networkd-wait-online.service

[Service]
ExecStart=/usr/local/bin/miniserve \
    --enable-tar \
    --enable-zip \
    --no-symlinks \
    --verbose \
    -p 3333 \
    -o \
    -a username:password \
    --title "General Software Repository" \
    -F \
    --mkdir -u -- /data/software

IPAccounting=yes
IPAddressAllow=localhost
IPAddressDeny=any
PrivateTmp=yes
PrivateDevices=yes
NoNewPrivileges=true
ProtectSystem=strict
ProtectHome=yes
CapabilityBoundingSet=CAP_NET_BIND_SERVICE CAP_DAC_READ_SEARCH

[Install]
WantedBy=multi-user.target
```

3. Enable the service

```sh
systemctl enable miniserve
```

4. Start the service

```sh
systemctl start miniserve
```

5. Check the service status

```sh
systemctl status miniserve
```

Open a browser and visit #link("http://127.0.0.1:3333")[http://127.0.0.1:3333].
https://github.com/AU-Master-Thesis/thesis
https://raw.githubusercontent.com/AU-Master-Thesis/thesis/main/lib/blocks.typ
typst
MIT License
#import "catppuccin.typ": * #let std-block = block.with( fill: catppuccin.latte.base, radius: 1em, inset: 0.75em, stroke: none, width: 100%, breakable: true, ) #let cut-block = block.with( fill: none, radius: 1em, stroke: none, breakable: true, clip: true, ) #let blocked(title: none, content, color: catppuccin.latte.base, height: auto, divider-stroke: white + 2pt) = { set align(left) std-block(fill: color, height: height)[ #v(0.25em) #text(catppuccin.latte.text, size: 1.2em, weight: 900, title) // #v(-0.15em) #move(dx: -0.75em, dy: 0pt, line(length: 100% + 2 * 0.75em, stroke: divider-stroke)) #content #v(0.5em) ] }
https://github.com/ayoubelmhamdi/typst-phd-AI-Medical
https://raw.githubusercontent.com/ayoubelmhamdi/typst-phd-AI-Medical/master/chapters/ch06-men.typ
typst
MIT License
#import "../functions.typ": heading_center, images, italic #let finchapiter = text(size: 24pt, fill:rgb("#1E045B"),[■]) /* * * MEN BOOK 05 * */ = DETECTING LUNG CANCER NODULES. == Introduction. The project is to create a detector for lung cancer, and based on the *LUNA dataset* #link("https://luna16.grand-challenge.org/Description")[luna16.grand-challenge.org] that is a collection of CT scans of patients with lung nodules, which are small growths in the lungs that may indicate cancer. The dataset is part of a Grand Challenge, which is a competition among researchers to develop and test methods for detecting and classifying nodules. The dataset is open and publicly available, LUNA16 It contains #italic("1,186 lung nodules") annotated in #italic("888 CT scans") by 4 experienced radiologists. The LUNA dataset has two tracks: one for finding the locations of nodules in the scans, and another for reducing false positives by distinguishing benign from malignant nodules. Automating this process will provide an experience in dealing with difficult scenarios where solving problems is challenging. Automatic detection of lung cancer is challenging, and even professional specialists face difficulty in identifying malignant tumors. Automating the process with deep learning will be more demanding and require a structured approach to succeeding. Detecting lung cancer early is essential for increasing the patient's survival rate, but it's tough to do manually, especially on a large scale. The problem space of lung tumor detection is important because it is an active research area with promising results. However, it is also unsolved, which satisfies the authors' objective of using PyTorch to tackle state-of-the-art projects. In large-scale project, it will be working with 3D data and require data manipulation, as no pre-built library is available for suitable training samples. The project will involve using convolutional layers followed by a resolution-reducing downsampling layer. To handle the computational requirements, you will need access to a GPU with at least `8 GB` of RAM or `220 GB` of free disk space for raw training data, cached data, and trained models. Instead of analyzing the entire CT scan, it will break down the problem into simpler tasks. CT scans are 3D X-rays consisting of a three-dimensional array of single-channel data, with each voxel having a numeric value that approximately corresponds to the average mass density of the matter contained inside. As for choosing the batch size, it depends on your specific situation. For example, with an image size of 2400x2400x3x4, a single image takes ~70 MB, so a batch size of 5 might be more realistic. However, this depends on the available GPU memory, and using 16-bit values instead of 32-bit can help double the batch size //[](https://ai.stackexchange.com/questions/3938/how-do-i-handle-large-images-when-training-a-cnn). === Definitions. - *CNN*: The design of a convolutional neural network for detecting tumors are based on alternative image recognition that can be used as a starting point. This Convolutional neural networks typically have a tail, backbone, and head. The tail processes the input, while the backbone contains most of the layers arranged in series of blocks. The head converts the output from the backbone to the desired output form. - *Epoch Training*: The epoch is divided into 20193 steps called batches, each containing 256 data points. 
Once the data is loaded and the training process begins, monitoring the performance of the computing resources is crucial to ensure that resources are being used effectively.

- *Metrics*: displays training and validation metrics in a graphical format, making it easier to interpret the data. We can adjust the smoothing option to remove noise from trend lines if our data is noisy.
  - Recall is the ability to identify all relevant things,
  - while precision is the ability to identify only relevant things.
  The logging output includes the precision along with the count of correctly identified samples and the total number of samples, for both negative and positive classes.

- *Overfitting*: Overfitting occurs when a model learns specific properties of the training set, losing the ability to generalize and becoming less accurate at predicting samples it has not been trained on. To avoid overfitting, we must examine the right metrics. Looking only at our overall loss, everything might seem fine, but that is because our validation set is unbalanced and the negative samples dominate, making it hard for the model to memorize individual details. To prevent overfitting, we use data augmentation, which involves modifying a dataset by applying synthetic alterations to individual samples, resulting in a new dataset with a larger number of effective samples. Five specific data augmentation techniques are discussed, including mirroring the image, shifting it by a few voxels, scaling it up or down, rotating it around the head-foot axis, and adding noise to the image.

- *Data Augmentation*: This technique is designed to create new training samples from the existing ones by applying simple transformations. The transformations include shifting/mirroring, scaling, rotation, and adding noise.

- *Thresholding*: a simple and common method of segmentation that works by selecting a pixel value (called a threshold) that separates the foreground (the region of interest) from the background (the rest of the image). For example, if you want to segment the bone from a CT scan, you can choose a threshold that corresponds to the intensity of bone pixels and ignore the pixels that are lower or higher than that value. However, thresholding is not always accurate or robust, especially when dealing with complex or noisy images.

=== Approach: Training our Model in Five Main Steps.

The goal of this project is to create an end-to-end solution for detecting cancerous nodules in lung CT scans using PyTorch. The approach involves five main steps:

1. Loading the CT data and converting it into a PyTorch dataset.
2. Segmenting the image to identify potential tumors.
3. Grouping interesting voxels to form candidates.
4. Classifying the nodules using a classification model.
5. Diagnosing the patient based on the malignancy of the identified nodules, combining segmentation and classification models for a final diagnosis.

== Step 1: Manipulating the Data.

=== Data Conversions.

To process the data, it is necessary to convert the raw data files into a format usable by PyTorch, which means converting the raw 3D array of intensity data into PyTorch `Tensors`. This data is around 32 million voxels, which is much larger than the nodules. To make the task more manageable, the model will focus on a relevant crop of the CT scan.
There are various steps involved in processing the data, including understanding the data, mapping location information to array indexes, and converting the CT scan intensity into mass density. Identifying the key concepts of the project, such as nodules, is crucial.

=== Data Loading.

The first step in creating a neural network for detecting lung cancer using PyTorch is handling the dataset. The goal is to produce a training sample from raw CT scan data and a list of annotations, covering the following topics:

- Loading and processing raw data files
- Implementing a Python class to represent the data
- Converting the data into a format usable by PyTorch
- Visualizing the training and validation data

Overall, the quality of the data used to train the model has a significant impact on the project's success.

=== Raw CT Data Files.

We load the CT data and process the information to produce a 3D array, and transform the patient coordinate system to the index, row, and column coordinates of each voxel in the array. Annotation data from LUNA with nodule coordinates and malignancy flags is also loaded. It contains information about all lumps that look like nodules, whether they are malignant, benign, or something else. We'll use this to build a list of candidates that can be split into training and validation datasets. The dataset also contains information about some of the candidates that have been flagged as nodules, including the diameter. This information is useful for ensuring a representative range of nodule sizes in the training and validation data.

=== Training and Validation Sets.

Splitting a dataset into training, validation, and test sets is a crucial step in building a machine learning model. It allows the model to be trained on one set, tuned on another, and evaluated on a final set. This helps prevent overfitting and gives an accurate measure of the model's performance. We want to ensure that both sets represent the real-world input data that we expect to see and handle normally. If either set is significantly different from our actual use cases, it is highly likely that our model will behave differently than we expect. This split helps us evaluate and improve the model's performance before we deploy it on production data.

=== Loading Individual CT Scans.

We need to understand how to load and interpret CT scan data, which is usually stored in the DICOM file format. The MetaIO format is suggested for easier use, and the Python SimpleITK library can be used to convert it to a NumPy array. The Hounsfield Unit (HU) scale is used to measure CT scan voxel density, with air at -1000 HU, water at 0 HU, and bone at least +1000 HU.

=== Data Ranges and Model Inputs.

We start by adding channels of information to our samples. To prevent the new channels from being overshadowed by raw HU values, we must be aware that our data ranges from -1,000 to +1,000. We won't add more channels of data for the classification step, so our data handling will remain the same.

Fixed-size inputs are necessary due to a fixed number of input neurons. We want to train our model using a crop of the CT scan that accurately centers the candidate, making identification easier for the model by decreasing the variation in expected inputs.

=== The Patient Coordinate System.

The candidate center data is expressed in millimeters, not voxels. We need to convert our coordinates from the millimeter-based coordinate system $(X, Y, Z)$ to the voxel-address-based coordinate system $(I, R, C)$.
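/* A minimal sketch of that millimeter → voxel-address conversion (the helper
   name and the assumption of axis-aligned voxels are ours; the real code also
   applies a 3×3 direction matrix, as described below):
#let mm-to-irc(xyz, origin, spacing) = {
  // one (coordinate, origin, spacing) triple per axis:
  // subtract the scan origin, then divide by the per-axis voxel size
  xyz.zip(origin, spacing).map(t => calc.round((t.at(0) - t.at(1)) / t.at(2)))
}
*/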
The patient coordinate system defines positive $X$ as patient left, positive $Y$ as patient behind, and positive $Z$ as toward the patient's head. The patient coordinate system is often used to specify the locations of interesting anatomy in a way that is independent of any particular scan.

=== CT Scan Shape and Voxel Sizes.

The size of the voxels varies between CT scans, and they are typically not cubes. The row and column dimensions usually have equal voxel sizes, (...) and the index dimension has a larger value, but other ratios can exist.

=== Converting Between Millimeters and Voxel Addresses.

(...) To convert between patient coordinates in millimeters and $(I, R, C)$ array coordinates, we define some utility code to assist with the conversion. Flipping the axes is encoded in a $3 times 3$ matrix. The metadata we need to convert from patient coordinates to array coordinates is contained in the MetaIO file alongside the CT data itself. (...)

In CT scan images of patients with lung nodules, most of the data is not relevant to the nodule (up to 99.9999%). To extract the nodules, an area around each candidate will be extracted, so the model can focus on one candidate at a time.

// segmentation ch13
// classification ch12
//

/* *------------------------------------------------------------------------------ */

== Step 2: Segmentation.

This step covers the process of segmentation to identify possible nodules, which is step 2 of the project's plan. The segmentation model is created using a *U-Net*. The objective is to flag voxels that might be part of a nodule and use the classification step to reduce the number of incorrectly marked voxels.

=== Semantic Segmentation: Per-Pixel Classification.

The U-Net network is a popular architecture for semantic segmentation. It was originally proposed for biomedical image segmentation, but has since been widely used in several other domains, such as self-driving cars, satellite imagery, and more. The U-Net architecture includes an encoder that down-samples the input image, followed by an up-sampling decoder. This allows the network to learn high-level semantic features while preserving the spatial information, making it suitable for semantic segmentation.

Semantic segmentation identifies different objects and where they are in a given image. If there are multiple cats in an image, semantic segmentation can identify each cat's position. Existing classification models cannot pinpoint where the cat is; they can only predict whether or not a cat is present in the image.

Semantic segmentation requires combining raw pixels to develop specific detectors for items like color, and then building on this to create more informative feature detectors, to finally identify specific things like a cat or a dog. Nonetheless, the segmentation model will not give us a single classification-like list of binary flags as classification models do, since the output should be a heatmap or mask.

=== Why We Need a Heatmap or Mask as the Output of Segmentation.

The output of a U-Net network in biomedical image segmentation is typically a heatmap or a mask because these formats provide a clear visual representation of the boundaries that the network has identified in the image. A heatmap is a colored image that highlights the regions of the input image that are most important for the output classes, whereas a mask is a binary image that indicates which pixels belong to which class.
In biomedical image segmentation, it is important to accurately identify the areas of interest, such as tumors or blood vessels, to aid in diagnosis and treatment. The heatmap or mask allows for easy visualization of these areas and can be used by medical professionals to make more informed decisions. Moreover, the heatmap or mask output can be used as input for further processing and analysis, such as quantifying the size or volume of a segmented region.

=== U-Net Architecture for Image Segmentation.

*U-Net* is a convolutional neural network that can produce pixelwise output for image segmentation. It has a U-shaped encoder-decoder structure that operates at different resolutions. The encoder network reduces the spatial dimensions and increases the number of filters at each block, while the decoder network does the opposite. The key innovation of U-Net is the use of skip connections that link the encoder and decoder blocks at the same level, allowing the network to capture multi-scale features and produce more precise segmentations.

We use the same source data as before: CT scans and annotation data. Our goal is to create bounding boxes that cover the whole nodules based on their annotated centers. This will help us with segmentation, which is the process of identifying regions of interest in images. To do this, we search for voxels with high density around the center of each nodule on the row and column axes. We stop when we reach voxels with low density, which indicate normal lung tissue. Then we repeat the search in the third dimension.

We have seven input channels for the U-Net: three context slices on each side of the focus slice, plus the focus slice itself, which is the one we segment. We have one output class indicating whether a voxel is part of a nodule.

=== Visualizing CT Images with Predicted Nodules.

The U-Net model has an encoder that captures the context of the image and a decoder that produces the segmentation map. To evaluate the performance of the model, we create an empty image with 512x512 pixels and three color channels. We overlay the predicted segmentation map on the original CT image and use different colors to indicate the errors. We use red for false positives (pixels that are predicted as nodules but are not), green for true positives (pixels that are predicted as nodules and are), and orange for false negatives (pixels that are nodules but were not predicted as such). The pixel values are normalized between 0 and 1. The goal is to have a clear visualization of the nodules and the errors in the prediction.

== Step 3: Grouping Nodules.

We use segmentation to find ROIs that might be nodules in CT images. Then we group pixels that are next to each other and above the threshold. Each group is a nodule candidate with a center point (index, row, column). We use these points to classify the candidates. Grouping makes the search easier and removes noise.

== Step 4: Classification.

/* *------------------------------------------------------------------------------ */

This step involves dividing the CT scan into individual slices. The output of the segmentation step is an array of per-pixel probabilities, indicating whether the pixel is part of a nodule. These slice-wise predictions are collected in a mask array with the same shape as the CT scan input, and a threshold is applied to the predictions to obtain a binary array. A cleanup step then shortens the flagged area and removes small components.

== Step 5: Diagnosing the Patient.
/* *------------------------------------------------------------------------------ */

We use the LIDC annotations to decide whether a nodule (a small lump) in the lung is cancerous or not. The LIDC annotations are labels that up to four doctors gave to each nodule based on how it looks in a CT scan. They used a scale from 1 to 5, where 1 means the nodule is very unlikely to be cancerous and 5 means it is very likely to be cancerous. These labels are not based on other information about the patient, such as their medical history or symptoms. To make a final decision, we will use a rule that says a nodule is cancerous if at least two doctors gave it a 4 or a 5. This rule is not very precise, and there are other ways of using the labels, such as taking the average or ignoring some nodules.

== Conclusion.

We identify nodule candidates in CT scans for possible cancer detection. A connected-components algorithm is used for grouping the suspected nodule voxels. The labeled chunks are passed on to a classification module to reduce false positives. Finally, the identified regions in the CT scan are cropped and passed to the classification module using a DataLoader.

We use a data loader to loop over a candidate list and threshold the output probabilities, to get a list of things our model thinks are actual nodules, which would be output for a radiologist to inspect, while adjusting the threshold to err a bit on the safe side. A single CT scan from the validation set is run, and 16 nodule candidates are found.

The next task is identifying malignant nodules from benign ones in CT scans, after implementing the nodule-detection task of the LUNA challenge. Even with a good system, diagnosing malignancy would need a more comprehensive view of the patient, additional non-CT context, and a biopsy, instead of just looking at a particular nodule in isolation on a CT scan. This task is likely to be performed by a doctor for some time to come.

- Splitting training and validation (and test) sets between patients is important to avoid errors.
- Converting pixel-wise marks to nodules can be achieved using traditional image processing.
- The diagnosis performs both segmentation and classification.
- TensorBoard can help us visualize and identify network anomalies.
- There is no magic bullet when training neural networks.

This system is not ready to replace a human radiologist, but it could be a useful tool to help them find suspicious areas in the scans. It would need more data and validation from experts, as well as regulatory approval from authorities. The system would also need to run in a scalable environment that can handle different cases and situations.

// test outline
= CONCLUSION.
https://github.com/tingerrr/hydra
https://raw.githubusercontent.com/tingerrr/hydra/main/src/lib.typ
typst
MIT License
#import "/src/core.typ" #import "/src/util.typ" #import "/src/selectors.typ" /// An anchor used to search from. When using `hydra` ouside of the page header, this should be /// placed inside the pge header to find the correct searching context. `hydra` always searches from /// the last anchor it finds, if and only if it detects that it is outside of the top-margin. #let anchor() = [#metadata(()) <hydra-anchor>] /// Query for an element within the bounds of its ancestors. /// /// The context passed to various callbacks contains the resolved top-margin, the current location, /// as well as the binding direction, primary and ancestor element selectors and customized /// functions. /// /// This function is contextual. /// /// - ..sel (any): The element to look for, to use other elements than headings, read the /// documentation on selectors. This can be an element function or selector, or an integer /// declaring a heading level. /// - prev-filter (function, auto): A function which receives the `context` and `candidates`, and /// returns if they are eligible for display. This function is called at most once. The primary /// next candidate may be none. If this is `auto` no filter is applied. /// - next-filter (function, auto): A function which receives the `context` and `candidates`, and /// returns if they are eligible for display. This function is called at most once. The primary /// prev candidate may be none. If this is `auto` no filter is applied. /// - display (function, auto): A function which receives the `context` and candidate element to /// display. If this is `auto`, the default implementaion will be used. /// - skip-starting (bool): Whether `hydra` should show the current candidate even if it's on top of /// the current page. /// - use-last (bool): If hydra should show the name of the first or last candidate on the page. // Defaults to false. /// - dir (direction, auto): The reading direction of the document. If this is `auto`, the text /// direction is used. Be cautious about leaving this option on `auto` if you switch text /// direction mid-page and use hydra outside of footers or headers. /// - binding (alignement, auto): The binding of the document. If this is `auto`, the binding is /// inferred from `dir`, similar to how it is done in page. Be cautious about leaving this on /// option on `auto` if you switch text direction mid-page and use hydra outside of footers or /// headers. /// - book (bool): The binding direction if it should be considered, `none` if not. If the binding /// direction is set it'll be used to check for redundancy when an element is visible on the last /// page. Make sure to set `binding` and `dir` if the document is not using left-to-right reading /// direction. /// - anchor (label, none): The label to use for the anchor if `hydra` is used outside the header. /// If this is `none`, the anchor is not searched. 
/// -> content
#let hydra(
  prev-filter: auto,
  next-filter: auto,
  display: auto,
  skip-starting: true,
  use-last: false,
  dir: auto,
  binding: auto,
  book: false,
  anchor: <hydra-anchor>,
  ..sel,
) = {
  util.assert.types("prev-filter", prev-filter, function, auto)
  util.assert.types("next-filter", next-filter, function, auto)
  util.assert.types("display", display, function, auto)
  util.assert.types("skip-starting", skip-starting, bool)
  util.assert.types("use-last", use-last, bool)
  util.assert.enum("dir", dir, ltr, rtl, auto)
  util.assert.enum("binding", binding, left, right, auto)
  util.assert.types("book", book, bool)
  util.assert.types("anchor", anchor, label, none)

  let (named, pos) = (sel.named(), sel.pos())
  assert.eq(named.len(), 0, message: util.fmt("Unexpected named arguments: `{}`", named))
  assert(pos.len() <= 1, message: util.fmt("Unexpected positional arguments: `{}`", pos))

  let sanitized = selectors.sanitize("sel", pos.at(0, default: heading))
  let default-filter = (_, _) => true

  let dir = util.auto-or(dir, core.get-text-dir)
  let binding = util.auto-or(binding, () => page.binding)
  let binding = util.auto-or(binding, () => util.page-binding(dir))

  let ctx = (
    prev-filter: util.auto-or(prev-filter, () => default-filter),
    next-filter: util.auto-or(next-filter, () => default-filter),
    display: util.auto-or(display, () => core.display),
    skip-starting: skip-starting,
    use-last: use-last,
    dir: dir,
    binding: binding,
    book: book,
    anchor: anchor,
    primary: sanitized.primary,
    ancestors: sanitized.ancestors,
  )

  core.execute(ctx)
}
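For orientation, here is a minimal usage sketch of the `hydra` function above as a running header. The `@preview` import path and version number are assumptions (inside this repository one would import `/src/lib.typ` instead); the call pattern follows the doc comments above.

```typst
// A minimal sketch, assuming the published preview package; the version
// is an assumption, adjust it to the release matching this source.
#import "@preview/hydra:0.4.0": hydra

// hydra is contextual, so it must be called inside a context expression.
// With no selector it tracks headings; the active one appears in the header.
#set heading(numbering: "1.1")
#set page(paper: "a4", header: context hydra() + line(length: 100%))

= Introduction
#lorem(600)

= Background
#lorem(600)
```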
https://github.com/typst/packages
https://raw.githubusercontent.com/typst/packages/main/packages/preview/indenta/0.0.2/lib.typ
typst
Apache License 2.0
#let fix-indent(unsafe: false) = {
  return it => {
    // Small state machine over the paragraph's children:
    // st == 0: normal inline flow
    // st == 1: just saw a block element (heading, figure, list item, ...)
    // st == 2: saw a block element followed by a parbreak, i.e. the next
    //          inline child starts a fresh paragraph
    let st = 0
    let _is_block(e, fn) = fn == heading or (fn == math.equation and e.block) or (fn == raw and e.block) or fn == figure or fn == block or fn == list.item or fn == enum.item or fn == table or fn == grid or fn == align
    let _is_inline(e, fn) = fn == text or fn == box or (fn == math.equation and not e.block) or (fn == raw and not e.block)
    for e in it.children {
      let fn = e.func()
      if st == 0 {
        if _is_block(e, fn) { st = 1 }
      } else if st == 1 {
        if fn == parbreak { st = 2 } else if _is_block(e, fn) or e == [ ] {} else { st = 0 }
      } else if st == 2 {
        if _is_block(e, fn) { st = 1 } else {
          // A paragraph starts right after a block element: Typst suppresses
          // its first-line indent, so emit the indent manually.
          if unsafe or _is_inline(e, fn) { context h(par.first-line-indent) }
          st = 0
        }
      }
      e
    }
  }
}
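A usage sketch for the show rule above; the import path is taken from the package URL of this record:

```typst
// A sketch, assuming the preview package from the URL above.
#import "@preview/indenta:0.0.2": fix-indent

#set par(first-line-indent: 2em)
#show: fix-indent()

= A heading
Typst normally suppresses the first-line indent of the paragraph that
directly follows a block element such as this heading; the show rule
walks the paragraph children and re-inserts the indent there.
```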
https://github.com/jgm/typst-hs
https://raw.githubusercontent.com/jgm/typst-hs/main/test/typ/compiler/array-38.typ
typst
Other
// Error: 4 expected closing paren #{(} // Error: 3-4 unexpected closing paren #{)} // Error: 4-6 unexpected end of block comment #(1*/2) // Error: 6-8 invalid number suffix: u #(1, 1u 2) // Error: 3-4 unexpected comma #(,1) // Missing expression makes named pair incomplete, making this an empty array. // Error: 5 expected expression #(a:) // Named pair after this is already identified as an array. // Error: 6-10 expected expression, found named pair #(1, b: 2) // Keyed pair after this is already identified as an array. // Error: 6-14 expected expression, found keyed pair #(1, "key": 2)
https://github.com/derekchai/k-mapper
https://raw.githubusercontent.com/derekchai/k-mapper/main/manual.typ
typst
MIT License
// Copyright 2024 <NAME>.
// Use of this code is governed by a MIT license in the LICENSE.txt file.

#import "lib.typ": *

#let version = "1.1.0"

#let conf(title, doc) = {
  set page(
    paper: "us-letter",
    header: align( right + horizon, title ),
    numbering: "1",
    // margin: (1.5in)
  )
  set par(first-line-indent: 1em, justify: true)
  set text(
    font: "Linux Libertine",
    size: 11pt,
  )
  columns(1, doc)
}

#show: doc => conf(
  [the `k-mapper` package\ version #version],
  doc,
)

#show par: set block(spacing: 0.55em)
#show heading: set block(above: 2em, below: 1em)
#show "k-mapper": `k-mapper`
#show link: underline

= the k-mapper package #version

#block(
  fill: luma(240),
  inset: 12pt,
  radius: 4pt,
  [ *Karnaugh map* /ˈkɑːnɔː/ _noun_ \ a diagram consisting of a rectangular array of squares each representing a different combination of the variables in a Boolean function ]
)

== introduction

k-mapper is a Typst package for adding customizable Karnaugh maps of 2~by~2, 2~by~4, and 4~by~4 grid sizes to your Typst projects. \ This manual has been typeset in Typst, using the k-mapper package, and is intended for the #version version of k-mapper. See the source code on the GitHub repository for the project #link("https://github.com/derekchai/k-mapper/tree/main")[here]. \ See the changelog for the package #link("https://github.com/derekchai/k-mapper/blob/698e8554ce67e3a61dd30319ab8f712a6a6b8daa/changelog.md")[here].

== using `karnaugh()`

The main function of this package is the `karnaugh()` function, which allows you to create and customize all sizes of Karnaugh maps.

=== gray code position

The position of implicants in k-mapper is declared via _Gray code position_. This is similar to Karnaugh map packages in LaTeX. The Gray code position of a cell in a Karnaugh map can be determined by looking at the Gray code labels of the Karnaugh map: the Gray code position is the decimal equivalent of the binary number formed from the number(s) on the left and the number(s) on the top. The empty maps below show each cell's Gray code position. Note that the Gray code position for a cell differs depending on the Karnaugh map's grid size.

#grid( columns: (1fr, 1fr, 1fr), align: center + horizon, karnaugh( 4, manual-terms: (0, 1, 2, 3) ), karnaugh( 8, manual-terms: (0, 1, 2, 3, 4, 5, 6, 7) ), karnaugh( 16, manual-terms: (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15), implicants: ((14, 14), ), colors: ((rgb(100, 100, 100, 100), )) ) )

For example, the shaded cell above's Gray code position (`14`) can be determined by concatenating the binary numbers to its left on the y-axis (`11`) and above it on the x-axis (`10`), giving `1110` which equals `14` in decimal. \ \ \ \

=== function arguments

#table( columns: (auto, auto, 1fr, auto), stroke: none, inset: 10pt, table.header([*name*], [*default*], [*description*], [*example values*]), [*`grid-size`*\ `int`], [required], [The size of the Karnaugh map's grid. This value can only be `4` (2~by~2), `8` (2~by~4), or `16` (4~by~4). Any other values will throw an error.], [``` 4 8 16```], [*`x-label`*\ `content`], [```typst $$```], [The label (usually a variable name) to go on the top (x-axis) of the Karnaugh map.], [```typst $A$ [foo]```], [*`y-label`*\ `content`], [```typst $$```], [The label (usually a variable name) to go on the left (y-axis) of the Karnaugh map.], [```typst $B$ [bar]```], [*`minterms`*\ `(int)`\ `none`], [```typst none```], [The `array` of Gray code positions#footnote[See p. 1.]<gcp> where the cell at that position is a minterm (`0`).
Mutually exclusive with `maxterms` and `manual-terms`.], [```typst (3, 4, 6) (1, ) ```], [*`maxterms`*\ `(int)`\ `none`], [```typst none```], [The array of Gray code positions@gcp where the cell at that position is a maxterm (`1`). Mutually exclusive with `minterms` and `manual-terms`.], [```typst (0, 1, 2, 3, 5, 11, 12) (7, )```], [*`manual-terms`*\ `(content)`\ `none`], [```typst none```], [The `array` of `content` in each cell in order of Gray code position@gcp. The length of this `array` _must_ equal the `grid-size`. Mutually exclusive with `minterms` and `maxterms`.], [```typst // Grid-size 4 (0, "X", 1, 1) ```], [*`implicants`*\ `((int, int), )`], [```typst ()```], [An `array` where each element is an `array` of two `int`s, where each `int` is a Gray code position@gcp corner of a _rectangular_ implicant.], [```typst ((0, 3), (1, 1)) ((0, 2), )```], [*`horizontal -implicants`*\ `((int, int), )`], [```typst ()```], [An `array` where each element is an `array` of two `int`s, where each `int` is a Gray code position@gcp corner of a _horizontal split_ implicant --- that is, one which wraps around the vertical edges of the Karnaugh map.], [```typst // Grid-size 16 ((0, 6), (8, 10))```], [*`vertical -implicants`*\ `((int, int), )`], [```typst ()```], [An `array` where each element is an `array` of two `int`s, where each `int` is a Gray code position@gcp corner of a _vertical split_ implicant --- that is, one which wraps around the horizontal edges of the Karnaugh map.], [```typst // Grid-size 8 ((0, 4), ) // Grid-size 16 ((0, 9), (2, 10))```], [*`corner -implicants`*\ `bool`], [```typst false```], [A `bool` which indicates whether the Karnaugh map contains a _corner split_ implicant --- that is, one which wraps around both vertical and horizontal edges of the Karnaugh map.], [```typst true```], [*`cell-size`*\ `length`], [```typst 20pt```], [The size of an individual cell in the Karnaugh map.], [```typst 1cm ```], [*`stroke-width`*\ `length`], [```typst 0.5pt```], [The stroke width of the Karnaugh map grid.], [```typst 0.2pt ```], [*`colors`*\ `(color)`], [array of: \ red \ green \ blue \ cyan \ magenta \ yellow], [An array of RGBA `color`s to be used in displaying implicants. The first implicant uses the first `color` in the array, the second implicant the second color, etc. If there are more implicants than there are colors, each subsequent implicant will use the least recently used color (i.e. it wraps around). By default, all `color`s in `colors` have alpha values of `100`.], [```typst // Grayscale K-map (rgb( 200, 200, 200, 100 ), ) ```], [*`implicant-inset`*\ `length`], [```typst 2pt```], [The inset of implicants within each cell.], [```typst 3pt ```], [*`edge-implicant -overflow`*\ `length`], [```typst 5pt```], [How much _split implicants_ (horizontal, vertical, corner) overflow the bounds of the grid.], [```typst 2mm ```], [*`implicant-radius`*\ `length`], [```typst 5pt```], [The corner radius of implicants.], [```typst 3mm ```], [*`implicant-stroke -transparentize`*\ `ratio`], [```typst #-100%```], [The ratio to transparentize the stroke color of implicants by. If set to `0%`, the stroke color of implicants is the same as the fill color, darkened by the factor set in `implicant-stroke-darken` (`60%` by default).
Negative values mean the stroke color becomes more opaque.], [```typst -50% ```], [*`implicant-stroke -darken`*\ `ratio`], [```typst 60%```], [The ratio to darken the stroke color of implicants by.], [```typst 100% ```], [*`implicant-stroke -width`*\ `length`], [```typst 0.5pt```], [The stroke width of implicants.], [```typst 1pt ```], ) == examples #grid( columns: (auto, 1fr), align: (right, left), gutter: 20pt, karnaugh( 4, minterms: (0, ), implicants: ((1, 3), (2, 3)), colors: (rgb(100, 100, 100, 100), ) ), [```typst // Grayscale Karnaugh map #karnaugh( 4, minterms: (0, ), implicants: ((1, 3), (2, 3)), colors: (rgb(100, 100, 100, 100), ) // <- ) ```], karnaugh( 8, x-label: $C$, y-label: $A B$, manual-terms: (0, 1, 0, 0, 0, "X", 1, 1), implicants: ((6, 7), ), vertical-implicants: ((1, 5), ) ), [```typst #karnaugh( 8, x-label: $C$, y-label: $A B$, manual-terms: (0, 1, 0, 0, 0, "X", 1, 1), implicants: ((6, 7), ), vertical-implicants: ((1, 5), ) ) ```], karnaugh( 16, x-label: $C D$, y-label: $A B$, maxterms: (0, 2, 5, 7, 13, 15, 8, 10), implicants: ((5, 15), ), corner-implicants: true ), [```typst #karnaugh( 16, x-label: $C D$, y-label: $A B$, maxterms: (0, 2, 5, 7, 13, 15, 8, 10), implicants: ((5, 15), ), corner-implicants: true ) ```], karnaugh( 8, manual-terms: (0, 1, 2, 3, 4, 5, 6, 7), implicants: ((0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7)), ), [```typst #karnaugh( 8, manual-terms: (0, 1, 2, 3, 4, 5, 6, 7), implicants: ( (0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7) ) ) ```], karnaugh( 16, x-label: $C D$, y-label: $A B$, manual-terms: (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15), implicants: ((5, 7), (5, 13), (15, 15)), vertical-implicants: ((1, 11), ), horizontal-implicants: ((4, 14), ), corner-implicants: true, ), [```typst #karnaugh( 16, x-label: $C D$, y-label: $A B$, manual-terms: ( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 ), implicants: ((5, 7), (5, 13), (15, 15)), vertical-implicants: ((1, 11), ), horizontal-implicants: ((4, 14), ), corner-implicants: true, ) ```], karnaugh( 8, x-label: $C$, y-label: $A B$, manual-terms: (0, 1, 2, 3, 4, 5, 6, 7), implicants: ((0, 3), (2, 7)), horizontal-implicants: ((4, 5), ), colors: (rgb(255, 255, 255, 0), ), implicant-stroke-width: 1pt ), [```typst // No fill Karnaugh map #karnaugh( 8, x-label: $C$, y-label: $A B$, manual-terms: (0, 1, 2, 3, 4, 5, 6, 7), implicants: ((0, 3), (2, 7)), horizontal-implicants: ((4, 5), ), colors: (rgb(255, 255, 255, 0), ), implicant-stroke-width: 1pt ) ```] )
https://github.com/binhtran432k/ungrammar-docs
https://raw.githubusercontent.com/binhtran432k/ungrammar-docs/main/contents/literature-review/astro.typ
typst
#import "/components/glossary.typ": gls == Astro <sec-astro> Astro is a relatively new web framework that has gained popularity due to its focus on performance, flexibility, and developer experience. We will explore the key features, benefits, and challenges of using Astro for building modern web applications. Astro is a promising web framework that offers a compelling combination of performance, flexibility, and developer experience. Its focus on island architecture, component-based development, and integration with popular tools makes it a strong choice for building modern web applications. As the Astro ecosystem continues to mature, it is likely to become an even more popular and widely adopted framework @bib-astro. === Key Features of Astro - *Component-Based Architecture*: Astro promotes a component-based approach, allowing you to build reusable #gls("ui") components and structure your projects effectively. - *Island Architecture*: Astro's island architecture enables you to selectively hydrate components, improving initial page load performance and reducing JavaScript bundle size. - *#gls("ssg", mode:"full")*: Astro can generate static #gls("html") files for your website, providing excellent performance and #gls("seo") benefits. - *#gls("ssr", mode:"full")*: For dynamic content, Astro supports server-side rendering, ensuring optimal #gls("seo") and user experience. - *Integration with Popular Tools*: Astro seamlessly integrates with popular JavaScript frameworks and tools, such as React, Preact (@sec-preact), Vue, and Svelte. - *Customizability*: Astro offers a high degree of customization, allowing you to tailor the framework to your specific project needs. === Benefits of Using Astro - *Performance Optimization*: Astro's island architecture and focus on performance result in faster page loads and improved user experience. - *Flexibility*: Astro's component-based approach and integration with popular frameworks provide flexibility in building different types of web applications. - *Developer Experience*: Astro is designed with developer experience in mind, offering a streamlined workflow and intuitive #gls("api"). - *SEO-Friendly*: Astro's #gls("ssg", mode:"full") capabilities make it ideal for #gls("seo") optimization. - *Community and Ecosystem*: Astro is backed by a growing community and ecosystem of tools and resources.
https://github.com/NwaitDev/Typst-Accessibility-Template
https://raw.githubusercontent.com/NwaitDev/Typst-Accessibility-Template/main/article_example.typ
typst
#import "article_template.typ": article_template #import "Components/authors.typ": author #let abstractofthearticle = lorem(59) #show: doc => article_template( title: [A title for this example article], subtitle:[And its associated very long subtitle that describes the subject way too precisely compared to the name of the article], authors:( author("<NAME>", "Some Jersey University", email:"<EMAIL>"), author("<NAME>", "North Carolina Uni", email:"<EMAIL>"), author("<NAME>", "London College of sth") ), abstract: abstractofthearticle, cols:2, fontsize:10pt, doc, ) = Lists in Typst \ Let's take a look at lists. There are 2 kinds of lists: - itemized lists, - numbered lists. The example above is an itemized list. Each item is preceded with an `-` character. But if you want to enumerate the different steps of a procedure, you may prefer using this kind of list: + Step One + Step Two + step Three + Final Step Such lists are produced by preceding the items by the `+` character. == Nested lists It is perfectly possible to nest those lists by indenting the items in the Typst script: + A first list - one of the items of that list - another item of that list + A second list - one of the items of that list - another item of that list = Introducing images \ Here is an example of a figure introduced into the document. It took a picture of one of my favorite video game: _Tranistor_ @transistor2014. The main character, Red (as shown on @Red), is a singer who lost her voice after one of her concert was interrupted by a secret organization trying to murder her. #figure( image( "Images/transistor_game.jpg", width :100%, alt: "Red headed woman in modern clothes with holding a sort of glimmering sword. abstract background mixing smoke, geometrical forms and warm colours" ), caption: [ Red, the main character of the _Transistor_ video game ] )<Red> = Doing Math \ Let's jump right into it with one of my favorite equations: $ (a+b)^n = sum_(k=0)^n binom(n,k) a^k times b^(n-k) $ <newtonEquation> The @newtonEquation is one of the most fundamental equations of algebra: Newton's Binomial Theorem. = Talking about code \ Since I'm a computer scientist, I just cannot make an example document without putting some examples of code listing in it! So let's see what it would look like. \ \ A main function for a Java class: ```java public static void main(String [] args){ System.out.println("Hello World!"); } ``` \ Yeah, I know Java is so lame. Why not coding in C! ```c int main(int argc, char** argv){ int i = 0; for (int i; i<argc; ++i){ printf("arg n°%i : \"%s\"",i,argv[i]); } return 0; } ``` = And finally, the good old Lorem Ipsum \ Because without it, a template document wouldn't be a good template document... \ #lorem(100) #bibliography("Bibliography/ArticleBibliography.bib",title: "Biblio")
https://github.com/jgm/typst-hs
https://raw.githubusercontent.com/jgm/typst-hs/main/test/typ/compiler/import-21.typ
typst
Other
// Error: 2:2 expected semicolon or line break #import "module.typ "stuff
https://github.com/TypstApp-team/typst
https://raw.githubusercontent.com/TypstApp-team/typst/master/tests/typ/layout/par-bidi.typ
typst
Apache License 2.0
// Test bidirectional text and language configuration. --- // Test reordering with different top-level paragraph directions. #let content = par[Text טֶקסט] #text(lang: "he", content) #text(lang: "de", content) --- // Test that consecutive, embedded LTR runs stay LTR. // Here, we have two runs: "A" and italic "B". #let content = par[أنت A#emph[B]مطرC] #set text(font: ("PT Sans", "Noto Sans Arabic")) #text(lang: "ar", content) #text(lang: "de", content) --- // Test that consecutive, embedded RTL runs stay RTL. // Here, we have three runs: "גֶ", bold "שֶׁ", and "ם". #let content = par[Aגֶ#strong[שֶׁ]םB] #set text(font: ("Linux Libertine", "Noto Serif Hebrew")) #text(lang: "he", content) #text(lang: "de", content) --- // Test embedding up to level 4 with isolates. #set text(dir: rtl) א\u{2066}A\u{2067}Bב\u{2069}? --- // Test hard line break (leads to two paragraphs in unicode-bidi). #set text(lang: "ar", font: ("Noto Sans Arabic", "PT Sans")) Life المطر هو الحياة \ الحياة تمطر is rain. --- // Test spacing. L #h(1cm) ריווחR \ Lריווח #h(1cm) R --- // Test inline object. #set text(lang: "he") קרנפיםRh#box(image("/files/rhino.png", height: 11pt))inoחיים --- // Test whether L1 whitespace resetting destroys stuff. الغالب #h(70pt) ن#" "ة --- // Test setting a vertical direction. // Ref: false // Error: 16-19 text direction must be horizontal #set text(dir: ttb)
https://github.com/El-Naizin/cv
https://raw.githubusercontent.com/El-Naizin/cv/main/modules/professional.typ
typst
Apache License 2.0
#import "../brilliant-CV/template.typ": * #cvSection("Professional Experience") #cvEntry( title: [Director of Data Science], society: [XYZ Corporation], logo: "../src/logos/xyz_corp.png", date: [2020 - Present], location: [San Francisco, CA], description: list( [Lead a team of data scientists and analysts to develop and implement data-driven strategies, develop predictive models and algorithms to support decision-making across the organization], [Collaborate with executive leadership to identify business opportunities and drive growth, implement best practices for data governance, quality, and security], ) ) #cvEntry( title: [Data Analyst], society: [ABC Company], logo: "../src/logos/abc_company.png", date: [2017 - 2020], location: [New York, NY], description: list( [Analyze large datasets using SQL and Python, collaborate with cross-functional teams to identify business insights], [Create data visualizations and dashboards using Tableau, develop and maintain data pipelines using AWS], ) ) #cvEntry( title: [Data Analysis Intern], society: [PQR Corporation], logo: "../src/logos/pqr_corp.png", date: [Summer 2017], location: [Chicago, IL], description: list( [Assisted with data cleaning, processing, and analysis using Python and Excel, participated in team meetings and contributed to project planning and execution], [Developed data visualizations and reports to communicate insights to stakeholders, collaborated with other interns and team members to complete projects on time and with high quality], ) )
https://github.com/bpkleer/typst-academicons
https://raw.githubusercontent.com/bpkleer/typst-academicons/main/lib-impl.typ
typst
MIT License
/// Render an Academicons icon by its name or unicode
///
/// Parameters:
/// - `name`: The name of the icon
///   - This can be the name as a string or the unicode of the icon
/// - `ai-icon-map`: The map of icon names to unicode
///   - Default is a map generated from Academicons metadata
///   - *Not recommended* You can provide your own map to override it
/// - `..args`: Additional arguments to pass to the `text` function
///
/// Returns: The rendered icon as a `text` element
#let ai-icon(
  name,
  ai-icon-map: (:),
  ..args,
) = {
  text(
    font: ("Academicons"),
    // TODO: We might need to check whether this is needed
    weight: { 400 },
    // If the name is in the map, use the unicode from the map
    // If not, pass the name and let the ligature feature handle it
    ai-icon-map.at(name, default: name),
    ..args,
  )
}
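A usage sketch for `ai-icon`; the relative import path and the icon names are assumptions, and the "Academicons" font must be installed so the ligature fallback can resolve names not found in the map:

```typst
// A sketch, not part of the file above; icon names are assumed to exist
// in the Academicons set.
#import "lib-impl.typ": ai-icon

Find my papers on #ai-icon("google-scholar") Google Scholar, or read the
preprints on #ai-icon("arxiv", fill: red) arXiv.
```

Extra named arguments such as `fill` are simply forwarded to `text` via `..args`, which is why no explicit styling parameters are needed in the function itself.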
https://github.com/voidiz/typst-liuthesis
https://raw.githubusercontent.com/voidiz/typst-liuthesis/master/README.md
markdown
MIT License
# typst-liuthesis A [Typst](https://typst.app/) template for writing a bachelor's or master's thesis at Linköping University (LiU). Based on [liuthesis](https://gitlab.liu.se/olale55/liuthesis), and specifically `demo_student_thesis.tex`. I made this for my master's thesis, so it does not support all features of `liuthesis` and may contain a number of bugs. ## Usage Place this repo in a folder called `typst-liuthesis` in your Typst project. The template can then be used as follows. See `template.typ` for an explanation of each parameter. ```typst #import "typst-liuthesis/template.typ": * #show: liu.with( title: [LiU Thesis], subtitle: [A thesis template for Linköping University.], titleSwedish: [LiU Thesis], subtitleSwedish: [En mall för uppsatser vid Linköpings universitet.], authors: ( "An Author", "Another Author" ), abstract: [This is the abstract.], bibliography-file: "references.bib", supervisor: "My Supervisor", examiner: "My Examiner", ) ```
https://github.com/dalon-work/aoc2023
https://raw.githubusercontent.com/dalon-work/aoc2023/master/aoc.typ
typst
#let day7() = { [= Day 7] let base13 = "23456789TJQKA" let convert_cc(string) = { let rev_string = string.rev() let count = (0,0,0,0,0,0,0,0,0,0,0,0,0) let decimal = 0 let c = 1 for s in rev_string { let p = base13.position(s) [ p ] count.at(p) += 1 let b13 = c * p decimal += b13 c *= 13 } if count.any( x => x == 5 ) { // 5 of a kind decimal += c * 6 } else if count.any( x => x == 4 ) { // 4 of a kind decimal += c * 5 } else if count.any( x => x == 3 ) { if count.any( x => x == 2 ) { // full house decimal += c * 4 } else { // 3 of a kind decimal += c * 3 } } else if count.any( x => x == 2 ) { let pair_count = count.filter( x => x == 2 ).len() if pair_count == 2 { // two pairs decimal += c * 2 } else { // 2 of a kind decimal += c * 1 } } return decimal } let cards = read("data/day7.txt").split("\n").slice(0,-1).map( x => x.split() ).map( x => (x.at(0), convert_cc(x.at(0)), int(x.at(1))) ) let sorted_cards = cards.sorted( key: x => x.at(1) ) let part1 = 0 for (i,c) in sorted_cards.enumerate() { part1 += (i+1) * c.at(2) } [ #part1 ] } #let day6() = { [= Day 6] let times1 = (42, 89, 91, 89) let dists1 = (308, 1170, 1291, 1467) // Part 2 let times2 = (42899189,) let dists2 = (308117012911467,) // Example //let times = (7, 15, 30) //let dists = (8, 40, 200) let ways(times, dists) = { let ranges = () for (T, D) in times.zip(dists) { let zero_n = calc.floor( (T - calc.sqrt(calc.pow(T,2) - 4*D))/2 + 1) let zero_p = calc.ceil( (T + calc.sqrt(calc.pow(T,2) - 4*D))/2 - 1 ) let valid_range = zero_p - zero_n + 1 ranges.push( valid_range ) } return ranges.product() } [ Part 1: #ways(times1, dists1) \ ] [ Part 2: #ways(times2, dists2) \ ] } #let day5() = { [= Day 5] let sections = read("data/day5.txt").split("\n\n") let preprocess(array) = { array.split("\n").slice(1).filter( s => s.len() > 0 ).map( s => s.split().map(int)).sorted( key: a => a.at(1) ) } let seed_to_soil = preprocess(sections.at(1)) let soil_to_fert = preprocess(sections.at(2)) let fert_to_watr = preprocess(sections.at(3)) let watr_to_lght = preprocess(sections.at(4)) let lght_to_temp = preprocess(sections.at(5)) let temp_to_hmty = preprocess(sections.at(6)) let hmty_to_locn = preprocess(sections.at(7)) let dest(src, ranges) = { let dst = src let i = 0 for (dst_start, src_start, rlen) in ranges { if src_start <= src and src < src_start + rlen { return dst_start + (src - src_start) } } return dst } let seeds = sections.at(0).split().slice(1).map(int) let locs = () for seed in seeds { let soil = dest(seed, seed_to_soil) let fert = dest(soil, soil_to_fert) let watr = dest(fert, fert_to_watr) let lght = dest(watr, watr_to_lght) let temp = dest(lght, lght_to_temp) let hmty = dest(temp, temp_to_hmty) let locn = dest(hmty, hmty_to_locn) locs.push(locn) } [ Part 1: #calc.min(..locs) \ ] let dest_part2(src, ranges) = { let dst = ( src, calc.pow(2,62) ) let i = 0 for (dst_start, src_start, rlen) in ranges { if src_start <= src and src < src_start + rlen { dst.at(0) = dst_start + (src - src_start) dst.at(1) = (src_start + rlen) - src return dst } } return dst } let seed_range_idx = 0 let locs = () while seed_range_idx < seeds.len() { let test_seed = seeds.at(seed_range_idx) let seed_range_end = test_seed + seeds.at(seed_range_idx+1) seed_range_idx += 2 while test_seed < seed_range_end { let (soil, soil_range) = dest_part2(test_seed, seed_to_soil) let (fert, fert_range) = dest_part2(soil, soil_to_fert) let (watr, watr_range) = dest_part2(fert, fert_to_watr) let (lght, lght_range) = dest_part2(watr, watr_to_lght) let (temp, temp_range) = 
dest_part2(lght, lght_to_temp) let (hmty, hmty_range) = dest_part2(temp, temp_to_hmty) let (locn, locn_range) = dest_part2(hmty, hmty_to_locn) locs.push(locn) test_seed += calc.min(soil_range, fert_range, watr_range, lght_range, temp_range, hmty_range, locn_range) } } [ Part 2: #calc.min(..locs) ] } #let day4() = { [= Day 4] let cards = read("data/day4.txt").split("\n").slice(0,-1) let card_sum = 0 for card in cards { let points = 0 let (ws, hs) = card.split(":").at(1).split("|") let winning = ws.split() let have = hs.split() for h in have { if winning.contains(h) { if points == 0 { points = 1 } else { points = points + points } } } card_sum += points } [Part 1: #card_sum \ ] let copies = (1,) for c in cards.slice(0,-1) { copies.push(1) } for (i,c) in cards.enumerate() { let matches = 0 let (ws, hs) = c.split(":").at(1).split("|") let winning = ws.split() let have = hs.split() for h in have { if winning.contains(h) { matches += 1 } } let j = 0; while j < matches { copies.at(i+j+1) += copies.at(i) j += 1 } } [Part 2: #copies.sum()] } #let day2() = { [= Day 2] let games = read("data/day2.txt").split("\n").slice(0,-1) let game_regex = regex("(\d*) (red|green|blue)") let max_cubes = ( red : 12, green : 13, blue : 14,) let game_part1_is_valid(game) = { for m in game.matches(game_regex) { if int(m.captures.at(0)) > max_cubes.at(m.captures.at(1)) { return false } } return true } [Part 1: #games.filter(game_part1_is_valid).map(game => int(game.split(":").at(0).split(" ").at(1)) ).sum()\ ] let game_part2_power(game) = { let cubes = (red : (0,), green : (0,), blue : (0,),) for m in game.matches(game_regex) { cubes.at( m.captures.at(1) ).push( int(m.captures.at(0)) ) } return cubes.values().map(a => calc.max(..a) ).product() } [Part 2: #games.map(game_part2_power).sum()\ ] } #let day1() = { [= Day 1] // We get an empty string at the end for some reason let lines = read("data/day1.txt").split("\n").slice(0,-1) let line_part1(line) = { let first_digit = line.find(regex("\d")) let last_digit = line.rev().find(regex("\d")) int( first_digit + last_digit ) } [Part 1: #lines.map(line_part1).sum()\ ] let words = ( one: "1", two: "2", three: "3", four: "4", five: "5", six: "6", seven: "7", eight: "8", nine: "9",) let all_numbers = words for (key, value) in words { all_numbers.insert(key.rev(), value) all_numbers.insert(value, value) } let key_regex = regex( (words.keys() + ("\d",)).join("|") ) let rev_key_regex = regex( (words.keys().map(str.rev) + ("\d",)).join("|") ) let line_part2(line) = { let first_digit = all_numbers.at( line.match(key_regex).text ) let last_digit = all_numbers.at( line.rev().match(rev_key_regex).text ) int( first_digit + last_digit ) } [Part 2: #lines.map(line_part2).sum()\ ] } #day1() #day2() #day4() #day5() #day6() #day7()
https://github.com/frectonz/the-pg-book
https://raw.githubusercontent.com/frectonz/the-pg-book/main/book/211.%20users.html.typ
typst
users.html What I've Learned from Users September 2022 I recently told applicants to Y Combinator that the best advice I could give for getting in, per word, was _Explain what you've learned from users_. That tests a lot of things: whether you're paying attention to users, how well you understand them, and even how much they need what you're making. Afterward I asked myself the same question. What have I learned from YC's users, the startups we've funded? The first thing that came to mind was that most startups have the same problems. No two have exactly the same problems, but it's surprising how much the problems remain the same, regardless of what they're making. Once you've advised 100 startups all doing different things, you rarely encounter problems you haven't seen before. This fact is one of the things that makes YC work. But I didn't know it when we started YC. I only had a few data points: our own startup, and those started by friends. It was a surprise to me how often the same problems recur in different forms. Many later stage investors might never realize this, because later stage investors might not advise 100 startups in their whole career, but a YC partner will get this much experience in the first year or two. That's one advantage of funding large numbers of early stage companies rather than smaller numbers of later-stage ones. You get a lot of data. Not just because you're looking at more companies, but also because more goes wrong. But knowing (nearly) all the problems startups can encounter doesn't mean that advising them can be automated, or reduced to a formula. There's no substitute for individual office hours with a YC partner. Each startup is unique, which means they have to be advised by specific partners who know them well. [1] We learned that the hard way, in the notorious "batch that broke YC" in the summer of 2012. Up till that point we treated the partners as a pool. When a startup requested office hours, they got the next available slot posted by any partner. That meant every partner had to know every startup. This worked fine up to 60 startups, but when the batch grew to 80, everything broke. The founders probably didn't realize anything was wrong, but the partners were confused and unhappy because halfway through the batch they still didn't know all the companies yet. [2] At first I was puzzled. How could things be fine at 60 startups and broken at 80? It was only a third more. Then I realized what had happened. We were using an O(n²) algorithm. So of course it blew up. The solution we adopted was the classic one in these situations. We sharded the batch into smaller groups of startups, each overseen by a dedicated group of partners. That fixed the problem, and has worked fine ever since. But the batch that broke YC was a powerful demonstration of how individualized the process of advising startups has to be. Another related surprise is how bad founders can be at realizing what their problems are. Founders will sometimes come in to talk about some problem, and we'll discover another much bigger one in the course of the conversation. For example (and this case is all too common), founders will come in to talk about the difficulties they're having raising money, and after digging into their situation, it turns out the reason is that the company is doing badly, and investors can tell. Or founders will come in worried that they still haven't cracked the problem of user acquisition, and the reason turns out to be that their product isn't good enough.
There have been times when I've asked "Would you use this yourself, if you hadn't built it?" and the founders, on thinking about it, said "No." Well, there's the reason you're having trouble getting users. Often founders know what their problems are, but not their relative importance. [3] They'll come in to talk about three problems they're worrying about. One is of moderate importance, one doesn't matter at all, and one will kill the company if it isn't addressed immediately. It's like watching one of those horror movies where the heroine is deeply upset that her boyfriend cheated on her, and only mildly curious about the door that's mysteriously ajar. You want to say: never mind about your boyfriend, think about that door! Fortunately in office hours you can. So while startups still die with some regularity, it's rarely because they wandered into a room containing a murderer. The YC partners can warn them where the murderers are. Not that founders listen. That was another big surprise: how often founders don't listen to us. A couple weeks ago I talked to a partner who had been working for YC for a couple batches and was starting to see the pattern. "They come back a year later," she said, "and say 'We wish we'd listened to you.'" It took me a long time to figure out why founders don't listen. At first I thought it was mere stubbornness. That's part of the reason, but another and probably more important reason is that so much about startups is counterintuitive. And when you tell someone something counterintuitive, what it sounds to them is wrong. So the reason founders don't listen to us is that they don't believe us. At least not till experience teaches them otherwise. [4] The reason startups are so counterintuitive is that they're so different from most people's other experiences. No one knows what it's like except those who've done it. Which is why YC partners should usually have been founders themselves. But strangely enough, the counterintuitiveness of startups turns out to be another of the things that make YC work. If it weren't counterintuitive, founders wouldn't need our advice about how to do it. Focus is doubly important for early stage startups, because not only do they have a hundred different problems, they don't have anyone to work on them except the founders. If the founders focus on things that don't matter, there's no one focusing on the things that do. So the essence of what happens at YC is to figure out which problems matter most, then cook up ideas for solving them — ideally at a resolution of a week or less — and then try those ideas and measure how well they worked. The focus is on action, with measurable, near-term results. This doesn't imply that founders should rush forward regardless of the consequences. If you correct course at a high enough frequency, you can be simultaneously decisive at a micro scale and tentative at a macro scale. The result is a somewhat winding path, but executed very rapidly, like the path a running back takes downfield. And in practice there's less backtracking than you might expect. Founders usually guess right about which direction to run in, especially if they have someone experienced like a YC partner to bounce their hypotheses off. And when they guess wrong, they notice fast, because they'll talk about the results at office hours the next week.
[5] A small improvement in navigational ability can make you a lot faster, because it has a double effect: the path is shorter, and you can travel faster along it when you're more certain it's the right one. That's where a lot of YC's value lies, in helping founders get an extra increment of focus that lets them move faster. And since moving fast is the essence of a startup, YC in effect makes startups more startup-like. Speed defines startups. Focus enables speed. YC improves focus. Why are founders uncertain about what to do? Partly because startups almost by definition are doing something new, which means no one knows how to do it yet, or in most cases even what "it" is. Partly because startups are so counterintuitive generally. And partly because many founders, especially young and ambitious ones, have been trained to win the wrong way. That took me years to figure out. The educational system in most countries trains you to win by hacking the test instead of actually doing whatever it's supposed to measure. But that stops working when you start a startup. So part of what YC does is to retrain founders to stop trying to hack the test. (It takes a surprisingly long time. A year in, you still see them reverting to their old habits.) YC is not simply more experienced founders passing on their knowledge. It's more like specialization than apprenticeship. The knowledge of the YC partners and the founders has different shapes: It wouldn't be worthwhile for a founder to acquire the encyclopedic knowledge of startup problems that a YC partner has, just as it wouldn't be worthwhile for a YC partner to acquire the depth of domain knowledge that a founder has. That's why it can still be valuable for an experienced founder to do YC, just as it can still be valuable for an experienced athlete to have a coach. The other big thing YC gives founders is colleagues, and this may be even more important than the advice of partners. If you look at history, great work clusters around certain places and institutions: Florence in the late 15th century, the University of Göttingen in the late 19th, The New Yorker under Ross, Bell Labs, Xerox PARC. However good you are, good colleagues make you better. Indeed, very ambitious people probably need colleagues more than anyone else, because they're so starved for them in everyday life. Whether or not YC manages one day to be listed alongside those famous clusters, it won't be for lack of trying. We were very aware of this historical phenomenon and deliberately designed YC to be one. By this point it's not bragging to say that it's the biggest cluster of great startup founders. Even people trying to attack YC concede that. Colleagues and startup founders are two of the most powerful forces in the world, so you'd expect it to have a big effect to combine them. Before YC, to the extent people thought about the question at all, most assumed they couldn't be combined — that loneliness was the price of independence. That was how it felt to us when we started our own startup in Boston in the 1990s. We had a handful of older people we could go to for advice (of varying quality), but no peers. There was no one we could commiserate with about the misbehavior of investors, or speculate with about the future of technology.
I often tell founders to make something they themselves want, and YC is certainly that: it was designed to be exactly what we wanted when we were starting a startup. One thing we wanted was to be able to get seed funding without having to make the rounds of random rich people. That has become a commodity now, at least in the US. But great colleagues can never become a commodity, because the fact that they cluster in some places means they're proportionally absent from the rest. Something magical happens where they do cluster though. The energy in the room at a YC dinner is like nothing else I've experienced. We would have been happy just to have one or two other startups to talk to. When you have a whole roomful it's another thing entirely. YC founders aren't just inspired by one another. They also help one another. That's the happiest thing I've learned about startup founders: how generous they can be in helping one another. We noticed this in the first batch and consciously designed YC to magnify it. The result is something far more intense than, say, a university. Between the partners, the alumni, and their batchmates, founders are surrounded by people who want to help them, and can. Notes [1] This is why I've never liked it when people refer to YC as a "bootcamp." It's intense like a bootcamp, but the opposite in structure. Instead of everyone doing the same thing, they're each talking to YC partners to figure out what their specific startup needs. [2] When I say the summer 2012 batch was broken, I mean it felt to the partners that something was wrong. Things weren't yet so broken that the startups had a worse experience. In fact that batch did unusually well. [3] This situation reminds me of the research showing that people are much better at answering questions than they are at judging how accurate their answers are. The two phenomena feel very similar. [4] The Airbnbs were particularly good at listening — partly because they were flexible and disciplined, but also because they'd had such a rough time during the preceding year. They were ready to listen. [5] The optimal unit of decisiveness depends on how long it takes to get results, and that depends on the type of problem you're solving. When you're negotiating with investors, it could be a couple days, whereas if you're building hardware it could be months. Thanks to <NAME>, <NAME>, <NAME>, and <NAME> for reading drafts of this.
https://github.com/HiiGHoVuTi/requin
https://raw.githubusercontent.com/HiiGHoVuTi/requin/main/math/series.typ
typst
#import "../lib.typ": * #show heading: heading_fct #import "@preview/gloss-awe:0.0.5": gls #show figure.where(kind: "jkrb_glossary"): it => {it.body} _"A generating function is a clothesline on which we hang up a sequence of numbers for display."_ #align(right, "- <NAME>") Si $u in AA^NN$ est une suite à valeurs dans un anneau $AA$, on peut définir une _série formelle_ $S in AA[[X]]$ comme $S := sum u_n X^n$. On utilise alors la somme terme-à-terme et le produit de Cauchy pour munir $AA[[X]]$ d'une structure d'anneau. On a $AA[X] tilde.equiv AA^NN$. On munit aussi $AA[[X]]$ de la composition et de la dérivation formelle. On notera $AA((X))$ le _corps des fractions_ de $AA[[X]]$ si $AA$ est un corps. === Une mise en bouche Dans cette partie, on choisit $AA := CC$. Soit $F in CC((X))$ la série formelle associée à la suite de Fibonacci (l'unique suite $f$ telle que $f_0 = 0$, $f_1 = 1$ et $f_(n+2)=f_(n+1)+f_n$). #question(0)[Calculer $F + X F$.] #question(1)[En déduire $F = (-X)/(X^2 + X - 1).$] #question(1)[En faisant un développement en éléments simples, en déduire la _formule de Binet_ donnant explicitement $f_n$.] === `ADT`, ou l'anneau des types _À partir de cette partie, on travaille à la bijection près, mais sans s'autoriser à dénombrer les types polymorphes (du type `'a` en `OCaml`)._ On considère ici l'ensemble des types `OCaml`, qu'on notera `Type`. On munit `Type` des deux lois de composition internes suivantes #align(center, grid(columns: (1fr, 1fr), [- `type a+b = A of a | B of b`], [- `type a*b = AB of a * b`] )) #question(0)[Montrer que `int * (float + string) = int * float + int * string`.] #question(1)[Montrer que l'on a muni `Type` d'une structure d'anneau.] #question(0)[Définir le type `'a list` en `OCaml`.] On note $1$ le type à un seul élément. #question(1)[En déduire une équation vérifiée par $"list"(alpha)$.] #question(1)[Résoudre cette équation dans $CC$. En déduire une expression de $"list"(alpha)$ sous forme de série.] #question(0)[Interpréter ce résultat. Est-il cohérent ?] === Classes combinatoires Une #gls(entry: "Classe combinatoire")[_classe combinatoire_] est un ensemble $cal(C)$ muni d'une fonction $"taille" = |dot| : cal(C) --> NN$ telle que pour tout $n in NN$, $"taille"^(-1)(n)$ est un ensemble fini. On la munit d'une suite $c_n := |"taille"^(-1)(n)|$ (lettre minuscule) et de la série formelle $C_n in CC((X))$ (lettre majuscule droite). On introduit les classes #align(center, grid(columns: (1fr, 1fr, 1fr), [- $cal(E) = emptyset$], [- $cal(Z) = {circle.filled}$, $|circle.filled|=1$], [- $cal(N) = NN$, $|n| = n$] )) #question(0)[Déterminer $E$, $Z$ et $N$.] On munit les classes combinatoires d'une structures d'anneau grâce à leurs séries formelles. On identifiera $k in NN$ et son équivalent dans l'anneau ainsi défini. #question(1)[Comment interpréter les objets de $cal(A+B)$ ? De $cal(A times B)$ ?] On pose $cal(P) := cal(N times N)$. #question(2)[Déterminer combien il existe de paires d'entiers naturels dont la somme vaut $n$.] #question(0)[Par analogie à un type `OCaml`, définir $cal(T)$ la classe combinatoire des arbres binaires.] #question(2)[En déduire le nombre d'arbres binaires à $n$ noeuds.] On introduit $compose$ l'opérateur de composition de séries formelles, étendu aux classes combinatoires. #question(1)[Que représente la classe combinatoire $cal(N) compose cal(C)$ pour $cal(C)$ quelconque ?] 
Considering that $cal(N)$ is the class of finitely supported sequences of indistinguishable elements,
#question(1)[Deduce the meaning of $cal(A compose B)$ in general.]

Let $cal(B)$ be a combinatorial class, and $"MSet"(cal(B))$ the combinatorial class of multisets of elements of $cal(B)$.

#question(3)[Show that the series associated with $"MSet"(cal(B))$ is $ exp( sum_(k=1)^oo B(z^k)/k ) $]

#correct[
  Each element of $cal(B)$ can appear an arbitrary number of times in $"MSet"(cal(B))$.
  $ "MSet"(cal(B)) = product_(k in NN) cal(N) compose b_k $
  ... to be finished.
]
https://github.com/ClazyChen/Table-Tennis-Rankings
https://raw.githubusercontent.com/ClazyChen/Table-Tennis-Rankings/main/history_CN/2019/WS-10.typ
typst
#set text(font: ("Courier New", "NSimSun")) #figure( caption: "Women's Singles (1 - 32)", table( columns: 4, [排名], [运动员], [国家/地区], [积分], [1], [孙颖莎], [CHN], [3413], [2], [陈梦], [CHN], [3405], [3], [朱雨玲], [MAC], [3360], [4], [刘诗雯], [CHN], [3350], [5], [丁宁], [CHN], [3272], [6], [王曼昱], [CHN], [3257], [7], [陈幸同], [CHN], [3224], [8], [王艺迪], [CHN], [3097], [9], [伊藤美诚], [JPN], [3094], [10], [#text(gray, "木子")], [CHN], [3091], [11], [何卓佳], [CHN], [3083], [12], [石川佳纯], [JPN], [3068], [13], [平野美宇], [JPN], [3062], [14], [孙铭阳], [CHN], [3060], [15], [#text(gray, "武杨")], [CHN], [3058], [16], [冯亚兰], [CHN], [3050], [17], [顾玉婷], [CHN], [3045], [18], [早田希娜], [JPN], [3010], [19], [加藤美优], [JPN], [3003], [20], [#text(gray, "刘高阳")], [CHN], [2992], [21], [金宋依], [PRK], [2974], [22], [#text(gray, "胡丽梅")], [CHN], [2972], [23], [韩莹], [GER], [2957], [24], [傅玉], [POR], [2953], [25], [长崎美柚], [JPN], [2949], [26], [佐藤瞳], [JPN], [2949], [27], [冯天薇], [SGP], [2924], [28], [#text(gray, "陈可")], [CHN], [2918], [29], [李倩], [POL], [2911], [30], [张瑞], [CHN], [2899], [31], [倪夏莲], [LUX], [2897], [32], [李佳燚], [CHN], [2895], ) )#pagebreak() #set text(font: ("Courier New", "NSimSun")) #figure( caption: "Women's Singles (33 - 64)", table( columns: 4, [排名], [运动员], [国家/地区], [积分], [33], [张蔷], [CHN], [2892], [34], [于梦雨], [SGP], [2884], [35], [刘炜珊], [CHN], [2880], [36], [CHA Hyo Sim], [PRK], [2878], [37], [杜凯琹], [HKG], [2876], [38], [木原美悠], [JPN], [2869], [39], [伯纳黛特 斯佐科斯], [ROU], [2869], [40], [桥本帆乃香], [JPN], [2858], [41], [芝田沙季], [JPN], [2853], [42], [LIU Xi], [CHN], [2852], [43], [车晓曦], [CHN], [2846], [44], [安藤南], [JPN], [2845], [45], [石洵瑶], [CHN], [2832], [46], [钱天一], [CHN], [2826], [47], [#text(gray, "GU Ruochen")], [CHN], [2823], [48], [伊丽莎白 萨玛拉], [ROU], [2815], [49], [KIM Nam Hae], [PRK], [2812], [50], [李洁], [NED], [2810], [51], [徐孝元], [KOR], [2808], [52], [田志希], [KOR], [2808], [53], [#text(gray, "侯美玲")], [TUR], [2806], [54], [妮娜 米特兰姆], [GER], [2800], [55], [杨晓欣], [MON], [2799], [56], [刘斐], [CHN], [2794], [57], [郑怡静], [TPE], [2791], [58], [陈思羽], [TPE], [2787], [59], [<NAME> Daniela], [ROU], [2784], [60], [崔孝珠], [KOR], [2782], [61], [MATSUDAIRA Shiho], [JPN], [2781], [62], [EKHOLM Matilda], [SWE], [2765], [63], [#text(gray, "李芬")], [SWE], [2760], [64], [范思琦], [CHN], [2759], ) )#pagebreak() #set text(font: ("Courier New", "NSimSun")) #figure( caption: "Women's Singles (65 - 96)", table( columns: 4, [排名], [运动员], [国家/地区], [积分], [65], [李皓晴], [HKG], [2758], [66], [单晓娜], [GER], [2754], [67], [PESOTSKA Margaryta], [UKR], [2746], [68], [索菲亚 波尔卡诺娃], [AUT], [2742], [69], [李佼], [NED], [2738], [70], [森樱], [JPN], [2736], [71], [金河英], [KOR], [2731], [72], [邵杰妮], [POR], [2730], [73], [BILENKO Tetyana], [UKR], [2721], [74], [小盐遥菜], [JPN], [2713], [75], [POTA Georgina], [HUN], [2712], [76], [佩特丽莎 索尔佳], [GER], [2709], [77], [浜本由惟], [JPN], [2708], [78], [LIU Hsing-Yin], [TPE], [2706], [79], [SOO Wai Yam Minnie], [HKG], [2701], [80], [李恩惠], [KOR], [2698], [81], [布里特 伊尔兰德], [NED], [2697], [82], [曾尖], [SGP], [2696], [83], [LIU Xin], [CHN], [2695], [84], [阿德里安娜 迪亚兹], [PUR], [2692], [85], [大藤沙月], [JPN], [2686], [86], [GRZYBOWSKA-FRANC Katarzyna], [POL], [2685], [87], [#text(gray, "MATSUZAWA Marina")], [JPN], [2680], [88], [#text(gray, "LI Jiayuan")], [CHN], [2677], [89], [梁夏银], [KOR], [2676], [90], [MATELOVA Hana], [CZE], [2675], [91], [李时温], [KOR], [2675], [92], [#text(gray, "LANG Kristin")], [GER], [2662], [93], [MAEDA Miyu], [JPN], [2661], [94], [CHENG Hsien-Tzu], [TPE], [2660], [95], [#text(gray, "HUANG Yingqi")], [CHN], [2657], [96], [MIKHAILOVA Polina], 
[RUS], [2655], ) )#pagebreak() #set text(font: ("Courier New", "NSimSun")) #figure( caption: "Women's Singles (97 - 128)", table( columns: 4, [排名], [运动员], [国家/地区], [积分], [97], [张安], [USA], [2654], [98], [张墨], [CAN], [2654], [99], [朱成竹], [HKG], [2654], [100], [#text(gray, "<NAME>")], [JPN], [2650], [101], [<NAME>], [HUN], [2643], [102], [刘佳], [AUT], [2642], [103], [YOO Eunchong], [KOR], [2637], [104], [WINTER Sabine], [GER], [2635], [105], [KIM Byeolnim], [KOR], [2635], [106], [边宋京], [PRK], [2633], [107], [SOMA Yumeno], [JPN], [2630], [108], [SAWETTABUT Suthasini], [THA], [2630], [109], [SHIOMI Maki], [JPN], [2626], [110], [#text(gray, "森田美咲")], [JPN], [2622], [111], [申裕斌], [KOR], [2619], [112], [WU Yue], [USA], [2618], [113], [BALAZOVA Barbora], [SVK], [2616], [114], [玛妮卡 巴特拉], [IND], [2613], [115], [#text(gray, "NARUMOTO Ayami")], [JPN], [2613], [116], [SUN Jiayi], [CRO], [2612], [117], [YOON Hyobin], [KOR], [2611], [118], [高桥 布鲁娜], [BRA], [2605], [119], [维多利亚 帕芙洛维奇], [BLR], [2600], [120], [#text(gray, "PARK Joohyun")], [KOR], [2600], [121], [#text(gray, "KATO Kyoka")], [JPN], [2594], [122], [#text(gray, "KIM Youjin")], [KOR], [2592], [123], [琳达 伯格斯特罗姆], [SWE], [2591], [124], [TAILAKOVA Mariia], [RUS], [2586], [125], [HUANG Yi-Hua], [TPE], [2577], [126], [王 艾米], [USA], [2576], [127], [LI Xiang], [ITA], [2570], [128], [郭雨涵], [CHN], [2568], ) )
https://github.com/kotfind/typst_task
https://raw.githubusercontent.com/kotfind/typst_task/master/example/first_tour.typ
typst
#import "../task.typ": tour, task #show: tour.with( title: [First Tour] ) #task[Task A][ Condition A ][ Solution A Answer. A 42 ] #task[Task B][ Condition B ][ Solution B Answer. B 42 ]
https://github.com/sses7757/sustech-graduated-thesis
https://raw.githubusercontent.com/sses7757/sustech-graduated-thesis/main/sustech-graduated-thesis/pages/decl-page.typ
typst
Apache License 2.0
#import "../utils/indent.typ": indent #import "../utils/style.typ": 字号, 字体 // 研究生声明页 #let decl-page( anonymous: false, twoside: false, fonts: (:), ) = { // 0. 如果需要匿名则短路返回 if anonymous { return } // 1. 默认参数 fonts = 字体 + fonts // 2. 正式渲染 pagebreak(weak: true, to: if twoside { "odd" }) v(25pt) align( center, text( font: fonts.黑体, size: 字号.四号, weight: "bold", "南方科技大学学位论文原创性声明", ), ) v(46pt) block[ #set text(font: fonts.宋体, size: 字号.小四) #set par(justify: true, first-line-indent: 2em, leading: 1.2em) #indent 本人郑重声明,所提交的学位论文是本人在导师指导下独立进行科学研究工作所取得的成果。除本论文中已经注明引用的内容外,本论文不包含其他个人或集体已经发表或撰写过的研究成果,也不包含为获得南方科技大学或其他教育机构的学位证书而使用过的材料。对本文的研究做出重要贡献的个人和集体,均已在论文的致谢部分明确标明。本人郑重申明愿承担本声明的法律责任。 ] v(143pt) align(right)[ #set text(font: fonts.黑体, size: 字号.小四) 研究生签名:#h(5.8em) 日期:#h(5.8em) ] }
https://github.com/renyelin/-.zip
https://raw.githubusercontent.com/renyelin/-.zip/main/msc-tech%26algo.typ
typst
#align(center)[Tech Department Recruitment Mock Test]

@picc Please take a look at the rabbit head first

#figure(
  image("./纳新图/兔头.jpg",height: 40%),
  caption: [
    Follow Tsukino Mito, thanks meow
  ],
  numbering: "1",
)<picc>

== Basic questions
#show link:underline
#block(
  fill: gray,
  width: 100%,
  inset: 5pt,
  outset: 5pt,
  radius: 4pt
)[
  + First, introduce yourself (gender (optional), hobbies, what you expect from the club)
  + Your understanding of programming languages (reference material is provided)
  + Write a bubble sort in any language (Typst's built-in scripting is fine too)
]
#block(
  fill: yellow,
  width: 100%,
  inset: 5pt,
  outset: 3pt,
  radius: 3pt,
)[
  #link("./纳新图/history.pdf")[
    Reference: a history of programming languages
  ]
]

== Harder questions
#show link:underline
#block(
  fill: green,
  width: 100%,
  inset: 5pt,
  outset: 3pt,
  radius: 3pt,
)[
  #set enum(numbering: "1.a)")
  + Build a small game in any language; the options are
    + Classic Snake
    + 2048
    + Tetris
    + flappy bird
  + #link("https://www.luogu.com.cn/training/211")[Learn some dynamic programming], then solve one of the following problems, in any language; the steps must be clear and every function must carry a comment explaining its purpose
    + #link("https://www.luogu.com.cn/problem/P1216")[Number Triangle]
    + #link("https://www.luogu.com.cn/problem/P4017")[Maximum Food Chain Count]
    + #link("https://www.luogu.com.cn/problem/P1049")[Bin Packing]
]
#block(
  fill: yellow,
  width: 100%,
  inset: 5pt,
  outset: 3pt,
  radius: 3pt,
)[
  None of the problems is mandatory
]

== Submission requirements
#block(
  fill: gray,
  width: 100%,
  inset: 5pt,
  outset: 5pt,
  radius: 4pt
)[
  #link("https://github.com/MSC-CQU/MSC-23-New")[Open a pull request on GitHub]
  or send it to the email <EMAIL>
]
https://github.com/typst/packages
https://raw.githubusercontent.com/typst/packages/main/packages/preview/a2c-nums/0.0.1/src/lib.typ
typst
Apache License 2.0
// Convert an int to a Chinese number, e.g. 2024 becomes "二〇二四"
#let int-to-cn-simple-num(n) = {
  let digits = ("〇", "一", "二", "三", "四", "五", "六", "七", "八", "九")
  let s = str(n)
  let result = ""
  for c in s.codepoints() {
    result += digits.at(int(c))
  }
  return result
}

// Convert an int string to a Chinese number, e.g. "2024" becomes "二千零二十四"
#let str-to-cn-num(s) = {
  let digits = ("零", "一", "二", "三", "四", "五", "六", "七", "八", "九")
  let units = ("", "十", "百", "千", "万", "十", "百", "千", "亿", "十", "百", "千")
  let result = ""
  let len = s.len() - 1
  let i = len
  while i >= 0 {
    result = digits.at(int(s.at(i))) + units.at(len - i) + result;
    i -= 1
  }
  for i in (0, 1, 2, 3) {
    result = result.replace("零亿", "亿")
    result = result.replace("零万", "万")
    result = result.replace("零千", "零")
    result = result.replace("零百", "零")
    result = result.replace("零十", "零")
    result = result.replace("零零", "零")
    result = result.replace("亿万", "亿")
  }
  if result.len() > 3 and result.ends-with("零") {
    result = result.trim("零")
  }
  if result.len() == 9 or result.len() == 6 {
    result = result.replace("一十", "十")
  }
  return result
}

// Convert an int to a Chinese number, e.g. 2024 becomes "二千零二十四"
#let int-to-cn-num(n) = {
  let s = str(n)
  return str-to-cn-num(s)
}

// Convert an int string to a Chinese ancient number, e.g. "2024" becomes "贰仟零贰拾肆"
#let str-to-cn-ancient-num(s) = {
  let digits = ("零", "壹", "贰", "叁", "肆", "伍", "陆", "柒", "捌", "玖")
  let units = ("", "拾", "佰", "仟", "万", "拾", "佰", "仟", "亿", "拾", "佰", "仟")
  let result = ""
  let len = s.len() - 1
  let i = len
  while i >= 0 {
    result = digits.at(int(s.at(i))) + units.at(len - i) + result;
    i -= 1
  }
  for i in (0, 1, 2, 3) {
    result = result.replace("零亿", "亿")
    result = result.replace("零万", "万")
    result = result.replace("零仟", "零")
    result = result.replace("零佰", "零")
    result = result.replace("零拾", "零")
    result = result.replace("零零", "零")
    result = result.replace("亿万", "亿")
  }
  if result.len() > 3 and result.ends-with("零") {
    result = result.trim("零")
  }
  if result.len() == 9 or result.len() == 6 {
    result = result.replace("壹拾", "拾")
  }
  return result
}

// Convert an int to a Chinese ancient number, e.g. 2024 becomes "贰仟零贰拾肆"
#let int-to-cn-ancient-num(n) = {
  let s = str(n)
  return str-to-cn-ancient-num(s)
}

// Convert a number to Chinese currency, e.g. 1234.56 becomes "壹仟贰佰叁拾肆元伍角陆分"
#let num-to-cn-currency(n) = {
  let digits = ("零", "壹", "贰", "叁", "肆", "伍", "陆", "柒", "捌", "玖")
  let units = ("角", "分")
  let intpart = ""
  let decimal = ""
  let value = str(calc.round(n, digits: 2))
  let splits = value.split(".")
  intpart = splits.at(0)
  if splits.len() > 1 {
    decimal = splits.at(1)
  }
  let result = ""
  // `decimal` is initialized to an empty string, so compare against "" (the
  // original compared against `none`, which never matched).
  if decimal != "" {
    for (i, c) in decimal.codepoints().enumerate() {
      if i <= 1 {
        result += digits.at(int(c)) + units.at(i)
      }
    }
  }
  result = str-to-cn-ancient-num(intpart) + "元" + result
  return result
}
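// Usage sketch (added; the expected outputs follow directly from the
// conversion comments above):
//
//   #int-to-cn-simple-num(2024)   // "二〇二四"
//   #int-to-cn-num(2024)          // "二千零二十四"
//   #int-to-cn-ancient-num(2024)  // "贰仟零贰拾肆"
//   #num-to-cn-currency(1234.56)  // "壹仟贰佰叁拾肆元伍角陆分"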
https://github.com/saurabtharu/Internship-repo
https://raw.githubusercontent.com/saurabtharu/Internship-repo/main/Internship%20Report%20-%20typst/chapters/00-cert-ack-abhijeet.typ
typst
= MENTOR'S RECOMMENDATION
\
#text(12pt)[
/*
This is to recommend that *Mr. <NAME>* has carried out the internship project entitled “An Internship report on *DevOps Engineer at F1Soft Pvt. Ltd*.” for the fulfillment of the requirement of the Bachelor’s degree of Computer Science and Information Technology(BSc. CSIT) is processed for the evaluation.
*/

This is to recommend that *Mr. <NAME>* has carried out the internship project entitled *“An Internship report on DevOps Engineer at F1Soft Pvt. Ltd.”* in partial fulfillment of the requirements of the Bachelor’s degree of Computer Science and Information Technology (BSc. CSIT), and that the report is hereby forwarded for evaluation.

\ \ \ \ \ \ \ \
......................
\
<NAME> \
Mentor \
F1Soft Pvt. Ltd. \
Pulchowk, Lalitpur
]

/*
#pagebreak()
#align(center,
  image("../images/ACHS.png", height: 20%)
)
*/

#pagebreak()

#align(center,
  image("../images/images.png", height: 20%)
)

= SUPERVISOR’S RECOMMENDATION
\
/*
#text(12pt)[
I hereby recommend that this report has been prepared under my supervision by <NAME> entitled *An Internship Report on DevOps Engineer at F1Soft International Pvt. Ltd.* in partial fulfillment of the requirements for the degree of Bachelor of Science in Computer Science and Information Technology of Tribhuvan University is processed for the evaluation.
*/

#text(12pt)[
I hereby recommend that this report, prepared under my supervision by <NAME> and entitled *An Internship Report on DevOps Engineer at F1Soft International Pvt. Ltd.*, in partial fulfillment of the requirements for the degree of Bachelor of Science in Computer Science and Information Technology of Tribhuvan University, be processed for evaluation.

#block(
  spacing: 3em,
  table(
    inset: (
      x: 10pt,
      y: 80pt
    ),
    /*
    columns: (1fr, 0.3fr),
    [
      #line(length: 15em,stroke: (thickness: 1pt, dash: ("dot",4pt)))
      *<NAME>* #parbreak()
      Supervisor #parbreak()
      Department of Computer Science and Information Technology #parbreak()
      Asian College of Higher Studies #parbreak()
      Ekantakuna, Lalitpur
    ],
    [],
    */
    columns: (1fr, 0.3fr),
    [
      #line(length: 15em,stroke: (thickness: 1pt, dash: ("dot",4pt)))
      *Mr. <NAME>* #parbreak()
      Supervisor #parbreak()
      Department of Computer Science and Information Technology #parbreak()
      National College of Computer Studies #parbreak()
      Paknajol, Kathmandu
    ],
    [],
    stroke: none
  )
)]

#pagebreak()

/**************************************************************************************/
/*
#align(center,
  image("../images/ACHS.png", height: 20%)
)
*/

= LETTER OF APPROVAL
/*
\
#text(12pt)[
This is to certify that this internship report prepared by <NAME> entitled “Insured Application” has been submitted to the Department of Computer Science for acceptance in partial fulfillment of the requirements for the degree of B.Sc. in Computer Science and Information Technology. In our opinion, it is satisfactory in the scope and quality as a project for the required degree

#table(
  inset: (
    x: 15pt,
    y: 10pt
  ),
  columns: (0.5fr, 0.4fr),
  align: (center, center),
  // [*Signature of Supervisor*], [*Signature of HOD/ Coordinator *],
  [
    *Signature of Supervisor* \ \ \ \
    #line(length: 12em,stroke: (thickness: 1pt, dash: ("dot",4pt)))
    *<NAME>* \
    Asian College of Higher Studies
  ],
  [
    *Signature of HOD/ Coordinator \ \ \ \ *
    #line(length: 12em,stroke: (thickness: 1pt, dash: ("dot",4pt)))
    *Mr. <NAME>* \
    F1Soft International Pvt. Ltd
  ],
  // [*Signature of External Examiner *],[],
  [
    *Signature of HOD * \ \ \ \
    #line(length: 12em,stroke: (thickness: 1pt, dash: ("dot",4pt)))
    *<NAME>* \
    Asian College of Higher Studies
  ],
  [
    *Signature of External Examiner * \ \ \ \
    #line(length: 12em,stroke: (thickness: 1pt, dash: ("dot",4pt)))
    *.....* \
    IOST, Tribhuvan University
  ],
  // stroke: none
)
]
*/

#pagebreak()

/************************************************************************/
/*
= ACKNOWLEDGEMENT
\
I would like to express my deepest appreciation to all those who provided me with the possibility to complete this internship report. Special gratitude to my supervisor *<NAME>* for the complete support and guidance throughout the internship period. Also, I would like to express my special gratitude to our Program Coordinator, *Pranaya Nakarmi* and administrative staff whose all-time encouragement helped me coordinate the internship tasks systematically.

I would like to express my sincere thanks to my mentors *Mr. <NAME>* and *Mr. <NAME>* of F1Soft International Pvt. Ltd. for sharing their valuable knowledge and guiding me during the internship period, and making me learn new skills and abilities. I am also grateful to the entire staff of F1Soft International Pvt. Ltd for their constant support and guidance. With all due respect and gratitude, I would like to give a word of thanks to the members of the IT department of Asian College of Higher Studies, who encouraged me to perform work activities.
\ \
With Regards, \
<NAME> (24256/76) \
*/
/************************************************************************/

= ACKNOWLEDGEMENT
\
Deepest appreciation is expressed to all those who provided the possibility to complete this internship report. Special gratitude is extended to the supervisor, *Mr. <NAME>*, for the complete support and guidance throughout the internship period.
// Special gratitude is also expressed to the Program Coordinator, <NAME>i, and the administrative staff whose all-time encouragement helped the internship tasks to be coordinated systematically.

Sincere thanks are expressed to the mentors, *Mr. <NAME>* and *Mr. <NAME>*, of F1Soft International Pvt. Ltd. for sharing valuable knowledge and providing guidance during the internship period, and for imparting new skills and abilities. Gratitude is also extended to the entire staff of F1Soft International Pvt. Ltd. for the constant support and guidance given.

With all due respect and gratitude, a word of thanks is given to the members of the IT department of National College of Computer Studies, who encouraged the performance of work activities.
\ \
With Regards, \
<NAME> (23813/76)

#pagebreak()
https://github.com/HPDell/typst-cineca
https://raw.githubusercontent.com/HPDell/typst-cineca/main/util/utils.typ
typst
MIT License
#let minutes-to-datetime(minutes) = {
  let h = calc.trunc(minutes / 60)
  let m = int(calc.round(calc.fract(minutes / 60) * 60))
  return datetime(hour: h, minute: m, second: 0)
}

#let events-to-calendar-items(events, start) = {
  let dict = (:)
  for value in events {
    if value.len() < 4 { continue }
    let kday = str(value.at(0))
    let stime = float(value.at(1))
    let etime = float(value.at(2))
    let body = value.at(3)
    if not dict.keys().contains(kday) {
      dict.insert(kday, (:))
    }
    let istart = calc.min((calc.trunc(stime) - start), 24) * 60 + calc.min(calc.round(calc.fract(stime) * 100), 60)
    let iend = calc.min((calc.trunc(etime) - start), 24) * 60 + calc.min(calc.round(calc.fract(etime) * 100), 60)
    let ilast = iend - istart
    if ilast > 0 {
      dict.at(kday).insert(str(istart), (ilast, body))
    }
  }
  dict
}

#let default-header-style(day) = {
  show: pad.with(y: 8pt)
  set align(center + horizon)
  set text(weight: "bold")
  [Day #{day+1}]
}

#let default-item-style(time, body) = {
  show: block.with(
    fill: white,
    height: 100%,
    width: 100%,
    stroke: (
      left: blue + 2pt,
      rest: blue.lighten(30%) + 0.4pt
    ),
    inset: (rest: 0.4pt, left: 2pt),
    clip: true
  )
  show: pad.with(2pt)
  set par(leading: 4pt)
  if time != none {
    terms(
      terms.item(time.display("[hour]:[minute]"), body)
    )
  } else {
    body
  }
}

#let default-time-style(time) = {
  show: pad.with(x: 2pt)
  move(dy: -4pt, time.display("[hour]:[minute]"))
}

#let get-month-days(month, year) = {
  if month in (1,3,5,7,8,10,12) {
    return 31
  } else if month in (4,6,9,11) {
    return 30
  } else {
    // February: a year is a leap year if it is divisible by 4 and not by 100,
    // or if it is divisible by 400. (The original check returned 29 for years
    // like 1900 and 28 for 2000, which is backwards.)
    if calc.rem(year, 4) == 0 and (calc.rem(year, 100) != 0 or calc.rem(year, 400) == 0) {
      return 29
    } else {
      return 28
    }
  }
}

#let default-month-day(day, events) = {
  set align(left + top)
  show: block.with(inset: 2pt, clip: true)
  stack(
    dir: ttb,
    spacing: 4pt,
    day.display("[day]"),
    ..events.map(((t, content)) => {
      stack(
        dir: ltr,
        spacing: 2pt,
        t.display("[hour]:[minute]"),
        content
      )
    })
  )
}

#let default-month-day-head(name) = {
  name
}

#let default-month-head(content) = {
  content
}

#let default-month-view(
  events,
  date-range,
  month-head: none,
  sunday-first: false,
  style-day-body: default-month-day,
  style-day-head: default-month-day-head,
  style-month-head: default-month-head,
  ..args
) = {
  let (date-from, date-to) = date-range
  let dates = range(date-from.day(), date-to.day() + 1).map(it => datetime(
    year: date-from.year(),
    month: date-from.month(),
    day: it
  ))
  let date-weekday = dates.map(it => it.weekday() + int(sunday-first)).map(i => if i > 7 { i - 7 } else { i })
  // [#date-weekday]
  let nweek = dates.map(it => it.weekday()).filter(it => it == 1).len()
  if date-from.weekday() > 1 {
    nweek = nweek + 1
  }
  let week-day-map = ()
  for (i, (d, w)) in dates.zip(date-weekday).enumerate() {
    if i == 0 or w == 1 {
      week-day-map.push(())
    }
    week-day-map.last().push((d, w))
  }
  let events-map = (:)
  for e in events {
    let key = e.at(0).display("[year]-[month]-[day]")
    if (key not in events-map.keys()) {
      events-map.insert(key, ())
    }
    events-map.at(key).push(e)
  }
  let header = week-day-map.at(1).map(((d, w)) => style-day-head(d.display("[weekday repr:short]")))
  let title = if type(month-head) == content or type(month-head) == str {
    month-head
  } else {
    date-from.display("[month repr:long]")
  }
  let body = grid(
    columns: (1fr,) * 7,
    rows: (2em,) * 2 + (4em,) * nweek,
    stroke: 1pt,
    align: center + horizon,
    ..args,
    grid.cell(colspan: 7, style-month-head(title)),
    ..header,
    ..week-day-map.map(week => {
      (
        range(1, week.first().at(1)).map(it => []),
        week.map(((day, w)) => {
          let day-str = day.display("[year]-[month]-[day]")
          if day-str in events-map.keys() {
            style-day-body(day,
events-map.at(day-str)) } else { style-day-body(day, ()) } }) ).join() }).flatten() ) body } #let default-day-summary(day, shape) = { if type(shape) == array and type(shape.at(0)) == function { let (pen, args) = shape show: pen.with(..args) day.display("[day padding:none]") } else { day.display("[day padding:none]") } }
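// Usage sketch (added; event rows are (day, start, end, body) with times
// encoded as hour.minute floats, as events-to-calendar-items assumes):
//
//   #minutes-to-datetime(90)
//   // => datetime(hour: 1, minute: 30, second: 0)
//
//   #events-to-calendar-items(((0, 9.30, 10.00, [Standup]),), 8)
//   // => roughly ("0": ("90": (30, [Standup]))), i.e. a 30-minute item
//   // starting 90 minutes after the 08:00 day start.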
https://github.com/alberto-lazari/computer-science
https://raw.githubusercontent.com/alberto-lazari/computer-science/main/type-theory/theory-exercises/exercises/extra.typ
typst
#import "/common.typ": * #exercise( section: (num: "5", title: "How to translate predicative logic with equality into type theory"), ex: 15, solution: false, [Show that by using the *Propositional Equality with Path Induction*, for any type $A$ and $a in A$, there exists a proof-term #q $ #q in sum(z in sum(x in A) Idp(A, a, x)) fa(w in sum(x in A) Idp(A, a, x)) Idp(sum(x in A) Idp(A, a, x), z, w) $], ) #let q1 = $a$ #let q2 = $idp(a)$ #let elid2 = $idp(alpha)$ #let elsum2 = $(x1, x2). ElIdp(x2, elid2)$ #let q3 = $Elsum(w, elsum2)$ #let qtot = $angle.l alpha, lambda w. q3 angle.r$ First, I transform the universal quantifier into a dependent product, in order to be able to derive it in type theory. The original judgment so becomes $ sum(z in sum(x in A) Idp(A, a, x)) prod(w in sum(x in A) Idp(A, a, x)) Idp(sum(x in A) Idp(A, a, x), z, w) $ I will assume the following rules, that are essential to use the *Propositional Equality with Path Induction*: #box[ #set text(8pt) $#prooftree( axiom($A type ctx(Gamma)$), axiom($a in A ctx(Gamma)$), axiom($b in A ctx(Gamma)$), rule(n: 3, label: FIdp, $Idp(A, a, b) type ctx(Gamma)$) ) #h(2em) #prooftree( axiom($a in A ctx(Gamma)$), rule(label: IIdp, $idp(a) in Idp(A, a, a) ctx(Gamma)$) )$ #v(1em) #prooftree( axiom($C(y, z) type ctx(Gamma, y in A, z in Idp(A, a, y))$), axiom($a in A ctx(Gamma)$), axiom($b in A ctx(Gamma)$), axiom($p in Idp(A, a, b) ctx(Gamma)$), axiom($c in C(a, idp(a)) ctx(Gamma)$), rule(n: 5, label: EIdp, $ElIdp(p, c) in C(b, p) ctx(Gamma)$) ) ] These are different than the ones written in page 34 of the course notes, in fact a subscript $upright(sans(p))$ was added to $FId$ and $IId$, in order to distinguish the ones related to Propositional Equality with Path Induction from the ones defined for Martin-Löf's Propositional Equality. The subscript was also added to the term $id(x)$, for the same reason ($id(x) in Id(A, x, x)$, but $id(x) in.not Idp(A, x, x)$). Another small change in $EIdp$ was that $z in Idp(A, a, y)$, not $z in Id(A, a, y)$. #box(stroke: 0.5pt, width: 100%, inset: 0.5em, [ === Propositional Equality with Path Induction #set text(8pt) #v(0.5em) $#prooftree( axiom($A type ctx(Gamma)$), axiom($a in A ctx(Gamma)$), axiom($b in A ctx(Gamma)$), rule(n: 3, label: FId, $Idp(A, a, b) type ctx(Gamma)$) ) #h(2em) #prooftree( axiom($a in A ctx(Gamma)$), rule(label: IId, $id(a) in Idp(A, a, a) ctx(Gamma)$) )$ #v(1em) #prooftree( axiom($C(y, z) type ctx(Gamma, y in A, z in Id(A, a, y))$), axiom($a in A ctx(Gamma)$), axiom($b in A ctx(Gamma)$), axiom($p in Idp(A, a, b) ctx(Gamma)$), axiom($c in C(a, id(a)) ctx(Gamma)$), rule(n: 5, label: EIdp, $ElIdp(p, c) in C(b, p) ctx(Gamma)$) ) #align(center, [Page 34 of the course notes]) ]) == Solution Assuming: #a-enum[ + $A type ctx()$ + $a in A ctx()$ ] #let phi = $phi.alt$ - Let $alpha = angle.l a, idp(a) angle.r$ - Let $#q = qtot$ - Let $phi = sum(x in A) Idp(A, a, x)$ - Let $psi(z, w) = Idp(phi, z, w)$ #let judgment = $qtot in sum(z in phi) prod(w in phi) psi(z, w) ctx()$ #judgment derivable: #align(center, box[ #set text(9pt) #prooftree( axiom(label: $pi_1$, $alpha in phi ctx()$), axiom(label: $pi_2$, $q3 in psi(alpha, w) ctx(w in phi)$), rule(label: Iprod, $lambda w. 
q3 in prod(w in phi) psi(alpha, w) ctx()$), axiom(label: $pi_3$, $psi(z, w) type ctx(z in phi, w in phi)$), rule(label: Fprod, $prod(w in phi) psi(z, w) type ctx(z in phi)$), rule(n: 3, label: Isum, judgment) ) ]) #let var-cont(var) = ( axiom(label: $a_1$, $A type ctx()$), rule(label: Fc, $var in A cont$), ) Where: #pi-enum[ #{ judgment = $angle.l q1, q2 angle.r in sum(x in A) Idp(A, a, x) ctx()$ } + $alpha in phi ctx()$ derivable, because: - $alpha = angle.l a, idp(a) angle.r$ - $phi = sum(x in A) Idp(A, a, x)$ - #judgment derivable: #align(center, box[ #set text(6pt) #prooftree( axiom(label: $a_2$, $q1 in A ctx()$), axiom(label: $a_2$, $a in A ctx()$), rule(label: FIdp, $q2 in Idp(A, a, a) ctx()$), axiom(label: $a_1$, $A type ctx()$), ..var-cont($x$), rule(n: 2, label: "ind-ty)", $A type ctx(x in A)$), axiom(label: $a_2$, $a in A ctx()$), ..var-cont($x$), rule(n: 2, label: "ind-ter)", $a in A ctx(x in A)$), ..var-cont($x$), rule(label: var, $x in A ctx(x in A)$), rule(n: 3, label: FIdp, $Idp(A, a, x) type ctx(x in A)$), rule(n: 3, label: Isum, judgment) ) ]) #{ judgment = $q3 in Idp(phi, alpha, w) ctx(w in phi)$ } + $q3 in psi(alpha, w) ctx(w in phi)$ derivable, because: - $psi(alpha, w) = Idp(phi, alpha, w)$ - #judgment derivable: #align(center, box[ #set text(7pt) #prooftree( axiom(label: $pi_(2.5)$, $Idp(phi, alpha, z) type ctx(w in phi, z in phi)$), axiom(label: $pi_(3.1)$, $phi type ctx()$), rule(label: Fc, $w in phi cont$), rule(label: var, $w in phi ctx(w in phi)$), axiom(label: $pi_(2.1)$, $ElIdp(x2, idp(alpha)) in Idp(phi, alpha, angle.l x1, x2 angle.r) ctx(w in phi, x1 in A, x2 in Idp(A, a, x1))$), rule(n: 3, label: Esum, judgment) ) ]) Where: #pi-enum[ #{ judgment = $ElIdp(x2, idp(alpha)) in Idp(phi, alpha, angle.l x1, x2 angle.r) ctx(Gamma)$ } + $ElIdp(x2, idp(alpha)) in Idp(phi, alpha, angle.l x1, x2 angle.r) ctx(w in phi, x1 in A, x2 in Idp(A, a, x1))$ derivable, because: - Let $Gamma = w in phi, x1 in A, x2 in Idp(A, a, x1)$ - #judgment derivable: #align(center, box[ #set text(7.5pt) #prooftree( axiom($pi_(2.2)$), axiom(label: $pi_(2.3)$, $Gamma cont$), rule(label: var, $a in A ctx(Gamma)$), axiom(label: $pi_(2.3)$, $Gamma cont$), rule(label: var, $x1 in A ctx(Gamma)$), axiom(label: $pi_(2.3)$, $Gamma cont$), rule(label: var, $x2 in Idp(phi, a, x1) ctx(Gamma)$), axiom(label: $pi_1$, $alpha in phi ctx()$), axiom(label: $pi_(2.3)$, $Gamma cont$), rule(n: 2, label: "ind-te)", $alpha in phi ctx(Gamma)$), rule(label: FIdp, $elid2 in Idp(phi, alpha, alpha) ctx(Gamma)$), rule(n: 5, label: EIdp, judgment) ) ]) #{ judgment = $Idp(phi, alpha, angle.l y, z angle.r) type ctx(Sigma)$ } + $Idp(phi, alpha, angle.l y, z angle.r) type ctx(Gamma, y in A, z in Idp(A, a, y))$ derivable, because: - Let $Sigma = Gamma, y in A, z in Idp(A, a, y)$ - #judgment derivable: #align(center, box[ #set text(8pt) #prooftree( axiom(label: $pi_(3.1)$, $phi type ctx()$), axiom(label: $pi_(2.2.1)$, $Sigma cont$), rule(n: 2, label: "ind-ty)", $phi type ctx(Sigma)$), axiom(label: $pi_1$, $alpha in phi ctx()$), axiom(label: $pi_(2.2.1)$, $Sigma cont$), rule(n: 2, label: "ind-ter)", $alpha in phi ctx(Sigma)$), axiom(label: $pi_(2.2.2)$, $angle.l y, z angle.r in phi ctx(Sigma)$), rule(n: 3, label: FIdp, judgment) ) ]) Where: #pi-enum[ #{ judgment = $Gamma, y in A, z in Idp(A, a, y) cont$ } + $Sigma cont$ derivable, because: - $Sigma = Gamma, y in A, z in Idp(A, a, y)$ - #judgment derivable: #let gamma-ya-cont = ( axiom(label: $a_1$, $A type ctx()$), axiom(label: $pi_(2.3)$, $Gamma cont$), rule(n: 2, label: 
"ind-ty)", $A type ctx(Gamma)$), rule(label: Fc, $Gamma, y in A cont$), ) #align(center, box[ #set text(6pt) #prooftree( axiom(label: $a_1$, $A type ctx()$), ..gamma-ya-cont, rule(n: 2, label: "ind-ty)", $A type ctx(Gamma, y in A)$), axiom(label: $a_2$, $a in A ctx()$), ..gamma-ya-cont, rule(n: 2, label: "ind-ter)", $a in A ctx(Gamma, y in A)$), ..gamma-ya-cont, rule(label: var, $y in A ctx(Gamma, y in A)$), rule(n: 3, label: FIdp, $Idp(A, a, y) type ctx(Gamma, y in A)$), rule(label: Fc, judgment) ) ]) #{ judgment = $angle.l y, z angle.r in sum(x in A) Idp(A, a, x) ctx(Sigma)$ } + $angle.l y, z angle.r in phi ctx(Sigma)$ derivable, because: - $phi = sum(x in A) Idp(A, a, x)$ - #judgment derivable: #align(center, box[ #prooftree( axiom(label: $pi_(2.2.1)$, $Sigma cont$), rule(label: var, $y in A ctx(Sigma)$), axiom(label: $pi_(2.2.1)$, $Sigma cont$), rule(label: var, $z in Idp(A, a, y) ctx(Sigma)$), axiom(label: $pi_(2.2.3)$, $Idp(A, a, x) ctx(Sigma, x in A)$), rule(n: 3, label: Isum, judgment) ) ]) #{ judgment = $Idp(A, a, x) ctx(Sigma, x in A)$ } + #judgment derivable: #let sigma-xa-cont = ( axiom(label: $a_1$, $A type ctx()$), axiom(label: $pi_(2.2.1)$, $Sigma cont$), rule(n: 2, label: "ind-ty)", $A type ctx(Sigma)$), rule(label: Fc, $Sigma, x in A cont$), ) #align(center, box[ #set text(6pt) #prooftree( axiom(label: $a_1$, $A type ctx()$), ..sigma-xa-cont, rule(n: 2, label: "ind-ty)", $A type ctx(Sigma, x in A)$), axiom(label: $a_2$, $a in A ctx()$), ..sigma-xa-cont, rule(n: 2, label: "ind-ter)", $a in A ctx(Sigma, x in A)$), ..sigma-xa-cont, rule(label: var, $x in A ctx(Sigma, x in A)$), rule(n: 3, label: FIdp, judgment) ) ]) ] #{ judgment = $Delta, x2 in Idp(A, a, x1) cont$ } + $Gamma cont$ derivable, because: - $Gamma = w in phi, x1 in A, x2 in Idp(A, a, x1)$ - Let $Delta = w in phi, x1 in A$ - #judgment derivable: #align(center, box[ #set text(10pt) #prooftree( axiom(label: $a_1$, $A type ctx()$), axiom(label: $pi_(2.4)$, $Delta cont$), rule(n: 2, label: "ind-ty)", $A type ctx(Delta)$), axiom(label: $a_2$, $a in A ctx()$), axiom(label: $pi_(2.4)$, $Delta cont$), rule(n: 2, label: "ind-ter)", $a in A ctx(Delta)$), axiom(label: $pi_(2.4)$, $Delta cont$), rule(label: var, $x1 in A ctx(Delta)$), rule(n: 3, label: FIdp, $Idp(A, a, x1) type ctx(Delta)$), rule(label: Fc, judgment) ) ]) #{ judgment = $w in phi, x1 in A cont$ } + $Delta cont$ derivable, because: - $Delta = w in phi, x1 in A cont$ - #judgment derivable: #align(center, box[ #prooftree( axiom(label: $a_1$, $A type ctx()$), axiom(label: $pi_(3.1)$, $phi type ctx()$), rule(label: Fc, $w in phi cont$), rule(n: 2, label: "ind-ty)", $A type ctx(w in phi)$), rule(label: Fc, judgment) ) ]) #{ judgment = $Idp(phi, alpha, z) type ctx(w in phi, z in phi)$ } + #judgment derivable: #align(center, box[ #set text(7pt) #prooftree( axiom(label: $pi_(3.1)$, $phi type ctx()$), axiom(label: $pi_(3.2)$, $w in phi, z in phi cont$), rule(n: 2, label: "ind-ty)", $phi type ctx(w in phi, z in phi)$), axiom(label: $pi_1$, $alpha in phi ctx()$), axiom(label: $pi_(3.2)$, $w in phi, z in phi cont$), rule(n: 2, label: "ind-ter)", $alpha in phi ctx(w in phi, z in phi)$), axiom(label: $pi_(3.2)$, $w in phi, z in phi cont$), rule(label: var, $z in phi ctx(w in phi, z in phi)$), rule(n: 3, label: FIdp, judgment) ) ]) #{ judgment = $w in phi, z in phi cont$ } Where #judgment derivable: #align(center, box[ #prooftree( axiom(label: $pi_(3.1)$, $phi type ctx()$), axiom(label: $pi_(3.1)$, $phi type ctx()$), rule(label: Fc, $w in phi cont$), rule(n: 2, label: 
"ind-ty)", $phi type ctx(w in phi)$), rule(label: Fc, judgment) ) ]) ] #{ judgment = $Idp(phi, z, w) type ctx(z in phi, w in phi)$ } + $psi(z, w) type ctx(z in phi, w in phi)$ derivable, because: - $psi(z, w) = Idp(phi, z, w)$ - #judgment derivable: #align(center, box[ #set text(9pt) #prooftree( axiom(label: $pi_(3.1)$, $phi type ctx()$), axiom(label: $pi_(3.2)$, $z in phi, w in phi cont$), rule(n: 2, label: "ind-ty)", $phi type ctx(z in phi, w in phi)$), axiom(label: $pi_(3.2)$, $z in phi, w in phi cont$), rule(label: var, $z in phi ctx(z in phi, w in phi)$), axiom(label: $pi_(3.2)$, $z in phi, w in phi cont$), rule(label: var, $w in phi ctx(z in phi, w in phi)$), rule(n: 3, label: FIdp, judgment) ) ]) Where: #pi-enum[ #{ judgment = $sum(x in A) Idp(A, a, x) type ctx()$ } + $phi type ctx()$ derivable, because: - $phi = sum(x in A) Idp(A, a, x)$ - #judgment derivable: #align(center, box[ #set text(8pt) #prooftree( axiom(label: $a_1$, $A type ctx()$), ..var-cont($x$), rule(n: 2, label: "ind-ty)", $A type ctx(x in A)$), axiom(label: $a_2$, $a in A ctx()$), ..var-cont($x$), rule(n: 2, label: "ind-ter)", $a in A ctx(x in A)$), ..var-cont($x$), rule(label: var, $x in A ctx(x in A)$), rule(n: 3, label: FIdp, $Idp(A, a, x) type ctx(x in A)$), rule(label: Fsum, judgment) ) ]) #{ judgment = $z in phi, w in phi cont$ } + #judgment derivable: #align(center, box[ #prooftree( axiom(label: $pi_(3.1)$, $phi type ctx()$), axiom(label: $pi_(3.1)$, $phi type ctx()$), rule(label: Fc, $z in phi cont$), rule(n: 2, label: "ind-ty)", $phi type ctx(z in phi)$), rule(label: Fc, judgment) ) ]) ] ]
https://github.com/galaxia4Eva/galaxia4Eva
https://raw.githubusercontent.com/galaxia4Eva/galaxia4Eva/main/essays/nuclear-ethics-essay.typ
typst
#set text(font: "Mariupol", weight: "light", size:11pt) #set par(justify: true) #show heading: set text(font: "Arsenal", weight: "bold") #show heading: set par(justify: false) = An essay on an assumed ethics of a nuke. == There is no amount of words that would justify burning millions of people alive. #v(0.65em) #par[ There is no ethics in a nuke. Were the opposite the case, russia would have never attacked Ukraine. An armed robbery in a dark place has more ethics than a nuke could ever have. Anything else is just soothing one's inner Oppenheimer. There is no way to measure ethics, lest to say to measure it in kilotons or megatons. Anything else is just soothing one's inner Oppenheimer with post–rationalisations. ] #par[ The rants we hear from kremlin in moscow about nuclear bombardment of the world into multipolarity, in my opinion, are signs of their stagnation: communism proved to have liquidity issues on the idealogical market, oil is not that scarce to give them a competitive edge. So they try to assume a role of nuclear technology provider in a supposed multipolar world. And they don't care if in a fight of converting arid Central Asia into a blooming paradise, say, Uzbekistan and Kyrgyzstan convert their land into a nuclear desert. Were the opposite the case, russia would have never attacked Ukraine. ] #par[ There is no amount of words that would justify what russia did to Mariupol, Bucha, Izium and other Ukrainian towns and cities. I dare to remind you, that cities and towns are not just buildings, infrastructure, streets and street names. Cities and towns are people, their communities, their dreams and hopes. People of russia brought the war in Ukraine by their choice and they should take the responsibility as for their actions and for their inaction as well. ] #par[ An armed robbery in a dark place has more ethics than a nuke could ever have. And it does not matter if a victim of a robbery in the question also carries a gun. In the case, when the situation resolves in a way that everybody lives, the victim would call the police and the perpetrator will have to go in hiding from justice. In the case when the situation results in a wounded, or even dead, body, the society will notice that and inform the police. And for the surviving party justice will be, according to the way situation had been resolved, more severe. ] #par[ In a case when there were no police, no paramedics, no society, but only an armed perpetrator and, potentially an armed victim, whoever gets the bullet is most likely to bleed to death. End of story, no morals, just some dead bodies and no ethics but direct, natural consequences of actions. In this case, neither the victim nor the perpetrator are not necessary to be humans: direct, natural consequences work the same way for animals as for humans. Also, there is no reason for the place of a robbery to be a dark one. ] #par[ If Ukraine to have an ethical nuclear doctrine, it should not be directed to acquiring nuclear strike capabilities, but rather strive for the nuclear disarmament of russia, belarus, and any other party that violates, or enables a violation of Ukrainian sovereignty and territorial integrity. Anything else is a road to a nuclear desert. ]
https://github.com/GeorgeDong32/GD-Typst-Templates
https://raw.githubusercontent.com/GeorgeDong32/GD-Typst-Templates/main/functions/dirac.typ
typst
Apache License 2.0
#let ket(body) = $|#body angle.r$ #let bar(body) = $angle.l#body|$ #let barket(left, right) = $angle.l#left|#right angle.r$ #let ketbar(left, right) = $|#left angle.r angle.l #right|$
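// Usage sketch (added): inside math mode these render standard Dirac notation,
// e.g. $ket(psi)$ for |ψ⟩, $bar(psi)$ for ⟨ψ| (note the bra macro is named
// `bar` here), $barket(phi, psi)$ for ⟨φ|ψ⟩, and $ketbar(phi, psi)$ for |φ⟩⟨ψ|.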
https://github.com/frectonz/the-pg-book
https://raw.githubusercontent.com/frectonz/the-pg-book/main/book/076.%20equity.html.typ
typst
equity.html

The Equity Equation

July 2007

An investor wants to give you money for a certain percentage of your startup. Should you take it? You're about to hire your first employee. How much stock should you give him?

These are some of the hardest questions founders face. And yet both have the same answer:

1/(1 - n)

Whenever you're trading stock in your company for anything, whether it's money or an employee or a deal with another company, the test for whether to do it is the same. You should give up n% of your company if what you trade it for improves your average outcome enough that the (100 - n)% you have left is worth more than the whole company was before.

For example, if an investor wants to buy half your company, how much does that investment have to improve your average outcome for you to break even? Obviously it has to double: if you trade half your company for something that more than doubles the company's average outcome, you're net ahead. You have half as big a share of something worth more than twice as much.

In the general case, if n is the fraction of the company you're giving up, the deal is a good one if it makes the company worth more than 1/(1 - n).

For example, suppose Y Combinator offers to fund you in return for 7% of your company. In this case, n is .07 and 1/(1 - n) is 1.075. So you should take the deal if you believe we can improve your average outcome by more than 7.5%. If we improve your outcome by 10%, you're net ahead, because the remaining .93 you hold is worth .93 x 1.1 = 1.023. [1]

One of the things the equity equation shows us is that, financially at least, taking money from a top VC firm can be a really good deal. <NAME> from Sequoia recently said at a YC dinner that when Sequoia invests alone they like to take about 30% of a company. 1/.7 = 1.43, meaning that deal is worth taking if they can improve your outcome by more than 43%. For the average startup, that would be an extraordinary bargain. It would improve the average startup's prospects by more than 43% just to be able to say they were funded by Sequoia, even if they never actually got the money.

The reason Sequoia is such a good deal is that the percentage of the company they take is artificially low. They don't even try to get market price for their investment; they limit their holdings to leave the founders enough stock to feel the company is still theirs.

The catch is that Sequoia gets about 6000 business plans a year and funds about 20 of them, so the odds of getting this great deal are 1 in 300. The companies that make it through are not average startups.

Of course, there are other factors to consider in a VC deal. It's never just a straight trade of money for stock. But if it were, taking money from a top firm would generally be a bargain.

You can use the same formula when giving stock to employees, but it works in the other direction. If i is the average outcome for the company with the addition of some new person, then they're worth n such that i = 1/(1 - n). Which means n = (i - 1)/i.

For example, suppose you're just two founders and you want to hire an additional hacker who's so good you feel he'll increase the average outcome of the whole company by 20%. n = (1.2 - 1)/1.2 = .167. So you'll break even if you trade 16.7% of the company for him.

That doesn't mean 16.7% is the right amount of stock to give him. Stock is not the only cost of hiring someone: there's usually salary and overhead as well. And if the company merely breaks even on the deal, there's no reason to do it.

I think to translate salary and overhead into stock you should multiply the annual rate by about 1.5. Most startups grow fast or die; if you die you don't have to pay the guy, and if you grow fast you'll be paying next year's salary out of next year's valuation, which should be 3x this year's. If your valuation grows 3x a year, the total cost in stock of a new hire's salary and overhead is 1.5 years' cost at the present valuation. [2]

How much of an additional margin should the company need as the "activation energy" for the deal? Since this is in effect the company's profit on a hire, the market will determine that: if you're a hot opportunity, you can charge more.

Let's run through an example. Suppose the company wants to make a "profit" of 50% on the new hire mentioned above. So subtract a third from 16.7% and we have 11.1% as his "retail" price. Suppose further that he's going to cost $60k a year in salary and overhead, x 1.5 = $90k total. If the company's valuation is $2 million, $90k is 4.5%. 11.1% - 4.5% = an offer of 6.6%.

Incidentally, notice how important it is for early employees to take little salary. It comes right out of stock that could otherwise be given to them.

Obviously there is a great deal of play in these numbers. I'm not claiming that stock grants can now be reduced to a formula. Ultimately you always have to guess. But at least know what you're guessing. If you choose a number based on your gut feel, or a table of typical grant sizes supplied by a VC firm, understand what those are estimates of.

And more generally, when you make any decision involving equity, run it through 1/(1 - n) to see if it makes sense. You should always feel richer after trading equity. If the trade didn't increase the value of your remaining shares enough to put you net ahead, you wouldn't have (or shouldn't have) done it.

Notes

[1] This is why we can't believe anyone would think Y Combinator was a bad deal. Does anyone really think we're so useless that in three months we can't improve a startup's prospects by 7.5%?

[2] The obvious choice for your present valuation is the post-money valuation of your last funding round. This probably undervalues the company, though, because (a) unless your last round just happened, the company is presumably worth more, and (b) the valuation of an early funding round usually reflects some other contribution by the investors.

Thanks to <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME> for reading drafts of this.
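As a minimal sketch, the two break-even rules above can be written as Typst script (the function names are mine, added for illustration):

#let break-even-multiple(n) = 1 / (1 - n)  // give up share n only if the outcome improves by more than this
#let break-even-share(i) = (i - 1) / i     // max share for a hire who multiplies the outcome by i
// break-even-multiple(0.07) = 1.075 (the YC example);
// break-even-share(1.2) ≈ 0.167 (the 20%-better hacker example).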
https://github.com/CaldeCrack/Typst-Templates
https://raw.githubusercontent.com/CaldeCrack/Typst-Templates/main/modules.typ
typst
// Import modules into a single file #import "modules/auto_table.typ": * #import "modules/auto_rect.typ": *
https://github.com/gianzamboni/cancionero
https://raw.githubusercontent.com/gianzamboni/cancionero/main/wip/actos-fallidos.typ
typst
#import "../theme.typ": *; == Actos Fallidos === Sol Pereyra Primero me corté las venas \ después me disparé. \ Más tarde me subí a un techo \ y desde arriba me tiré. \ Seguí en busca de una soga \ y en el cuello me la até. \ Después pasó un colectivo \ y abajo me le tiré. #newVerse Ya no sé que pasa \ si algo falló o resucité. \ Iré por un suicidio en masa \ así mi suerte cambiaré. \ Ya no sé que pasa \ si algo falló o resucité. \ Iré por un suicidio en masa \ con más apoyo lo lograré. #newVerse Salí en busca de pastillas \ y la receta me olvidé. \ Me fui al mar sin salvavidas \ pero sin querer nadé. \ Probé con un insecticida \ pero se venció ayer. \ Corrí a tirarme en unas vías \ y ya no pasaba el tren. #newVerse Ya no sé que pasa \ si algo falló o resucité. \ Iré por un suicidio en masa \ así mi suerte cambiaré. \ Ya no sé que pasa \ si algo falló o resucité. \ Iré por un suicidio en masa \ con más apoyo lo lograré.
https://github.com/rhedgeco/resume
https://raw.githubusercontent.com/rhedgeco/resume/main/template.typ
typst
#let default-primary-color = rgb("#fca854") #let default-secondary-color = rgb("#5bd8c3") // define default themes #let light-theme( primary: default-primary-color, secondary: default-secondary-color, ) = { ( text-color: rgb("#303030"), stroke-color: rgb("#d1d9e0"), background-color: rgb("#ffffff"), primary-color: primary, secondary-color: secondary, ) } #let dark-theme( primary: default-primary-color, secondary: default-secondary-color, ) = { ( text-color: rgb("#c6c6c6"), stroke-color: rgb("#2b2b2b"), background-color: rgb("#1f1f1f"), primary-color: primary, secondary-color: secondary, ) } #let default-theme = light-theme() // ICON DEFINITIONS #import "@preview/fontawesome:0.2.1": fa-icon #let file-icon = box( fa-icon("file"), ) #let github-icon = box( fa-icon("github"), ) #let linkedin-icon = box( fa-icon("linkedin"), ) #let youtube-icon = box( fa-icon("youtube"), ) #let globe-icon = box( fa-icon("globe"), ) #let file-layout( filename: "", source: "", theme: default-theme, body, ) = { let filename = filename.trim() if filename.trim() == "" { filename = "untitled" } // set default page and text settings set page( paper: "us-letter", fill: theme.background-color, margin: 1em, ) set text( fill: theme.text-color, font: "Noto Sans", ) show heading: heading => ( block(context { place(dx: -0.8em, link(here(), text(fill: theme.primary-color)[\#])) heading.body if heading.level == 1 { box( width: 1fr, pad(left: 0.25em, line(length: 100%, stroke: 1pt + theme.stroke-color)), ) } }) ) // build page block( width: 100%, spacing: 0pt, radius: 0.5em, stroke: 1pt + theme.stroke-color, { // file header block( width: 100%, spacing: 0em, { // render icon and filename box( pad( bottom: 1pt, box( stroke: (bottom: 2pt + theme.primary-color), pad(bottom: 0.8em - 1pt, rest: 0.8em, [#file-icon #filename]), ), ), ) // render source button if available if source.trim() != "" { h(1fr) box( pad( rest: 0.3em, link( source, box( radius: 0.25em, fill: theme.secondary-color.transparentize(80%), pad(rest: 0.5em, [View Source #github-icon]), ), ), ), ) } }, ) line(length: 100%, stroke: 1pt + theme.stroke-color) // file body show link: set text(fill: theme.secondary-color) block( width: 100%, spacing: 0em, pad( y: 0.8em, x: 2em, body, ), ) // footer v(1fr) place( bottom + right, pad( rest: 0.5em, text( size: 0.8em, weight: "bold", fill: theme.text-color.transparentize(75%), [ Updated #datetime.today().display("[month repr:long] [day], [year]") ], ), ), ) }, ) } #let resume_title( firstname: "John", lastname: "Doe", linkedin: "johndoe", github: "", youtube: "", website: "", subtitle: "", theme: default-theme, ) = { pad( top: 2em, bottom: 1em, align(center)[ #text(font: "JetBrains Mono", size: 4em, weight: "black", fill: theme.primary-color)[#firstname] #text(" ") #text(font: "JetBrains Mono", size: 4em, weight: "black", fill: theme.text-color)[#lastname] #if subtitle.trim() != "" { subtitle } #block( radius: 0.25em, stroke: 1pt + theme.stroke-color, fill: theme.stroke-color.transparentize(50%), { show link: set text(fill: theme.text-color) if linkedin.trim() != "" { link( "https://www.linkedin.com/in/" + linkedin, box(pad(rest: 0.5em, [#linkedin-icon #linkedin])), ) } if github.trim() != "" { link( "https://github.com/" + github, box(pad(rest: 0.5em, [#github-icon #github])), ) } if youtube.trim() != "" { link( "https://www.youtube.com/@" + youtube, box(pad(rest: 0.5em, [#youtube-icon #youtube])), ) } if website.trim() != "" { link( "https://" + website, box(pad(rest: 0.5em, [#globe-icon #website])), ) } }, ) ], ) }
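// Usage sketch (added; argument values are illustrative, not from the
// template's own docs):
//
//   #import "template.typ": file-layout, resume_title, dark-theme
//   #show: file-layout.with(
//     filename: "resume.typ",
//     source: "https://github.com/rhedgeco/resume",  // enables the "View Source" button
//     theme: dark-theme(),
//   )
//   #resume_title(firstname: "John", lastname: "Doe", github: "johndoe")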