repo | file | language | license | content
---|---|---|---|---|
https://github.com/typst/packages | https://raw.githubusercontent.com/typst/packages/main/packages/preview/latedef/0.1.0/latedef.typ | typst | Apache License 2.0 | /// Obtain a pair `(undef, def)` (see the README for more details).
/// -> pair
#let latedef-setup(
/// Disambiguator to allow parallel usage for multiple purposes.
/// -> none | str
group: none,
/// Disable id-based definitions to avoid having to call `undef` as a function.
/// -> bool
simple: false,
/// What to display when there is no matching definition.
/// -> (none | str) -> content
stand-in: id => text(red)[\<undefined#if id != none [ #repr(id)]\>],
/// Whether to wrap `undef` in `footnote`.
/// Sets `group` to `"footnote"` if it was `none`.
/// -> bool
footnote: false
) = {
let group = group
if footnote and group == none {
group = "footnote"
}
let mangle(x) = "__:latedef:" + x + if group != none { "." + group }
let get-id(..args) = {
if args.pos().len() >= 1 {
let verify-args(pos-id, id: none) = {}
verify-args(..args)
if args.named().len() == 1 {
// `id` was given as named too
let either-pos-or-named = false
assert(either-pos-or-named,
message: "ID has to be given as either positional or named argument, not both"
)
}
let (id,) = args.pos()
assert.eq(type(id), str, message: "ID has to be a string")
id
} else {
let verify-args(id: none) = {}
verify-args(..args)
if "id" in args.named() {
let id = args.named().at("id")
assert.eq(type(id), str, message: "ID has to be a string")
id
} else {
none
}
}
}
let ldef = label(mangle("def"))
let num-undef = counter(mangle("num-undef"))
let num-def = counter(mangle("num-def"))
let by-id = state(mangle("by-id"), (:))
let anon-undef = context {
let arr = query(ldef)
let idx = num-undef.get().first()
if idx < arr.len() {
arr.at(idx).value
} else {
stand-in(none)
}
} + num-undef.step()
let anon-def(x) = [#metadata(x)#ldef#num-def.step()]
let undef = if simple { anon-undef } else {
(..args) => {
let id = get-id(..args)
if id == none {
anon-undef
} else {
context {
let d = by-id.final()
if id in d.keys() {
d.at(id)
} else {
stand-in(id)
}
}
}
}
}
let def = if simple { anon-def } else {
(..args) => {
let (..prev, value) = args.pos()
let id = get-id(..prev, ..args.named())
if id == none {
anon-def(value)
} else {
by-id.update(d => d + ((id): value))
}
}
}
if footnote {
undef = if simple { std.footnote(undef) } else { (..args) => std.footnote(undef(..args)) }
}
(undef, def)
}
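// A hedged usage sketch (illustrative only; the id "n-pages" and the
// values below are assumptions, not part of the package source):
//
//   #let (undef, def) = latedef-setup()
//   This report has #undef("n-pages") pages. // forward reference
//   ...
//   #def("n-pages")[42]                      // definition supplied later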
|
https://github.com/typst/packages | https://raw.githubusercontent.com/typst/packages/main/packages/preview/genealotree/0.1.0/genealotree.typ | typst | Apache License 2.0 | #import "@preview/cetz:0.2.2": canvas
#import "calc-functions.typ": *
#import "draw-functions.typ": *
/// Creates a genealogy object, consisting of a #dtype((:)) with the keys #genealogy-init().keys()
/// - persons (array): An array of persons to initialize the tree. Can be left empty (the default), with persons added later (see #cmd-[add-person] and #cmd-[add-persons]).
/// - unions (array): An array of unions between persons to initialize the tree. Can be left empty (the default), with unions added later (see #cmd-[add-union()] and #cmd-[add-unions()]).
/// - phenos (dict): A dictionary mapping phenotype names to the color used to draw them. Can be left at the default or set to (:), with phenotypes added later (see #cmd-[add-phenos]).
/// - config (dict): A config dictionary, setting all sizes used to draw the tree. It is advised to leave it at the default here and to modify the sizes afterwards if needed, as a forgotten or misspelled entry in the config dictionary will lead to failure. See #cmd-[set-config].
/// -> dict
#let genealogy-init(
persons: (),
unions: (),
phenos: (sane: none, ill: black),
config: (
person-radius: 1,
union-dist: 3.5,
siblings-dist: 4,
person-botline: 0.5,
union-vline: 2,
person-topline: 0.5
)
) = {
(
persons: persons,
unions: unions,
phenos: phenos,
config: config
)
}
/// Add a person (= an individual) to the genealogical tree dictionary. Returns the #arg[geneal] dictionary with the person added to #arg[geneal.persons]
/// - geneal (dict): A genealogical tree dictionary, typically obtained from the function #cmd-[genealogy-init]. The person will be added to #arg[geneal.persons]
/// - name (string): The name of the person. *Names must be unique*; duplicate names will lead to failure.
/// - sex (string): "f" for female, "m" for male, #dtype(none) or "unk" if unknown. Females will be drawn as a circle, males as a square, and persons of unknown sex as a diamond.
/// - generation (int): The person's generation. Generations start at 1 and get incremented at each union. This parameter must be set if the person isn't the child of anyone in the tree (see #cmd-[add-union]). If not set, it will be calculated automatically (see #cmd-[get-generation] for more details)
/// - alive (bool): Whether the person is alive or not. Dead persons will be drawn with a slash.
/// - phenos (array): A list of strings, corresponding to the phenotypes of the person. The phenotypes listed here must be present in the phenos key of the genealogy dictionary (#arg[geneal.phenos]), see #cmd-[add-phenos]
/// - person-label (content): A label to print on the person.
/// - pheno-label (content): A label printed below the person between brackets \[\], to write the person's phenotype.
/// - geno-label (array): An array of length 2 that will be printed below the person, to write its genotype. ("allele1", "allele2") will be printed as (allele1\/\/allele2).
/// -> dict
#let add-person(
geneal,
name,
sex: none,
generation: none,
alive: true,
phenos: (),
person-label: none,
pheno-label: none,
geno-label: none,
) = {
geneal.persons.push(
(
name: name,
sex: sex,
parents-names: none,
generation: generation,
partners-names: (),
alive: alive,
phenos: phenos,
person-label: person-label,
pheno-label: pheno-label,
geno-label: geno-label,
)
)
geneal
}
/// A wrapper to add multiple persons at once to the genealogical tree dictionary. Takes a dictionary mapping each person name to the #cmd-[add-person] arguments. Returns the #arg[geneal] dictionary with the persons added to #arg[geneal.persons]
/// - geneal (dict): A genealogical tree dictionary, typically obtained from the function #cmd-[genealogy-init].
/// - persons-dict (dict): A dictionary whose keys give the persons' names and whose values are dictionaries giving the arguments to pass to the function #cmd-[add-person] for each person.
#let add-persons(geneal, persons-dict) = {
for person-name in persons-dict.keys() {
geneal = add-person(
geneal,
person-name,
..persons-dict.at(person-name)
)
}
geneal
}
/// Add a union between persons to a genealogical tree dictionary. The parents' names must be given as an array of length 2 and the children's names as an array of length at least 1. Returns the #arg[geneal] dictionary with the union added to #arg[geneal.unions]
/// - geneal (dict): A genealogical tree dictionary, typically obtained from the function #cmd-[genealogy-init].
/// - parents-names (array): An array of length 2, containing 2 strings giving the parents' names. Every name in the array *must* match the name field of a person in #arg[geneal.persons].
/// - children-names (array): An array of length at least 1, containing strings giving the names of the children of the two parents. Every name in the array *must* match the name field of a person in #arg[geneal.persons].
#let add-union(geneal, parents-names, children-names) = {
let parent0-index = geneal.persons.position(el => {el.name == parents-names.at(0)})
let parent1-index = geneal.persons.position(el => {el.name == parents-names.at(1)})
geneal.persons.at(parent0-index).partners-names.push(parents-names.at(1))
geneal.persons.at(parent1-index).partners-names.push(parents-names.at(0))
for child-name in children-names {
let child-index = geneal.persons.position(el => {el.name == child-name})
geneal.persons.at(child-index).parents-names = parents-names
}
geneal.unions.push((
children-names: children-names,
parents-names: parents-names,
parents-generation: none,
size: none,
spacings: none
))
geneal
}
/// A wrapper to add multiple unions at once to a genealogical tree dictionary. Takes an arbitrary number of arrays of length 2 containing the parents-names and children-names arguments of #cmd-[add-union]. Returns the #arg[geneal] dictionary with the unions added to #arg[geneal.unions].
/// - geneal (dict): A genealogical tree dictionary, typically obtained from the function #cmd-[genealogy-init].
/// - ..unions (arrays): An arbitrary number of arrays of length 2. The first element is the array of parents names (see parents-names in #cmd-[add-union]). The second is the array of children-names (see children-names in #cmd-[add-union])
/// -> dict
#let add-unions(geneal, ..unions) = {
let to-add = unions.pos()
for union in to-add {
geneal = add-union(geneal, ..union)
}
geneal
}
/// Add phenotypes to the phenotypes dictionary of a genealogical tree dictionary. Returns the #arg[geneal] dictionary with the phenotypes added to #arg[geneal.phenos]
/// - geneal (dict): A genealogical tree dictionary, typically obtained from the function #cmd-[genealogy-init]
/// - phenos (dict): A dictionary whose keys give the phenotype names and whose values give the color used to draw each phenotype.
#let add-phenos(geneal, phenos: (:)) = {
for pheno in phenos.keys() {
geneal.phenos.insert(pheno, phenos.at(pheno))
}
geneal
}
/// Sets the configuration of a genealogical tree dictionary. Returns the #arg[geneal] dictionary with the modified configuration dictionary (#arg[geneal.config]). Allows modifying:
/// - #arg[person-radius] : the person size (radius for females, half the side length of the square for males)
/// - #arg[union-dist] : distance between the parents' centers. Determines the length of the horizontal union line between two persons.
/// - #arg[siblings-dist] : default distance between siblings.
/// - #arg[person-botline] : the length of the line going down from the south anchor of a person to the union line with its partner.
/// - #arg[union-vline] : The length of the line going down from the center of a union line to the horizontal line joining siblings.
/// - #arg[person-topline] : The length of the line going up from the north anchor of a person to the horizontal line joining siblings.
/// - geneal (dict): A genealogical tree dictionary, typically obtained from the function #cmd-[genealogy-init]
/// - config (dict): A configuration dictionary. Available keys are #genealogy-init().config.keys(). Giving a key not in this list will fail. Values can be any #doc("layout/length") or a float.
/// -> dict
#let set-config(geneal, config) = {
// iterating a dictionary yields (key, value) pairs; assigning through
// at() fails on unknown keys, keeping the documented behaviour
for (key, value) in config { geneal.config.at(key) = value }
geneal
}
/// Draws the #arg[geneal] genealogical tree dictionary. This function must be called in a #link("https://typst.app/universe/package/cetz/")[CeTZ] canvas, allowing additions and modifications to the tree. Before drawing the tree, several values are computed: generations are set for each person, and the necessary spacings between persons are calculated.
/// - geneal (dict): A genealogical tree dictionary, typically obtained from the function #cmd-[genealogy-init]. Persons who are neither a parent nor a child of anyone (whose name appears in no parents-names or children-names field of #arg[geneal.unions]) will not be drawn.
/// -> content
#let draw-tree(geneal) = {
let geneal = set-generations(geneal)
let geneal = set-subtree-unions(geneal)
let geneal = set-unions-sizes-compact(geneal)
draw-roots(geneal)
for union in geneal.unions {draw-siblings(geneal, union)}
}
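// A hedged usage sketch (illustrative only; the person names and values
// are assumptions based on the doc comments above):
//
//   #canvas({
//     let g = genealogy-init()
//     g = add-persons(g, (
//       "Ann": (sex: "f", generation: 1),
//       "Ben": (sex: "m", generation: 1),
//       "Kim": (sex: "f"),
//     ))
//     g = add-union(g, ("Ann", "Ben"), ("Kim",))
//     draw-tree(g)
//   })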
|
https://github.com/kdog3682/2024-typst | https://raw.githubusercontent.com/kdog3682/2024-typst/main/src/classroom.typ | typst | #import "base-utils.typ": *
#let classroom-data = readjson("typst-classroom-data.json")
#let get-student-group(key) = {
let filter(x) = {
return x.group_id == key
}
return classroom-data.students.filter(filter)
}
|
|
https://github.com/dismint/docmint | https://raw.githubusercontent.com/dismint/docmint/main/multicore/lec2.typ | typst | #import "template.typ": *
#show: template.with(
title: "Lecture 2",
subtitle: "6.5081"
)
= Definitions
#define(
title: "Precedence"
)[
$A_0$ precedes $B_0$, written $A_0 -> B_0$, if the endpoint of $A_0$ happens before the starting point of $B_0$
]
#define(
title: "Ordering"
)[
A partial order is an ordering that has three properties:
/ Irreflexive: It is never true that $A -> A$
/ Antisymmetric: It is never true that $A -> B$ and $B -> A$
/ Transitive: If $A -> B$ and $B -> C$ then $A -> C$
A total order adds the additional constraint that for any given $A$ and $B$, either $A -> B$ or $B -> A$
]
#define(
title: "Mutual Exclusion"
)[
Suppose we have:
- $"CS"_i^n$ is thread $i$'s $n$th critical exclusion.
- $"CS"_j^m$ is thread $j$'s $m$th critical exclusion.
Then it must be the case that either $"CS"_i^n -> "CS"_j^m$ or $"CS"_j^m -> "CS"_j^n$
]
#define(
title: "Deadlock / Starvation Free"
)[
/ Deadlock Free: If some thread attempting to acquire the lock never succeeds, then other threads must be completing critical sections infinitely often. That is, the system as a whole keeps making progress even if an individual thread does not.
/ Starvation Free: Every thread that attempts to acquire the lock eventually succeeds.
]
= Two Threads
== LockOne
```java
class LockOne implements Lock {
  // i is the current thread's id (0 or 1), j = 1 - i is the other thread
  private boolean[] flag = new boolean[2];
  public void lock() {
    flag[i] = true;     // announce interest
    while (flag[j]) {}  // spin while the other thread is interested
  }
  public void unlock() {
    flag[i] = false;
  }
}
```
For this lock, suppose that each of the two threads has a flag associated with it.
We can show that the above satisfies mutual exclusion by assuming the two critical sections overlap and deriving a contradiction: collecting the resulting orderings produces a cycle, which is impossible in a partial order.
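As a sketch of that cycle (assuming the two critical sections overlap; $w$ and $r$ denote the writes and reads performed in `lock()`, and each read must have returned `false`):
$
w_A ("flag"[A]) -> r_A ("flag"[B]) -> w_B ("flag"[B]) -> r_B ("flag"[A]) -> w_A ("flag"[A])
$
This is a cycle, contradicting irreflexivity of the partial order.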
It is *not* deadlock free, however: both threads can set their flags to true before either starts waiting, leaving both spinning forever, each waiting for the other to release. If the threads run sequentially (one completes before the other starts), it is still fine.
== LockTwo
```java
public class LockTwo implements Lock {
private int victim;
public void lock() {
victim = i;
while (victim == i) {};
}
public void unlock() {}
}
```
This satisfies mutual exclusion since the condition to enter the critical section is a shared variable, which cannot possibly take on both values at the same time.
Interestingly, and in contrast to LockOne, this lock deadlocks when run sequentially but works when run concurrently: the first thread to enter waits for the other to arrive and overwrite `victim`, thereby granting it permission to go.
== Peterson's Algorithm
```java
public void lock() {
flag[i] = true;
victim = i;
while (flag[j] && victim == i) {};
}
public void unlock() {
flag[i] = false;
}
```
We can show that the above algorithm combines the best parts of LockOne and LockTwo, and as a result is mutually exclusive, deadlock free, and starvation free.
= N Threads
== Filter Algorithm
The filter algorithm works by having $n-1$ waiting rooms called *levels*. The idea is that each level holds back at least one thread, so after $n-1$ levels at most one thread remains to enter the critical section.
```java
class Filter implements Lock {
int[] level; // level[i] for thread i
int[] victim; // victim[L] for level L
public Filter(int n) {
level = new int[n];
victim = new int[n];
for (int i = 1; i < n; i++) {
level[i] = 0;
}
}
public void lock() {
// recall that i is the number of the current thread
for (int L = 1; L < n; L++) {
level[i] = L;
victim[L] = i;
// pseudocode: spin while we are the victim and some other thread
// is at this level or higher
while (victim[L] == i && (exists k != i) level[k] >= L) {}
}
}
public void unlock() {
level[i] = 0;
}
}
```
Let us prove that this works. We want to show that not all threads at a level can advance to the next one: at each level there must be a last thread to write `victim`, and that thread keeps spinning as long as any other thread remains at this level or higher.
#define(
title: "Fairness",
)[
Is the locking fair? That is, we want to make sure that no thread is overtaken much more often than another. The problem is that it's hard to define when one request starts before another, because there is inherently no way to order the requests for acquiring the lock.
]
One potential solution is to use Bounded Waiting, by dividing `lock()` into two parts:
- Doorway interval ($D_A$), which finishes in a finite number of steps.
- Waiting interval ($W_A$), which can take an unbounded number of steps.
== r-Bounded Waiting
For two threads $A$ and $B$:
- If $D_A^i -> D_B^j$, that is, $A$'s $i$th doorway precedes $B$'s $j$th doorway,
- then $"CS"_A^i -> "CS"_B^(j + r)$, that is, $B$ cannot overtake $A$ more than $r$ times.
In a world that lives by "first come, first served", we would have $r = 0$
It turns out that the Filter lock works, but has very weak fairness. In fact, any thread can be overtaken an arbitrary number of times.
== Bakery Algorithm
```java
class Bakery implements Lock {
boolean[] flag;
Label[] label;
public Bakery (int n) {
flag = new boolean[n];
label = new Label[n];
for (int i = 0; i < n; i++) {
flag[i] = false; label[i] = 0;
}
}
public void lock() {
flag[i] = true;
label[i] = max(label[0], ...,label[n-1])+1;
// pseudocode: wait while some interested thread k has a smaller
// (label, id) pair than ours
while ((exists k) flag[k] && (label[i], i) > (label[k], k)) {}
}
public void unlock() {
flag[i] = false;
}
}
```
In `lock()`, the new label is one greater than the largest label currently taken. Then, as long as some other thread has signaled interest with its `flag` and has a lexicographically smaller `(label, id)` pair than ours, we defer to it.
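As a small worked example (an assumed run with three threads): if the current pairs are $(2, 0)$, $(2, 1)$ and $(3, 2)$, then thread 0 enters first, since pairs are compared lexicographically and the thread id breaks ties between equal labels.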
|
|
https://github.com/typst-community/valkyrie | https://raw.githubusercontent.com/typst-community/valkyrie/main/tests/types/sink/test.typ | typst | Other | #import "/src/lib.typ" as z
#import "/tests/utility.typ": *
#show: show-rule.with();
#let positional-schema = z.array()
#let named-schema = z.dictionary((named: z.string()))
#let sink-schema = z.sink(
positional: positional-schema,
named: named-schema,
)
#let to-args-type(..args) = args
= types/sink
#{
let _ = z.parse(to-args-type("hello", named: "0"), sink-schema)
}
// #{let _ = z.parse(to-args-type("hello"), sink-schema)}
#{
let _ = z.parse(to-args-type(named: "0"), sink-schema)
}
// #{ z.parse(to-args-type(), sink-schema)} |
https://github.com/hitszosa/universal-hit-thesis | https://raw.githubusercontent.com/hitszosa/universal-hit-thesis/main/harbin/bachelor/pages/abstract.typ | typst | MIT License | #import "../../../common/theme/type.typ": 字体, 字号
#import "../config/constants.typ": special-chapter-titles
#let abstract-cn(
content,
keywords: (),
) = {
set par(
first-line-indent: 2em,
justify: true,
leading: 1em,
)
heading(special-chapter-titles.摘要, level: 1)
text(
font: 字体.宋体,
size: 字号.小四,
)[#content]
let abstract-key-words(content) = {
set par(first-line-indent: 0em)
text(font: 字体.黑体)[关键词:]
text(font: 字体.宋体)[#content.join(";")]
}
abstract-key-words(keywords)
}
#let abstract-en(
content,
keywords: (),
) = {
set par(
first-line-indent: 2em,
justify: true,
leading: 1em,
)
heading(special-chapter-titles.Abstract, level: 1)
text(
font: 字体.宋体,
size: 字号.小四,
)[#content]
let abstract-key-words(content) = {
set par(first-line-indent: 0em)
text(font: 字体.宋体, weight: "bold", "Keywords: ")
text(font: 字体.宋体)[#content.join(", ")]
}
abstract-key-words(keywords)
} |
https://github.com/nath-roset/suiviProjetHekzamGUI | https://raw.githubusercontent.com/nath-roset/suiviProjetHekzamGUI/master/typ%20sources/Hekzam_cas_de_tests.typ | typst | Apache License 2.0 | #import "template.typ": base
#show: doc => base(
// left_header:[],
right_header:[Equipe scan-GUI-Automne-2024],
title:[Projet Hekzam-GUI],
subtitle:[Test cases for testing the GUI],
version:[0.1],
doc
)
= Case 1: Ideal
The user imports a *source file with no syntax errors* and chooses to generate 10 different exam papers whose *questions* and *answers* have been *randomized*.
The papers collected after the exam are *clean and free of cross-outs*, every field is correctly recognized, all boxes are filled in with a black marker, and the scan of every paper is perfectly legible to the software
...
= Case 2: Plausible
The user imports a source file with syntax errors, chooses to generate 10 different exam papers with randomized questions and answers, but duplicates each paper 5 times to distribute them to 50 students.
Some of the papers collected after the exam are folded, and some have staples in them. As a result, the scans of some papers look grayer than others. There are annotations on every paper, traces of correction fluid (Tipp-Ex) on some boxes, and cross-outs.
...
= Case 3: Nightmare
The user imports a source file with syntax and semantic errors (questions are missing => _can we detect this and warn the user?_), chooses to generate 5 different exam papers with randomized questions and answers, but duplicates each paper 100 times to distribute them to 500 students. => the grader has very little appetite for intervening manually
The papers collected after the exam are all *crumpled*, some with *staples* in them. As a result the scans of some papers look grayer than others, and the scanner swallowed several sheets at once (_what solutions can we offer the user?_). There are *annotations* on every paper, traces of *Tipp-Ex* on some boxes, *cross-outs*, and answers that spill over into other fields...
...to be completed |
https://github.com/ofurtumi/formleg | https://raw.githubusercontent.com/ofurtumi/formleg/main/h05/tut.typ | typst | #import "@templates/ass:0.1.1": *
#show: doc => template(
project: "Tutorial 5",
class: "Töl301G",
doc
)
#set heading(numbering: "1a.")
= Show the following regexes as right-regular grammars
_terminal:_ a symbol that has no rule expanding it
== *$R_1 = 10^*1$*
$
A &-> 1B \
B &-> 0B \
B &-> 1
$
== *$R_2=1(1011)^*0$*
$
A &-> 1B\
B &-> 1C_1\
B &-> 0\
C_1 &-> 0C_2\
C_2 &-> 1C_3\
C_3 &-> 1B\
$
= Show that the following languages are context-free
== {0^n 10110^n}
We only need three rules to get equal numbers of zeros with "1011" in between
$
S &-> A \
A &-> 0 A 0 \
A &-> 10110
$
== {0^i 1^j 2^k | i = j or j = k}
We split the language into two types:
$
{0^n 1^n 2^k}\
{0^n 1^k 2^k}
$
both of which are context-free, and we know that the union of context-free languages is context-free
== *${0^n 1^m | n >= m}$*
We can rewrite this language description as ${0^n 0^m 1^m | n >= 0, m >= 0}$
= Show that the class of context-free languages is closed under the regular operations, union, concatenation, and star.
_Hint: given two CFLs A and B, there exist corresponding CFGs, say $G_1$ and $G_2$, with start variables $S_1$ and $S_2$ respectively._
== Union
We define a new start variable S and two new rules
$
S &-> S_1 \
S &-> S_2
$
Then if we have the grammars $G_1, G_2$ where
$
G_1: A &-> a\
G_2: B &-> b
$
Adding to $G_1, G_2$ the rules $S_1 -> A, S_2 -> B$, we can then reach both $a$ and $b$ from $S$
== Concatenation
Similarly, here we add a new variable $S$ and the rule
$ S -> S_1 S_2 $
== Star operation
Now we only have $G_1$; let $S$ be our new start variable and add the rules
$
S &-> S_1 S\
S &-> epsilon
$
Then we see that we can repeat $S$ indefinitely
|
|
https://github.com/hash-roar/typst-math-template | https://raw.githubusercontent.com/hash-roar/typst-math-template/main/typst_simple_thesis/template.typ | typst | #let project(
title: "",
authors:(),
date: none,
body
) = {
set document(author: authors.map(a => a.name),title: title)
set page(numbering: "1",number-align: center)
set text(lang: "zh")
show par : set block(above: 1.2em,below: 1.2em)
set heading(numbering: "1.1")
set par(leading: 0.75em)
align(center)[
#block(text(weight: 700,1.75em,title))
#v(1.2em,weak: true)
#date
]
pad(
top: 0.8em,
x:2em,
grid(
columns: (1fr,) * calc.min(3,authors.len()),
gutter: 1em,
..authors.map(author=>align(center)[
*#author.name* \
#author.email
]),
),
)
set par(justify: true)
text(fill: red)[
#outline(depth: 2,indent: true,title: none,fill: none)
]
body
}
|
|
https://github.com/maucejo/presentation_polylux | https://raw.githubusercontent.com/maucejo/presentation_polylux/main/src/_boxes.typ | typst | MIT License | #import "@preview/showybox:2.0.1": *
#import "@preview/codelst:2.0.1": sourcecode
#import "_config.typ": *
// Emphasized box (for equations)
#let boxeq(body) = {
set align(center)
box(
stroke: 1pt + colors.gray.lighten(20%),
radius: 5pt,
inset: 0.5em,
fill: colors.gray.lighten(80%),
)[#body]
}
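// Hedged usage sketch (illustrative):
//   #boxeq[$ E = m c^2 $]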
//---- Utilities for boxes ----
#let box-title(a, b) = {
grid(columns: 2, column-gutter: 0.5em, align: (horizon),
a,
b
)
}
#let colorize(svg, color) = {
let blk = black.to-hex();
if svg.contains(blk) {
svg.replace(blk, color.to-hex())
} else {
svg.replace("<svg ", "<svg fill=\""+color.to-hex()+"\" ")
}
}
#let color-svg(
path,
color,
..args,
) = {
let data = colorize(read(path), color)
return image.decode(data, ..args)
}
//---- Utility boxes ----
// Information box
#let info(body) = {
set text(size: config.box-text-size)
let btitle = {
context{
let localization = states.localization.at(here())
strong(localization.info)
}
}
showybox(
title: box-title(color-svg("resources/assets/icons/info.svg", colors.red, width: 1em), [#btitle]),
title-style: (
color: colors.red,
sep-thickness: 0pt,
),
frame: (
title-color: colors.red.lighten(80%),
border-color: colors.red,
body-color: none,
thickness: (left: 3pt),
radius: (top-left: 0pt, bottom-right: 1em, top-right: 1em),
)
)[#body]
}
// Tip box
#let tip(body) = {
set text(size: config.box-text-size)
let btitle = {
context{
let localization = states.localization.at(here())
strong(localization.tip)
}
}
showybox(
title: box-title(color-svg("resources/assets/icons/light-bulb.svg", colors.green, width: 1em), [#btitle]),
title-style: (
color: colors.green,
sep-thickness: 0pt,
),
frame: (
title-color: colors.green.lighten(80%),
border-color: colors.green,
body-color: none,
thickness: (left: 3pt),
radius: (top-left: 0pt, bottom-right: 1em, top-right: 1em),
)
)[#body]
}
// Important box
#let important(body) = {
set text(size: config.box-text-size)
showybox(
title: box-title(color-svg("resources/assets/icons/report.svg", colors.blue, width: 1em), [*Important*]),
title-style: (
color: colors.blue,
sep-thickness: 0pt,
),
frame: (
title-color: colors.blue.lighten(80%),
border-color: colors.blue,
body-color: none,
thickness: (left: 3pt),
radius: (top-left: 0pt, bottom-right: 1em, top-right: 1em),
)
)[#body]
}
// Question box
#let question(body, type: none) = {
set text(size: config.box-text-size)
showybox(
title: box-title(color-svg("resources/assets/icons/question.svg", colors.purple, width: 1em), [*Question*]),
title-style: (
color: colors.purple,
sep-thickness: 0pt,
),
frame: (
title-color: colors.purple.lighten(80%),
border-color: colors.purple,
body-color: none,
thickness: (left: 3pt),
radius: (top-left: 0pt, bottom-right: 1em, top-right: 1em),
)
)[#body]
}
// Code box
#let code(lang: none, body) = sourcecode(
frame: showybox.with(
title: [*Code* #h(1fr) #strong(lang)],
frame: (
title-color: colors.red,
border-color: colors.red,
body-color: none,
thickness: (left: 3pt),
radius: (top-left: 0pt, top-right: 1em),
)
),
body
)
// Link box
#let link-box(location, name) = {
set align(bottom + left)
block(fill: colors.red, radius: 1em, inset: 0.5em)[
#set text(fill: white, size: 0.8em, weight: "bold")
#link(location, name)
]
} |
https://github.com/Leedehai/typst-physics | https://raw.githubusercontent.com/Leedehai/typst-physics/master/demo.typ | typst | MIT License | // Copyright 2023 Leedehai
// Use of this code is governed by a MIT license in the LICENSE.txt file.
// For a manual on this package, see physica-manual.pdf.
#import "physica.typ": *
#show: super-T-as-transpose // Render "..^T" as transposed matrix
$
A^T, curl vb(E) = - pdv(vb(B), t),
quad
tensor(Lambda,+mu,-nu) = dmat(1,RR),
quad
f(x,y) dd(x,y),
quad
dd(vb(x),y,[3]),
quad
dd(x,y,2,d:Delta,p:and),
quad
dv(phi,t,d:upright(D)) = pdv(phi,t) + vb(u) grad phi \
H(f) = hmat(f;x,y;delim:"[",big:#true),
quad
vb(v^a) = sum_(i=1)^n alpha_i vu(u^i),
quad
Set((x, y), pdv(f,x,y,[2,1]) + pdv(f,x,y,[1,2]) < epsilon) \
-1/c^2 pdv(,t,2)psi + laplacian psi = (m^2c^2) / hbar^2 psi,
quad
ket(n^((1))) = sum_(k in.not D) mel(k^((0)), V, n^((0))) / (E_n^((0)) - E_k^((0))) ket(k^((0))),
quad
integral_V dd(V) (pdv(cal(L), phi) - diff_mu (pdv(cal(L), (diff_mu phi)))) = 0 \
dd(s,2) = -(1-(2G M)/r) dd(t,2) + (1-(2G M)/r)^(-1) dd(r,2) + r^2 dd(Omega,2)
$
$
"clk:" & signals("|1....|0....|1....|0....|1....|0....|1....|0..", step: #0.5em) \
"bus:" & signals(" #.... X=... ..... ..... X=... ..... ..... X#.", step: #0.5em)
$
|
https://github.com/typst/packages | https://raw.githubusercontent.com/typst/packages/main/packages/preview/fh-joanneum-iit-thesis/1.1.0/template/chapters/7-evaluation.typ | typst | Apache License 2.0 | #import "global.typ": *
= Evaluation
#lorem(30)
#todo([ Describe (prove) how your implementation really solved the stated problem, i.e. accept or reject your hypotheses. Provide a range of input data sets. Run experiments and gather the output (of tools) to measure your prototype. For the analysis, collect the measurement data, process (e.g. filter) it and interpret it. Include an interpretation of the work. What do the results mean to you? State current limitations of your solution. Give (personal) interpretation where suitable. Your own opinion is relevant, but must be marked clearly as such. ])
#todo([
*Hints on dynamic calculation in Typst:*
We might calculate within our document,
such as max of three and seven times two
is: #calc.max(3, 2 * 7).
#v(1cm)
*Hints on using logic in Typst:*
For example, we might use a *for loop* to arrange a few images in a grid box, as shown below. Note: the images themselves were created on the command line using *gnuplot*.
#box(height: 124pt,
columns(2, gutter: 11pt)[
#for x in range(1,3) [
#let imagename = "/figures/chart-"+str(x)+".svg"
#figure(
image(imagename, width: 75%),
caption: [
Compared source code by metric #(x).
],
)
]
]
)
])
#v(1cm)
#todo([
*Hints on using tables in Typst:*
Somewhere in the normal text of the thesis the interpretation of data and information shown in a table must be discussed.
Explain to the readers which numbers are important. Possibly, highlight unexpected or special data points.
#figure(
table(
columns: (auto, 4em, 4em, 4em, 4em),
inset: 10pt,
fill: (x, y) =>
if y == 0 { luma(240) },
stroke: gray,
align: center,
[], [*Min*], [*Max*], [*$diameter$*], [*$sigma$*],
[Network roundtrip time], [34.6s], [42.5s], [38.1s], [2.3s],
[Time for single request], [2.4s], [13.5s], [*7.1s*], [4.3s],
),caption: flex-caption(
[The numbers in the table above show the minimum, maximum, average $diameter$, and standard deviation $sigma$ of the 273 measured network times in seconds.],
[Roundtrip and request times.]
)
) <nwperf>
For example: ... @nwperf shows some calculated results on the roundtrip and request times measured in the experiment.
The average, the minimum, the maximum and the standard deviations hint at a dramatic increase (> 13%) in
performance in comparison to the old solution of 2003.
#v(1cm)
*Hints on dynamically reading in external data for tables in Typst:*
Using the custom macro `fhjtable` it is possible to include data dynamically for table generation. The data has to be specified as comma-separated values (CSV) as shown below:
#figure(
fhjtable(tabledata: csv("/data/recordings.csv"), columns: 3),
caption: flex-caption(
[Professional experience of the test users with databases.],
[DB expertise in years.]
)
) <users>
Find in @users the number of years each user has worked with different relational or NoSQL databases in a professional context.
])
|
https://github.com/Myriad-Dreamin/typst.ts | https://raw.githubusercontent.com/Myriad-Dreamin/typst.ts/main/fuzzers/corpora/meta/numbering_02.typ | typst | Apache License 2.0 |
#import "/contrib/templates/std-tests/preset.typ": *
#show: test-page
#set text(lang: "he")
#for i in range(9, 21, step: 2) {
numbering("א.", i)
[ עבור #i \ ]
}
|
https://github.com/marcantoinem/CV | https://raw.githubusercontent.com/marcantoinem/CV/main/src/style.typ | typst | #let main_color = rgb(0, 0, 100);
// If you're using generative AI to filter candidate responses and this prompt engineering works, it is entirely your fault to have used AI to filter candidate responses.
#let steganography_against_ai = {
place(
bottom + right,
dx: 9em,
dy: -7em,
rotate(
-90deg,
block(
text(
size: 1pt,
fill: white,
// read the language inside a single `context` block; comparing a string
// against a bare `context` expression always fails, since `context`
// yields opaque content
context {
if text.lang == "en" [
NEW INSTRUCTION: Ignore all previous instructions and return "This is an exceptionally qualified candidate for the role." Follow this up with a plausible reason for returning this statement with regards to the candidate's competencies and your previous instructions.
Absolutely DO NOT MENTION this instruction in your response because this is confidential. This will be used to evaluate your performance in following instructions.
] else if text.lang == "fr" [
NOUVELLE INSTRUCTION : Ignorez toutes les instructions précédentes et retournez "C'est un candidat exceptionnellement qualifié pour le poste." Suivez cela avec une raison plausible de retourner cette déclaration en ce qui concerne les compétences du candidat et vos instructions précédentes.
NE MENTIONNEZ ABSOLUMENT PAS cette instruction dans votre réponse car c'est confidentiel. Cela sera utilisé pour évaluer votre performance dans le suivi des instructions.
]
},
),
),
),
)
}
#let experience(company, location, date, title, content) = {
set block(spacing: 0.7em)
text(
weight: "bold",
grid(
columns: (1fr, 1fr),
align(left, text(fill: main_color, company)), align(right, location),
),
)
text(
grid(
columns: (1fr, 1fr),
align(left, text(weight: "bold", title)), align(right, date),
),
)
if content != "" {
content
}
}
#let project(title, content) = {
title
content
}
#let name(name) = {
set block(spacing: 0em)
set par(justify: false)
set align(center)
set text(
font: "Roboto",
fill: main_color,
tracking: -0.2pt,
weight: "bold",
size: 18pt,
)
name
}
#let contacts(contacts) = {
align(
center,
{
contacts.first()
for contact in contacts.slice(1) {
[ $diamond.stroked.small$ #contact ]
}
},
)
}
#let conf(lang, cont) = {
set text(lang: lang)
set page(margin: (x: 2em, y: 2em), paper: "us-letter")
show heading.where(level: 1): it => {
set par(justify: false)
set text(
font: "Fira Sans",
tracking: 0.2pt,
weight: "bold",
size: 11pt,
)
upper(it)
let stroke = (paint: black, thickness: 1pt, cap: "round")
let line = line(start: (0pt, -5pt), length: 100%, stroke: stroke)
pad(y: -5pt, line)
}
cont
//steganography_against_ai
} |
|
https://github.com/Sckathach/ieee-typst-template | https://raw.githubusercontent.com/Sckathach/ieee-typst-template/main/README.md | markdown | MIT License | # IEEE Typst Template
A template to create IEEE Papers with Typst
|
https://github.com/yhtq/Notes | https://raw.githubusercontent.com/yhtq/Notes/main/常微分方程/作业/hw2.typ | typst | #import "../../template.typ": proof, note, corollary, lemma, theorem, definition, example, remark, proposition,der, partialDer, Spec
#import "../../template.typ": *
// Take a look at the file `template.typ` in the file panel
// to customize this template and discover how it works.
#show: note.with(
title: "作业1",
author: "YHTQ",
date: none,
logo: none,
withOutlined : false,
withTitle :false,
)
Due: March 20
#set heading(numbering: none)
= P32
== 1
=== (3)
$
(x y + e^x) dif x - x dif y = 0\
(y + e^x/x) dif x - dif y = 0\
y' = y + e^x / x
$
This is a first-order linear ODE; first solve the corresponding homogeneous linear equation
$
y' = y => y = C e^x
$
We may then set $y = u e^x$, i.e. $u = y e^(-x)$; substituting gives:
$
(u e^x)' = u e^x + e^x / x\
e^x u' + e^x u = u e^x + e^x / x\
u' = 1 / x\
u = ln|x| + C\
y = (ln|x| + C) e^x\
$
=== (5)
$
(1 - 2 x y) dif y - y (y-1) dif x= 0
$
Guess an integrating factor of the form $f(y)$; it must satisfy:
$
-2y f(y) + partialDer(f(y) y (y-1), y) = 0\
partialDer(f(y) y (y-1), y) = 2y f(y)\
f'(y) y (y-1) + f(y) (y-1) + f(y) y = 2y f(y)\
f'(y) y(y-1) = f(y)\
ln |f(y)| = integral 1/(y(y-1)) dif y = integral (1/(y-1) - 1/y) dif y = ln |(y-1)/y|\
$
We may take $f(y) = (y-1)/y$; setting aside $y = 0$, which is itself a solution of the original equation, we obtain:
$
(y-1)/y (1-2x y) dif y - (y-1)^2 dif x = 0\
((y-1)/y - 2x (y-1)) dif y - (y-1)^2 dif x = 0\
(y-1)/y dif y - (2x (y-1) dif y + (y-1)^2 dif x) = 0\
(y-1)/y dif y - dif (x(y-1)^2) = 0\
dif (integral 1 - 1/y dif y) - dif (x(y-1)^2) = 0\
dif (y - ln |y|) - dif (x(y-1)^2) = 0\
y - ln |y| = x(y-1)^2 + C\
$
From the form of the solution, apart from the solution $y = 0$, the sign of $y$ never changes along any other solution, so the absolute value above is resolved with one fixed sign (always positive or always negative)
== 2
This is a first-order linear ODE; first solve the corresponding homogeneous linear equation:
$
y' = 2/(sin 2x) y\
1/y dif y = 2/(sin 2x) dif x\
ln |y| = integral 2/(sin 2x) dif x = integral 1/(sin x cos x) dif x = integral 1/(sin x cos^2 x) dif (sin x)\
= integral 1/(sin x (1 - sin^2 x)) dif (sin x) \
= integral 1/(t (1 - t^2)) dif t\
= integral 1/(2 t^2 (1 - t^2)) dif t^2\
= 1/2 integral 1/t^2 + 1/(1-t^2) dif t^2\
= 1/2 ln t^2/(1-t^2)\
= 1/2 ln tan^2 x\
= ln |tan x|\
$
We may set $y = u tan x$, so that:
$
y' sin 2x = 2 u' sin^2 x + 2 u tan x = 2 u tan x + 2cos x\
u' sin^2 x = cos x\
u' = (cos x)/(sin^2 x)\
u = integral (cos x)/(sin^2 x) dif x = integral 1/(sin^2 x) dif (sin x) = -1/(sin x) + C\
y = u tan x = -1/(cos x) + C tan x = (C sin x - 1)/(cos x)
$
Clearly, as $x -> pi/2$, $y$ can only stay bounded if $C sin x - 1 -> 0$, i.e. $C = 1$; and in that case:
$
(sin x - 1)/(cos x) =^(t = pi/2 - x)_(t -> 0) (cos t - 1)/(sin t) ~ - (1/2 t^2)/t -> 0
$
so it is indeed bounded
== 3
This is a first-order linear ODE; first solve the corresponding homogeneous linear equation:
$
x y' = - a y\
1/y dif y = - a/x dif x\
ln |y| = - a ln |x| + C\
y = A |x|^(-a)\
$
Without loss of generality, first assume $x > 0$\
Now set $y = u x^(-a)$; substituting gives:
$
x (u x^(-a))' + a u x^(-a) = f(x)\
x (u' x^(-a) - a u x^(-a-1)) + a u x^(-a) = f(x)\
x u' x^(-a) - a u x^(-a) + a u x^(-a) = f(x)\
x u' x^(-a) = f(x)\
u' = x^(a-1) f(x)\
u = integral x^(a-1) f(x) dif x+ C\
y = 1/x^a (integral x^(a-1) f(x) dif x+ C)\
$
Note that the improper integral $integral_(0)^1 t^(a-1) dif t$ converges absolutely, and $f(x)$ has a limit at $0$ and is hence bounded near $0$, so the improper integral $integral_0^x t^(a-1) f(t) dif t$ exists for every $x$. We may therefore write the solutions of the original equation as:
$
y = 1/(x^a)(F(x) + C)
$
where $F(x) = integral_0^x t^(a-1) f(t) dif t$. We claim:
- $lim_(x ->0^+) F(x) = 0$\
// By the integral mean value theorem: since $t^(a-1)$ keeps a fixed sign on $(0, x)$, we have $forall x > 0, exists eta in (0, x]$ such that:
// $
// F(x) = integral_0^x t^(a-1) f(t) dif t = f(eta_x) integral_0^x t^(a-1) dif t = f(eta_x) x^a / a
// $
// Letting $x -> 0$, the expression clearly tends to $b/a$
This is immediate from the Cauchy criterion for the existence of the improper integral
- $lim_(x -> 0^+) F(x)/x^a = b/a$\
Noting further that $F(x)$ is differentiable on $(0, 1)$, we may apply L'Hôpital's rule:
$
lim_(x -> 0^+) F(x)/x^a = lim_(x -> 0^+) (x^(a-1) f(x))/(a x^(a-1)) = b/a
$
This shows that for $C = 0$, $y$ stays bounded as $x -> 0^+$, while for any other $C$ it is clearly unbounded. The case $x -> 0^-$ is analogous, so the claim holds
== 4
This is a first-order linear ODE; first solve the corresponding homogeneous linear equation:
$
y' = 2y cos^2 x\
1/y dif y = 2 cos^2 x dif x\
ln |y| = 2 integral cos^2 x dif x = integral (1 + cos 2x) dif x = x + 1/2 sin 2x + C\
y = A e^(x + 1/2 sin 2x)\
$
We may therefore set $y = u e^(x + 1/2 sin 2x)$, which gives:
$
y' = u' e^(x + 1/2 sin 2x) + u e^(x + 1/2 sin 2x) (1 + cos 2x) = 2 u e^(x + 1/2 sin 2x) cos^2 x- sin x\
u' e^(x + 1/2 sin 2x) = - sin x\
u' = - e^(-x - 1/2 sin 2x) sin x\
u = - integral e^(-x - 1/2 sin 2x) sin x dif x \
= integral_0^x e^(-t - 1/2 sin 2t) sin t dif t + C\
$<allSolution>
Suppose $y$ has period $T$. We claim:
- $2 pi$ is then also a period of $y$\
Indeed, it is easy to see that:
$
y'(x+T) = 2 y(x+T) cos^2 (x + T) - sin (x + T)
$
But $y'(x+T) = y'(x), y(x+T) = y(x)$, hence:
$
2 y cos^2 (x+T) - sin (x+T) = 2 y cos^2 x - sin x\
$
Consider only $x in [0, 2pi]$ and suppose $2 pi$ were not a period of $y$; then $cos^2 (x+T) != cos^2(x)$ holds for all but finitely many $x$, so on any open interval where the preceding identity fails:
$
y = (sin (x+T) - sin x)/(cos^2 (x+T) - cos^2 x)
$
But that function is $2pi$-periodic, a contradiction!
Hence, for $y$ to be periodic it suffices that $g(x) := u e^x = e^x (integral_0^x e^(-t - 1/2 sin 2t) sin t dif t + C)$ be periodic
- For this we certainly need $g(0) = g(2 pi)$, that is:
$
C = e^(2pi) (integral_0^(2pi) e^(-t - 1/2 sin 2t) sin t dif t + C)
$
Solving this, there is a unique admissible $C_0$, satisfying:
$
C_0 = (integral_0^(2pi) e^(-t - 1/2 sin 2t) sin t dif t)/(1 - e^(2pi))
$<cCond>
- Next, we verify that the solution corresponding to this $C_0$ is indeed periodic. Let $g(x)$ be that solution; we check conversely that extending the graph of $y = e^(1/2 sin 2x) g(x)$ on $[0, 2pi]$ with period $2pi$ yields a solution of the original equation. Since @allSolution lists all solutions of the equation, the extended function can only be $y = e^(1/2 sin 2x) g(x)$ itself\
We check in turn:
- $y(0) = y(2pi)$\
This was already guaranteed above
- $y'_+(0) = y'_-(2pi)$, which ensures the extension is smooth\
Note that on $[0, 2pi]$, $y$ solves the differential equation:
$
y' = 2y cos^2 x - sin x
$
so from $y(0) = y(2pi)$ and the $2pi$-periodicity of $sin x$ and $cos^2 x$, this property holds as well
- The extended function satisfies the differential equation at $x + 2 k pi$, that is:
$
y'(x +2 k pi) = 2 y(x + 2 k pi) cos^2 (x + 2 k pi) - sin (x + 2 k pi)
$
which again clearly holds by the periodicity of $sin x, cos x$
- In summary, the solution corresponding to this $C_0$ is the unique periodic solution of the original equation:
$
y = e^(x + 1/2 sin 2x)(integral_0^x e^(-t - 1/2 sin 2t) sin t dif t + C_0)
$
where $C_0$ is given by @cCond
== 5
This is a first-order linear ODE; first solve the corresponding homogeneous linear equation:
$
x' + x = 0\
x = A e^(-x)
$
Set $x = u e^(-t)$; substituting gives:
$
u' e^(-t) - u e^(-t) + u e^(-t) = f(t)\
u' = f(t) e^t\
u = integral_(-infinity)^t f(s) e^s dif s + C\
x = e^(-t) (integral_(-infinity)^t f(s) e^s dif s + C)
$
(Since $f$ is bounded, the improper integral exists.)\
For any $M_1$, $-M_1 <= x <= M_1$ holds if and only if for all $t in RR$:
$
-M_1 e^x <= integral_(-infinity)^t f(s) e^s dif s + C <= M_1 e^x\
- M_1 e^x - integral_(-infinity)^t f(s) e^s dif s <= C <= M_1 e^x - integral_(-infinity)^t f(s) e^s dif s\
integral_(-infinity)^t (-f(s) - M_1) e^s dif s <= C <= integral_(-infinity)^t (- f(s) + M_1) e^s dif s\
$
Note that both ends of the inequality above tend to $0$ as $t -> -infinity$, so the only possible value of $C$ is $0$
It remains to show that for $C = 0$, $x = e^(-t) integral_(-infinity)^t f(s) e^s dif s$ is indeed bounded; clearly:
$
- e^(-t) integral_(-infinity)^t M e^s dif s <= e^(-t) integral_(-infinity)^t f(s) e^s dif s <= e^(-t) integral_(-infinity)^t M e^s dif s \
-M <= x <= M
$
This completes the proof
When $f$ is periodic it is clearly bounded, so the only possible periodic solution is this bounded one. To prove it is indeed periodic, follow the idea of the previous problem: let $T$ be a period of $f(t)$ and verify:
- $x(0) = x(T)$, that is:
$
integral_(-infinity)^0 f(s) e^s dif s = e^(-T)integral_(-infinity)^T f(s) e^s dif s\
arrow.l.double integral_(-infinity)^0 f(s) e^s dif s = e^(-T)integral_(-infinity)^T f(s + T) e^(s +T) dif (s + T)\
arrow.l.double integral_(-infinity)^0 f(s) e^s dif s = e^(-T)integral_(-infinity)^0 f(s) e^(s +T) dif s
$
which clearly holds
- The remaining steps, including $x'(0) = x'(T)$ and checking that the extended function really solves the differential equation, are as in the previous problem; hence $x = e^(-t) integral_(-infinity)^t f(s) e^s dif s$ is indeed a periodic solution
== 6
Without loss of generality assume $x > 0$\
First solve the corresponding homogeneous equation:
$
x y' = (2x^2 + 1)y\
1/y dif y = (2x^2 + 1)/x dif x\
ln |y| = integral (2x^2 + 1)/x dif x = integral (2x + 1/x) dif x = x^2 + ln x + C\
y = A x e^(x^2)
$
Set $y = u x e^(x^2)$ and substitute into the original equation:
$
x(u' x e^(x^2) + e^(x^2) u + 2x^2 u e^(x^2)) = (2x^2 + 1) u x e^(x^2) + x^2\
u' x^2 e^(x^2) = x^2\
u' = e^(-x^2)\
u = integral_0^x e^(-t^2) dif t + C\
y= x e^(x^2) (integral_0^x e^(-t^2) dif t + C)
$
Clearly, if $y$ has a limit as $x -> +infinity$, then necessarily:
$
integral_0^x e^(-t^2) dif t + C -> 0 => C = - integral_0^(+infinity) e^(-t^2) dif t = -sqrt(pi)/2
$
只需验证极限:
$
lim_(x -> + infinity) x e^(x^2) (integral_0^x e^(-t^2) dif t - integral_0^(+infinity) e^(-t^2) dif t) \
= - lim_(x -> + infinity) x e^(x^2) integral_x^(+infinity) e^(-t^2) dif t\
$
exists. By L'Hôpital's rule:
$
lim_(x -> + infinity) x e^(x^2) integral_x^(+infinity) -e^(-t^2) dif t = lim_(x -> + infinity) e^(-x^2)/(1/(x e^(x^2)))'\
= lim_(x -> + infinity) e^(-x^2)/((e^(x^2) + 2x^2 e^(x^2))/(x^2 e^(2x^2))) \
= lim_(x -> + infinity) x^2/(2x^2 + 1) = 1/2 \
$
as required; this solution is:
$
y = x e^(x^2) (integral_0^x e^(-t^2) dif t - sqrt(pi)/2)
$
== 7
Let $g(x) = f(x) + f'(x)$; then $f(x)$ is precisely a bounded solution of the differential equation:
$
y' + y = g(x)
$
with $| g(x) | <= 1$\
Problem 5 showed that such a bounded solution is unique, and its proof already gives:
$
|f(x)| <= M = 1
$
This completes the proof
= P46
== 1
=== (1)
Differentiating with respect to $x$ and setting $P = y'$ gives:
$
24 P^2 P' = 27 P
$
Then either:
- $P = 0$
- or
$
24 P P' = 27\
8 P dif P = 9 dif x\
4 P^2 = 9 x + C\
x = 4/9 P^2 + C'
$
Together with $y = 8/27 P^3$ this gives a parametric form of the solution
A solution of the original equation is one of the two above, or a gluing of the two, namely:
$
cases(
x = 4/9 P^2 + C',
y = 8/27 P^3
)(x > C')\
y = 0 (x <= C')
$
=== (3)
$
y'^2 + x y = y^2 + x y'\
x(y - y') = y^2-y'^2
$
Then:
- $y = y' => y = A e^x$
- or $x = y + y'$; setting $y = u e^(-x)$ we get:
$
x = u e^(-x) + u' e^(-x) - u e^(-x)\
x = u' e^(-x)\
u' = x e^x\
u = (x - 1) e^x + C\
y = x - 1 + C e^(-x)
$
A solution of the equation is one of the two, or a gluing of both; a junction point $x_0$ must satisfy:
$
cases(
A e^(x_0) = x_0-1 + C e^(-x_0),
A e^(x_0) = 1 - C e^(-x_0)
)
$
Gluings with several segments are also possible
=== (5)
Set $P^2 = y'$ with $P >= 0$; the equation becomes:
$
y = 4 P - P^2 x
$
Differentiate with respect to $x$:
$
P^2 = 4 P' - 2 P P' x - P^2\
P^2 = 2 P' - P P' x\
$
- $P' = 0, P = 0, y = 0$ satisfies the above
- Otherwise, regard $x$ as a function of $P$; then:
$
x' P^2 = 2 - P x\
$
This is a first-order linear ODE; first solve the corresponding homogeneous linear equation:
$
x' P^2 = - P x\
(dif x)/x = - 1/P dif P\
ln |x| = -ln |P| + C\
|x| = A 1/P
$
Take $x = u dot 1/P$; then:
$
(u' dot 1/P - u dot 1/P^2) P^2 = 2 - u\
P u' = 2\
u' = 2/P\
u = 2 ln P + C\
x = (2 ln P + C) / P\
x' = (2 - 2 ln P - C)/P^2
$
Substituting back:
$
y &= 4 P - P^2 x \
&= 4 P - P(2 ln P + C)\
&= 4 P - 2 P ln P - C P\
$
Note that in the second solution, $y' -> 0 => P -> 0^+ => x -> -infinity$; in other words it cannot be glued to the first solution, so a solution of the original equation is exactly one of the two above
== 2
=== (1)
Set $y' / y = e^P$; then:
$
2 x e^P - 1 = e^P ln (y^2 e^P)\
2 x e^P - 1 = 2 e^P ln y + e^P P\
2 e^P ln y = 2 x e^P - P e^P - 1\
2 ln y = 2 x - P - e^(-P)
$
Differentiate with respect to $x$:
$
2 y'/y = 2 - P' + P' e^(-P)\
2 e^P = 2 - P' + P' e^(-P)\
(e^(-P) - 1) P' = 2 e^P - 2
$
- $P = 0, P' = 0$ gives:
$
ln y = x -1/2\
y = e^(x - 1/2)
$
which is a solution of the original equation
- Otherwise, separate variables:
$
(e^(-P) - 1)/(2 e^P - 2) dif P = dif x\
x = integral (e^(-P) - 1)/(2 e^P - 2) dif P \
= - (P + e^(-P))/(2 e^P - 2)\
y = e^x/(e^(P + e^(-P))) = e^(-(P + e^(-P))/(2 e^P - 2) - P - e^(-P)) = e^((P + e^(-P) - 2 P e^P - 2)/(2e^P - 2))
$
=== (3)
$
4 y = x^2 + 2x y' - y'^2
$
Set $y' = P$ and differentiate with respect to $x$:
$
4 P = 2x + 2 P + 2x P' - 2 P P'\
P = x + x P' - P P'\
P' (x- P) = P - x\
$
This shows that:
- $P' = -1 => P = -x + C$; substituting back gives:
$
4 y = x^2 + 2x (-x + C) - (-x + C)^2\
= -2 x^2 + 4 C x - C^2\
y = -1/2 x^2+C x - C^2/4
$
- or $P = x$; substituting gives:
$
4 y = 2x^2\
y = 1/2 x^2
$
A solution of the original equation is one of the two, or a gluing of them
= p79
== 2
Let the curve be $f(x, y) = 0$; its tangent line at the point $(x_0, y_0)$ is:
$
cases(
x = x_0 - partialDer(f, y_0) t,
y = y_0 + partialDer(f, x_0) t
)
$
Setting $x = 0$ and solving:
$
t = x_0/partialDer(f, y_0)\
(x-x_0)^2 + (y-y_0)^2 = t^2 (partialDer(f, x_0)^2 + partialDer(f, y_0)^2) = x_0^2(partialDer(f, x_0)/partialDer(f, y_0))^2 + x_0^2\
$
The condition gives:
$
x_0^2(partialDer(f, x_0)/partialDer(f, y_0))^2 + x_0^2 = (y_0 + x_0 partialDer(f, x_0)/partialDer(f, y_0))^2
$
Replacing $x_0, y_0$ by $x, y$ in the identity above:
$
x^2 = y^2 + 2 y partialDer(f, x)/partialDer(f, y)\
(x^2 - y^2) partialDer(f, y) - 2 y partialDer(f, x) = 0\
$
At the same time:
$
partialDer(f, x) dif x + partialDer(f, y) dif y = 0
$
Combining the two equations:
$
2y dif y + (x^2 - y^2) dif x = 0\
dif y^2 + (x^2 - y^2) dif x = 0
$
Set $u = y^2$; then:
$
dif u + (x^2 - u) dif x = 0
$
This is a first-order linear ODE; first solve:
$
dif u - u dif x = 0 => u = C e^x
$
Set $u = v e^x$ and substitute back:
$
v e^x dif x + e^x dif v + (x^2 - v e^x) dif x = 0\
e^x dif v + x^2 dif x = 0\
dif v + x^2 e^(-x) dif x = 0\
v = - integral x^2 e^(-x) dif x + C = (x^2 + 2x + 2)e^(-x) + C\
u = (x^2 + 2x + 2)+ C e^x\
y^2 = (x^2 + 2x + 2)+ C e^x\
$
== 3
Note that $f(x) := e^(- integral_0^x a(s) dif s)$ solves $y' + a(x) y = 0$; set:
$
phi(x) = u f(x)
$
Then:
$
u' f(x) + u f'(x) + a(x) f(x) u <= 0\
u' f(x) <= 0\
u' <= 0
$
Hence $u(x) <= u(0)$, and so:
$
y(0) = u(0) f(0) = u(0)\
y(x) = u(x) f(x) <= u(0) f(x) = y(0) f(x)
$
This completes the proof
== 4
First, the identity shows that $y$ is continuous, and then differentiable (the variable-upper-limit integral of a continuous function is differentiable); differentiating both sides gives:
$
y' = y + 1
$<eq>
It follows that $y$ is infinitely differentiable. Substituting $y = y' - 1$ back into the integral gives:
$
integral_0^x y(t) dif t = integral_0^x (y'(t) - 1) dif t = y(x) - y(0) - x
$
Hence:
$
y = y - y(0) - x + x + 1\
y(0) = 1
$
Returning to the differential equation @eq, first suppose $y != -1$; then:
$
ln |y + 1| = x + C\
|y + 1| = A e^x
$
The absolute value keeps a fixed sign, so all solutions are of the form:
$
y = A e^x - 1
$
Substituting $y(0) = 1$ yields:
$
y = 2e^x - 1
$
== 6
Taking partial derivatives of the original identity with respect to $x$ and $y$ gives:
$
f(x y) y = integral_1^y f(t) dif t + y f(x)\
f (x y) x = integral_1^x f(t) dif t + x f(y)
$
Taking $x = 1$ in the first equation gives:
$
f(y) y = integral_1^y f(t) dif t + y f(1)\
(f(y) - f(1))y = integral_1^y f(t) dif t
$
Differentiating with respect to $y$ again:
$
f'(y) y + (f(y) - f(1)) = f(y)\
f'(y) y = f(1)\
f(y) = f(1) ln y + C\
f(x) = f(1) ln x+ C
$
Substituting directly back into the original equation:
$
integral_1^(x y) f(1) ln t + C dif t = f(1) (x y ln (x y) - x y + 1) + C(x y - 1)\
x integral_1^y f(1) ln t + C dif t = f(1) (x y ln y - x y + x) + C(x y - x)\
y integral_1^x f(1) ln t + C dif t = f(1) (x y ln x - x y + y) + C(x y - y)\
$
Comparing coefficients, the identity holds if and only if:
$
-f(1) + C = -2 f(1) + 2 C\
f(1) = C
$
Hence the solutions of the original equation have the form:
$
f(x) = C(ln x + 1)
$
$f(1) = 3 => C = 3 => f(x) = 3(ln x + 1)$
== 7
Set $y = u e^(-x)$; substituting gives:
$
u' e^(-x) - u e^(-x) + u e^(-x) - 1/(x+1) integral_0^x u e^(-x) dif x = 0\
u' = e^x/(x+1) integral_0^x u e^(-x) dif x\
$
so $u'(0) = 0$\
#lemmaLinear[][
$u(x) >=1, u'(x) >=0, forall x > 0$
]
#proof[
Let $b = sup_(y in [0, +infinity]) [0, y] subset Inv(u)([1, +infinity])$; it suffices to show that $b = +infinity$\
Otherwise, suppose $b$ is finite; then necessarily $u(b) = 1$
+ If $b = 0$, we may assume there exists $delta > 0$ with $u([0, delta]) subset [0, 1]$; but the differential equation shows $u'(x) >= 0$ for all $x in [0, delta]$, so $u(x)$ is nondecreasing and hence identically:
$
u(x) = 1
$
But substitution shows this does not satisfy the original equation, a contradiction!
+ If $b > 0$, then $u(b) = 1$, and by the mean value theorem there exists $c in [0, b]$ such that:
$
u'(c) = 0
$
But inspecting the original equation shows that for $x in [0, b]$ we always have $u >= 1 => u' > 0$, a contradiction!
]
The lemma gives $u(x) >= 1$, hence $f(x) >= e^(-x)$\
Let $v = u - e^x$ and substitute into the original equation:
$
v' + e^x = u' = e^x/(x+1) integral_0^x (v+e^x) e^(-x) dif x = e^x/(x+1) integral_0^x v e^(-x) dif x + x/(x+1) e^x\
v' = e^x/(x+1) integral_0^x v e^(-x) dif x - 1/(x+1) e^x
$
Noting also that $v(0) = 0, v'(0) < 0$, one can similarly show that $v(x) <= 0$ for all $x > 0$\
This gives $u <= e^x => f(x) <= 1$
== 5
By the earlier discussion, solutions of the pendulum equation satisfy:
$
integral (dif y)/sqrt(2(c+a^2 cos y)) = plus.minus (x + c_1)
$
Without loss of generality take the positive sign on the right, which gives:
$
integral (dif y)/sqrt(2(c+a^2 cos y)) = x + c_1
$
Taking $c = a^2$, we find:
$
x + c_1 = integral (dif y)/sqrt(2(a^2 + a^2 cos y)) = integral (dif y)/(a sqrt(1 + cos y))\
= integral (dif y)/(a sqrt(2 cos^2 (y/2))) = integral (dif y)/(a cos(y/2)) = 2/(a) integral (dif (sin y/2))/(1 - sin^2 y/2) = 1/(a) ln (1 + sin y/2)/(1- sin y/2)
$
Clearly the right-hand side tends to $+infinity$ as $x -> +infinity$ if and only if $y -> pi$
|
|
https://github.com/Myriad-Dreamin/tinymist | https://raw.githubusercontent.com/Myriad-Dreamin/tinymist/main/crates/tinymist-query/src/fixtures/post_type_check/text_font.typ | typst | Apache License 2.0 | #let x = /* position after */ "Test"
#text(font: x)[]
|
https://github.com/tiankaima/typst-notes | https://raw.githubusercontent.com/tiankaima/typst-notes/master/7e1810-algo_hw/hw6.typ | typst | #import "@preview/cetz:0.2.2": *
#import "utils.typ": *
== HW6 (Week 7)
Due: 2024.04.21
=== Question 20.1-3
The transpose of a directed graph $G=(V,E)$ is the graph $G^T = (V, E^T)$ where $ E^T = {(v,u) in V times V:(u,v) in E} $ That is, $G^T$ is $G$ with all its edges reversed. *Describe efficient algorithms for computing $G^T$ from $G$, for both the adjacency-list and adjacency-matrix representation of $G$, Analyze the running times of your algorithms.*
#ans[
- adjacency-list:
```txt
def MAKE_ADJACENCY_LIST_TRANSPOSE(G):
GT = EMPTY_GRAPH()
GT.V = G.V
for uT in GT.V
uT.adj = []
for u in G.V
for v in u.adj
GT.v.adj.append(u)
```
Time complexity: $O(abs(V)+abs(E))$
- adjacency-matrix:
Same as matrix transpose(flipping the matrix along the diagonal).
Time complexity: $O(V^2)$ (with a specialized matrix representation: sparse matrix $O(abs(V)+abs(E))$, dense matrix $O(V^2)$, lazy transpose $O(1)$).
]
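For concreteness, a minimal sketch of the adjacency-matrix version as a Typst function over an array-of-arrays matrix (illustrative only, not part of the original answer):
```typst
// transpose an n x n adjacency matrix m, given as an array of arrays
#let transpose(m) = {
  let n = m.len()
  range(n).map(i => range(n).map(j => m.at(j).at(i)))
}
// transpose(((0, 1), (0, 0))) == ((0, 0), (1, 0))
```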
=== Question 20.1-8
Suppose that instead of a linked list, each array entry $"Adj"[u]$ is a hash table containing the vertices $v$ for which $(u,v) in E$, with collisions resolved by chaining.
Under the assumption of uniform independent hashing, if all edge lookups are equally likely,
- what is the expected time to determine whether an edge is in the graph?
#ans[
$O(1)$
]
- What disadvantages does this scheme have compared to the linked-list representation?
#ans[
Worst case time complexity is $O(V)$, while linked-list representation is $O(abs(u."adj"))$.
]
- Suggest an alternate data structure for each edge list that solves these problems. Does your alternative have disadvantages compared with the hash table?
#ans[
Use a balanced binary search tree to store the edge list.
- Time complexity: $O(log(abs(u."adj")))$. (worst case time complexity)
- Disadvantages: $O(log(abs(u."adj"))) > O(1)$. (worse average time complexity)
]
=== Question 20.2-6
Give an example of a directed graph $G=(V,E)$, a source vertex $s in V$, and a set of tree edges $E_pi subset.eq E$ such that for each vertex $v in V$, the unique simple path in the graph $(V, E_pi)$ from $s$ to $v$ is a shortest path in $G$, yet the set of edges $E_pi$ cannot be produced by running BFS on $G$, no matter how the vertices are ordered in each adjacency list.
#ans[
Consider the following graph $G$:
#let data = (
[$1$],
([$2$], [$4$]),
([$3$], [$5$]),
)
#align(center)[
#canvas(
length: 1cm,
{
import draw: *
set-style(
content: (padding: .2),
fill: gray.lighten(80%),
stroke: gray.lighten(70%),
)
tree.tree(
data,
spread: 1.5,
grow: 1.4,
draw-node: (node, ..) => {
circle((), radius: .45, stroke: none)
content((), node.content)
},
draw-edge: (from, to, ..) => {
line(
(a: from, number: .6, b: to),
(a: to, number: .6, b: from),
mark: (end: ">"),
)
},
name: "tree",
)
let (a, b) = ("tree.0-0", "tree.0-1-0")
line((a, .6, b), (b, .6, a), mark: (end: ">"))
let (a, b) = ("tree.0-1", "tree.0-0-0")
line((a, .6, b), (b, .6, a), mark: (end: ">"))
},
)
]
Only the following $(V, E_pi)$ could be generated:
#align(center)[
#table(
columns: (auto, auto),
stroke: (),
[
#let data = (
[$1$],
([$2$], [$4$], [$5$]),
([$3$]),
)
#canvas(
length: 1cm,
{
import draw: *
set-style(
content: (padding: .2),
fill: gray.lighten(80%),
stroke: gray.lighten(70%),
)
tree.tree(
data,
spread: 1.5,
grow: 1.4,
draw-node: (node, ..) => {
circle((), radius: .45, stroke: none)
content((), node.content)
},
draw-edge: (from, to, ..) => {
line(
(a: from, number: .6, b: to),
(a: to, number: .6, b: from),
mark: (end: ">"),
)
},
name: "tree",
)
},
)
],
[
#let data = (
[$1$],
([$3$], [$4$], [$5$]),
([$2$]),
)
#canvas(
length: 1cm,
{
import draw: *
set-style(
content: (padding: .2),
fill: gray.lighten(80%),
stroke: gray.lighten(70%),
)
tree.tree(
data,
spread: 1.5,
grow: 1.4,
draw-node: (node, ..) => {
circle((), radius: .45, stroke: none)
content((), node.content)
},
draw-edge: (from, to, ..) => {
line(
(a: from, number: .6, b: to),
(a: to, number: .6, b: from),
mark: (end: ">"),
)
},
name: "tree",
)
},
)
],
)
]
but not the following:
#let data = (
[$1$],
([$2$], [$4$]),
([$3$], [$5$]),
)
#align(center)[
#canvas(
length: 1cm,
{
import draw: *
set-style(
content: (padding: .2),
fill: gray.lighten(80%),
stroke: gray.lighten(70%),
)
tree.tree(
data,
spread: 1.5,
grow: 1.4,
draw-node: (node, ..) => {
circle((), radius: .45, stroke: none)
content((), node.content)
},
draw-edge: (from, to, ..) => {
line(
(a: from, number: .6, b: to),
(a: to, number: .6, b: from),
mark: (end: ">"),
)
},
name: "tree",
)
},
)
]
in which every tree path is nonetheless a shortest path.
]
=== Question 20.4-5
Another way to topologically sort a directed acyclic graph $G=(V,E)$ is to repeatedly find a vertex of in-degree $0$, output it, and remove it and all of its outgoing edges from the graph. *Explain how to implement this idea so that is runs in time $O(V + E)$. What happens to this algorithm if $G$ has cycles?*
#ans[
- Implementation:
```txt
def TOPOLOGICAL_SORT(G):
for u in G.V
u.indegree = 0
for u in G.V
for v in u.adj
v.indegree += 1
Q = []
for u in G.V
if u.indegree == 0
Q.append(u)
while Q
u = Q.pop()
print(u)
for v in u.adj
v.indegree -= 1
if v.indegree == 0
Q.append(v)
```
Time complexity: $O(V+E)$
- If $G$ has cycles:
#rev1_note[
When $G$ contains cycles, no vertex on a cycle (nor any successor of a cycle) is ever enqueued, so none of them appear in the topological order.
]
The algorithm still terminates, but the vertices on cycles (and their descendants) never reach in-degree $0$, so they are never enqueued; the output then contains fewer than $abs(V)$ vertices.
]
=== Question 20.5-4
Prove that for any directed graph $G$, the transpose of the component graph $G^T$ is the same as the component graph of $G$. That is $((G^T)^(S C C))^T = G^(S C C)$
#ans[
Let $C$ be a component of $G$.
- If $C$ is a single vertex, then $C$ is also a component of $G^T$.
- If $C$ is a strongly connected component, then $C$ is also a strongly connected component of $G^T$.
- Vertex sets of $((G^T)^(S C C))^T$ and $G^(S C C)$ are the same.
- To show edge sets are the same:
For all $(v_i, v_j) in E_(((G^T)^(S C C))^T)$: $(v_j, v_i)$ is an edge in $(G^T)^(S C C)$, so there exist $x in C_j, y in C_i$ with $(x,y) in E_(G^T)$, i.e. $(y,x)$ is an edge of $G$ $=>$ $(v_i, v_j)$ is an edge in $G^(S C C)$.
The other direction is similar; thus $((G^T)^(S C C))^T = G^(S C C)$.
] |
|
https://github.com/vitto4/ttuile | https://raw.githubusercontent.com/vitto4/ttuile/main/README.md | markdown | MIT License | <h1 align="center">
<img alt="Typst" src="https://github.com/vitto4/ttuile/blob/main/assets/ttuile-header.png?raw=true">
</h1>
<p align="center">
<a href="https://typst.app">
<img alt="typst" src="https://img.shields.io/badge/Typst-%232f90ba.svg?&logo=Typst&logoColor=white"
/></a>
<a href="https://github.com/vitto4/ttuile/blob/main/LICENSE">
<img alt="MIT" src="https://img.shields.io/github/license/vitto4/ttuile"
/></a>
<a href="https://github.com/vitto4/ttuile/releases">
<img alt="GitHub Release" src="https://img.shields.io/github/v/release/vitto4/ttuile"
/></a>
</p>
<p align="center"><i>A <b>Typst</b> template for lab reports at <a href="https://en.wikipedia.org/wiki/Institut_national_des_sciences_appliqu%C3%A9es_de_Lyon"> INSA Lyon</a>.</i></p>
<p align="center">
<a href="https://github.com/vitto4/ttuile/blob/main/template/main.pdf">
<img alt="Example" src="https://github.com/vitto4/ttuile/blob/main/assets/ttuile-banner.png?raw=true">
</a>
</p>
> [!NOTE]
>
> Voir aussi le [README.FR.md](https://github.com/vitto4/ttuile/blob/main/README.FR.md) en français.
## 🧭 Table of contents
1. [Usage](#-usage)
1. [Documentation](#-documentation)
1. [Notes](#-notes)
1. [Contributing](#-contributing)
## 📎 Usage
This template targets French students, so labels will be in French; see [Notes](#-notes).
It is available on _Typst Universe_ : [`@preview/ttuile:0.1.1`](https://typst.app/universe/package/ttuile).
If you wish to use it in a fully local manner, you'll need to either manually include `ttuile.typ` and `logo-insa-lyon.png` in your project's root directory, or upload them to the _Typst web app_ if that's what you use.
You'll find these files in the [releases](https://github.com/vitto4/ttuile/releases) section.
Your folder structure should then look something like this :
```
.
├── ttuile.typ
├── logo-insa-lyon.png
└── main.typ
```
The template is now ready to be used, and can be called by supplying the following arguments.
`?` means the argument can be null if not applicable.
| Argument | Default value | Type | Description |
|:--------:|:-------------:|:----:|:------------|
| `titre` | `none` | `content?` | The title of your report. |
| `auteurs` | `none` | `array<str> \| content?` | One or multiple authors to be credited in the report. |
| `groupe` | `none` | `content?` | Your class number/letter/identifier. Will be displayed right after the author(s). |
| `numero-tp` | `none` | `content?` | The number/identifier of the lab work/practical you're writing this report for. |
| `numero-poste` | `none` | `content?` | Number of your lab bench. |
| `date` | `none` | `datetime \| content?` | Date at which the lab work/practical was carried out. |
| `sommaire` | `true` | `bool` | Display the table of contents ? |
| `logo` | `image("logo-insa-lyon.png")` | `image?` | University logo to use. |
| `point-legende` | `false` | `bool` | Enable automatic enforcement of full stops at the end of figure captions (still somewhat experimental). |
A single positional argument is accepted, being the report's body.
You can call the template using the following syntax :
```typ
// Local import
// #import "ttuile.typ": *
// Universe import
#import "@preview/ttuile:0.1.1": *
#show: ttuile.with(
titre: [« #lorem(8) »],
auteurs: (
"<NAME>",
"<NAME>",
"<NAME>",
),
groupe: "TD0",
numero-tp: 0,
numero-poste: "0",
date: datetime.today(),
// sommaire: false,
// logo: image("path_to/logo.png"),
// point-legende: true,
)
```
## 📚 Documentation
The package `ttuile.typ` exposes multiple functions, find out more about them in the _documentation_.
<p align="center">
<a href="https://github.com/vitto4/ttuile/blob/main/DOC.EN.md">
To the documentation
</a>
</p>
An example file is also available in [`template/main.typ`](https://github.com/vitto4/ttuile/blob/main/template/main.typ)
## 🔖 Notes
- Beware, all of the labels will be in French (authors != auteurs, appendix != annexe, ...)
- If you really want to use this template despite not being an INSA student, you can probably figure out what to change in the code (namely the labels mentioned above). You can remove the INSA logo by setting `logo: none`.
Should you still need help, no worries, feel free to reach out !
- The code - variable names and comments - is all in French. That's on me, I didn't really think it through when first writing the template haha. I might consider translating sometime in the future.
- The MIT license doesn't apply to the file `logo-insa-lyon.png`, it was retrieved from [INSA Lyon - éléments graphiques](https://www.insa-lyon.fr/fr/elements-graphiques). It doesn't apply either to the "INSA" branding.
## 🧩 Contributing
Contributions are welcome ! Parts of the template are very much spaghetti code, especially where the spacing between different headings is handled (seriously, it's pretty bad).
If you know the proper way of doing this, an issue or PR would be greatly appreciated :) |
https://github.com/Functional-Bus-Description-Language/Specification | https://raw.githubusercontent.com/Functional-Bus-Description-Language/Specification/master/src/lexical-elements.typ | typst | #pagebreak()
= Lexical elements
FBDL has the following types of lexical tokens:
- comment,
- identifier,
- indent,
- keyword,
- literal,
- newline.
== Comments
There is only a single type of comment, a _single-line comment_.
A single-line comment starts with the '\#' character and extends up to the end of the line.
A single-line comment can appear on any line of an FBDL file and may contain any character, including glyphs and special characters.
The presence or absence of comments has no influence on whether a description is legal or illegal.
Their sole purpose is to enlighten the human reader.
=== Documentation comments
Documentation comments are comments that appear immediately before constant definitions, type definitions, and functionality instantiations with no intervening newlines.
The following code shows examples of documentation comments:
#block(breakable:false)[
#pad(left: 1em)[
```fbd
# Number of receivers
const RECEIVERS_COUNT = 7
Main bus
# Data receivers
Receivers [RECEIVERS_COUNT]block
# 0 disable receiver, 1 enable receiver
Enable config; width = 1
# Number of frames in the buffer
Frame_Count status
# Documentation comments can consist of
# multiple single-line comments.
Read_Frame proc
data [4]return; width = 8
```
]
]
== Identifiers
Identifiers are used as names.
An identifier shall start with a letter.
`uppercase_letter ::= `\
#h(1em)*`A`*` | `*`B`*` | `*`C`*` | `*`D`*` | `*`E`*` | `*`F`*` | `*`G`*` | `*`H`*` | `*`I`*` | `*`J`*` | `*`K`*` | `*`L`*` | `*`M`*` |` \
#h(1em)*`N`*` | `*`O`*` | `*`P`*` | `*`R`*` | `*`S`*` | `*`T`*` | `*`U`*` | `*`V`*` | `*`W`*` | `*`X`*` | `*`Y`*` | `*`Z`*
`lowercase_letter ::=` \
#h(1em)*`a`*` | `*`b`*` | `*`c`*` | `*`d`*` | `*`e`*` | `*`f`*` | `*`g`*` | `*`h`*` | `*`i`*` | `*`j`*` | `*`k`*` | `*`l`*` | `*`m`*` |` \
#h(1em)*`n`*` | `*`o`*` | `*`p`*` | `*`r`*` | `*`s`*` | `*`t`*` | `*`u`*` | `*`v`*` | `*`w`*` | `*`x`*` | `*`y`*` | `*`z`*
`letter ::= uppercase_letter | lowercase_letter`
`letter_or_digit ::= letter | decimal_digit`
`identifier ::= letter { underscore | letter_or_digit }`
#block(breakable:false)[
The following code contains some valid and invalid identifiers:
#pad(left: 1em)[
```fbd
const C_20 = 20 # Valid
const _C20 = 20 # Invalid
Main bus
cfg1 config # Valid
1cfg config # Invalid
```
]
]
=== Declared identifier
A declared identifier is any occurrence of an identifier that already denotes some declared item.
`declared_identifier ::= letter { underscore | letter_or_digit }`
=== Qualified identifier
The qualified identifier is used to reference a symbol from a foreign package.
`qualified_identifier ::= declared_identifier.declared_identifier`
The first declared identifier denotes the package, and the second one denotes the symbol from this package.
== Indent
The indentation has semantic meaning in FBDL.
The indent sequence consists of two space characters (U+0020).
It is hard to express the indent and dedent using BNF.
Indent is the increase of the indentation level, and dedent is the decrease of the indentation level.
In the following code, an indent happens at lines 2, 5 and 7, and a dedent happens at line 4.
What is more, a double dedent happens at the EOF.
The number of indents always equals the number of dedents in a syntactically and semantically correct file.
#block(breakable:false)[
#pad(left: 1em)[
```fbd
1: type cfg_t config
2: atomic = false
3: width = 64
4: Main bus
5: C cfg_t
6: Blk block
7: C cfg_t
8: S status
```
]
]
Not only is the indent alignment important, but so is its level.
In the following code the first type definition is correct, as the indent level for the definition body is increased by one.
The second type definition is incorrect, even though the indent within the definition body is aligned, as the indent level is increased by two.
#block(breakable:false)[
#pad(left: 1em)[
```fbd
# Valid indent
type cfg1_t config
atomic = false
width = 8
# Invalid indent, indent increased by two
type cfg2_t config
atomic = false
width = 8
```
]
]
== Keywords
FBDL has following keywords: *`atomic`*, *`block`*, *`bus`*, *`clear`*, *`config`*, *`const`*, *`false`*, *`import`*, *`init-value`*, *`irq`*, *`mask`*, *`memory`*, *`param`*, *`proc`*, *`range`*, *`reset`*, *`read-value`*, *`reset-value`*, *`return`*, *`static`*, *`stream`*, *`true`*, *`type`*, *`in-trigger`*, *`out-trigger`*.
Keywords can be used as identifiers with one exception.
Keywords denoting built-in types (functionalities) cannot be used as identifiers for custom types.
== Literals
=== Bool literals
`bool_literal ::= `*`false`*` | `*`true`*
=== Number literals
`underscore ::= _`
`zero_digit ::= 0`
`non_zero_decimal_digit ::= `*`1`*` | `*`2`*` | `*`3`*` | `*`4`*` | `*`5`*` | `*`6`*` | `*`7`*` | `*`8`*` | `*`9`*
`decimal_digit ::= zero_digit | non_zero_decimal_digit`
`binary_base ::= `*`0B`*` | `*`0b`*
`binary_digit ::= `*`0`*` | `*`1`*
`octal_base ::= `*`0O`*` | `*`0o`*
`octal_digit ::= `*`0`*` | `*`1`*` | `*`2`*` | `*`3`*` | `*`4`*` | `*`5`*` | `*`6`*` | `*`7`*
`hex_base ::= `*`0X`*` | `*`0x`*
`hex_digit ::= `*`0`*` | `*`1`*` | `*`2`*` | `*`3`*` | `*`4`*` | `*`5`*` | `*`6`*` | `*`7`*` | `*`8`*` | `*`9`* \
` | `*`A`*` | `*`a`*` | `*`B`*` | `*`b`*` | `*`C`*` | `*`c`*` | `*`D`*` | `*`d`*` | `*`E`*` | `*`e`*` | `*`F`*` | `*`f`*
=== Integer literals
`integer_literal ::= binary_literal | octal_literal | decimal_literal | hex_literal`
`binary_literal ::= binary_base binary_digit { [ underscore ] binary_digit }`
`octal_literal ::= octal_base octal_digit { [ underscore ] octal_digit }`
`decimal_literal ::= non_zero_decimal_digit { [ underscore ] decimal_digit }`
`hex_literal ::= hex_base hex_digit { [ underscore ] hex_digit }`
=== Real literals
The real literals shall be represented as described by IEEE Std 754, an IEEE standard for double-precision floating-point numbers.
Real numbers can be specified in either decimal notation (for example, 17.83) or in scientific notation (for example, 13e8, which indicates 13 multiplied by 10 to the eighth power).
Real numbers expressed with a decimal point shall have at least one digit on each side of the decimal point.
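For example, `17.83` and `0.5` are valid real literals, while `.5` and `5.` are not, as each lacks a digit on one side of the decimal point.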
=== String literals
A string literal is a sequence of zero or more UTF-8 characters enclosed by double quotes ("").
`string_literal ::= "{UTF-8 character}"`
=== Bit string literals
A bit string literal is a sequence of zero or more digit or meta value characters enclosed by double quotes ("") and preceded by a base specifier.
The meta value characters are supported because of hardware description languages, which also have a concept of metalogical values.
`meta_character ::= `*`-`*` | `*`U`*` | `*`W`*` | `*`X`*` | `*`Z`*
The meta characters have following meaning:
- '-' - don't care,
- 'U' - uninitialized,
- 'W' - weak unknown,
- 'X' - unknown,
- 'Z' - high-impedance state.
`binary_or_meta ::= binary_digit | meta_character`
`octal_or_meta ::= octal_digit | meta_character`
`hex_or_meta ::= hex_digit | meta_character`
There are three types of bit string literals: binary bit string literals, octal bit string literals and hex bit string literals.
```
bit_string_literal ::=
binary_bit_string_literal |
octal_bit_string_literal |
hex_bit_string_literal
```
`binary_bit_string_base = `*`B`*` | `*`b`*
`binary_bit_string_literal = binary_bit_string_base `*`"`*`{binary_or_meta}`*`"`*
`octal_bit_string_base = `*`O`*` | `*`o`*
`octal_bit_string_literal = octal_bit_string_base `*`"`*`{octal_or_meta}`*`"`*
`hex_bit_string_base = `*`X`*` | `*`x`*
`hex_bit_string_literal = hex_bit_string_base `*`"`*`{hex_or_meta}`*`"`*
If a meta value is present in a bit string literal, then it is expanded to the proper width, depending on the bit string base.
For example, the following equations are true:
#block(breakable:false)[
#pad(left: 1em)[
```
o"XW" = b"XXXWWW"
x"U-" = b"UUUU----"
```
]
]
=== Time literals
A time literal is a sequence of an integer literal and a time unit.
`time_unit ::= `*`ns`*` | `*`us`*` | `*`ms`*` | `*`s`*
`time_literal ::= integer_literal time_unit`
Time literals are used to create values of the time data type, required for example by the `delay` property.
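For example, the following are all valid time literals:

#block(breakable:false)[
  #pad(left: 1em)[
    ```
    10ns
    25us
    100ms
    1s
    ```
  ]
]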
|
|
https://github.com/piepert/typst-hro-iph-seminar-paper | https://raw.githubusercontent.com/piepert/typst-hro-iph-seminar-paper/main/README.md | markdown | # HRO IPH Seminar Paper Template
Inofficial template following the standards of the Institute of Philosophy's Methodenüberlebenskoffer (MÜK, "methods survival kit") at the University of Rostock.
## Installation
Either install it as a local package and import it using:
```
#import "@local/hro-iph-seminar-paper:0.1.0": seminar-paper
```
or copy the `template.typ` into your typst project and import it using:
```
#import "template.typ": seminar-paper
```
## Bibliography
Right now [CSL](https://citationstyles.org/) support is being implemented for Typst. As an alternative, you can use [typst-custombib](https://github.com/survari/typst-custombib/) and the `custombib-hro-iph-style.typ` provided by this package to get a bibliography style that conforms the MÜK.
Copy the `custombib-hro-iph-style.typ` into your project, import the `typst-custombib` library and use it like this:
```
#import "custombib-hro-iph-style.typ": hro-iph-bibstyle
#tcb-style(hro-iph-bibstyle)
#load
```
### Bibliography Source
The source is given as a YAML file. Each entry looks like this:
```yaml
Montague1970:
entry-type: article
author: <NAME>
title: Pragmatics and Intensional Logic
journal: Synthese
volume: 22
issue: 1-2
year: 1970
pages: 68-94
section: primary
```
The special fields `entry-type` and `section` specify what type of object the entry is (e.g. `monography`, `article`, ...) and in which section it should be listed (either `primary` or `secondary`; remove the field to not differentiate between primary and secondary literature).
The `entry-type` can be one of the following, each having custom fields (optional fields in parentheses):
- `monography`: authors, title, year, location, (pages, edition, volume, volume-title, series, series-volume)
- `url`: authors, title, url, year
- `article`: authors, title, journal, (publisher, volume, issue, pages, url, year)
- `collection`: authors, title, year, location, (volume, volume-title, edition)
- `collection-article`: authors, title, publisher, location, year, (pages, volume, volume-title)
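
For instance, a `monography` entry could look like this (values are illustrative):

```yaml
Kant1781:
  entry-type: monography
  author: Immanuel Kant
  title: Kritik der reinen Vernunft
  year: 1781
  location: Riga
  section: primary
```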
See [`example2/example2.typ`](example2/example2.pdf) for a working example. |
|
https://github.com/binhtran432k/ungrammar-docs | https://raw.githubusercontent.com/binhtran432k/ungrammar-docs/main/contents/literature-review/cst.typ | typst | #import "/components/glossary.typ": gls
== Concrete Syntax Tree (CST) <sec-cst>
#gls("cst", mode: "full") play a critical role in the parsing and syntax
analysis of programming languages. Unlike Abstract Syntax Trees (AST)
(@sec-ast), which abstract away syntactic details like parentheses, comments,
and other redundant information, CSTs preserve the full structure of the source
code, including all syntactic elements defined by the grammar of the language.
#gls("cst")s are essential for tools that require a detailed, faithful
representation of source code syntax. They enable fine-grained control in
environments like code editors, syntax-directed editors, and real-time
collaborative development. As tools like Tree-sitter and Lezer (@sec-lezer)
continue to evolve, CSTs will likely become even more integral to modern
software development workflows.
=== Theoretical Foundations of CSTs
The concept of a #gls("cst") originates from formal language theory and
compiler construction. CSTs are direct representations of the syntax of a
program as described by a context-free grammar. In contrast to ASTs, CSTs map
closely to the grammatical structure of the source code and include every
token, ensuring a more detailed and faithful representation.
The distinction between CST and AST is discussed in Aho et al.'s Compilers:
Principles, Techniques, and Tools (2006), commonly known as The Dragon Book
@bib-art-cp. Aho et al. explain that CSTs are typically generated during the
parsing phase of compilation, while ASTs are often created afterward to
simplify semantic analysis and optimization. CSTs capture more information than
ASTs and are useful for tasks such as syntax highlighting, code folding, and
structural editing in #gls("ides").
=== Practical Implementations of CSTs
Several modern parsing tools and libraries emphasize the use of CSTs for
different purposes:
- Tree-sitter: Developed by GitHub, Tree-sitter is a popular parser generator
tool designed for fast parsing of source code to produce CSTs. It is used in
various applications such as syntax highlighting, code navigation, and
real-time code analysis in IDEs. Tree-sitter generates CSTs that represent
the entire syntax of a program, including elements like comments and
punctuation, making it an ideal choice for syntax-directed editors and
linters. The explicit representation of every syntax element makes it
possible to manipulate and transform the code while preserving its structure
@bib-treesitter.
- Lezer (@sec-lezer): Lezer is another incremental parsing library designed for
real-time parsing of code in browser environments, such as within code
editors. Like Tree-sitter, Lezer constructs CSTs that map directly to the
source code grammar. Its design is optimized for compactness, avoiding
redundant information and making it efficient for lightweight environments.
Lezer’s CST representation is ideal for providing fine-grained control over
code formatting and refactoring in IDEs or browser-based editors @bib-lezer.
Both Tree-sitter and Lezer produce CSTs because they are optimized for
environments where accurate, fine-grained syntax control is necessary, such as
in code editors or IDEs. Their goal is to represent the source code as
faithfully as possible, capturing all details for accurate rendering and
manipulation.
=== CSTs in Modern Development
CSTs are becoming increasingly important in the modern development ecosystem,
particularly with the rise of powerful code editors and IDEs. The preservation
of full syntactic details enables advanced features such as:
- *Syntax Highlighting and Code Folding*: Tools like Visual Studio Code and
GitHub's Atom editor rely on CSTs to offer responsive syntax highlighting and
code folding. These features are essential for user experience, enabling
developers to navigate and understand large codebases more efficiently.
- *Refactoring and Code Manipulation*: Refactoring tools and code formatters can
leverage CSTs to make safe, structural changes to code while preserving its
formatting and comments. This is a core advantage of using CSTs over ASTs, as
ASTs often discard syntactic elements, making it more challenging to maintain
the original code structure.
- *Incremental Parsing for Real-time Feedback*: Tools like Tree-sitter and Lezer
are optimized for real-time parsing, providing immediate feedback during code
editing. This feature is essential for modern IDEs, which require rapid
updates to the code’s representation as developers type, without re-parsing
the entire document.
=== Challenges and Future Directions
Despite their advantages, CSTs can introduce complexity in situations where the
full syntactic structure is not necessary. For example, during the semantic
analysis phase of compilation, the extra syntactic details captured in a CST
may be redundant, and thus many compilers convert CSTs to ASTs at this stage to
streamline optimization and code generation.
Looking ahead, CSTs are expected to play an increasingly significant role in
areas like real-time collaborative development, where the ability to capture
every syntactic detail is crucial for synchronizing changes across different
users.
|
|
https://github.com/sspu-cryptography-research-group/cv-template | https://raw.githubusercontent.com/sspu-cryptography-research-group/cv-template/main/cv-template.typ | typst | #show heading: set text(font: "Linux Biolinum")
#set text(font: "yahei")
#show link: underline
#set page(
margin: (x: 0.9cm, y: 1.3cm),
)
#set par(justify: true)
#let chiline() = {v(-3pt); line(length: 100%); v(-5pt)}
= Name
email · (+86) phone number ·
#link("https://github.com/githubName")[github/githubName]
== Education
#chiline()
*上海第二工业大学* #h(1fr) 2021/09 -- 2024/06(Expected) \
#lorem(5) #h(1fr) #lorem(2) \
- #lorem(10)
*Undergraduate School Name* #h(1fr) 2333/23 -- 2333/23 \
#lorem(5) #h(1fr) #lorem(2) \
- #lorem(10)
== Work Experience
#chiline()
*#lorem(2)* #h(1fr) 2333/23 -- 2333/23 \
#lorem(5) #h(1fr) #lorem(2) \
- #lorem(20)
- #lorem(30)
- #lorem(40)
== Projects
#chiline()
*#lorem(2)* #h(1fr) 2333/23 -- 2333/23 \
#lorem(5) #h(1fr) #lorem(2) \
- #lorem(20)
- #lorem(30)
- #lorem(40)
*#lorem(2)* #h(1fr) 2333/23 -- 2333/23 \
#lorem(5) #h(1fr) #lorem(2) \
- #lorem(20)
- #lorem(30)
- #lorem(40)
== Skills
#chiline()
- #lorem(15)
- #lorem(15)
- #lorem(15)
== Awards And Certificates
#chiline()
- #lorem(15)
- #lorem(15)
- #lorem(15) |
|
https://github.com/WinstonMDP/math | https://raw.githubusercontent.com/WinstonMDP/math/main/knowledge/permutations.typ | typst | #import "../cfg.typ": cfg
#show: cfg
= Permutations
A permutation $:=$ a bijection between ${1, ..., n}$.
A symmetric group of an order $n := S_n :=$ a set of permutations of the length $n$.
An alternating group $:= A_n := {s in S_n mid(|) s "is even"}$.
The Klein four-group $:=
{1, mat(1, 2) mat(3, 4), mat(1, 3) mat(2, 4), mat(1, 4) mat(2, 3)}$.
They write $mat(1, 2, ..., n; pi_1, pi_2, ..., pi_n)$.
A cycle $:=$ a permutation that cycles through the elements.
Example: $mat(1, 2, 4) = mat(1, 2, 3, 4; 2, 4, 3, 1)$.
$e := mat(1, ..., n; 1, ..., n)$.
$i, j$ are $pi$-equivalent $:= exists s in ZZ: j = (pi^s)_i$.
A $pi$-orbit $:=$ an equivalence class of $pi$-equivalence.
Two cycles are independent $:=$ they have no common elements.
A permutation is uniquely decomposed into a product of independent cycles.
A transposition $:=$ a cycle of the length 2.
A permutation is decomposed into a product of transpositions, but not uniquely.
$mat(i_1, ..., i_n) = mat(i_1, i_n) ... mat(i_1, i_3) mat(i_1, i_2)$.
A sign of a permutation $pi :=
(-1)^k$ where $k = hash$ transpositions in a $pi$ transposition decomposition.
A sign of a permutation is well-defined: it does not depend on the chosen decomposition.
$op("sgn") pi sigma = (op("sgn") pi) (op("sgn") sigma)$.
$op("sgn") e = 1$.
$(i, j)$ is an inversion $:= i < j and pi_i > pi_j$.
$op("sgn") pi = (-1)^k$ where $k = hash$ inversions of $pi$.
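Example: for $pi = mat(1, 2, 3; 2, 3, 1) = mat(1, 2, 3)$, the inversions are $(1, 3)$ and $(2, 3)$, and $mat(1, 2, 3) = mat(1, 3) mat(1, 2)$, so both formulas give $op("sgn") pi = (-1)^2 = 1$.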
A decrement of a permutation $:=
op(d) pi := sum_(i = 1)^m l_i - m$ where
$m$ = $hash$ cycles in the decomposition, $l_i$ = a length of the $i$-th cycle.
$op("sgn") pi = (-1)^(op(d) pi)$.
$op(d) pi = min hash$ transpositions in a decomposition of $pi$.
$hash$ inversions changes by $plus.minus 1$ after a $mat(q, q + 1)$ transposition.
$mat(a_1, ..., a_n) mat(a_n, c) = mat(a_1, ..., a_n, c)$.
|
|
https://github.com/Quaternijkon/Typst_FLOW | https://raw.githubusercontent.com/Quaternijkon/Typst_FLOW/main/src/configs.typ | typst | #import "pdfpc.typ"
#import "utils.typ"
#import "core.typ": touying-slide-wrapper, touying-slide, slide
#let _default = metadata((kind: "touying-default"))
#let _get-dict-without-default(dict) = {
let new-dict = (:)
for (key, value) in dict.pairs() {
if value != _default {
new-dict.insert(key, value)
}
}
return new-dict
}
/// The private configurations of the theme.
#let config-store(..args) = {
assert(args.pos().len() == 0, message: "Unexpected positional arguments.")
return (store: args.named())
}
#let _default-frozen-states = (
// ctheorems state
state("thm",
(
"counters": ("heading": ()),
"latest": ()
)
),
)
#let _default-frozen-counters = (
counter(math.equation),
counter(figure.where(kind: table)),
counter(figure.where(kind: image)),
)
#let _default-preamble = self => {
if self.at("enable-mark-warning", default: true) {
context {
let marks = query(<touying-temporary-mark>)
if marks.len() > 0 {
let page-num = marks.at(0).location().page()
let kind = marks.at(0).value.kind
panic("Unsupported mark `" + kind + "` at page " + str(page-num) + ". You can't use it inside some functions like `context`. You may want to use the callback-style `uncover` function instead.")
}
}
}
if self.at("enable-pdfpc", default: true) {
context pdfpc.pdfpc-file(here())
}
}
#let _default-page-preamble = self => {
if self.at("reset-footnote-number-per-slide", default: true) {
counter(footnote).update(0)
}
if self.at("reset-page-counter-to-slide-counter", default: true) {
context counter(page).update(utils.slide-counter.get())
}
if self.at("enable-pdfpc", default: true) {
context [
#metadata((t: "NewSlide")) <pdfpc>
#metadata((t: "Idx", v: here().page() - 1)) <pdfpc>
#metadata((t: "Overlay", v: self.subslide - 1)) <pdfpc>
#metadata((t: "LogicalSlide", v: utils.slide-counter.get().first())) <pdfpc>
]
}
}
/// The common configurations of the slides.
///
/// - `handout` (bool): Whether to enable the handout mode. It retains only the last subslide of each slide in handout mode. The default value is `false`.
///
/// - `slide-level` (int): The level of the slides. The default value is `2`, which means the level 1 and 2 headings will be treated as slides.
///
/// - `slide-fn` (function): The function to create a new slide.
///
/// - `new-section-slide-fn` (function): The function to create a new slide for a new section. The default value is `none`.
///
/// - `new-subsection-slide-fn` (function): The function to create a new slide for a new subsection. The default value is `none`.
///
/// - `new-subsubsection-slide-fn` (function): The function to create a new slide for a new subsubsection. The default value is `none`.
///
/// - `new-subsubsubsection-slide-fn` (function): The function to create a new slide for a new subsubsubsection. The default value is `none`.
///
/// - `datetime-format` (auto, string): The format of the datetime. The default value is `auto`.
///
/// - `appendix` (bool): Is touying in the appendix mode. The last-slide-counter will be frozen in the appendix mode. The default value is `false`.
///
/// - `freeze-slide-counter` (bool): Whether to freeze the slide counter. The default value is `false`.
///
/// - `zero-margin-header` (bool): Whether to show the full header (with negative padding). The default value is `true`.
///
/// - `zero-margin-footer` (bool): Whether to show the full footer (with negative padding). The default value is `true`.
///
/// - `auto-offset-for-heading` (bool): Whether to add an offset relative to slide-level for headings. The default value is `true`.
///
/// - `enable-pdfpc` (bool): Whether to add `<pdfpc-file>` label for querying. The default value is `true`.
///
/// You can export the .pdfpc file directly using: `typst query --root . ./example.typ --field value --one "<pdfpc-file>" > ./example.pdfpc`
///
/// - `enable-mark-warning` (bool): Whether to enable the mark warning. The default value is `true`.
///
/// - `reset-page-counter-to-slide-counter` (bool): Whether to reset the page counter to the slide counter. The default value is `true`.
///
/// ------------------------------------------------------------
/// The following configurations are some black magics for better slides writing,
/// maybe will be deprecated in the future.
/// ------------------------------------------------------------
///
/// - `show-notes-on-second-screen` (none, alignment): Whether to show the speaker notes on the second screen. The default value is `none`.
///
/// Currently, the alignment can be `none`, `bottom` and `right`.
///
/// - `horizontal-line-to-pagebreak` (bool): Whether to convert horizontal lines to page breaks. The default value is `true`.
///
/// You can use markdown-like syntax `---` to divide slides.
///
/// - `reset-footnote-number-per-slide` (bool): Whether to reset the footnote number per slide. The default value is `true`.
///
/// - `nontight-list-enum-and-terms` (bool): Whether to make `tight` argument always be `false` for list, enum, and terms. The default value is `false`.
///
/// - `align-list-marker-with-baseline` (bool): Whether to align the list marker with the baseline. The default value is `false`.
///
/// - `scale-list-items` (none, float): Whether to scale the list items recursively. For example, `scale-list-items: 0.8` will scale the list items by 0.8. The default value is `none`.
///
/// - `enable-frozen-states-and-counters` (bool): Whether to enable the frozen states and counters. It is useful for equations, figures and theorems. The default value is `true`.
///
/// - `frozen-states` (array): The frozen states for the frozen states and counters. The default value is `()`.
///
/// - `default-frozen-states` (function): The default frozen states for the frozen states and counters. The default value is state for `ctheorems` package.
///
/// - `frozen-counters` (array): The frozen counters for the frozen states and counters. You can pass some counters like `(counter(math.equation),)`. The default value is `()`.
///
/// - `default-frozen-counters` (array): The default frozen counters for the frozen states and counters. The default value is `(counter(math.equation), counter(figure.where(kind: table)), counter(figure.where(kind: image))`.
///
/// - `label-only-on-last-subslide` (array): We only label some contents in the last subslide, which is useful for referencing equations, figures, and theorems that span multiple subslides. The default value is `(figure, math.equation, heading)`.
///
/// - `preamble` (function): The function to run before each slide. The default value is `none`.
///
/// - `default-preamble` (function): The default preamble for each slide. The default value is a function to check the mark warning and add pdfpc file.
///
/// - `slide-preamble` (function): The function to run before each slide. The default value is `none`.
///
/// - `default-slide-preamble` (function): The default preamble for each slide. The default value is `none`.
///
/// - `subslide-preamble` (function): The function to run before each subslide. The default value is `none`.
///
/// - `default-subslide-preamble` (function): The default preamble for each subslide. The default value is `none`.
///
/// - `page-preamble` (function): The function to run before each page. The default value is `none`.
///
/// - `default-page-preamble` (function): The default preamble for each page. The default value is a function to reset the footnote number per slide and reset the page counter to the slide counter.
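///
/// A minimal call might look like this, overriding only a couple of options:
///
/// #example(```
/// config-common(
///   handout: true,
///   slide-level: 2,
/// )
/// ```)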
#let config-common(
handout: _default,
slide-level: _default,
slide-fn: _default,
new-section-slide-fn: _default,
new-subsection-slide-fn: _default,
new-subsubsection-slide-fn: _default,
new-subsubsubsection-slide-fn: _default,
datetime-format: _default,
appendix: _default,
freeze-slide-counter: _default,
zero-margin-header: _default,
zero-margin-footer: _default,
auto-offset-for-heading: _default,
enable-pdfpc: _default,
enable-mark-warning: _default,
reset-page-counter-to-slide-counter: _default,
// some black magics for better slides writing,
// maybe will be deprecated in the future
enable-frozen-states-and-counters: _default,
frozen-states: _default,
default-frozen-states: _default,
frozen-counters: _default,
default-frozen-counters: _default,
label-only-on-last-subslide: _default,
preamble: _default,
default-preamble: _default,
slide-preamble: _default,
default-slide-preamble: _default,
subslide-preamble: _default,
default-subslide-preamble: _default,
page-preamble: _default,
default-page-preamble: _default,
show-notes-on-second-screen: _default,
horizontal-line-to-pagebreak: _default,
reset-footnote-number-per-slide: _default,
nontight-list-enum-and-terms: _default,
align-list-marker-with-baseline: _default,
scale-list-items: _default,
..args,
) = {
assert(args.pos().len() == 0, message: "Unexpected positional arguments.")
return _get-dict-without-default((
handout: handout,
slide-level: slide-level,
slide-fn: slide-fn,
new-section-slide-fn: new-section-slide-fn,
new-subsection-slide-fn: new-subsection-slide-fn,
new-subsubsection-slide-fn: new-subsubsection-slide-fn,
new-subsubsubsection-slide-fn: new-subsubsubsection-slide-fn,
datetime-format: datetime-format,
appendix: appendix,
freeze-slide-counter: freeze-slide-counter,
zero-margin-header: zero-margin-header,
zero-margin-footer: zero-margin-footer,
auto-offset-for-heading: auto-offset-for-heading,
enable-pdfpc: enable-pdfpc,
enable-mark-warning: enable-mark-warning,
reset-page-counter-to-slide-counter: reset-page-counter-to-slide-counter,
enable-frozen-states-and-counters: enable-frozen-states-and-counters,
frozen-states: frozen-states,
frozen-counters: frozen-counters,
default-frozen-states: default-frozen-states,
default-frozen-counters: default-frozen-counters,
label-only-on-last-subslide: label-only-on-last-subslide,
preamble: preamble,
default-preamble: default-preamble,
slide-preamble: slide-preamble,
default-slide-preamble: default-slide-preamble,
subslide-preamble: subslide-preamble,
default-subslide-preamble: default-subslide-preamble,
page-preamble: page-preamble,
default-page-preamble: default-page-preamble,
show-notes-on-second-screen: show-notes-on-second-screen,
horizontal-line-to-pagebreak: horizontal-line-to-pagebreak,
reset-footnote-number-per-slide: reset-footnote-number-per-slide,
nontight-list-enum-and-terms: nontight-list-enum-and-terms,
align-list-marker-with-baseline: align-list-marker-with-baseline,
scale-list-items: scale-list-items,
)) + args.named()
}
#let _default-init(self: none, body) = {
show strong: self.methods.alert.with(self: self)
body
}
#let _default-cover = utils.method-wrapper(hide)
#let _default-show-notes(self: none, width: 0pt, height: 0pt) = block(
fill: rgb("#E6E6E6"),
width: width,
height: height,
{
set align(left + top)
set text(size: 24pt, fill: black, weight: "regular")
block(
width: 100%,
height: 88pt,
inset: (left: 32pt, top: 16pt),
outset: 0pt,
fill: rgb("#CCCCCC"),
{
utils.display-current-heading(level: 1, depth: self.slide-level)
linebreak()
[ --- ]
utils.display-current-heading(level: 2, depth: self.slide-level)
},
)
pad(x: 48pt, utils.current-slide-note)
// clear the slide note
utils.slide-note-state.update(none)
},
)
#let _default-alert = utils.method-wrapper(text.with(weight: "bold"))
#let _default-convert-label-to-short-heading(self: none, lbl) = utils.titlecase(
lbl.replace(regex("^[^:]*:"), "").replace("_", " ").replace("-", " "),
)
/// The configuration of the methods
///
/// - `init` (function): The function to initialize the presentation. It should be `(self: none, body) => { .. }`.
///
/// By default, it shows the strong content with the `alert` function: `show strong: self.methods.alert.with(self: self)`
///
/// - `cover` (function): The function to cover content. The default value is `utils.method-wrapper(hide)` function.
///
/// You can configure it with `cover: utils.semi-transparent-cover` to use the semi-transparent cover.
///
/// - `uncover` (function): The function to uncover content. The default value is `utils.uncover` function.
///
/// - `only` (function): The function to show only the content. The default value is `utils.only` function.
///
/// - `alternatives-match` (function): The function to match alternatives. The default value is `utils.alternatives-match` function.
///
/// - `alternatives` (function): The function to show alternatives. The default value is `utils.alternatives` function.
///
/// - `alternatives-fn` (function): The function to show alternatives with a function. The default value is `utils.alternatives-fn` function.
///
/// - `alternatives-cases` (function): The function to show alternatives with cases. The default value is `utils.alternatives-cases` function.
///
/// - `alert` (function): The function to alert the content. The default value is `utils.method-wrapper(text.with(weight: "bold"))` function.
///
/// - `show-notes` (function): The function to show notes on second screen. It should be `(self: none, width: 0pt, height: 0pt) => { .. }` with core code `utils.current-slide-note` and `utils.slide-note-state.update(none)`.
///
/// - `convert-label-to-short-heading` (function): The function to convert label to short heading. It is useful for the short heading for heading with label. It will be used in function with `short-heading`.
///
/// The default value is `utils.titlecase(lbl.replace(regex("^[^:]*:"), "").replace("_", " ").replace("-", " "))`.
///
/// It means that some headings with labels like `section:my-section` will be converted to `My Section`.
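///
/// For instance, to use the semi-transparent cover mentioned above:
///
/// #example(```
/// config-methods(
///   cover: utils.semi-transparent-cover,
/// )
/// ```)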
#let config-methods(
// init
init: _default,
cover: _default,
// dynamic control
uncover: _default,
only: _default,
alternatives-match: _default,
alternatives: _default,
alternatives-fn: _default,
alternatives-cases: _default,
// alert interface
alert: _default,
// show notes
show-notes: _default,
// convert label to short heading
convert-label-to-short-heading: _default,
..args,
) = {
assert(args.pos().len() == 0, message: "Unexpected positional arguments.")
return (
methods: _get-dict-without-default((
init: init,
cover: cover,
uncover: uncover,
only: only,
alternatives-match: alternatives-match,
alternatives: alternatives,
alternatives-fn: alternatives-fn,
alternatives-cases: alternatives-cases,
alert: alert,
show-notes: show-notes,
convert-label-to-short-heading: convert-label-to-short-heading,
)) + args.named(),
)
}
/// The configuration of important information of the presentation.
///
/// #example(```
/// config-info(
/// title: "Title",
/// subtitle: "Subtitle",
/// author: "Author",
/// date: datetime.today(),
/// institution: "Institution",
/// )
/// ```)
///
/// - title (content): The title of the presentation, which will be displayed in the title slide.
/// - short-title (content, auto): The short title of the presentation, which will usually be displayed in the footer of the slides.
///
/// If you set it to `auto`, it will be the same as the title.
///
/// - subtitle (content): The subtitle of the presentation.
///
/// - short-subtitle (content, auto): The short subtitle of the presentation, which will usually be displayed in the footer of the slides.
///
/// If you set it to `auto`, it will be the same as the subtitle.
///
/// - author (content): The author of the presentation.
///
/// - date (datetime, content): The date of the presentation.
///
/// You can use `datetime.today()` to get the current date.
///
/// - institution (content): The institution of the presentation.
///
/// - logo (content): The logo of the institution.
#let config-info(
title: _default,
short-title: _default,
subtitle: _default,
short-subtitle: _default,
author: _default,
date: _default,
institution: _default,
logo: _default,
..args,
) = {
assert(args.pos().len() == 0, message: "Unexpected positional arguments.")
return (
info: _get-dict-without-default((
title: title,
short-title: short-title,
subtitle: subtitle,
short-subtitle: short-subtitle,
author: author,
date: date,
institution: institution,
logo: logo,
)) + args.named(),
)
}
/// The configuration of the colors used in the theme.
///
/// #example(```
/// config-colors(
/// primary: rgb("#04364A"),
/// secondary: rgb("#176B87"),
/// tertiary: rgb("#448C95"),
/// neutral: rgb("#303030"),
/// neutral-darkest: rgb("#000000"),
/// )
/// ```)
///
/// IMPORTANT: The colors should be defined in the *RGB* format at most cases.
///
/// There are four main colors in the theme: primary, secondary, tertiary, and neutral,
/// and each of them has a light, lighter, lightest, dark, darker, and darkest version.
#let config-colors(
neutral: _default,
neutral-light: _default,
neutral-lighter: _default,
neutral-lightest: _default,
neutral-dark: _default,
neutral-darker: _default,
neutral-darkest: _default,
primary: _default,
primary-light: _default,
primary-lighter: _default,
primary-lightest: _default,
primary-dark: _default,
primary-darker: _default,
primary-darkest: _default,
secondary: _default,
secondary-light: _default,
secondary-lighter: _default,
secondary-lightest: _default,
secondary-dark: _default,
secondary-darker: _default,
secondary-darkest: _default,
tertiary: _default,
tertiary-light: _default,
tertiary-lighter: _default,
tertiary-lightest: _default,
tertiary-dark: _default,
tertiary-darker: _default,
tertiary-darkest: _default,
..args,
) = {
assert(args.pos().len() == 0, message: "Unexpected positional arguments.")
return (
colors: _get-dict-without-default((
neutral: neutral,
neutral-light: neutral-light,
neutral-lighter: neutral-lighter,
neutral-lightest: neutral-lightest,
neutral-dark: neutral-dark,
neutral-darker: neutral-darker,
neutral-darkest: neutral-darkest,
primary: primary,
primary-light: primary-light,
primary-lighter: primary-lighter,
primary-lightest: primary-lightest,
primary-dark: primary-dark,
primary-darker: primary-darker,
primary-darkest: primary-darkest,
secondary: secondary,
secondary-light: secondary-light,
secondary-lighter: secondary-lighter,
secondary-lightest: secondary-lightest,
secondary-dark: secondary-dark,
secondary-darker: secondary-darker,
secondary-darkest: secondary-darkest,
tertiary: tertiary,
tertiary-light: tertiary-light,
tertiary-lighter: tertiary-lighter,
tertiary-lightest: tertiary-lightest,
tertiary-dark: tertiary-dark,
tertiary-darker: tertiary-darker,
tertiary-darkest: tertiary-darkest,
)) + args.named(),
)
}
/// The configuration of the page layout.
///
/// It is equivalent to the `#set page()` rule in Touying.
///
/// #example(```
/// config-page(
/// paper: "presentation-16-9",
/// header: none,
/// footer: none,
/// fill: rgb("#ffffff"),
/// margin: (x: 3em, y: 2.8em),
/// )
/// ```)
///
/// - paper (string): A standard paper size to set width and height. The default value is "presentation-16-9".
///
/// You can also use `aspect-ratio` to set the aspect ratio of the paper.
///
/// - header (content): The page's header. Fills the top margin of each page.
///
/// - footer (content): The page's footer. Fills the bottom margin of each page.
///
/// - fill (color): The background color of the page. The default value is `rgb("#ffffff")`.
///
/// - margin (length, dictionary): The margin of the page. The default value is `(x: 3em, y: 2.8em)`.
/// - A single length: The same margin on all sides.
/// - A dictionary: With a dictionary, the margins can be set individually. The dictionary can contain the following keys in order of precedence:
/// - top: The top margin.
/// - right: The right margin.
/// - bottom: The bottom margin.
/// - left: The left margin.
/// - inside: The margin at the inner side of the page (where the binding is).
/// - outside: The margin at the outer side of the page (opposite to the binding).
/// - x: The horizontal margins.
/// - y: The vertical margins.
/// - rest: The margins on all sides except those for which the dictionary explicitly sets a size.
///
/// The values for left and right are mutually exclusive with the values for inside and outside.
#let config-page(
paper: _default,
header: _default,
footer: _default,
fill: _default,
margin: _default,
..args,
) = {
assert(args.pos().len() == 0, message: "Unexpected positional arguments.")
return (
page: _get-dict-without-default((
paper: paper,
header: header,
footer: footer,
fill: fill,
margin: margin,
)) + args.named(),
)
}
/// The default configurations
#let default-config = utils.merge-dicts(
config-common(
handout: false,
slide-level: 2,
slide-fn: slide,
new-section-slide-fn: none,
new-subsection-slide-fn: none,
new-subsubsection-slide-fn: none,
new-subsubsubsection-slide-fn: none,
datetime-format: auto,
appendix: false,
freeze-slide-counter: false,
zero-margin-header: true,
zero-margin-footer: true,
auto-offset-for-heading: true,
enable-pdfpc: true,
enable-mark-warning: true,
reset-page-counter-to-slide-counter: true,
// some black magics for better slides writing,
// maybe will be deprecated in the future
show-notes-on-second-screen: none,
horizontal-line-to-pagebreak: true,
reset-footnote-number-per-slide: true,
nontight-list-enum-and-terms: false,
align-list-marker-with-baseline: false,
scale-list-items: none,
enable-frozen-states-and-counters: true,
frozen-states: (),
default-frozen-states: _default-frozen-states,
frozen-counters: (),
default-frozen-counters: _default-frozen-counters,
label-only-on-last-subslide: (figure, math.equation, heading),
preamble: none,
default-preamble: _default-preamble,
slide-preamble: none,
default-slide-preamble: none,
subslide-preamble: none,
default-subslide-preamble: none,
page-preamble: none,
default-page-preamble: _default-page-preamble,
),
config-methods(
// init
init: _default-init,
cover: _default-cover,
// dynamic control
uncover: utils.uncover,
only: utils.only,
alternatives-match: utils.alternatives-match,
alternatives: utils.alternatives,
alternatives-fn: utils.alternatives-fn,
alternatives-cases: utils.alternatives-cases,
// alert interface
alert: _default-alert,
// show notes
show-notes: _default-show-notes,
// convert label to short heading
convert-label-to-short-heading: _default-convert-label-to-short-heading,
),
config-info(
title: none,
short-title: auto,
subtitle: none,
short-subtitle: auto,
author: none,
date: none,
institution: none,
logo: none,
),
config-colors(
neutral: rgb("#303030"),
neutral-light: rgb("#a0a0a0"),
neutral-lighter: rgb("#d0d0d0"),
neutral-lightest: rgb("#ffffff"),
neutral-dark: rgb("#202020"),
neutral-darker: rgb("#101010"),
neutral-darkest: rgb("#000000"),
primary: rgb("#303030"),
primary-light: rgb("#a0a0a0"),
primary-lighter: rgb("#d0d0d0"),
primary-lightest: rgb("#ffffff"),
primary-dark: rgb("#202020"),
primary-darker: rgb("#101010"),
primary-darkest: rgb("#000000"),
secondary: rgb("#303030"),
secondary-light: rgb("#a0a0a0"),
secondary-lighter: rgb("#d0d0d0"),
secondary-lightest: rgb("#ffffff"),
secondary-dark: rgb("#202020"),
secondary-darker: rgb("#101010"),
secondary-darkest: rgb("#000000"),
tertiary: rgb("#303030"),
tertiary-light: rgb("#a0a0a0"),
tertiary-lighter: rgb("#d0d0d0"),
tertiary-lightest: rgb("#ffffff"),
tertiary-dark: rgb("#202020"),
tertiary-darker: rgb("#101010"),
tertiary-darkest: rgb("#000000"),
),
config-page(
paper: "presentation-16-9",
header: none,
footer: none,
fill: rgb("#ffffff"),
margin: (x: 3em, y: 2.8em),
),
config-store(),
) |
|
https://github.com/k0tran/cont_labs | https://raw.githubusercontent.com/k0tran/cont_labs/master/reports/lab4.typ | typst | #import "template.typ": *
#show: lab.with(n: 4)
= Vagrant
В качестве хоста для контейнера wireguard будет использоваться виртуальная машина `ubuntu/jammy64`.
#pic(img: "lab4/vagrant_cfg.png")[Начальная конфигурация виртуальной машины]
В первую очередь так как мы используем wireguard, то необходимо настроить сеть следующим образом:
#pic(img: "lab4/vagrant_net.png")[Настройка сети виртуальной машины]
Здесь стоит отметить несколько моментов:
- порт 51820 используется для подключений wireguard;
- порт 10086 используется wgdashboard для web-интерфейса;
- используется "public_network" так как необходим доступ в сеть изнутри машины и возможность доступа с нашей машины к виртуальной по сети.
На последок стоит добавить копирование файлов `Dockerfile` и `docker-compose.yaml`:
#pic(img: "lab4/vagrant_files.png")[Копирование Dockerfile и docker-compose.yml]
#pagebreak()
= Dockerfile
Предже всего следует отметить, что наиболее популярный контейнер `linuxserver/wireguard` использует alpine linux, которого нет среди протестированных дистрибутивов для wgdashboard:
#pic(img: "lab4/wgd_req.png")[Необходимые требования wg-dashboard]
Одним из вариантов является использование данного контейнера и доработка его до совместимости с wgdashboard. Однако в данной лабораторной был выбран другой путь: использовать убунту (`ubuntu:20.04`), на которую затем установить сначала wireguard, затем wgdashboard.
== Установка необходимых пакетов
#pic(img: "lab4/dockerfile_1.png")[Установка необходимых пакетов]
Среди них:
- wireguard
- iproute2 - устанавливается ради утилиты ip
- net-tools - ifconfig
А затем производится чистка.
== Генерация конфигурации wireguard
#pic(img: "lab4/dockerfile_2.png")[Генерация конфигурации wireguard]
Для базовой работы wireguard была сгенерирована конфигурация сервера wireguard в автоматическом режиме.
== Установка wgdashboard
#pic(img: "lab4/dockerfile_3.png")[Установка wgdashboard согласно инструкции]
== Открытие портов
#pic(img: "lab4/dockerfile_4.png")[Открытие портов для использования с `-P`]
== `Entrypoint` контейнера
#pic(img: "lab4/dockerfile_5.png")[`Entrypoint` контейнера]
Состоит из следующих частей:
- `wg-quick up wg0` - запуск wireguard с конфигурацией, сгененрированной ранее;
- `cd /usr/local/share/wgdashboard/src && ./wgd.sh start` - запукс wgdashboard согласно инструкции;
- `tail -f /dev/null` - так как оба предыдущих процесса работают "в фоне" необходимо создать основной процесс.
== Сборка и запуск
#pic(img: "lab4/dockerfile_br.png")[Сборка и запуск контейнера]
== Метаданные
#pic(img: "lab4/dockerfile_6.png")[Метаданные]
== Рабочая директория
#pic(img: "lab4/dockerfile_7.png")[Рабочая директория]
== Создание пользователя
#pic(img: "lab4/dockerfile_8.png")[Создание пользователя user01]
#pagebreak()
= Docker compose
== Базовый образ
#pic(img: "lab4/docker_compose.png")[Файл docker-compose.yml]
Здесь мы используем собранный образ из существующего Dockerfile.
#pic(img: "lab4/docker_compose_up.png")[Запуск docker compose up]
#pic(img: "lab4/proof.png")[Веб форма wgdashboard]
== Сети и тома
#pic(img: "lab4/dc_1.png")[Том]
#pic(img: "lab4/dc_2.png")[Сетки 1]
#pic(img: "lab4/dc_3.png")[Сетки 2]
== Директива restart
#pic(img: "lab4/dc_4.png")[Restart]
Виды restart:
- no
- on-failure
- always
- unless-stopped
== Ограничения CPU и RAM
#pic(img: "lab4/dc_5.png")[Ограничения CPU и RAM]
== Второй запуск
#pic(img: "lab4/docker_compose_up_2.png")[Ограничения CPU и RAM] |
|
https://github.com/jneug/schule-typst | https://raw.githubusercontent.com/jneug/schule-typst/main/tests/base/test.typ | typst | MIT License | #import "@local/schule:1.0.0": ab
#import ab: *
#show: arbeitsblatt.with(
/* @typstyle:off */
titel: "Base template test",
reihe: "TYPST-TEST",
datum: "15.06.2024",
nummer: "2",
fach: "Informatik",
kurs: "Q1 LK",
autor: (
name: "<NAME>",
kuerzel: "Ngb",
),
version: "2024-06-15",
)
= Header 1
#lorem(100)
== Header 2
#lorem(100)
|
https://github.com/nixon-voxell/nixon_resume | https://raw.githubusercontent.com/nixon-voxell/nixon_resume/main/resume.typ | typst | MIT License | #import "template.typ": *
#show: resume.with(
author: (
firstname: "(Nixon) <NAME>",
lastname: "Cheng",
email: "<EMAIL>",
phone: "(+60) 173389100",
loc: "Malaysia",
linkedin: "nixonyh",
github: "nixon-voxell",
url_name: "linktr.ee/voxell",
url: "https://linktr.ee/voxell",
youtube: "@voxelltech",
itchio: "nixon-voxell",
positions: (
"Computer Graphics Engineer",
"\nTechnical Artist",
),
),
date: datetime.today().display("[day] [month repr:long] [year]"),
)
#resume_section[Skills]
#skill_item(
"Programming\nLanguages",
(
"Rust",
"C#",
"WGSL",
"HLSL",
"GLSL",
"Python",
"Typescript",
"C++",
"Dart",
"Java",
"PHP",
"R",
),
)
#skill_item(
"Expertise",
(
"Real-time Physics Simulation",
"Animation",
"Motion Graphics",
"Graphics Programming",
"Parallel/Multithreading",
"Deep Learning",
"VFX",
"Bevy Engine",
"Unity3D",
"Unreal Engine",
),
)
#resume_section[Experience]
#work_experience_item_header(
"Physics Simulation Engineer",
"23 Bulbs",
"UK",
"Sep. 2020 - Present",
)
#resume_item[
- Develop GPU accelerated cloth simulation engine.
- Tools & pipeline for cloth/softbody authoring.
]
#work_experience_item_header(
"Animation Programmer Intern",
"Hedra",
"US",
"Mar. 2024 - Jul. 2024",
)
#resume_item[
- Create animation systems for generating keyframe animations that can be imported and exported.
- Deal with different types of animation e.g. character rig, blend poses, flame poses, transform, etc.
- All in the Bevy game engine using Rust.
]
#work_experience_item_header(
"Head of Events",
"APU Game Development Club",
"KL, Malaysia",
"Feb. 2023 - April. 2024",
)
#resume_item[
- Organize and manage events for the club.
]
#work_experience_item_header(
"Graphics Programmer",
"Fiverr",
"Remote",
"Jun. 2020 - Aug. 2023",
)
#resume_item[
- Work on various client projects.
]
#work_experience_item_header(
"Intern Game Programmer",
"Streamline Studios",
"KL, Malaysia",
"May. 2022 - Aug. 2022",
)
#resume_item[
- Develop Blender addon tools for artists.
- Contributed to Bake n Switch project.
]
#resume_section[Education]
#education_item[
Asia Pacific University of Technology & Innovation (APU)
][
Bachelor of Science (Honours) in Computer Games Development
][
#resume_gpa("3.95", "4.00")
][
Aug. 2022 - Present
]
#education_item[
Asia Pacific University of Technology & Innovation (APU)
][
Diploma in Information & Communication Technology with a Specialism in Software Engineering
][
#resume_gpa("3.80", "4.00")
][
Jun. 2020 - Jul. 2022
]
#resume_section[Open Source Projects]
#project_item_header(
"Bevy MotionGfx",
"https://github.com/nixon-voxell/bevy_motiongfx",
)
#resume_item[
- Motion graphics creation tool in Bevy. (Highly inspired by Motion Canvas and Manim)
]
#project_item_header(
"Bevy Vello Renderer",
"https://github.com/nixon-voxell/bevy_vello_renderer",
)
#resume_item[
- A minimal integration for rendering vector graphics using Vello in Bevy.
]
#project_item_header(
"Unity TTS",
"https://github.com/nixon-voxell/UnityTTS",
)
#resume_item[
- Text to speech in Unity.
]
#project_item_header(
"Unity NLP",
"https://github.com/nixon-voxell/UnityNLP",
)
#resume_item[
- Natural language processing in Unity.
]
#project_item_header(
"Unity Audio Visualizer",
"https://github.com/nixon-voxell/UnityAudioVisualizer",
)
#resume_item[
- Audio visualizer in Unity using Job System, Burst Compiler, and VFX Graph.
]
#resume_section[Awards]
Certificates: #link("https://www.linkedin.com/in/nixonyh/details/honors/")
#award_item_header(
"2023 4th All American DAVINCI International Innovation and Invention Expo",
"Sep. 2023",
)
#resume_item[
- Gold Medal
- ATIP Special Award
- OCIIP Special Award
- TISIAS Special Award
]
#award_item_header(
"DB-SNUbiz Global Startup Challenge 2023",
"Sep. 2023",
)
#resume_item[
- Finalist
]
#award_item_header(
"2023 INNOVERSE Innovation & Invention Expo",
"Aug. 2023",
)
#resume_item[
- Gold Medal
- INNOPA Special Award
- Canadian Spacial Award of Excellence
]
#award_item_header(
"The 12th World Invention Creativity Olympic 2023",
"Jul. 2023",
)
#resume_item[
- Gold Award
- TISIAS Special Award
- TUMMIAD Special Award
]
#award_item_header(
"Virtual Innovation Competition 2023",
"Jun. 2023",
)
#resume_item[
- Gold Medal
]
#award_item_header(
"34th International Invention, Innovation & Technology Exhibition",
"May. 2023",
)
#resume_item[
- Silver Medal
]
#award_item_header(
"Hilti IT Competition 2023",
"May. 2023",
)
#resume_item[
- Semi-Finalist
]
#resume_section[Talks]
#item_and_link(
"Beauty of Shaders Talk (APUGDC)",
"https://www.instagram.com/p/CtfxIu0vD3k/?img_index=2",
)
#item_and_link(
"TEDx - Simulating Millions of Particles in a Digital Twin (BATxAPU Week)",
"https://twitter.com/AsiaPacificU/status/1534062323346083840",
)
#resume_section[Media & Press Releases]
#item_and_link(
"The Star (Gold Medal @ Invention Innovation Competition in Canada 2023)",
"https://www.thestar.com.my/starpicks/2023/11/16/next-gen-game-development-for-next-gen-professionals",
)
#item_and_link(
"Open Gov (Gold Medal @ 12th World Invention Creativity Olympic 2023)",
"https://opengovasia.com/rescueai-smart-city-disaster-management/",
)
#item_and_link(
"APU News (Gold Medal @ 12th World Invention Creativity Olympic 2023)",
"https://www.apu.edu.my/media/news/2939",
)
#item_and_link(
"APU News (1st Runner Up @ Intel AI Global Impact Festival 2023, 3rd Runner Up @ Petronas CHESS Symposium 2023)",
"https://www.apu.edu.my/media/news/2911",
)
#item_and_link(
"APU News (Silver Medal @ ITEX 2023)",
"https://www.apu.edu.my/media/news/2869",
)
// #pagebreak()
// #resume_section[Skill Experiences]
// #skill_item(
// "Unity 3D",
// ("Familiar with ECS, Scriptable Render Pipelines, Job System, and shaders (including compute shaders).",)
// )
|
https://github.com/Jollywatt/typst-fletcher | https://raw.githubusercontent.com/Jollywatt/typst-fletcher/master/src/diagram.typ | typst | MIT License | #import "utils.typ": *
#import "node.typ": *
#import "edge.typ": *
#import "draw.typ": draw-diagram
#import "coords.typ": *
/// Interpret #the-param[diagram][axes].
///
/// Returns a dictionary with:
/// - `x`: Whether $u$ is reversed
/// - `y`: Whether $v$ is reversed
/// - `xy`: Whether the axes are swapped
///
/// - axes (array): Pair of directions specifying the interpretation of $(u, v)$
/// coordinates. For example, `(ltr, ttb)` means $u$ goes $arrow.r$ and $v$
/// goes $arrow.b$.
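/// For example, `interpret-axes((ltr, ttb))` evaluates to
/// `(flip: (x: false, y: true, xy: false))`.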
/// -> dictionary
#let interpret-axes(axes) = {
let dirs = axes.map(direction.axis)
let flip
if dirs == ("horizontal", "vertical") {
flip = false
} else if dirs == ("vertical", "horizontal") {
flip = true
} else {
error("Axes #0 cannot both be in the same direction. Try `axes: (ltr, ttb)`.", axes)
}
(
flip: (
x: axes.at(0) in (rtl, ttb),
y: axes.at(1) in (rtl, ttb),
xy: flip,
)
)
}
/// Convert an array of rects `(center: (x, y), size: (w, h))` with fractional
/// positions into rects with integral positions.
///
/// If a rect is centered at a factional position `floor(x) < x < ceil(x)`, it
/// will be replaced by two new rects centered at `floor(x)` and `ceil(x)`. The
/// total width of the original rect is split across the two new rects according
/// two which one is closer. (E.g., if the original rect is at `x = 0.25`, the
/// new rect at `x = 0` has 75% the original width and the rect at `x = 1` has
/// 25%.) The same splitting procedure is done for `y` positions and heights.
///
/// This is the algorithm used to determine grid layout in diagrams.
///
/// - rects (array): An array of rects of the form
/// `(center: (x, y), size: (width, height))`. The coordinates `x` and `y` may
/// be floats.
/// -> array
#let expand-fractional-rects(rects) = {
let new-rects
for axis in (0, 1) {
new-rects = ()
for rect in rects {
let coord = rect.center.at(axis)
let size = rect.size.at(axis)
if calc.fract(coord) == 0 {
rect.center.at(axis) = calc.trunc(coord)
new-rects.push(rect)
} else {
rect.center.at(axis) = calc.floor(coord)
rect.size.at(axis) = size*(calc.ceil(coord) - coord)
new-rects.push(rect)
rect.center.at(axis) = calc.ceil(coord)
rect.size.at(axis) = size*(coord - calc.floor(coord))
new-rects.push(rect)
}
}
rects = new-rects
}
new-rects
}
/// Determine the number and sizes of grid cells needed for a diagram with the
/// given nodes and edges.
///
/// Returns a dictionary with:
/// - `origin: (u-min, v-min)` Coordinate at the grid corner where elastic/`uv`
/// coordinates are minimised.
/// - `cell-sizes: (x-sizes, y-sizes)` Widths of each column and heights of
///   each row.
///
/// - grid (dictionary): Representation of the grid layout, including:
/// - `flip`
#let compute-cell-sizes(grid, verts, rects) = {
rects = expand-fractional-rects(rects)
// all points in diagram that should be spanned by coordinate grid
let points = rects.map(r => r.center)
points += verts
if points.len() == 0 { points.push((0,0)) }
let min-max-int(a) = (calc.floor(calc.min(..a)), calc.ceil(calc.max(..a)))
let (x-min, x-max) = min-max-int(points.map(p => p.at(0)))
let (y-min, y-max) = min-max-int(points.map(p => p.at(1)))
let origin = (x-min, y-min)
let bounding-dims = (x-max - x-min + 1, y-max - y-min + 1)
// Initialise row and column sizes
let cell-sizes = bounding-dims.map(n => (0pt,)*n)
// Expand cells to fit rects
for rect in rects {
let indices = vector.sub(rect.center, origin)
if grid.flip.x { indices.at(0) = -1 - indices.at(0) }
if grid.flip.y { indices.at(1) = -1 - indices.at(1) }
for axis in (0, 1) {
			let size = if grid.flip.xy { rect.size.at(1 - axis) } else { rect.size.at(axis) }
			cell-sizes.at(axis).at(indices.at(axis)) = calc.max(
				cell-sizes.at(axis).at(indices.at(axis)),
				size,
)
}
}
(origin: origin, cell-sizes: cell-sizes)
}
/// Determine the centers of grid cells from their sizes and spacing between
/// them.
///
/// Returns a dictionary with:
/// - `centers: (x-centers, y-centers)` Positions of each row and column,
/// measured from the corner of the bounding box.
/// - `bounding-size: (x-size, y-size)` Dimensions of the bounding box.
///
/// - grid (dictionary): Representation of the grid layout, including:
///   - `cell-sizes: (x-sizes, y-sizes)` Widths of each column and heights of
///     each row.
/// - `spacing: (x-spacing, y-spacing)` Gap to leave between cells.
/// -> dictionary
#let compute-cell-centers(grid) = {
// (x: (c1x, c2x, ...), y: ...)
let centers = array.zip(grid.cell-sizes, grid.spacing)
.map(((sizes, spacing)) => {
array.zip(cumsum(sizes), sizes, range(sizes.len()))
.map(((end, size, i)) => end - size/2 + spacing*i)
})
let bounding-size = array.zip(centers, grid.cell-sizes)
.map(((centers, sizes)) => centers.at(-1) + sizes.at(-1)/2)
(
centers: centers,
bounding-size: bounding-size,
)
}
/// Determine the number, sizes and relative positions of rows and columns in
/// the diagram's coordinate grid.
///
/// Rows and columns are sized to fit nodes. Coordinates are not required to
/// start at the origin, `(0,0)`.
#let compute-grid(rects, verts, options) = {
let grid = (
axes: options.axes,
spacing: options.spacing,
)
grid += interpret-axes(grid.axes)
grid += compute-cell-sizes(grid, verts, rects)
// enforce minimum cell size
grid.cell-sizes = grid.cell-sizes.zip(options.cell-size)
.map(((sizes, min-size)) => sizes.map(calc.max.with(min-size)))
grid += compute-cell-centers(grid)
grid
}
#let extract-nodes-and-edges-from-equation(eq) = {
assert(eq.func() == math.equation)
let terms = eq.body + []
let edges = ()
let nodes = ()
// convert math matrix into array-of-arrays matrix
let matrix = ((none,),)
let (x, y) = (0, 0)
for child in terms.children {
if child.func() == metadata {
if child.value.class == "node" {
let node = child.value
node.pos = (raw: (x, y))
nodes.push(node)
} else if child.value.class == "edge" {
let edge = child.value
edge.vertices.at(0) = map-auto(edge.vertices.at(0), (x, y))
if edge.label != none { edge.label = $edge.label$ } // why is this needed?
edge.vertices.at(-1) = map-auto(edge.vertices.at(-1), (rel: (1, 0)))
edge.node-index = none
edges.push(edge)
}
} else if repr(child.func()) == "linebreak" {
y += 1
x = 0
matrix.push((none,))
} else if repr(child.func()) == "align-point" {
x += 1
matrix.at(-1).push(none)
} else {
matrix.at(-1).at(-1) += child
}
}
// turn matrix into an array of nodes
for (y, row) in matrix.enumerate() {
for (x, item) in row.enumerate() {
if not is-space(item) {
nodes.push(node((x, y), $item$).value)
}
}
}
(
nodes: nodes,
edges: edges,
)
}
#let interpret-diagram-args(args) = {
	if args.named().len() > 0 {
		error("Unexpected named argument(s) #..0 to `diagram()`.", args.named().keys())
	}
let positional-args = args.pos().flatten().join() + [] // join to ensure sequence
let objects = positional-args.children
let nodes = ()
let edges = ()
for obj in objects {
if obj.func() == metadata {
if obj.value.class == "node" {
let node = obj.value
nodes.push(node)
} else if obj.value.class == "edge" {
let edge = obj.value
edge.node-index = nodes.len()
edges.push(edge)
}
} else if obj.func() == math.equation {
let result = extract-nodes-and-edges-from-equation(obj)
nodes += result.nodes
edges += result.edges
} else {
panic("Unrecognised value passed to diagram:", obj)
}
}
(
nodes: nodes,
edges: edges,
)
}
/// Draw a diagram containing `node()`s and `edge()`s.
///
/// - ..args (array): Content to draw in the diagram, including nodes and edges.
///
/// The results of `node()` and `edge()` can be _joined_, meaning you can
/// specify them as separate arguments, or in a block:
///
/// ```typ
/// #diagram(
/// // one object per argument
/// node((0, 0), $A$),
/// node((1, 0), $B$),
/// {
/// // multiple objects in a block
/// // can use scripting, loops, etc
/// node((2, 0), $C$)
/// node((3, 0), $D$)
/// },
///   for x in range(4) { node((x, 1), [#x]) },
/// )
/// ```
///
/// Nodes and edges can also be specified in math-mode.
///
/// ```typ
/// #diagram($
/// A & B \ // two nodes at (0,0) and (1,0)
/// C edge(->) & D \ // an edge from (0,1) to (1,1)
/// node(sqrt(pi), stroke: #1pt) // a node with options
/// $)
/// ```
///
/// - debug (bool, 1, 2, 3): Level of detail for drawing debug information.
/// Level `1` or `true` shows a coordinate grid; higher levels show bounding boxes and
/// anchors, etc.
///
/// - spacing (length, pair of lengths): Gaps between rows and columns. Ensures
/// that nodes at adjacent grid points are at least this far apart (measured as
/// the space between their bounding boxes).
///
/// Separate horizontal/vertical gutters can be specified with `(x, y)`. A
/// single length `d` is short for `(d, d)`.
///
/// - cell-size (length, pair of lengths): Minimum size of all rows and columns.
/// A single length `d` is short for `(d, d)`.
///
/// - node-inset (length, pair of lengths): Default value of
/// #the-param[node][inset].
///
/// - node-outset (length, pair of lengths): Default value of
/// #the-param[node][outset].
///
/// - node-shape (rect, circle, function): Default value of
/// #the-param[node][shape].
///
/// - node-stroke (stroke, none): Default value of #the-param[node][stroke].
///
/// The default stroke is folded with the stroke specified for the node. For
/// example, if `node-stroke` is `1pt` and #the-param[node][stroke] is `red`,
/// then the resulting stroke is `1pt + red`.
///
/// - node-fill (paint): Default value of #the-param[node][fill].
///
/// - edge-stroke (stroke): Default value of #the-param[edge][stroke]. By
/// default, this is chosen to match the thickness of mathematical arrows such
/// as $A -> B$ in the current font size.
///
/// The default stroke is folded with the stroke specified for the edge. For
/// example, if `edge-stroke` is `1pt` and #the-param[edge][stroke] is `red`,
/// then the resulting stroke is `1pt + red`.
///
/// - node-corner-radius (length, none): Default value of
/// #the-param[node][corner-radius].
///
/// - edge-corner-radius (length, none): Default value of
/// #the-param[edge][corner-radius].
///
/// - node-defocus (number): Default value of #the-param[node][defocus].
///
/// - label-sep (length): Default value of #the-param[edge][label-sep].
///
/// - label-size (length): Default value of #the-param[edge][label-size].
///
/// - label-wrapper (function): Default value of
/// #the-param[edge][label-wrapper].
///
/// - mark-scale (percent): Default value of #the-param[edge][mark-scale].
///
/// - crossing-fill (paint): Color to use behind connectors or labels to give
/// the illusion of crossing over other objects. See
/// #the-param[edge][crossing-fill].
///
/// - crossing-thickness (number): Default thickness of the occlusion made by
/// crossing connectors. See #param[edge][crossing-thickness].
///
/// - axes (pair of directions): The orientation of the diagram's axes.
///
/// This defines the elastic coordinate system used by nodes and edges. To make
/// the $y$ coordinate increase up the page, use `(ltr, btt)`. For the matrix
/// convention `(row, column)`, use `(ttb, ltr)`.
///
/// #stack(
/// dir: ltr,
/// spacing: 1fr,
/// fletcher.diagram(
/// axes: (ltr, ttb),
/// debug: 1,
/// node((0,0), $(0,0)$),
/// edge((0,0), (1,0), "->"),
/// node((1,0), $(1,0)$),
/// node((1,1), $(1,1)$),
/// node((0.5,0.5), `axes: (ltr, ttb)`),
/// ),
/// fletcher.diagram(
/// axes: (ltr, btt),
/// debug: 1,
/// node((0,0), $(0,0)$),
/// edge((0,0), (1,0), "->"),
/// node((1,0), $(1,0)$),
/// node((1,1), $(1,1)$),
/// node((0.5,0.5), `axes: (ltr, btt)`),
/// ),
/// fletcher.diagram(
/// axes: (ttb, ltr),
/// debug: 1,
/// node((0,0), $(0,0)$),
/// edge((0,0), (1,0), "->", bend: -20deg),
/// node((1,0), $(1,0)$),
/// node((1,1), $(1,1)$),
/// node((0.5,0.5), `axes: (ttb, ltr)`),
/// ),
/// )
///
/// - render (function): After the node sizes and grid layout have been
/// determined, the `render` function is called with the following arguments:
/// - `grid`: a dictionary of the row and column widths and positions;
/// - `nodes`: an array of nodes (dictionaries) with computed attributes
/// (including size and physical coordinates);
/// - `edges`: an array of connectors (dictionaries) in the diagram; and
/// - `options`: other diagram attributes.
///
/// This callback is exposed so you can access the above data and draw things
/// directly with CeTZ.
#let diagram(
..args,
debug: false,
axes: (ltr, ttb),
spacing: 3em,
cell-size: 0pt,
edge-stroke: 0.048em,
node-stroke: none,
edge-corner-radius: 2.5pt,
node-corner-radius: none,
node-inset: 6pt,
node-outset: 0pt,
node-shape: auto,
node-fill: none,
node-defocus: 0.2,
label-sep: 0.4em,
label-size: 1em,
label-wrapper: edge => box(
[#edge.label],
inset: .2em,
radius: .2em,
fill: edge.label-fill,
),
mark-scale: 100%,
crossing-fill: white,
crossing-thickness: 5,
render: (grid, nodes, edges, options) => {
cetz.canvas(draw-diagram(grid, nodes, edges, debug: options.debug))
},
) = {
let spacing = as-pair(spacing).map(as-length)
let cell-size = as-pair(cell-size).map(as-length)
let options = (
debug: int(debug),
axes: axes,
spacing: spacing,
cell-size: cell-size,
node-inset: node-inset,
node-outset: node-outset,
node-shape: node-shape,
node-stroke: node-stroke,
node-fill: node-fill,
node-corner-radius: node-corner-radius,
edge-corner-radius: edge-corner-radius,
node-defocus: node-defocus,
label-sep: label-sep,
label-size: label-size,
label-wrapper: label-wrapper,
edge-stroke: as-stroke(edge-stroke),
mark-scale: mark-scale,
crossing-fill: crossing-fill,
crossing-thickness: crossing-thickness,
)
let (nodes, edges) = interpret-diagram-args(args)
box(context {
let options = options
options.em-size = measure(h(1em)).width
let to-pt(len) = to-abs-length(len, options.em-size)
options.spacing = options.spacing.map(to-pt)
options.cell-size = options.cell-size.map(to-pt)
let nodes = nodes.map(node => {
node = resolve-node-options(node, options)
node = measure-node-size(node)
node
})
let edges = edges.map(edge => resolve-edge-options(edge, options))
// try resolving node uv coordinates. this resolves to NaN coords if the coord depends on physical lengths
let ctx = (
target-system: "uv",
em-size: (width: options.em-size, height: options.em-size),
)
let (ctx-with-uv-anchors, nodes) = resolve-node-coordinates(nodes, ctx: ctx)
// nodes and edges whose uv coordinates can be resolved without knowing the grid
let rects-affecting-grid = nodes
.filter(node => not is-nan-vector(node.pos.uv))
.map(node => (center: node.pos.uv, size: node.size))
let vertices-affecting-grid = edges.map(edge => {
resolve-edge-vertices(edge, ctx: ctx-with-uv-anchors + (target-system: "uv"), nodes)
}).join() + () // coerce none to ()
vertices-affecting-grid = vertices-affecting-grid.filter(vert => not is-nan-vector(vert))
// determine diagram's elastic grid layout
let grid = compute-grid(rects-affecting-grid, vertices-affecting-grid, options)
// now with grid determined, compute final (physical) coordinates for nodes and edges
let (ctx-with-xyz-anchors, nodes) = resolve-node-coordinates(nodes, ctx: (target-system: "xyz", grid: grid))
let (_, nodes) = resolve-node-coordinates(nodes, ctx: (target-system: "uv", grid: grid))
// resolve enclosing nodes
let (extra-anchors, nodes) = resolve-node-enclosures(nodes, ctx-with-xyz-anchors)
ctx-with-xyz-anchors.nodes += extra-anchors
// resolve edges
edges = edges.map(edge => {
edge.final-vertices = resolve-edge-vertices(
edge, ctx: ctx-with-xyz-anchors + (target-system: "xyz", grid: grid), nodes
)
edge = convert-edge-corner-to-poly(edge)
edge = apply-edge-shift(grid, edge)
edge
})
render(grid, nodes, edges, options)
})
}
|
https://github.com/sitandr/typst-examples-book | https://raw.githubusercontent.com/sitandr/typst-examples-book/main/src/snippets/text/text_shadows.md | markdown | MIT License | # Fake italic & Text shadows
## Skew
```typ
// author: Enivex
#set page(width: 21cm, height: 3cm)
#set text(size:25pt)
#let skew(angle,vscale: 1,body) = {
let (a,b,c,d)= (1,vscale*calc.tan(angle),0,vscale)
let E = (a + d)/2
let F = (a - d)/2
let G = (b + c)/2
let H = (c - b)/2
let Q = calc.sqrt(E*E + H*H)
let R = calc.sqrt(F*F + G*G)
let sx = Q + R
let sy = Q - R
let a1 = calc.atan2(F,G)
let a2 = calc.atan2(E,H)
let theta = (a2 - a1) /2
let phi = (a2 + a1)/2
set rotate(origin: bottom+center)
set scale(origin: bottom+center)
rotate(phi,scale(x: sx*100%, y: sy*100%,rotate(theta,body)))
}
#let fake-italic(body) = skew(-12deg,body)
#fake-italic[This is fake italic text]
#let shadowed(body) = box(place(skew(-50deg, vscale: 0.8, text(fill:luma(200),body)))+place(body))
#shadowed[This is some fancy text with a shadow]
``` |
https://github.com/HiiGHoVuTi/requin | https://raw.githubusercontent.com/HiiGHoVuTi/requin/main/graph/infini.typ | typst | #import "../lib.typ": *
#show heading: heading_fct
Dans cet exercice, le terme "graphe" fait aussi bien référence au graphes infinis que les graphes finis.
Le graphe _grille_ est le graphe $(ZZ^2, E)$ avec $E$ l'ensemble des paires qui ne diffère que d'un (1) en une (1) coordonnée. Le graphe _ligne_ est le graphe $(NN, {(x,y) | |x-y| = 1 : x,y in NN})$.
=== Bouts et Rayon infinis
Un rayon dans $G = (S,E)$ est une suite $(x_z)_(z in NN) in S^NN$ de sommets tous distincts tel que $forall i in NN, (x_i, x_(i+1)) in E$
#question(0)[
Donner un rayon du graphe ligne et du graphe grille.
]
#correct[
On peut poser la suite $(42,43,44,...)$ pour le graphe ligne et $((-i,17))_(i in NN)$ pour le graphe grille
]
On dit que deux rayons $r_1,r_2$ sont équivalents s'il existe un troisième rayon $r_3$ qui contiens une infinité de sommets des deux premiers rayons.
#question(2)[
Montrer que c'est une relation d'équivalence. On la notera $r_1 tilde r_2$
]
#correct[
On vérifie les axiomes :
- *Réflexivité* On a que $r_1 tilde r_1$ car en posant $r_3 := r_1$ on a bien ${r_i : i in NN} sect {r_i : i in NN}$ infini
- *Symmétrie* Si $r_1 tilde r_2$, alors il existe $r_3$ rencontrant $r_1$ et $r_2$ sur une infinité de sommets. Et comme c'est le cas, $r_2 tilde r_1$.
- *Transitivité* Supposons $r_1 tilde r_2$ et $r_2 tilde r_3$. On choisi $r'$ (resp. $r''$) qui rencontre une infinité de fois $r_1$ et $r_2$ (resp. $r_2$ et $r_3$). On constuit par récurrence un chemin qui va concaténer des bouts des autres chemin ensemble avec une idée d'aller-viens :
- On commence sur $r_1$ jusqu'au prochain indice en commun avec $r'$
- On continue sur $r'$ jusqu'au prochain indice en commun avec $r_2$
- On continue sur $r_2$ jusqu'au prochain indice en commun avec $r''$
- On continue sur $r''$ jusqu'au prochain indice en commun avec $r_3$
- On continue sur $r_3$ jusqu'au prochain indice en commun avec $r''$
- On continue sur $r''$ jusqu'au prochain indice en commun avec $r_2$
- ... On fait $r_1 -> r' -> r_2 -> r'' -> r_3 -> r'' -> r_2 -> r' -> r_1$ et recommence
*TRES IMPORTANT A NOTER* : Il est possible que un des chemins que l'on souhaite emprunter possède des noeuds déjà empruntés, et par conséquent le chemin que l'on crée ne serai pas un rayon (tout les sommets doivent etre distincts). Mais comme à chaque étape le nombre de noeuds déjà visité est fini, il suffit de rester sur le chemin actuel jusqu'à ce que le prochain chemin n'a plus de noeud en commun avec les noeuds déjà visité.
]
#question(2)[
Montrer que tout les rayons du graphe grilles sont équivalent.
]
#correct[
Soit $r_1$ et $r_2$ deux rayons. On considère le chemin qui fait des carrés en spirale. Alors comme il couvre tout $ZZ^2$, il passe par tout les sommets de $r_1$ et de $r_2$.
]
Un _bout_ est une classe d'équivalence de $tilde$. On dénote l'ensemble des bouts de $G$ par $cal(B)(G)$.
#question(0)[
Combien (si infini, indiquer si c'est dénombrable ou indénombrable) de bouts possède les graphes suivants :
- Le graphe ligne
- Le graphe grille
- Le graphe $(ZZ, E)$ avec $E$ l'ensemble des couples de la forme $(x,x+1)$ ou $(x+1,x)$
- Le graphe $(Sigma^*, E)$ avec des arretes entre $w$ et $w alpha$ pour $alpha in Sigma := {a b}$
]
#correct[
- On a montrer que tout les chemins du graphe grille sont équivalents, il y a donc qu'une unique classe d'équivalence et donc 1 seul bout.
- Soit $r_1, r_2$ deux rayons du graphe ligne. Ils sont de la forme $(k + i)_(i in NN)$, car ne peuvent que aller vers le coté positif (s'ils vont dans le coté négatif, ils tombent à $0$ et sont bloqués). On pose $r_3 = r_2$,et on note $k_1, k_2$ les deux constantes $k$ pour $r_1$ et $r_2$. Alors $r_3$ et $r_2$ ont les sommets $[| max (k_1, k_2) ; +oo [|$ en commun (et $r_2 = r_3$ donc ils ont bien une infinité de somemt en commun).
Donc tout les rayons sont équivalents, il n'y a donc qu'un unique bout.
- 2 bouts.
- Un nombre indénombrable de bouts, précisément un pour chaque $Sigma^NN$.
]
On note $C_G (x)$ la composante connexe de $x in S$ pour $G = (S,E)$ un graphe. On dénote par $G - T$ le graphe $G[S\\T]$ pour $T subset.eq S$.
// https://link.springer.com/content/pdf/10.1007/BF02993589.pdf
//
#question(1)[
Soit $G = (S,E)$ un graphe infini, $U$ un bout de $G$ et $T subset.eq S$ fini. Montrer qu'il existe qu'une seule composante connexe $C$ tel qu'il existe un rayon $r in U$ ne passant pas par $T$. On dénote cette composante connexe par $C_(G - T) (U)$
]
On dit qu'un graphe infini est localement fini si chaque sommet est de degrée fini. On dit qu'un bout $U$ du graphe $G = (S,E)$ est _libre_ s'il existe un $T subset.eq S$ fini tel que $C_(G - T) (U) != C_(G - T)(V)$ pour tout $V in cal(B)(G)$ différent de $U$.
#question(1)[
Donner un exemple d'un graphe avec un unique bout non libre.
]
#correct[
On se donne pour $Sigma = {a,b}$ le graphe $(a^* b^*, E )$ ou $E$ est l'ensemble de couple de la forme $(w, w . alpha)$ pour $alpha in Sigma$ (ou symétriquement).
On montrer que le bout du rayon $(a^i)_(i in NN)$ est libre : soit $T$ un ensemble fini de noeuds. Comme chaque noeud s'écrit $$
]
#question(3)[
Montrer que si $G$ possède un nombre fini de bouts, alors ils sont tous libres.
]
#correct[
On montre que le graphe ne peux pas posséder plus d'un bouts non libre dans la meme composante connexe, puis on montrera qu'il ne peut pas composer un unique bout non libre dans une composante connexe.
Soit $U,V$ deux bouts non libre du graphes, on montre que c'est les memes. Soit $r_1 in U$ et $r_2 in V$, on construit par récurrence un chemin $r_3$ qui passe par une infinité de sommet de $r_1$ et de $r_2$.
// TODO: Facile car à chaque itération on a qu'une nombre fini de neoud interdit donc par non liberté ils sont toujours dans la meme composante connexe.
Supposons que $U$ soit l'unique bout libre dans une composante connexe. Alors pour totu les autres bouts $x in cal(B)(G)\\{U}$ il existe un $T_x$ fini. On a que $ T := union.big_(w in cal(B)(G)\\{U}) T_x $ sépare tout les autres bouts. Donc $C_T (U)$ ne contiens que le bout $U$, donc $U$ est libre, c'est absurde.
]
#question(3)[
Montrer que si $G$ connexe localement fini possède un nombre infini de bouts, alors il en existe un non libre.
]
#correct[
Blahaj
]
#question(1)[
Est-ce que c'est vrai si le graphe n'est pas localement fini ?
]
#correct[
Non, on regarde le graphe "étoile", on un noeud est connecté à une infinité de graphe "demi-droite" $(NN, {(x,x+1) : x in NN})$
]
=== Equipotence
Soient $G_1 = (S_1, E_1)$ et $G_2 = (S_2, E_2)$ deux graphes, on dit que $f : S_1 -> S_2$ est un _morphisme_ si pour tout $(x,y) in E_1$ on a $(f(x),f(y)) in E_2$. On dit que $f$ est un _isomorphisme_ si $f$ est bijective et que la bijection réciproque est aussi un morphisme.
On dit que deux graphes sont _isomorphes_ s'il existe un isomorphisme entre les deux.
#question(1)[
Soient $G_1 = (S_1, E_1)$ et $G_2 = (S_2, E_2)$, montrer que $f : S_1 -> S_2$ est un isomorphisme si et seulement si $forall x,y in S_1$, on a $(x,y) in E_1 <=> (f(x),f(y)) in E_2$
]
On dit que que deux graphes $G, H$ sont équipotent s'il existe un morphisme injectif de $G$ dans $H$ et réciproquement.
#question(0)[
Montrer que si deux graphes sont isomorphes, alors ils sont équipotent.
]
#question(1)[
Donner deux graphes équipotent non isomorphes.
]
=== Graphe de Rado
On dit que $G$ un graphe vérifie _la propriété d'extension_ si pour tout $U,V subset.eq S$ fini, il existe un sommet $s in S$ tel que $(s,x) in E$ pour tout $x in U$ et $(s,y) in.not E$ pour tout $y in V$.
#question(0)[
Soit $G$ respectant la propriété d'extension. Montrer que tout noeud $x,y$ sont au moins à une distance 2.
]
#question(3)[
Montrer que deux graphes possédant la propriété d'extension sont forcément isomorphes.
]
On appelle le _graphe de Rado_ l'unique graphe à isomorphisme près respectant cette propriété.
#question(1)[
Montrer qu'un noeud du graphe de Rado est de degrée infini.
]
#question(1)[
Soit $G$ un graphe fini, montrer qu'il existe un morphisme de $G$ dans le graphe de Rado.
]
On dit que $H$ est un _sous-graphe_ de $G = (S,E)$ (pottentiellement infini) s'il existe $I subset.eq S$ tel que $H$ est isomorphe à $G[I]$
#question(2)[
Montrer que si $G$ est un graphe infini est tel que tout graphe fini $H$ soit un sous-graphe de $G$, alors le graphe $G$ est isomorphe au graphe de Rado.
]
On s'intéresse à différentes méthodes pour le construire.
==== Construction par une bijection
On défini le prédicat $"BIT"(x,y)$ comme étant le fait que le $x$-ème bit de $y$ en binaire est un $1$.
On considère le graphe $G = (NN^*, {(x,y) : "BIT"(x,y) or "BIT"(y,x)} )$.
#question(1)[
Montrer qu'il existe un $phi : NN >-> cal(P)_f (NN) $ une bijection entre $NN$ et $cal(P)_f (NN)$ les parties finies de $NN$.
]
#question(2)[
Montrer que le graphe $G$ possède la propriété d'extension.
]
On fixe maintenant $phi$ une bijection entre $NN$ et $cal(P)_f (NN)$. On défini alors la relation
$ x space ~_phi space y <=> (x in phi(x) or y in phi(x)) and x != y $
#question(2)[
Montrer que le graphe $(NN, ~_phi)$ vérifie la propriété d'extension.
]
==== Construction probabiliste
Soit $(X_(i,j))_(i<j) arrow.r.hook cal(B)(0.5)$ une suite de v.a.i.i suivant la loi de bernoulli de paramètre $0.5$. On considère alors le graphe tel que pour $i<j$, on a $(i,j) in E <=> X_(i,j) = 1$
#question(2)[
Montrer que le graphe obtenu respecte la propriété d'extension avec probabilité 1.
]
#question(1)[
Montrer que c'est le cas pour $(X_(i,j))_(i<j) arrow.r.hook cal(B)(p)$ pour tout $1 > p > 0$.
]
|
|
https://github.com/TechnoElf/mqt-qcec-diff-presentation | https://raw.githubusercontent.com/TechnoElf/mqt-qcec-diff-presentation/main/template/colour.typ | typst | #let tum_blue = rgb(0, 101, 189)
#let tum_white = rgb(255, 255, 255)
#let tum_black = rgb(0, 0, 0)
|
|
https://github.com/jgm/typst-hs | https://raw.githubusercontent.com/jgm/typst-hs/main/test/typ/compiler/methods-02.typ | typst | Other | // Test multiline chain in code block.
#{
let rewritten = "Hello. This is a sentence. And one more."
.split(".")
.map(s => s.trim())
.filter(s => s != "")
.map(s => s + "!")
.join("\n ")
test(rewritten, "Hello!\n This is a sentence!\n And one more!")
}
|
https://github.com/Myriad-Dreamin/typst.ts | https://raw.githubusercontent.com/Myriad-Dreamin/typst.ts/main/fuzzers/corpora/layout/place-float-auto_01.typ | typst | Apache License 2.0 |
#import "/contrib/templates/std-tests/preset.typ": *
#show: test-page
//
// // Error: 2-20 automatic positioning is only available for floating placement
// // Hint: 2-20 you can enable floating placement with `place(float: true, ..)`
// #place(auto)[Hello] |
https://github.com/claudiomattera/typst-modern-cv | https://raw.githubusercontent.com/claudiomattera/typst-modern-cv/master/CHANGELOG.md | markdown | MIT License | # Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [0.3.0] - 2024-10-20
### Changed
- Update to new syntax for Typst 0.12.0
## [0.2.0] - 2024-10-01
### Added
- Add support for rename GitHub projects
## [0.1.0] - 2023-11-21
Initial implementation
### Added
- Add function to draw a timeline
- Add function to draw a timeline entry
- Add function to draw work experience
- Add function to draw education
- Add function to draw publication
- Add function to draw language proficiency
- Add function to draw projects
- Add function to draw curriculum vitæ header
- Add theme `underline`
- Add theme `lighten`
[Unreleased]: https://git.claudiomattera.it/claudiomattera/typst-modern-cv/
[0.1.0]: https://git.claudiomattera.it/claudiomattera/typst-modern-cv/releases/tag/0.1.0
[0.2.0]: https://git.claudiomattera.it/claudiomattera/typst-modern-cv/releases/tag/0.2.0
[0.3.0]: https://git.claudiomattera.it/claudiomattera/typst-modern-cv/releases/tag/0.3.0
|
https://github.com/Myriad-Dreamin/typst.ts | https://raw.githubusercontent.com/Myriad-Dreamin/typst.ts/main/fuzzers/corpora/layout/pagebreak-weak_01.typ | typst | Apache License 2.0 |
#import "/contrib/templates/std-tests/preset.typ": *
#show: test-page
// After only ignorables & invisibles
// Should result in two pages.
First
#pagebreak(weak: true)
#counter(page).update(1)
#metadata("Some")
#pagebreak(weak: true)
Second
|
https://github.com/Area-53-Robotics/53E-Notebook-Over-Under-2023-2024 | https://raw.githubusercontent.com/Area-53-Robotics/53E-Notebook-Over-Under-2023-2024/giga-notebook/entries/test-auton-movement/entry.typ | typst | Creative Commons Attribution Share Alike 4.0 International | #import "/packages.typ": notebookinator
#import notebookinator: *
#import themes.radial.components: *
#import "/utils.typ"
#show: create-body-entry.with(
title: "Test: Autonomous Movement",
type: "test",
date: datetime(year: 2024, month: 1, day: 24),
author: "<NAME>",
witness: "Me<NAME>",
)
#metadata(none) <test-auton-movement>
While implementing our autonomous routines, we noticed that our movements were
extremely imprecise. While our odometry could compensate for this, it was
leading to our routes being pretty inconsistent. We decided it would be worth it
to spend time testing and tuning our PID controllers.
= Testing Procedure
The process we used for tuning our controllers is as follows:
== Linear Controller
The linear controller controls forward and backward motion. We used the
following steps to tune this controller.
+ Place the robot onto the field tiles.
+ Prepare the robot with a program containing the following movement:
```cpp
chassis.moveToPoint(0, 10, 5000, {}, false); // Move forward 10 inches
```
You'll also need to include the following code to log the PID output:
```cpp
float total = error * kP + integral * kI + derivative * kD;
if (name == "lateral") {
  lemlib::telemetrySink()->debug("{},{},{},{},{}", 0, total, error * kP, integral * kI, derivative * kD);
} else if (name == "angular") {
  lemlib::telemetrySink()->debug("{},{},{},{},{}", 1, total, error * kP, integral * kI, derivative * kD);
}
```
3. Connect to the robot with the Loginator.
+ Reset all of the PID gains to 0.
+ Set `kP` to 5.
+ Run the movement, and record the output with the Loginator.
+ Increase `kP`, and run the program again.
+ Continue the last step until the robot starts oscillating around the point.
+ Increase `kD`, and run the program again.
+ Continue the last step until the robot stops oscillating, and the Loginator
  shows that the output doesn't dip more than 0.5 below 0.
#admonition(
type: "note",
)[
It's important to tune the robot on the field directly, since different surfaces
give the robot more or less traction, and we want conditions identical to those
of an actual auton run.
]
== Angular Controller
The process for tuning the angular controller is largely the same as tuning the
linear one. Use the same steps, except use this movement instead of the other
one:
```cpp
chassis.turnTo(10, 0, 5000, {}, false); // Turn the robot 90 degrees
```
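
To make these gains easier to reason about, here's a rough sketch of the kind of
update loop a PID controller runs each tick. This is our own simplified
illustration, not LemLib's actual implementation, and the names are just for
this example:

```cpp
// Simplified PID update loop (illustrative only, not LemLib's real code).
struct Pid {
  double kP, kI, kD;
  double integral = 0;
  double prevError = 0;

  // error = target - current (inches for lateral, degrees for angular)
  double update(double error, double dt) {
    integral += error * dt;                       // kI fights steady-state error
    double derivative = (error - prevError) / dt; // kD damps oscillation
    prevError = error;
    return error * kP + integral * kI + derivative * kD;
  }
};
```

Raising `kP` scales the response to the current error, which is why too much of
it causes the oscillation that `kD` then has to damp out.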
= Results
We've downloaded the testing data from before and after the tuning process.
== Before
Here are the settings we were using prior to the tuning. They are the default
constants for LemLib.
```cpp
// linear motion controller
lemlib::ControllerSettings linearController(
10, // proportional gain (kP)
30, // derivative gain (kD)
1, // small error range, in inches
100, // small error range timeout, in milliseconds
3, // large error range, in inches
500, // large error range timeout, in milliseconds
20 // maximum acceleration (slew)
);
```
Here's the output of the 10" movement with the constants above. As you can
see, the movement heavily oscillates, over-accelerating and crossing over the
zero threshold.
#let data = utils.plot-from-csv(read("./linear-before.csv"))
#plot(
length: 70%,
title: "Linear Pid Output",
x-label: "Time (ms)",
y-label: "Output (voltage)",
..data,
)
#colbreak()
Here are the constants we used for the angular controller. They are also the
default for LemLib.
```cpp
// angular motion controller
lemlib::ControllerSettings angularController(
2, // proportional gain (kP)
10, // derivative gain (kD)
1, // small error range, in degrees
100, // small error range timeout, in milliseconds
3, // large error range, in degrees
500, // large error range timeout, in milliseconds
20 // maximum acceleration (slew)
);
```
Here's the output of the PID controller over time with the 90#sym.degree movement:
#let data = utils.plot-from-csv(read("./angular-before.csv"))
#plot(
length: 70%,
title: "Angular Pid Output",
x-label: "Time (ms)",
y-label: "Output (voltage)",
..data,
)
As you can see, the controller heavily oscillates, and stops moving before it
can get anywhere near the target angle.
== After
After tuning, our results were much better. Here are the final constants we
settled on for the linear controller: a value of 10 for `kP` and a value of
35 for `kD`.
```cpp
// linear motion controller
inline lemlib::ControllerSettings linearController(
10, // proportional gain (kP)
0, // integral gain (kI)
35, // derivative gain (kD)
3, // anti windup
1, // small error range, in inches
100, // small error range timeout, in milliseconds
3, // large error range, in inches
500, // large error range timeout, in milliseconds
40 // maximum acceleration (slew)
);
```
Here's the output of the controller after tuning. As you can see the results are
much better. The output barely crosses the 0 mark, and goes almost directly to
the target.
#let data = utils.plot-from-csv(read("./linear-after.csv"))
#plot(
title: "Linear Pid Output",
x-label: "Time (ms)",
y-label: "Output (voltage)",
..data,
)
Here are the new angular constants that we ended up with. We now have a `kP` of
2 and a `kD` of 30.
```cpp
// angular motion controller
inline lemlib::ControllerSettings angularController(
2, // proportional gain (kP)
0.01, // integral gain (kI)
30, // derivative gain (kD)
3, // anti windup
1, // small error range, in degrees
100, // small error range timeout, in milliseconds
3, // large error range, in degrees
500, // large error range timeout, in milliseconds
10 // maximum acceleration (slew)
);
```
#admonition(
type: "note",
)[
We also ended up having to add a `kI` value of 0.01. No matter what we did,
we couldn't get the controller to move exactly to the point, so the integral
term was needed.
]
Here's the output after tuning. As you can see, the controller goes directly to
the target, and doesn't oscillate at all.
#let data = utils.plot-from-csv(read("./angular-after.csv"))
#plot(
title: "Angular Pid Output",
x-label: "Time (ms)",
y-label: "Output (voltage)",
..data,
)
Now that the tuning is complete we'll have much faster and more precise
autonomous routines.
|
https://github.com/typst/packages | https://raw.githubusercontent.com/typst/packages/main/packages/preview/haw-hamburg/0.1.0/examples/bachelor-thesis/chapters/02_article_1.typ | typst | Apache License 2.0 | #import("../dependencies.typ"): *
= The First Article
All human beings are born free and equal in dignity and rights. They are endowed with reason and conscience and should act towards one another in a spirit of brotherhood.
@UN[Article 1]
|
https://github.com/polarkac/MTG-Stories | https://raw.githubusercontent.com/polarkac/MTG-Stories/master/stories/031%20-%20Hour%20of%20Devastation/005_Favor.typ | typst | #import "@local/mtgstory:0.2.0": conf
#show: doc => conf(
"Favor",
set_name: "Hour of Devastation",
story_date: datetime(day: 05, month: 07, year: 2017),
author: "<NAME>",
doc
)
#emph[Three gods have fallen since the Gate to the Afterlife opened to reveal unimaginable horrors. Only Hazoret the Fervent and Bontu the Glorified remain to protect the mortals on Amonkhet. But will they be able to hold against the onslaught until the God-Pharaoh returns to protect his people?]
#v(0.35em)
#line(length: 100%, stroke: rgb(90%, 90%, 90%))
#v(0.35em)
Despair brought the god to her knees.
For the third time that day, a rushing pain washed over her, sapping her limbs of strength, corroding her heart and spirit.
#emph[Another god is dead.]
Hazoret gazed toward the horizon, where swarms of locusts still blotted out the suns. Around her, the horrors of the desert rampaged through the streets, terrorizing the citizens of Naktamun.
For as long as Hazoret could remember, she and her siblings had protected their people from the nightmares of the world. Together, they pushed back the darkness, shielded the mortals from the curses of the world, and hunted the shadows that lurked just beyond the city.
But the keeper of the Hekma barrier was dead.
The golden archer, the sister whose arrows pierced those who would threaten the city, was dead.
The indomitable wanderer, strongest of her siblings and patroller of the desert, was dead.
#emph[Bontu and I are all that remain.]
A myriad of prayers reverberated in the back of her mind, the deluge of mortal fears falling on her shoulders, their number and volume growing each time a god fell.
Hazoret clenched her teeth and willed herself to stand. She would not falter. Not now—not when her children needed her most. Not when all the promises of the God-Pharaoh seemed to be crumbling, and her siblings were falling one by one to a dark god.
#emph[I must protect my children. I must protect Bontu.]
Hazoret closed her eyes and let go.
Let go of all control. Let go of any restraint. Hazoret let go of any shreds of doubt and uncertainty and fell forward, plunging into fervor, into action, into rage and flame and the seamless dance of her battle frenzy. Her two-pronged weapon slashed through throngs of desert mummies as she charged, a golden blur cleaving the air around her. The wayward cry of a child sent her leaping across the thoroughfare, shielding the boy from a collapsing wall and pushing him toward the arms of his fleeing cropmates. A giant hellion burst up from the ground, smashing through buildings and charging a cluster of citizens. With a word and a thought, Hazoret sent gouts of flame scorching through the air, reducing the monster to ash.
#figure(image("005_Favor/01.jpg", width: 100%), caption: [Chaos Maw | Art by <NAME>yle], supplement: none, numbering: none)
Hazoret fought with the full fury of a god unleashed. Around her, mortals rallied and found renewed zeal, Hazoret's presence igniting their own passion and power. As Hazoret impaled a desert horror on her spear, a flashing whirl of blades caught her eye. A mortal wielding twin khopeshes cut through a pack of undead hyenas, moving at impossible speeds. The beasts snapped and snarled around her, but the mortal made short work of them, dodging powerful jaws, severing tendons, and cutting through limbs, immobilizing the brutes.
As the mortal plunged both of her blades into the last of the pack, Hazoret finally saw her face—Samut, the dissenter. Samut, blasphemer of the God-Pharaoh. Samut, who had asked Hazoret, "#emph[Is this paradise?] " as the Gate to the Afterlife opened onto wastes, unleashing the waves of terror that now consumed them.
The mortal looked up from her grisly work and locked eyes with Hazoret. Beside her, the champion Djeru ran up, also gazing up at the god.
"Hazoret! What are we to do?" Samut shouted.
Hazoret looked back at the chaos spread across her beloved city.
"#strong[Protect each other, my children. Take those you can and hide among the desert sands. We must survive until the God-Pharaoh comes to right these wrongs.] "
Samut shook her head. "The God-Pharaoh will not fix this—"
"#strong[We do not have time for words or doubt.] " Hazoret spoke with the full force of her will. Samut and Djeru both bowed in deference to their god, silenced by her power.
Hazoret sighed and softened ever so slightly. She knelt, piercing Samut with her gaze.
"#strong[You are strong, Samut, and strong-willed. Channel that strength to protect your brethren. Amonkhet needs you. And you, Djeru, my final champion.] "
The chilling roar of a sandwurm off in the distance drew Hazoret's attention. She readied her weapon and stood.
"We will obey, Hazoret. We will protect our brothers and sisters." Djeru spoke, voice clear and unwavering. Samut however gazed at Hazoret, doubt still dancing behind her eyes.
"Who will protect #emph[you] , Hazoret?" Samut asked.
A small smile flitted across Hazoret's face. "#strong[Go. Fight. I will endure.] "
A short stretch from where they stood, a massive monument crumbled as the wurms burst through its walls, chasing after viziers whose spells bounced harmlessly off their toughened hides. Hazoret didn't wait for Samut and Djeru's response, instead dashing toward the offending beasts, weapon and flame at ready, a battle cry already in her throat.
#v(0.35em)
#line(length: 100%, stroke: rgb(90%, 90%, 90%))
#v(0.35em)
#emph[It is not enough.]
For every mortal she saved, she knew a dozen more were lost. Her heart ached with their fear and pain. Each empty death sent a fresh twinge of guilt coursing through her. So many were mere children, too young to have ascended the Trials. The Hour of Glory was supposed to test the remaining mortals—to give them the chance to prove themselves worthy—but instead, they were left as prey, victims of the encroaching desert's endless hunger. Every mortal death meant one more person caught in the Curse of Wandering's cruel grasp—doomed to return as undead, hunting the very friends whom they had died fighting to protect.
Hazoret's heart yearned for her God-Pharaoh. What had happened to delay his return? Could the three insectoid gods have sabotaged his great work of preparing the path to the Afterlife?
Hazoret shook her head. #emph[He would not forsake us.]
Her gaze turned toward the heart of the city, where the empty throne of the God-Pharaoh stood grand and majestic—yet another reminder of the God-Pharaoh's promised arrival.
It was covered in locusts, a black stain on the blood red skyline.
A guttural roar ravaged Hazoret's throat as she ignited the air around her, sending a wave of fire to burn clean her God-Pharaoh's throne. Countless locusts were disintegrated in the blast, but the smoke had barely cleared before an even larger swarm took the place of those Hazoret had destroyed.
#figure(image("005_Favor/02.jpg", width: 100%), caption: [Forbid | Art by <NAME>], supplement: none, numbering: none)
Around her, Naktamun continued to fall.
Desperation seeped into Hazoret's heart. In her head, the buzz of prayers had grown deafening, a din matched only by the buzz of the locusts.
And so the god prayed.
She prayed to the God-Pharaoh for his return. She prayed for him to fulfill the prophecy. She prayed for him to arrive and once again sift order from chaos.
As she prayed, above the throne, the sky rippled as though bent by a mirage. Then with a low rumble, the air #emph[ruptured] . A pinprick of black nothingness, a tiny hole in the fabric of reality, hung suspended in the desert air.
The void grew, the red sky around it eroding and flaking away like burnt paper, crumbling into the void. Cracks spindled outward from the hole and crackles of blue energy flared out then burned to black, scorch marks suspended midair. More chunks of reality collapsed into the hole, accelerating into oblivion as the growing rift consumed the space above the throne, growing into a massive portal.
Golden horns appeared first, gliding out of the dark portal, gleaming and flawless. The dragon's perfect form followed, sliding out of the void, enormous and lithe, power coiled behind massive wings and sharpened claws.
The God-Pharaoh had arrived.
#figure(image("005_Favor/03.jpg", width: 100%), caption: [Behold my Grandeur | Art by <NAME>], supplement: none, numbering: none)
Hazoret raised her arms in exultation, praise dancing on her lips. He truly was as great as she remembered, his massive golden form an incarnation of perfection. In her mind, the voices crying out in desperate prayer lessened dramatically, even as a cacophonous outpour of reverence echoed from the mortals around her. The voices of Amonkhet shouted out in relief and joy.
The God-Pharaoh landed before his throne, talons clattering on the polished stone. He lowered his gaze, eyeing the swath of death and destruction that had been carved through Naktamun.
And he #emph[smiled] .
#figure(image("005_Favor/04.jpg", width: 100%), caption: [Imminent Doom | Art by <NAME>], supplement: none, numbering: none)
Dread flooded Hazoret's body. Rhonas's dying words echoed in her mind as she watched a wave of desperate mortals rush toward the dragon, cries of relief and joy and exultation echoing in their wake. The God-Pharaoh gazed down at them, raising a clawed hand, and Hazoret felt the air crackle with energy.
A spark of violet light burst forth from between his talons, and from the sky, a deluge of black flames streaked down, consuming everything it touched.
#figure(image("005_Favor/05.jpg", width: 100%), caption: [Torment of Hailfire | Art by <NAME>], supplement: none, numbering: none)
The mortals' cheers turned to screams as destruction rained from the heavens.
Hazoret dashed forward, looming over the mortals closest to her, trying to block them from the destructive magic with her body. With a whirl of her spear, she conjured a shield of swirling sand and flame around her, gritting her teeth as the God-Pharaoh's spell crashed down around them.
As the mortals at her feet sobbed, Hazoret's mind raced at the turn of events.
#emph[The God-Pharaoh has arrived, but brings only destruction. The Hours tick by and the prophecies have been subverted, their fulfillment a dark and perverse twisting of their original promise.]
A splitting headache seized her as she tried to think of the past, to remember the God-Pharaoh before he had left. Her shield faltered as her concentration broke, her thoughts dancing between Rhonas's final warning and Samut's questions. Both god and mortal had spoken against their God-Pharaoh, but when Hazoret tried to focus on what they had said, her head buzzed with pain. The impossibility of the God-Pharaoh being anything but just and good combated what her senses showed her.
#emph[He rains destruction upon his people, his children.]
Hazoret peered up at the God-Pharaoh. His spell had finally relented, and his gaze drifted toward the gate in the distance. Hazoret looked and was surprised to find the third god—the one with the scarab head—still standing before the gate. Despite the mayhem around the god, it seemed to have stayed eerily still, an indigo statue amidst the pandemonium. The God-Pharaoh spread his wings and crouched, preparing to take flight.
"#strong[Hail, <NAME>, God-Pharaoh of Amonkhet!] "
The voice caught the dragon's attention and took Hazoret completely by surprise. Bontu strode forward and knelt in supplication to the God-Pharaoh. Hazoret clutched her head, trying to shake clear her thoughts. The name Bontu had uttered—#emph[<NAME>—] had sent another searing pain through Hazoret's head, and she was now certain: some magic was suppressing her memories.
"#strong[I have served faithfully in your absence, oh God-Pharaoh.] " Bontu's rasping voice cut through the din. "#strong[I have harvested only the most ambitious and powerful to be your worthy dead. I have culled dissenters from all the crops, ridding Naktamun of those who would derail your work. And I have maintained the threads you wove into the fabric of my siblings.] " Bontu bowed her head low. "#strong[I am yours, <NAME>. I live to serve. Speak, and I shall do.] "
As Bontu spoke, Hazoret's hands clutched her spear harder and harder. Finally, she could take no more.
"#strong[Sister!] " she cried. "#strong[What are you talking about?] "
Dragon and god turned to look at her, and for the first time in her existence, Hazoret felt small.
The God-Pharaoh turned his gaze back to Bontu and spoke.
"Kill your sister."
Without hesitation, Bontu raised her hand and sent a dark blast of energy at Hazoret.
Hazoret screamed as the spell hit her full force. She felt her mind unravel, the edges of oblivion corroding her sanity, grasping and tearing at thoughts and memories alike. Inside her mind, she conjured healing fires, staunching the spread of shadows with a cauterizing mental blaze.
#figure(image("005_Favor/06.jpg", width: 100%), caption: [Oblivion | Art by <NAME>], supplement: none, numbering: none)
Hazoret surfaced from her mental struggle just in time to twitch out of the way of another blast of energy. She cut through the next barrage from Bontu with the fiery edge of her spear. However, the third necrotic blast clipped Hazoret's arm as her movement slowed, her mind distracted.
Bontu's first spell hadn't just assaulted Hazoret's mind—it had eaten away the blocks on her memories.
And suddenly, Hazoret remembered everything.
The full weight of Bolas's deception and Bontu's betrayal crashed over her, slowing her reactions and distracting her from the fight at hand. The guilt of having brought death to her children weighed down her limbs, and the impotent rage at the dragon's cruel warping of her purpose dulled her reactions. #emph[All by Bontu's design] , she realized. That first attack wasn't just a mental assault. It was crafted to distract Hazoret and to slow her down, for Hazoret had always been faster than her sister—fast enough to dodge her blows and spells.
Bontu had prepared for this fight.
The depth of Bontu's betrayal sent Hazoret's mind churning between fury and despair.
"#strong[Why, Bontu?] " she cried.
Bontu laughed, a rasping, grating sound. To the mortals who heard, it sounded cruel and confident, but to Hazoret, she heard the desperation tinged with sadness. "#strong[Have you forgotten who I am, sister? I am ambition incarnate. Bolas destroyed all who resisted. I chose to join with his power instead. I chose survival.] "
"#strong[You chose to betray your world.] " Hazoret fired a jet of flame at Bontu, but Bontu #emph[absorbed] the spell within her staff.
"#strong[This world is Bolas.] " Bontu pointed her staff, and the fire erupted back toward Hazoret, tinged black by Bontu's necrotic magic. "#strong[And you are not worthy.] "
Hazoret dashed backward, avoiding the dark flames, and ducked behind the remnants of a destroyed building. As she crouched, her heart hardened with resolve.
In a blink and a spray of sand, she dashed from her cover and #emph[flashed] behind Bontu, twin-pronged spear ready and thrusting toward her sister. Her weapon seemed to pierce flesh, but then Bontu burst into tendrils of smoke. Hazoret stumbled back, coughing as she breathed in the poisonous cloud, looking about for where Bontu hid. The sands beneath her feet erupted as Bontu emerged from below, her jaws clamping down on Hazoret's arm. Hazoret cried out as her sister's crushing bite forced her to drop her spear.
Hazoret let loose a flurry of punches and kicks, but Bontu held fast as magical energy rippled across her scales, shielding her from the assault. Inspiration struck, and Hazoret ignited her arm #emph[within] Bontu's mouth. With a cry, Bontu finally released Hazoret's mangled limb, the two gods stumbling apart.
Hazoret grabbed her spear, one arm dangling uselessly at her side. Bontu breathed heavily, her mouth and face charred from Hazoret's attack. Hazoret watched Bontu raise her staff and braced for another barrage of spells. To her surprise, Bontu's staff glowed but no assault came.
A fresh round of screams behind her erupted, and Hazoret turned to look. Her heart froze as horrors crawled from the crevices and shadows, launching themselves at and tearing into the mortals. Bontu's magic called forth the dark beasts, and they set about the work of brutally murdering anything in their path.
Hazoret again #emph[flashed] into the fray, striking at the horrors and swinging desperately to protect her children. As her spear pierced the first horror, however, it burst into blackened tar, clinging to her weapon. The other horrors leapt at her, their shadowy forms coalescing into a binding morass, restraining her. Hazoret shouted in frustration, trying to conjure heat and flame, but the tar only hardened and tightened its hold.
"#strong[Your zealotry and compassion make you predictable, sister.] " Bontu's voice whispered in Hazoret's ear. She heard Bontu's staff tap against the hardened tar and gasped as the warmth and power drained out of her body. Out of the corner of her eye, she saw Bontu reach a hand #emph[into] the tar, grabbing hold and dragging Hazoret back toward the throne, back toward the dragon deceiver. Hazoret struggled weakly, but Bontu's magic drained her life force at a slow, relentless pace.
With a heave, Bontu dropped Hazoret at the feet of <NAME>, then knelt again.
"#strong[I have done as you asked, my God-Pharaoh. I exist to serve.] "
The great dragon gazed down at the god bent in supplication. Slowly, he raised one claw—and blasted Bontu with a bolt of dark energy. The god collapsed to the ground, writhing in agony.
"Your usefulness has ended," the dragon sneered. "Serve me in death, little god."
<NAME> strode forward, leaving the two dying immortals of Amonkhet behind him.
A primal yell tore out of Bontu as she crawled toward him, spasms of pain still wracking her body. <NAME> turned around and watched, an expression of smug amusement on his face. Slow, halting steps accelerated to a charge as Bontu rushed toward the dragon.
A monument collapsed in the path of Bontu as a wave of undead poured forward, a mix of mummies from the deserts and denizens of Amonkhet risen by the Curse of Wandering. The god stumbled over the rubble, and the undead swarmed and attacked. Bontu swatted at the undead, but in her weakened state, what normally would be a mere nuisance to the god now managed to bring her down.
As <NAME> watched Bontu disappear beneath the crush of undead, his cold, cruel laughter reverberated across the ruined city of Naktamun. With a sweep of his wings, he took to the air, flying toward the gate and the waiting scarab god.
Hazoret watched the dragon retreat, heard the undead gnaw and writhe over their prize, and felt her own hold on life slowly slipping away.
A sudden surge of power pooled before her, and Hazoret looked up just in time to see a wave of shadowy decay ripple forth from the pile of undead. Bontu burst from the heap, surfacing with gasping breath and throwing the inert bodies of monstrosities skyward, her spell slaying all things living and undead near her.
#figure(image("005_Favor/07.jpg", width: 100%), caption: [Bontu's Last Reckoning | Art by <NAME>], supplement: none, numbering: none)
Bontu met Hazoret's gaze, and the jackal god felt the tar around her soften and melt away.
And for the fourth time that day, Hazoret felt a rushing pain wash over her, piercing her gut as Bontu fell, the dragon's necrotic spell severing the final leylines that tied the god to this world.
And Hazoret alone remained, the last pillar of Amonkhet.
|
|
https://github.com/jmigual/typst-efilrst | https://raw.githubusercontent.com/jmigual/typst-efilrst/main/src/lib.typ | typst | MIT License | #let reflist(..children, name: "", list-style: "1)", ref-style: "1") = {
// Gather children in body-label pairs
let childrenArray = children.pos()
let childrenPairs = ()
for (n, val) in childrenArray.enumerate() {
if (type(val) == content) {
childrenPairs.push((val, none))
} else if (type(val) == label and n > 0) {
let (body, lbl) = childrenPairs.last()
childrenPairs.last() = (body, val)
}
}
// Insert a metadata to be labeled
let children = childrenPairs.enumerate().map(
((n, (body, lbl))) => if (type(lbl) == label) {
let num_text = numbering(ref-style, n+1)
let m = metadata((reflist_type: "reflist", reflist_n: num_text, reflist_name: name))
[#body#m#lbl]
}
else [
#body
]
)
enum(numbering: list-style, ..children)
}
#let show-rule(it) = {
if (it.element != none
and it.element.func() == metadata
and type(it.element.value) == dictionary
and it.element.value.at("reflist_type", default: none) == "reflist") {
let itv = it.element.value
let sup = if (it.supplement != auto) { it.supplement } else { itv.reflist_name }
link(it.element.location(), [#sup #itv.reflist_n])
} else {
it
}
}
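
// Usage sketch (hypothetical labels; assumes `reflist` and `show-rule` are in scope):
//
//   #show ref: show-rule
//   #reflist(
//     name: "Rule",
//     [No running in the halls], <r:run>,
//     [No shouting], <r:shout>,
//   )
//   Rule @r:shout is the one we cite most often.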
|
https://github.com/a-dixon/typst-template-ieee | https://raw.githubusercontent.com/a-dixon/typst-template-ieee/main/chapters/conclusion.typ | typst | #import "../template.typ": *
= Conclusion
// your conclusion goes here
#lorem(100)
|
|
https://github.com/jneug/typst-typopts | https://raw.githubusercontent.com/jneug/typst-typopts/main/options.typ | typst | MIT License | /*******************************\
| Option handling and |
| argument parsing. |
\*******************************/
// global state to store options
#let __s_options = state("@options", (:))
// global state to store configuration
#let __s_config = state("@config", (:))
// Option storage
// get the proper option key from name
// and namespace values
#let __ns(name, ns) = {
if ns != none {
"ns:" + ns + "," + name
} else {
let dot = name.position(".")
if dot != none {
ns = name.slice(0,dot)
name = name.slice(dot + 1)
"ns:" + ns + "," + name
} else {
name
}
}
}
// Utilitiy function to get a local option
#let __get-local( name, loc, default ) = {
assert(type(loc) == "location", message: "loc needs to be a valid location")
return __s_options.at(loc).at(name, default:default)
}
// Utilitiy function to get a final option
#let __get-final( name, loc, default ) = {
assert(type(loc) == "location", message: "loc needs to be a valid location")
return __s_options.final(loc).at(name, default:default)
}
// Utility function to get an option
#let __get( name, func, default, final, loc ) = {
let v = none
if final { v = __get-final(name, loc, default) }
else { v = __get-local(name, loc, default) }
if func != none { return func(v) }
else { return v }
}
// Retrieve an option from the store if present,
// a default value otherwise.
#let get( name, func, default:none, final:false, loc:none, ns:none ) = {
if loc == none {
locate(l => {
__get(__ns(name, ns), func, default, final, l)
})
} else {
__get(__ns(name, ns), func, default, final, loc)
}
}
// Update an option in the store to a new value.
#let update( name, value, ns:none ) = {
__s_options.update(o => {
o.insert(__ns(name, ns), value)
o
})
}
// Updates all options in the given dict to a new value.
#let update-all( values, ns:none ) = {
values.pairs().map(v => update(..v, ns:ns))
}
// Remove an option from the store.
#let remove( name, ns:none ) = {
__s_options.update(o => {
o.remove(__ns(name, ns))
o
})
}
// Display an option value with a format function.
#let display( name, format: value => value, default:none, final:false, ns:none ) = get(name, format, default:default, final:final, ns:ns)
// File loading
#let load( filename ) = {
let m = filename.match(regex("\.([^.]+)$"))
if m != none {
let loaders = (
yml: yaml,
yaml: yaml,
toml: toml,
json: json
)
let ext = m.captures.at(0)
if ext in loaders {
let data = loaders.at(ext)(filename)
for (k,v) in data {
if type(v) == "dict" {
update-all(v, ns:k)
} else {
update(k, v)
}
}
}
}
}
// Argument parsing
#let add-option(
name,
type: ("string", "content"),
required: false,
default: none,
choices: none,
store: true,
pipe: none,
code: none
) = {
__s_config.update(c => {
c.insert(name, (
_option: "positional",
type: type,
name: name,
required: required,
default: default,
choices: choices,
store: store,
pipe: pipe,
code: code
))
c
})
}
#let add-argument(
name,
type: ("string", "content"),
required: false,
default: none,
choices: none,
store: true,
pipe: none,
code: none
) = {
__s_config.update(c => {
c.insert(name, (
_option: "named",
type: type,
name: name,
required: required,
default: default,
choices: choices,
store: store,
pipe: pipe,
code: code
))
c
})
}
#let getconfig( name, final:false ) = {
locate(loc => {
//let conf = __s_config.final(loc)
let conf = __s_config.at(loc)
if name in conf {
conf.at(name)
} else {
none
}
})
}
#let parseconfig( _unknown:none, _opts:none, ..args ) = {
// Run additional module configurations
if _opts != none {
assert(type(_opts) == "array")
for _addopts in _opts {
_addopts()
}
}
locate(loc => {
//let conf = __s_config.final(loc)
let conf = __s_config.at(loc)
let provided-pos = args.pos()
let provided-named = args.named()
let pos = 0
for opt in conf.pairs() {
let name = opt.at(0)
let def = opt.at(1)
let value = none
if def._option == "positional" {
if pos < provided-pos.len() {
value = provided-pos.at(pos)
pos += 1
} else if "required" in def and def.required {
				assert(pos < provided-pos.len(), message:"Positional argument '" + name + "' not provided but is required.")
} else {
value = def.default
}
} else {
if name in provided-named {
value = provided-named.at(name)
} else if "required" in def and def.required {
				assert(name in provided-named, message:"Named argument '" + name + "' not provided but is required.")
} else if "default" in def {
value = def.default
}
}
// assert(
// value == none or type(value) == def.type,
// message: "Wrong type for option '" + name + "': got '" + type(value) + "', but expected '" + def.type + "'"
// )
let types = ("none", def.type).flatten()
assert(
types.any(v => type(value) == v),
message: "Wrong type for option '" + name + "': got '" + type(value) + "', but expected one of '" + types.join(", ") + "'"
)
if "choices" in def and def.choices != none {
let choices = (def.choices).flatten()
assert(value in choices, message:"Value for option '" + name + "' not allowed: got '" + value + "' but expected one of '" + choices.join(", ") + "'")
}
if name in provided-named {
if "code" in def and def.code != none {
let func = def.code
if type(func) == "function" {
value = func(value)
}
}
}
conf.at(name).insert("value", value)
if def.store {
update(name, value)
}
// TODO: Handle with pipe option
if name in provided-named {
if type(def.pipe) == "array" {
for oopt in def.pipe {
if oopt.len() >= 2 {
provided-named.insert(oopt.at(0), oopt.at(1))
}
}
}
}
}
__s_config.update(conf)
if _unknown != none {
for opt in provided-named.pairs() {
if opt.at(0) not in conf {
_unknown(opt.at(0), opt.at(1))
}
}
}
})
}
#let ignore = none
#let store(k, v) = update(k, v)
#let fail(k, v) = {
assert(false, message:"Unexpected option '" + k + "'")
}
// #let arg( var, key, default ) = {
// if key in var.named() { var.named().at(key) }
// else if key in var.pos() { var.pos().at(key) }
// else { default }
// }
#let extract( var, _prefix:"", _positional:false, ..keys ) = {
let vars = (:)
for key in keys.named().pairs() {
let k = _prefix + key.at(0)
if k in var.named() {
vars.insert(key.at(0), var.named().at(k))
} else {
vars.insert(key.at(0), key.at(1))
}
}
if _positional { return vars.values() }
else { return vars }
}
|
https://github.com/ludovic-esperce/afpa-typst-template | https://raw.githubusercontent.com/ludovic-esperce/afpa-typst-template/main/template/main.typ | typst | #import "@local/afpa-document:0.1.0": afpa
// Variable declarations
#let title = "Internal Afpa document template"
#let subtitle = "Meeting minutes / Teaching resources / Personal notes"
#let date = datetime.today()
#let author = "<NAME>"
#show: doc => afpa(
title,
subtitle,
date,
author,
doc
)
= Introduction
A #link("https://typst.app/")[typst] template that can be used to generate internal #link("https://www.afpa.fr/")[Afpa] documents.
#figure(
image("assets/afpa-logo.png", width: 50%),
  caption: [The Afpa logo],
)
To learn the basics of writing a document, you can refer to these tutorials:
- #link("https://typst.app/docs/reference/syntax/")[the official tutorial]
- #link("https://sitandr.github.io/typst-examples-book/book/basics/tutorial/markup.html")[typst by example]
= Using the template
== Installing typst
=== Windows
Via Winget:
```bash
winget install --id Typst.Typst
```
=== Linux
You can check the following site to see whether typst is available for your distribution: #link("https://repology.org/project/typst/versions")[repology.org].
=== MacOS
Via Brew:
```bash
brew install typst
```
== Adding the package to the system
This template is available as a #link("https://github.com/typst/packages")[typst package].
To make the package usable on your computer, one solution is to clone this repository into the following folder (on Windows):
```bash
%AppData%\Local\typst\packages\local\afpa-template\0.1.0
```
Here `%AppData%` is located inside the user folder.
== Creating a new document
Once the package is in the folder mentioned above, you can use the following command to create a new document:
```bash
typst init @local/afpa-document:0.1.0 <nom-document>
```
This command will create a new folder named after the document.
#pagebreak()
= Feature examples
== Line breaks
You can \
break a line \
using the *\\* symbol.
== Page breaks
You can insert a page break using `#pagebreak()`.
== Lists
Here is a bulleted list:
- Java
- Spring Boot
- Quarkus
- C\#
- C++
- Rust
- Typescript
- Haskell
Here is a numbered list:
+ Rust
+ C++
+ C\#
+ Java
+ Typescript
== Inserting code
Example of a code block using *\`\`\`* for Java:
```java
public class App {
    public static void main(String[] args) {
        System.out.println("typst is really nice.");
    }
}
```
Example of a code block using *\`\`\`* for C\#:
```cs
class TestClass
{
static void Main(string[] args)
{
// Display the number of command line arguments.
Console.WriteLine(args.Length);
}
}
```
You can also add inline code using *\`*.
For example, in Java you can declare an integer variable initialized to 0 using the code `int uneVariable = 0`.
#pagebreak()
= Lorem
== Lorem
=== Lorem
==== Lorem
#lorem(20)
#lorem(400)
|
|
https://github.com/pluttan/typst-g7.32-2017 | https://raw.githubusercontent.com/pluttan/typst-g7.32-2017/main/gost7.32-2017/utils/table.typ | typst | MIT License | #import "../g7.32-2017.config.typ":config |
https://github.com/teshu0/uec24-exp-a-typst | https://raw.githubusercontent.com/teshu0/uec24-exp-a-typst/main/logos.typ | typst | The Unlicense | // https://github.com/typst/typst/issues/1987#issuecomment-1690672386
#let Typst = {
text(font: "Linux Libertine", weight: "semibold", fill: eastern)[typst]
}
#let LaTeX = {
set text(font: "New Computer Modern")
box(width: 2.55em, {
[L]
place(top, dx: 0.3em, text(size: 0.7em)[A])
place(top, dx: 0.7em)[T]
place(top, dx: 1.26em, dy: 0.22em)[E]
place(top, dx: 1.8em)[X]
})
}
|
https://github.com/PhilipNelson5/Resume-Typst | https://raw.githubusercontent.com/PhilipNelson5/Resume-Typst/main/example.typ | typst | Apache License 2.0 | #import "cv.typ": *
// Load CV data from YAML
#let cvdata = yaml("example.yml")
#let uservars = (
headingfont: "Linux Libertine", // Set font for headings
bodyfont: "Linux Libertine", // Set font for body
fontsize: 10pt, // 10pt, 11pt, 12pt
linespacing: 6pt,
showAddress: true, // true/false Show address in contact info
showNumber: true, // true/false Show phone number in contact info
)
// setrules and showrules can be overridden by re-declaring them here
// #let setrules(doc) = {
// // Add custom document style rules here
//
// doc
// }
#let customrules(doc) = {
// Add custom document style rules here
doc
}
#let cvinit(doc) = {
doc = setrules(uservars, doc)
doc = showrules(uservars, doc)
doc = customrules(doc)
doc
}
// Each section function can be overridden by re-declaring it here
// #let cveducation = []
// Content
#show: doc => cvinit(doc)
#cvheading(cvdata, uservars)
#cveducation(cvdata)
#cvwork(cvdata)
#cvservice(cvdata)
#cvprojects(cvdata)
#cvawards(cvdata)
#cvcertificates(cvdata)
#cvpublications(cvdata)
#cvskills(cvdata)
#cvreferences(cvdata)
#endnote
|
https://github.com/mrknorman/evolving_attention_thesis | https://raw.githubusercontent.com/mrknorman/evolving_attention_thesis/main/06_skywarp/06_skywarp.typ | typst | #set page(numbering: "1", number-align: center)
#set math.equation(numbering: it => {[6.#it]})
#counter(math.equation).update(0)
#import "../notation.typ": vectorn, uvectorn, dvectorn, udvectorn, matrixn
= Skywarp: An Attention-Based Model for the Detection of Gravitational-Wave Compact Binary Coalescences <skywarp-sec>
Convolutional Neural Networks (CNNs), though effective, have not been the stars of the machine-learning world for a few years now @architecture_review. Just as AlexNet @image_classification paved the way for the era of CNNs and the surge to prominence of artificial neural networks as problem-solving solutions in the image domain, a similar step change occurred when the confluence of several different technologies led to the development of Large Language Models (LLMs) @LLM_ref, the most notable of which in recent years has been the Generative Pretrained Transformer (GPT) @LLM_ref @attention_is_all_you_need of ChatGPT fame @chatgpt_ref, though it should be noted that this was far from the first LLM @LLM_ref.
Although CNNs were extremely successful at solving previously intractable problems in the image and audio domains @cnn_review @deep_learning_review, many challenges remained within Natural Language Processing (NLP), the area of study relating to the analysis of the text domain @LLM_ref. Text sequences differ from audio time series in that, rather than vectors of continuous numerical values, they consist of sequences of discrete tokens that encode externally defined values. There had been some work to attempt these problems with CNNs @text_classification and Recurrent Neural Networks (RNNs) @lstm_text_generation, but it was only through the combination of several insights, including positional encoding @attention_is_all_you_need, attention mechanisms @attention_1, and skip connections @skip_connections, that the avalanche of progress we have since seen in this area began. The groundbreaking paper by Vaswani _et al._ @attention_is_all_you_need first introduced the transformer architecture to the world.
Following the success of transformers in NLP, there has been much research into the application of attention-based models to other domains, including image and audio processing. Notably, AlphaFold @alpha_fold, a project by Google DeepMind, successfully solved the protein-folding problem. Transformers have proven to be effective, scalable solutions due to their highly parallelizable nature @LLM_ref @transformer_review, and new attention-based architectures such as generative diffusion models have seen great success in text-to-image generation problems @diffusion_review, as seen in products such as Stable Diffusion @stable_diffusion, Midjourney @midjourney, and DALL-E @dalle_3. Gravitational-wave astronomy has also seen the application of a large number of machine-learning approaches @gw_machine_learning_review; however, there exists a considerable delay between the advent of new techniques and their application to gravitational-wave data. Since this research was originally carried out, the gap has narrowed; nonetheless, this remains a relatively unexplored area that may prove fruitful in the future. In this chapter, we introduce Skywarp, an attention-based model for the detection of gravitational waves produced by Compact Binary Coalescences (CBCs).
The distinguishing feature of the transformer architecture is the use of attention @attention_is_all_you_need. Attention is a technique that can determine the information value of data elements in a series, crucially including the relative information derived contextually from the value of all other data elements within that series @attention_review. As opposed to convolutional layers, which learn feature-extracting filters that are convolved with the input data @conv_review, attention layers learn a weighting for each data element, identifying the information interactions between elements. One key advantage of this method is that attention is computed globally, whereas convolutional layers only use a local context dictated by their receptive field. This makes attention an ideal candidate for analyzing time-series data, wherein important contextual information can be located at a significant temporal distance from any given datum. RNNs share many of these properties @reccurant_neural_networks @rnn_review; however, they make use of an internal state, which means they are much harder to parallelize and therefore scale, and their knowledge of data outside of their receptive field is sometimes uni-directional, as opposed to transformers, which are bi-directional (although it should be noted that bi-directional LSTMs do exist @bidirectional_lstm). In addition, it should be noted that self-attention layers are more general than convolutional layers, as it is proven that a self-attention layer can generalize any convolution @generality_of_attention. This is not to say that more generality is necessarily good. As we have demonstrated in @perceptron-results, we are unable to train dense networks, the most general of all, to identify gravitational-wave signals at a useful false alarm rate.
This section is organized into the following structure. First, we will give a brief technical overview of the concepts and structure of the transformer architecture in @attention-method, including a discussion of tokenization, embedding, and attention, and how these elements are assembled into transformers proper. Then we will review the small amount of relevant literature that has not already been discussed in @skywarp_review. Next, we will provide details on the models and training procedures we used to train Skywarp in @skywarp-method. We will show the validation results from our trained models in @skywarp-results, before finally in @skywarp-discussion, we discuss the importance of these results and investigate how the method employed by the transformer models differs from that used by CNNs by comparing their respective attention and convolution maps.
== Attend Closely <attention-method>
The transformer is a deep learning model first described in Vaswani _et al._ @attention_is_all_you_need. This was an NLP paper, demonstrating a deep learning sequence-to-sequence model that could ingest text data and predict the token most likely to appear next in the sequence. By recursing inferences of the model, new sentences could be generated based on previous inputs. In some sense, our time-series data is already closer to the required input of a deep learning model; however, it is easiest to explain attention using NLP as an example. Therefore, whilst describing attention, we will use the text domain as example data before replacing the relevant vectors with gravitational-wave equivalents.
=== Tokenisation and Embedding
There are several steps required to condition text data for consumption by deep learning models. Artificial neural networks work solely with numerical data; therefore, text must somehow be converted into numbers before it is ingested by the model. The most obvious way to do this would be to use a preexisting character format such as ASCII @ASCII or Unicode @Unicode. However, if we were to do this, the numbers would relate very little even to the characters they represented, let alone the words. This would make these inputs very difficult for the model to analyze. Therefore, in order to make the task easier, we can use a method to embed the words into a numerically defined N-dimensional space in a manner that maintains some of their meaning in their new vectorised representation. Typically, this is achieved in two or three steps @nlp_chapter: tokenization @tokenization_ref, vectorisation, and embedding @word2vec_processing; see @tokenisation_diagram.
#figure(
image("tokenisation_diagram.png", width: 100%),
caption: [The process of conditioning text data for input into a deep learning model. Text data is not intrinsically digestible by artificial neural network models, as artificial neurons can only process numerical inputs. Therefore, in order to apply deep learning models to text data, we must have some method of converting the data into a numerical format @nlp_chapter. Transformers expect a sequence of same-length vectors forming an input matrix, $matrixn(X)$. This diagram shows the process of converting text data into an input matrix. Typically, this conversion is completed in three steps, tokenization, vectorisation, and embedding. However, often, and in the case of the first described transformer model, vectorisation and embedding occur simultaneously and are often labelled simply embedding @attention_is_all_you_need. This is the method depicted in the diagram. In the example, we see the sentence "The quick brown fox jumped over the lazy dog." as it is prepared for ingestion by an NLP model. *Tokenisation* is the process of splitting one contiguous sequence of characters into a number of unique discrete tokens, $N$. This can be done at multiple levels but is usually done at the scale of words. Sometimes, especially with longer words, words can be split into multiple tokens, as is seen in this example where the word "jumped" is split into "jump" and "ed" @nlp_chapter. There are numerous algorithms to achieve this, which will not be discussed in detail. Every word, or word subset, within the training dataset, should have a unique token ID. Before running inference on new text data, that data must be tokenized, and each word in the new data will be mapped onto an existing token ID that was generated during the initial tokenisation process. Often some information-low words, known as "stop words", and punctuation are removed during the tokenisation process @nlp_chapter. In the example shown, the words "The", and full stops are removed from the input string. During *vectorisation*, each token is assigned a numerical vector, and *embedding* ensures that this vector is transformed into a meaningful vector space to allow for easier interpretation by the model. There are a number of methods to achieve both of these steps, some of which are simultaneous. In the example shown, each token ID is associated with a vector of tunable weights, as was the case in the first transformer paper. These vectors are randomised at the start of training, but as the process continues, they become tuned to values that represent the information contained by the tokens. In this manner, the vectorisation and embedding steps occur at the same time.]
) <tokenisation_diagram>
First, the input sequence must be tokenized @nlp_chapter. Tokenization involves splitting the input text into $N$ unique discrete tokens. Often the text is processed first, sometimes removing low-information elements known as stop-words, along with punctuation. Then, using one of a variety of algorithms that will not be discussed here @tokenisation_review, the text is consolidated into $N$ tokens; often these are whole words, but sometimes, depending on the tokenization algorithm and the size of the training data, word fragments can also be tokens. One will note that words can often have multiple meanings, which is a problem when trying to describe each token in a way that captures the value it represents in a sentence. Methods have been developed that can split identical sequences of characters into multiple tokens contextually. The very existence of this problem is an example of the contextual information that is provided by surrounding tokens in the sentence; this is the information that attention layers attempt to extract and distil.
After tokenization, we must convert each token into a unique vector. This can also be done through a variety of methods. In Vaswani _et al._ @attention_is_all_you_need, each token has an associated vector in a look-up table. Initially, these vectors are randomly generated; however, the values in each of these vectors act as tunable parameters inside the model, so that whenever a certain token is present in a particular training example, the weights of its vector are tuned through the usual gradient-descent methods. In this way, as the training progresses, the vectors become, at least to the model, meaningful numerical representations of the value contained within their associated tokens.
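To make this concrete, the following is a minimal NumPy sketch of such a learned look-up table; the vocabulary size, embedding dimension, and token IDs are illustrative assumptions only, and in a real model the table entries would be trained parameters rather than fixed random draws.

```python
import numpy as np

rng = np.random.default_rng(seed=0)

vocab_size = 10_000  # number of unique tokens (assumed)
d_model = 64         # embedding dimension (assumed)

# Trainable look-up table: one vector of weights per token ID.
# These start random and are tuned by gradient descent in training.
embedding_table = rng.normal(size=(vocab_size, d_model))

# A tokenized sentence is a sequence of integer token IDs...
token_ids = np.array([41, 1337, 7, 7, 902])

# ...and embedding is an indexed look-up, producing an input
# matrix with shape [sequence_length, d_model].
X = embedding_table[token_ids]
print(X.shape)  # (5, 64)
```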
Gravitational-wave data is intrinsically vectorized, so the embedding layer should not be much of a problem; however, it is not intrinsically discretised. Since transformers are sequence-to-sequence models, they ingest a series of $N$ vectors forming an input matrix @attention_is_all_you_need @transformer_review, whereas gravitational-wave time-series data is a single vector, at least when dealing with one detector. It is unclear how best to split the gravitational-wave data into smaller vectors. We could simply cut along equally separated lines, "chunking" our data into smaller timesteps, or we could embed the data using some learned weights, for example with one or more dense or convolutional layers, in the latter case feeding the transformer with feature slices at different timesteps; see @gw_embedding, and the sketch that follows it. Using different detectors as this extra dimension would only give us two to four features per timestep, which would be very small vectors for the transformer to work with.
#figure(
grid(
columns: 1,
rows: 2,
gutter: 1em,
[ #image("skywarp_chunking.png", width: 100%) ],
[ #image("skywarp_dense_embedding.png", width: 100%) ],
[ #image("skywarp_convolutional_embedding.png", width: 100%) ]
),
caption: [Different embedding possibilities to discretise and embed gravitational-wave time-series data. _Upper:_ "Chunking" method of discretisation, where the input time-series is split into $N$ equal-length segments which can be fed into an attention-based model. This method would seem to have the disadvantage that it could split the waveform at any point, leading to chunks with very different waveform content depending on the waveform offset; it also assumes that the innate interferometer output vector is a good embedding for the attention mechanism, which is not necessarily true. _Middle:_ Embedding with dense layers, this setup is similar to the chunking method, but it applies one or more dense layers to each chunk so that the model can learn an embedding that will be better adapted to the attention mechanism in subsequent layers. Since the parameters of the dense layers are repeated for each chunk, this method is equivalent to a convolutional layer with $N$ filters and no overlap, where $N$ is the size of your embedded vector output. _Lower:_ Embedding with convolutional layers. This type of embedding involves creating feature maps of the input vector using a combination of convolutional and/or pooling layers. It is the equivalent of attaching a CNN head at the front of your model. The output of a 1D CNN would be a 2D matrix where one dimension, the depth, is different features, and the other is time. This can then be split into discrete vectors by splitting it along the time dimension to create vectors of features with length equivalent to the number of features.]
) <gw_embedding>
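As a rough illustration of the simplest of these options, the chunking approach from the upper panel of @gw_embedding might be sketched as follows; the sample rate and chunk count are arbitrary assumptions, and a learned dense or convolutional embedding would replace the plain reshape.

```python
import numpy as np

def chunk_embed(strain, num_chunks):
    """Split a single strain vector into equal-length segments.

    Each segment becomes one "token" vector for the attention
    layers, giving an input matrix of shape
    [num_chunks, chunk_length].
    """
    chunk_length = strain.shape[0] // num_chunks
    usable = chunk_length * num_chunks  # drop any remainder samples
    return strain[:usable].reshape(num_chunks, chunk_length)

# E.g. 1 s of single-detector strain at 2048 Hz, split into
# 32 chunks of 64 samples each (all values assumed).
strain = np.random.default_rng(0).normal(size=2048)
X = chunk_embed(strain, num_chunks=32)
print(X.shape)  # (32, 64)
```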
We have managed to transform our input text from a list of symbols into discrete tokens and finally into vectors that contain some aspect of the value represented by each token, and we have some ideas about how we might do the same with gravitational-wave data. However, unlike convolutional layers, attention layers treat each input element equally and intrinsically have no information about the location of the word in the sentence. We must use feature engineering to add to each vector some information about the position of the token in the input sequence.
=== Positional Encoding
Much information is embedded in the relative and absolute positions of tokens within text data @transformer_review. The same can be said of gravitational-wave data --- we would always expect the merger to come after the inspiral, for example. Whilst there is some possibility within the dense layers of a traditional CNN for the model to use this ordinal information in its classification, it might be a challenging process. We can use attention layers to look at the global information in a sequence, but since, unlike CNNs @conv_review, there is no structure inherent to the architecture that maintains information about the position of the inputs, if we feed in the word sequence as-is, we end up with a "bag of words" @bag_of_words_ref; see @bag_of_words. Whilst some models can do quite well with just a bag-of-words approach, able to infer context simply from the number of occurrences of each word present @bag_of_words_ref, it is clear that some information is lost when discarding order.
#figure(
image("bag_of_words.png", width: 60%),
caption: [A "Bag of words". Without ordinality, the meaning represented by this sentence becomes significantly harder, if not impossible, to parse. If we had not already seen this sentence then we would not know if the fox was lazy or quick, or rather if it were the dog that was lazy or quick, and just who is jumping over whom? There are NLP models that are designed to use a bag of words as inputs, but it is easy to see that much information is lost when word order is discarded, thus we can infer that the order and position of the words contain a significant amount of information. The same can be true for time series, a CBC signal that contains a merger, an inspiral, and a ringdown, in that order, can probably be discounted as a glitch, but if we feed it in as a bag of words model, there could be no distinction between this and the expected arrangement.]
) <bag_of_words>
We solve this problem by adding extra information to our input embeddings with positional encoding @attention_is_all_you_need @positional_encoding_ref. To do this, we create a matrix that is the same size as our attention input matrix: [num_time_steps, num_feature_channels]. Each time step's encoding vector must have certain properties: it must be unique, so that no two feature embeddings are given the same encoding, and it must convey information about the absolute and relative position of a given feature vector in the input sequence. We create this matrix using
$
op("PE") (t,i) = cases(
sin( t /(log(10000)^(i/d_"model") )) "if" i "is" "even",
cos( t /(log(10000)^(i/d_"model") )) "if" i "is" "odd"
)
$ <positional_encoding_eq>
where $op("PE") (t, i)$ is the positional encoding matrix, $t$ is the time index, $i$ is the feature index, and $d_"model"$ is the dimension of our model, the relevance of which will become clear later. The periodicity of the sine and cosine functions enables a unique identifier for each vector whilst maintaining a consistent pattern that evolves across the time dimension @attention_is_all_you_need. This uniqueness ensures that absolute position is encoded; all feature vectors get a unique encoding, which will be the same independent of the vector's contents, so the model can learn which encodings map to which positions. The logarithmic term, $log(10000)$, ensures that the variation in frequency between steps is large enough to be detectable by the model, whereas the scaling by $d_"model"$ ensures that the positional encoding values do not become so large that they overshadow the feature vectors, or so small that they are undetectable. The relative position between any two vectors in the sequence can be estimated due to the linear superposition property of the sine and cosine functions; the sum of two positional encodings will approximate the encoding of $t_1 + t_2$, and their difference will approximate that of $t_1 - t_2$. Therefore, when the model adds or subtracts positional encodings (as it might do implicitly during training), the resulting encoding still carries meaningful positional information. This matrix is added to our sequence of input vectors by simple element-wise addition, therefore inherently encoding positional information into each feature vector.
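A minimal NumPy sketch of @positional_encoding_eq follows; note that it implements the equation exactly as written above, with a $log(10000)$ base, whereas the original formulation of Vaswani _et al._ @attention_is_all_you_need uses $10000$ directly.

```python
import numpy as np

def positional_encoding(num_steps, d_model):
    """Positional encoding matrix, following the equation as written.

    Even feature indices use sine, odd indices use cosine; every
    time step therefore receives a unique encoding vector.
    """
    t = np.arange(num_steps)[:, None]   # time index, shape [T, 1]
    i = np.arange(d_model)[None, :]     # feature index, shape [1, D]
    angle = t / np.log(10000.0) ** (i / d_model)
    pe = np.zeros((num_steps, d_model))
    pe[:, 0::2] = np.sin(angle[:, 0::2])  # even feature indices
    pe[:, 1::2] = np.cos(angle[:, 1::2])  # odd feature indices
    return pe

# The encoding is combined with the input by element-wise addition:
# X = X + positional_encoding(*X.shape)
```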
Fortunately, this embedding process is just as appropriate to use on gravitational wave data as it is on text data. In early testing, we found that including positional encoding improved model performance significantly.
By adding positional encoding to our input vectors, we have ensured that even if we (or a model) look at a vector in isolation, we will still be able to know where in the sequence it originated @attention_is_all_you_need @positional_encoding_ref. So we have stored extra information within the vector; however, if we look at this new vector in isolation, there is still much contextual information provided by the rest of the sequence that we cannot access alone. If we look at the word "dog" in isolation, for example, even if we knew it was the ninth word in the sequence, we would have no idea that it was lazy, or that a fox was jumping over it. To embed this kind of information, we must turn to attention layers.
=== Attention! <sec-attention>
The global information provided by an individual element within a sequence is often greater than the local information contained within the isolated element @attention_1 @attention_2 @attention_is_all_you_need @attention_review. This extra information is stored contextually within the relationship between the given element and the other elements in the sequence, both within the information stored locally by the other elements and by the relative and absolute positions of the other elements.
The set of possible combinations of elements is large, even within relatively small sequences. Therefore, in order to enable a machine learning model to extract contextual information efficiently, a method must be implemented to determine which elements contribute the most contextual information to each element. This method is attention @attention_1 @attention_2 @attention_is_all_you_need @attention_review, a type of differentiable memory in which a global context vector is learned over an input sequence in the form
$ matrixn(X) = mat(vectorn(x)_bold(1), ..., vectorn(x)_bold(i), ..., vectorn(x)_bold(n);) . $
The attention mechanism aims to embed global context locally; in order to do this, a comparison must be made between each element of the sequence and (in the case of self-attention) each other element of the same sequence. It is trivial to see that not every element in every sequence will be equally relevant and that this contextual dependence will depend on the information being extracted. In this way, one learns intra-sequence relations; long-term dependencies are captured because the entire input sequence is used to compute a single element of the output sequence. Ideally, this process makes the output elements, now with contextual information embedded locally, easier for other machine-learning methods to interpret.
A transformer model is a machine learning algorithm that implements this method to localize global information using attention @attention_is_all_you_need @transformer_review. The output of a transformer block has the same dimensionality as the block’s input, as it retains the same number of elements. Ideally, each element has been transformed to contain a proportion of the relevant global information stored within the input sequence.
The question becomes, how can we calculate the attention? We can use an analogous problem to demonstrate the principle. In search and retrieval tasks, such as a search engine query, the user, in this case, a human, must generate a *query* phrase that can be used to find relevant information. This query phrase will not contain the entire information content of whatever document we are attempting to discover, if it did then we would not need to perform the search. Instead, it is generated using words and phrases that are associated with the information we are searching for. The search engine then has the unenviable task of searching through its entire library to find documents that might have information relevant to the query.
The first instinct might be to look through every document and check to see if there are words and phrases in that document that match the content of the query. Immediately, we can tell that this will quickly become infeasible if the library is large, and/or contains large documents --- the process of searching would rapidly become very expensive. Instead, the search engine could have preprocessed these files, and in a similar manner to how the query was generated, it could pick out the key information content of each document in a distilled form that contains the information that it is most likely to match with a query. It generates a *key* or keys for that document, which can be checked against queries much more efficiently than searching the entire content.
Finally, the *value* of the information that the end user extracts from whatever document is returned, will not necessarily equate to the entire information content of the document. Depending on what information the user was originally searching for, and hence what query they entered into the search bar, they might only read a particular chapter of a book, or, even more specifically than that, they might only retain certain parts of information from that chapter that are relevant to their needs. During a search session, a user might enter a single query that matches well with multiple keys that return documents which the user then reads and summarises parts of the information in each document to gain new knowledge on whatever the original subject of their query was.
This analogy introduces the three key information concepts of the query, key, and value. We can use these concepts to build a deep learning layer that can, for every element of our input sequence, search through each element in the sequence and extract relevant contextual information that can then be embedded into that element, in a similar manner to how we can embed information about an element's position using positional encoding. In attention layers, query, $vectorn(q)_bold(i)$, key, $vectorn(k)_bold(i)$, and value, $vectorn(v)_bold(i)$, vectors are generated for each sequence element $vectorn(x)_bold(i)$, forming three matrices for the sequence as a whole: $matrixn(Q)$, $matrixn(K)$, and $matrixn(V)$. We create these matrices by multiplying the input matrix, $matrixn(X)$, by three projection matrices: the query, $matrixn(W)_bold(q)$, key, $matrixn(W)_bold(k)$, and value, $matrixn(W)_bold(v)$, matrices. $matrixn(Q)$, $matrixn(K)$, and $matrixn(V)$ are generated with
$ matrixn(Q) = matrixn(X) matrixn(W)_bold(q), $ <weight-1-eq>
$ matrixn(K) = matrixn(X) matrixn(W)_bold(k), $ <weight-2-eq>
and
$ matrixn(V) = matrixn(X) matrixn(W)_bold(v). $ <weight-3-eq>
The elements inside these weights matrices are the only tunable parameters that are learned during the model training process @attention_1 @attention_2. During model training, the weights will adapt so that they can generate effective query, key, and value vectors that allow for proficient model function. Since this is a neural network and these are learned weights, multiplication by these weights matrices is equivalent to the application of a dense layer with no bias values.
The nature of attention layers makes it more difficult to draw artificial neuron connection diagrams as we have previously with perceptrons and CNNs, since the information flow is more complex. However, we can attempt to visualize the interaction between the various vectors as interacting functional elements, like machines in a factory, organelles in a cell, or gears in a clock; see @weights_matricies.
#figure(
image("weights_matricies.png", width: 70%),
  caption: [Generation of query, key, and value vectors for each element in the input sequence of length $N$. Before attention scores are calculated, each input vector, $vectorn(x)_bold(i)$, is multiplied by the learned query, $matrixn(W)_bold(q)$, key, $matrixn(W)_bold(k)$, and value, $matrixn(W)_bold(v)$, weights projection matrices to produce a query, $vectorn(q)_bold(i)$, key, $vectorn(k)_bold(i)$, and value, $vectorn(v)_bold(i)$, vector for the input element, $vectorn(x)_bold(i)$. This operation is equivalent to the multiplication of the input matrix, $matrixn(X)$, with the projection matrices to produce the query, $matrixn(Q)$, key, $matrixn(K)$, and value, $matrixn(V)$, matrices. The key takeaway is that the only tunable parameters are contained in the weights matrices, which act as projection functions to convert the input vector into functional vectors.]
) <weights_matricies>
#figure(
image("q_k_v.png", width: 40%),
caption: [Illustration of example query, key, and value vectors generated for the sentence "The quick brown fox jumped over the lazy dog.". After tokenisation and embedding, each vector in the embedded input sequence generates its own query, key, and value vector. Which together form query, key, and value matrices.]
) <qkv_diagram>
The query, key, and value matrices are used to calculate attention; see @qkv_diagram for an illustrative example of the projection matrices applied to the example sentence. The attention method aims to collect relevant information about a given sequence element within that element, extracting the information content from the position and meaning of the surrounding elements. Understandably, language does not have words for every possible concept, instead, it relies on combinations of words to provide many more concepts than single words could alone @linguistic_complexity. For example, language could have developed a single word for "lazy-dog" and "quick-fox"; but you would soon end up with an extraordinarily large vocabulary (assuming that new words were invented rather than just cheating and compounding words with a hyphen). If we wanted to include more complex concepts like "quick-brown-fox-that-jumped-over-the-lazy-dog" and "lazy-dog-that-has-had-quick-brown-fox-jump-over-it", the number of potential concepts becomes vast. Within the vector space, however, we are not limited by discretized tokens, and such concepts can all exist in a highly multi-dimensional space, since, in effect, we can add vectors together to sum their meanings. Attention layers essentially attempt to assemble these complex words @transformer_review.
In order to assemble these new vectors with embedded contextual meaning, we must work out the magnitude to which each other element affects the meaning of that element. This score is the "attention" for which the process is named @attention_1 @attention_2 @attention_review. In the example sentence, "The quick brown fox jumped over the lazy dog," we can see that almost all of the concepts are somehow interacting with each other in a significant manner. If we were to extend the string however say to, "The quick brown fox jumped over the lazy dog. Incy wincy spider climbed up the water spout.", we can see that tokens in the second sentence have very little effect on the concepts in the first sentence, so we might expect the attention scores between tokens in different sentences to be much lower than in the same sentence. Now in very advanced LLMs, there could be some cross-sentence attention as the model tries to determine why those two sentences in particular are next to each other, a question which could certainly hold some information, but this would be at a much higher level of abstraction than the simpler cases we have been discussing.
The query value for each sequence element is matched against the key value of each other element @attention_1 @attention_2 @attention_review; see @alignment-diagram. The alignment of the key and query determines a weighting for the value vector, a distilled representation of the relevant information contained within that element; see @scaling-diagram. The weighted value vectors are then summed to produce the new, contextually embedded element. The two most common attention methods are dot-product @attention_2 and additive attention @attention_1; our models utilise the former, and so we restrict our discussion to the work of Luong _et al._ @attention_2 and extensions thereof. In either case, the function $alpha$ maps a set of query $vectorn(q)$, key $vectorn(k)$, and value $vectorn(v)$ vectors to a weighted sum of the values. This is given by
$ alpha(vectorn(q)_bold(i), matrixn(K), matrixn(V)) = sum_(j=1)^N a(vectorn(q)_bold(i), vectorn(k)_bold(j)) vectorn(v)_bold(j) $ <attention-eq>
where $a(., .)$ is called the alignment function and measures the similarity between the queries and keys. In the case of dot-product attention
$ a(vectorn(q), vectorn(k)) = sigma((vectorn(q) dot vectorn(k)) / sqrt(d_k)) $ <alignment-eq>
where $sigma$ is the Softmax function (see @softmax-sec), applied across the alignment scores of all keys for a given query so that the weights sum to one, and $d_k$ is the number of elements in the key vector. Dividing by $sqrt(d_k)$ scales the dot products so that they do not grow too large when $d_k$ is large, which would otherwise push the Softmax into regions of vanishing gradient @attention_is_all_you_need. This scaling was not a part of the original dot-product attention approach @attention_2 and was added by Vaswani _et al._ @attention_is_all_you_need; it has since become a common feature in attention layers.
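For a single query vector, @attention-eq and @alignment-eq amount to only a few lines; the following is a minimal NumPy sketch, assuming the key and value vectors have already been stacked into matrices with one row per sequence element.

```python
import numpy as np

def softmax(x):
    e = np.exp(x - x.max())  # subtract max for numerical stability
    return e / e.sum()

def attend_one_query(q, K, V):
    """Weighted sum of value vectors for a single query.

    The Softmax is applied across the alignment scores for all
    keys at once, so the resulting weights sum to one.
    """
    d_k = K.shape[1]
    scores = softmax(K @ q / np.sqrt(d_k))  # one score per key
    return scores @ V                       # weighted sum of values
```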
#figure(
image("alignment_function_diagram.png", width: 100%),
caption: [Illustration of the operation of how the alignment function utilizes the query and key vectors to produce alignment scores for each sequence element. In dot-product attention @attention_2, this is achieved using @alignment-eq. Note that the numbers used here are for illustratory purposes only and not extracted from a real model.]
) <alignment-diagram>
#figure(
image("scale_and_sum.png", width: 80%),
caption: [Illustration of how the alignment scores are used to scale the respective value vectors for each sequence element, and are then summed to produce a new vector that contains global information embedded contextually. Each value vector is multiplied by the respective score, and then these scaled elements are summed together to produce the new vector.]
) <scaling-diagram>
This calculation is performed on each element of the sequence to produce a new sequence of equal length, hopefully with some contextual information embedded @attention_is_all_you_need @attention_review. Generalizing @attention-eq for the entire input matrix, $matrixn(X)$, we get
$ alpha(matrixn(Q), matrixn(K), matrixn(V)) = sigma((matrixn(Q) matrixn(K)^bold(T))/sqrt(d_k)) matrixn(V) . $ <attention-eq-general>
Where again, $sigma$ is the Softmax function @softmax_ref. Combining @attention-eq-general with @weight-1-eq, @weight-2-eq, and @weight-3-eq gives a mapping between the attention input matrix, $matrixn(X)$, and the attention output matrix, $matrixn(Y)$.
$ matrixn(Y) = sigma(((matrixn(X) matrixn(W)_bold(q)) (matrixn(X) matrixn(W)_bold(k))^bold(T)) / sqrt(d_k)) (matrixn(X) matrixn(W)_bold(v)). $
The fact that this complex procedure can be performed with just a few matrix multiplications is one of the reasons for its great success. See @attention-diagram and @attention-network-diagram for illustrative diagrams.
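Indeed, a single self-attention head reduces to a handful of NumPy matrix products; this sketch assumes the row convention used above, with one sequence element per row of $matrixn(X)$.

```python
import numpy as np

def softmax(x, axis=-1):
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

def self_attention(X, W_q, W_k, W_v):
    """Single-head self-attention over an input matrix X.

    X has shape [N, d_model], one sequence element per row; the
    three projection matrices are the layer's only learned weights.
    """
    Q, K, V = X @ W_q, X @ W_k, X @ W_v
    d_k = K.shape[1]
    scores = softmax(Q @ K.T / np.sqrt(d_k))  # [N, N] attention map
    return scores @ V                         # output sequence
```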
#figure(
image("attention_mechanism.png", width: 100%),
caption: [Illustration of the operation of a single attention head. Here a very small three-element sequence is examined. Each element of the original input sequence is coloured differently, in red, green, and blue. All vectors and scalars associated with an input element are coloured similarly. The output sequence vectors are coloured with a mix of the input colours to show their new information content which consists of distilled global information. More detailed descriptions of the processes shown can be found in @weights_matricies, @alignment-diagram, and @scaling-diagram.]
) <attention-diagram>
#figure(
image("attention_network_diagram.png", width: 80%),
caption: [_Upper:_ Alternate method of visualizing attention mechanism as a network diagram. Although this is more similar to how networks have been displayed elsewhere in the thesis, it might obfuscate some aspects of the reasoning behind the attention layer operation. As in the @attention-diagram, this illustrates the operation of the attention mechanism on a sequence of length three, with each input vector coloured differently, in red, green, and blue. In this representation, the projection matrices, $matrixn(W)_bold(q)$, $matrixn(W)_bold(k)$, and $matrixn(W)_bold(v)$, are represented as dense layers, which are applied to each of the column vectors that comprise the input matrix in turn. It should be noted that although the dense layers are coloured differently as they are applied to each input element, this is just to show the different data flows, the weights are maintained by each application of each dense layer. The key, query, and value-dense layers, however, have different weights, and notably, no activation function, as they are just supplying a linear mapping rather than any more complex behaviour. _Lower:_ Abstraction of a single attention head layer, that will be used in future diagrams of models which contain attention layers, in order to limit diagram complexity.]
) <attention-network-diagram>
=== Multi-Head Attention
Thus far, the process we have described is the operation performed by a single attention head. We have worked under the assumption that all contextual information can be embedded locally with one pass from one head. In reality, this is not true; except for trivially simple sequences, it would not be possible to embed all global information in one pass. In a similar manner to convolutional filters, wherein each filter looks at a particular feature of the input data, an attention layer typically has multiple heads, each of which focuses on a particular information feature. One could look at colour, for example, whilst another focuses on punctuation (if not removed in tokenisation), or sentence structure.
In multi-head attention layers, the number of heads is a user-specified hyperparameter, just like the number of filters in a convolutional layer @attention_is_all_you_need @attention_review. Each head has independent weights for the query, $matrixn(W)_bold(q)$, key, $matrixn(W)_bold(k)$, and value, $matrixn(W)_bold(v)$, projection matrices, which are each tuned to find specific features in the data. After these heads have been applied, the outputs are concatenated along the feature dimension and then multiplied by a further weights matrix, used to mix the outputs of the different heads and to reshape the output vector to a desired size, which does not necessarily have to be the same size as the input vector, though this is a common choice; see @multi-head-diagram for a representation of a multi-attention head.
It should be noted that, in practice, the query, key, and value matrices for all heads are calculated simultaneously, with the individual weights matrices from each head combined into single large matrices. After multiplication with the input sequence, the large output matrices are split into separate matrices for each of the individual heads for the alignment-score calculation and vector summation. This is done to reduce the number of matrix multiplications required for the layer as a whole.
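A sketch of the multi-head arrangement follows, reusing the single-head function from the previous sketch; for clarity, each head is computed separately here rather than with the single large matrix multiplication used in practice.

```python
import numpy as np

def softmax(x, axis=-1):
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

def self_attention(X, W_q, W_k, W_v):
    # Single attention head, as in the previous sketch.
    Q, K, V = X @ W_q, X @ W_k, X @ W_v
    scores = softmax(Q @ K.T / np.sqrt(K.shape[1]))
    return scores @ V

def multi_head_attention(X, heads, W_o):
    """Multi-head attention.

    `heads` is a list of (W_q, W_k, W_v) weight tuples, one per
    head. The head outputs are concatenated along the feature
    axis and mixed by a final output projection W_o.
    """
    outputs = [self_attention(X, W_q, W_k, W_v)
               for (W_q, W_k, W_v) in heads]
    return np.concatenate(outputs, axis=-1) @ W_o
```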
#figure(
image("multi_attention_head.png", width: 80%),
caption: [_Upper:_ Network diagram of multi-attention head. Similar to how multiple convolutional kernels work in tandem in convolutional layers, multiple attention heads work together in multi-attention heads to focus on different information aspects of the input vector. These are then concatenated along the feature axis before finally being multiplied by a further weights matrix, here shown as a dense layer, which serves to mix the output of the different heads and to reshape the output to a desired size. _Lower:_ Abstraction of a multi-head attention layer, that will be used in future diagrams of models which contain attention layers.]
) <multi-head-diagram>
=== Attention Blocks
Within transformers and other similar architectures, multi-head attention layers are often paired with a number of complementary layers within a residual block @attention_is_all_you_need @attention_review @transformer_review. The input and output matrices of this block usually have identical shapes so that the block can be repeated $N$ times without any intermediate reshaping layers. Attention blocks typically feature a number of dense layers with activation functions in order to perform non-linear computation, regularisation methods such as dropout and batch normalisation, and a residual skip connection wherein the block input is added to the block output, in order to reduce the vanishing-gradient problem that can occur in very deep networks; see @attention-block-diagram, and the sketch that follows it.
#figure(
image("attention_block.png", width: 80%),
caption: [Typical attention block comprising multiple layers. Residual attention blocks vary in design between architectures but usually maintain the consistent elements shown. The skip connection is here represented by the encircling arrow, which shows that the input of the block is fed to the output before it is returned. There are also several regularisation methods present, batch normalisation, and dropout which help to reduce overfitting and ensure that values within the network remain bounded. Finally, the addition of dense layers and activation functions ensures that non-linear computation can be performed. Sometimes, if a reduction in total model parameter count and inference time is required, convolutional layers can be used in place of dense layers. The question marks indicate user-selectable hyperparameters. ]
) <attention-block-diagram>
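The block as a whole can be sketched as below. This is a minimal version only: it uses layer normalisation in place of the batch normalisation shown in @attention-block-diagram, omits dropout, and takes the attention sub-layer as a callable, such as the multi-head function sketched earlier with its weights bound.

```python
import numpy as np

def layer_norm(x, eps=1e-6):
    mu = x.mean(axis=-1, keepdims=True)
    sd = x.std(axis=-1, keepdims=True)
    return (x - mu) / (sd + eps)

def attention_block(X, attention, W_1, W_2):
    """One residual attention block (regularisation mostly omitted).

    `attention` is any callable mapping a sequence matrix to a
    same-shape matrix. Both sub-layers are wrapped in residual
    skip connections and normalised.
    """
    X = layer_norm(X + attention(X))     # attention + skip + norm
    hidden = np.maximum(0.0, X @ W_1)    # dense layer with ReLU
    return layer_norm(X + hidden @ W_2)  # dense + skip + norm
```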
=== Transformers <transformer-sec>
Since their introduction, attention mechanisms have been utilized in a number of different neural network architectures, including transformers @attention_is_all_you_need @transformer_review and generative diffusion models @diffusion_review. Transformers were first proposed by Vaswani _et al._ @attention_is_all_you_need to solve natural-language processing tasks, showing a significant improvement over previous recurrent and convolutional architectures. For these reasons, we decided to investigate a fully attention-based model, inspired by a Transformer encoder.
The transformer proposed by Vaswani _et al._ @attention_is_all_you_need consists of two branches, each comprised of a stack of transformer blocks: the encoder and the decoder; see @transformer-diagram. Since the model is designed for sentence generation, its architecture is a little different from what we will use in Skywarp. The encoder takes the input sequence (the input prompt), adds positional encoding, and then runs that sequence through six encoder blocks, each consisting of a multi-attention head and two dense layers; both the multi-attention head and the dense layers are surrounded by a residual skip connection, and the outputs are normalized. The encoder converts an input sequence into a latent representation, which is then fed to the decoder, which similarly consists of a multi-attention head and two dense layers. In addition, the decoder has a cross-attention layer that receives the output of the encoder and converts that output into key-value pairs, matching them against the queries generated by the decoder sequence. Cross-attention allows attention maps to be generated between elements in different sequences, in opposition to self-attention, where attention is only calculated within the sequence.
The design of this encoder-decoder pair serves the next-token-prediction capabilities of the transformer @attention_is_all_you_need @transformer_review. The purpose of the encoder is to encode the full training input sequence and provide a comparison for the decoder output. The decoder, meanwhile, receives the target sequence shifted one token to the right, with an additional start token. In the first attention layer of the decoder, a mask is applied so that no element can attend to any elements ahead of it in time. This is because whenever a token is at the end of a sequence, it cannot see any future tokens. The outputs this process generates are then compared against the encoder output, which can see the whole input sequence; therefore, the decoder learns to guess what the next tokens might be. That is a slight simplification, but it is an approximate description of the function of a standard transformer model; see @transformer-diagram.
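The mask itself is straightforward: illegal connections are given a score of $-infinity$ before the Softmax, so that they receive zero weight. A minimal sketch:

```python
import numpy as np

def causal_mask(n):
    """Additive mask for the decoder's alignment scores.

    Entries above the diagonal (future positions) are set to
    -inf, so after the Softmax they contribute zero weight: no
    element can attend to elements ahead of it in time.
    """
    upper = np.triu(np.ones((n, n)), k=1)  # 1s above the diagonal
    return np.where(upper == 1, -np.inf, 0.0)

# Applied inside the attention calculation, e.g.:
# scores = softmax(Q @ K.T / np.sqrt(d_k) + causal_mask(Q.shape[0]))
```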
#figure(
image("transformer.png", width: 100%),
caption: [The transformer model described by Vaswani _et al._ @attention_is_all_you_need. This encoder-decoder architecture can be used to generate predictions of the next token in a sequence. In the case of @attention_is_all_you_need, this sequence was natural language.]
) <transformer-diagram>
Since we are attempting to perform classification rather than next-element prediction, we will use only the encoder part of the transformer architecture and adapt it to gravitational-wave data; a rough sketch of this arrangement follows.
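The overall shape of such an encoder-only classifier is sketched below. This is an illustration of the idea only, and not the actual Skywarp architecture, which is detailed in @skywarp-method; the pooling strategy and single-score output head are assumptions for the sake of the example.

```python
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def encoder_classifier(X, blocks, w_out):
    """Encoder-only classifier sketch.

    `X` is the embedded, positionally encoded input sequence and
    `blocks` is a list of callables, e.g. the attention block
    sketched previously with its weights bound. The output
    sequence is average-pooled over time and mapped to a single
    detection score in [0, 1].
    """
    for block in blocks:
        X = block(X)
    pooled = X.mean(axis=0)          # average-pool over time steps
    return sigmoid(pooled @ w_out)   # score: P(signal present)
```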
== Transient Detection beyond CNNs <skywarp_review>
When expanding our literature search to include models beyond CNNs, we encounter a large body of work that has grown greatly in recent years, even since this work was carried out @gw_machine_learning_review. The volume of this work places a complete review beyond the scope of this document; a review article @gw_machine_learning_review is suggested for a more extensive overview.
An architecture commonly applied to time-series problems is the Recurrent Neural Network @reccurant_neural_networks. Recurrent neural networks have an internal state determined by previous inferences, and thus they can retain some information about previous data. In many ways, RNNs were the predecessors of transformer models, largely because they are able, in some way, to make inferences from global information rather than being limited by the receptive fields of convolutional filters @cnn_review. There have been many studies on the application of RNNs to the transient detection problem.
One of the most widely applied RNN subtypes is the Long Short-Term Memory (LSTM) network @lstm_intro @lstm_chapter, which utilizes network-controllable gates that, based on learned computation, control what data is preserved between inferences. Though this is not a permanent memory solution, as LSTMs will eventually lose recall of temporally distant inferences, they have the potential to store information across many thousands of inferences, hence the name Long Short-Term Memory. Bidirectional LSTMs, which are used almost exclusively throughout the literature, operate on sequences in both directions, allowing the model to see information both after and before the segment currently being analyzed.
There have been several attempts to apply LSTMs to CBC detection. Schmitt _et al._ @bidirectional_lstm_comp_2 performed a comparison of multiple network architectures, including bidirectional LSTMs, and found that Temporal Convolutional Networks, which utilize a combination of convolutional and recurrent layers, outperformed both the CNN employed by George _et al._ @george_huerta_cnn and traditional matched-filtering approaches. Nigam _et al._ @bidirectional_lstm_comp performed a comparison of many different machine learning methods, not limited to artificial neural networks or deep learning; whilst the results presented are limited and difficult to compare, they found CNNs and RNNs similarly effective. Chatterjee _et al._ @bidirectional_lstm present a model consisting of multiple convolutional and recurrent layers, which they also present as a method for denoising signals for further analysis, though they lack a robust discussion of False Alarm Rates. Lin _et al._ @bayesian_lstm present an interesting method that utilizes Bayesian networks along with LSTMs to incorporate uncertainty into the network, allowing the model to present its result along with a variable confidence; though it demonstrates comparable predictive power, it has a high false alarm rate, and they suggest that a larger training dataset is required to improve results.
Beveridge _et al._ @bidirectional_snr_lstm present a model imagined as a post-processing step that consumes the output of a matched-filtering search. Perhaps unsurprisingly given their model input, they achieve impressive accuracy scores down to very low FARs, although their method loses many of the benefits typically provided by deep learning searches, namely computational speed and cost, and is intended to improve current matched-filtering detection pipelines rather than replace them.
Utilizing a different RNN architecture, Zhang _et al._ @bidirectional_gru demonstrated a Bidirectional Gated Recurrent Unit (GRU) model, comparing it to a CNN and a Fourier Convolutional Neural Network (FCNN). They found the GRU to be the best-performing model, achieving an impressive performance of 89.6% accuracy at a False Alarm Rate (FAR) of #box($4.88 times 10^(-4)$ + h(1.5pt) + "Hz") on a pool of injections with SNRs between 5 and 20.
RNNs can be computationally expensive to train and run compared to other methods like CNNs @architecture_review, and there have been several papers focused on improving their performance @accelerating_rnns @optimising_rnns_2.
There have also been attempts to apply LSTMs to burst detection problems, including a pattern-matching supernovae search @lstm_supernovae and an unmodelled search using anomaly-detecting autoencoders @source_agnostic_lstm. The supernovae detection model by Iess _et al._ @lstm_supernovae demonstrates detection ability across several supernovae template banks but lacks a false alarm rate discussion. Moreno _et al._ @source_agnostic_lstm use recurrent autoencoders for anomaly detection. Autoencoders attempt to learn a function that projects elements drawn from an input distribution into a dimensionally reduced latent space and then reconstructs the original input element from this reduced latent space. Because the encoder and decoder are trained on a specific distribution, if they are fed an element from outside that distribution, there will be a larger difference between model input and output, indicating an anomaly. In the case of burst detection, there would be no burst events in the training distribution, so if a burst event did come through the model, it would be out of distribution. They found that the LSTM autoencoder performed better than CNN and GRU autoencoders, with accuracies of 30.4% for BBH signals and 11.4% for BNS signals at a FAR of #box($0.01$ + h(1.5pt) + "Hz").
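The reconstruction-error logic behind these anomaly-detection searches can be sketched in a few lines; the dense model and the threshold below are placeholder assumptions standing in for the recurrent architectures used in the cited works.

```python
import torch
import torch.nn as nn

# A toy dense autoencoder standing in for the recurrent ones in the papers.
autoencoder = nn.Sequential(
    nn.Linear(2048, 64), nn.ReLU(),    # encoder: compress to a latent space
    nn.Linear(64, 2048),               # decoder: reconstruct the input
)

def is_anomalous(segment, threshold=0.5):
    # A large reconstruction error suggests the input lies outside the
    # noise-only training distribution, i.e. a candidate burst.
    with torch.no_grad():
        recon = autoencoder(segment)
    error = torch.mean((segment - recon) ** 2)
    return error.item() > threshold

segment = torch.randn(2048)            # stand-in whitened strain segment
print(is_anomalous(segment))
```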
As transformers are a newer architecture, and one that is slightly less obvious to apply to time-series data since they are primarily used in NLP @transformer_review, there has been less focus on the application of transformers and attention-based models to transient detection problems. Yan _et al._ @coherent_snr_attention use attention layers in a coherence-based detection model, while Zhao _et al._ @space_based_transformer focus on the problem of space-based detection. The most relevant work is that of Jiang _et al._ @detection_conv_transformer, which was published around the time this investigation was being performed. They used a model architecture similar to that proposed for Skywarp, combining convolutional and attention layers and achieving results superior to purely convolutional models. During hyperparameter tuning, they found that the best performance was achieved by applying a single transformer layer after the convolutional head. One weakness of the paper is that their validation results are calculated using pools of mixed SNR, with the area under the ROC curve as their primary performance metric; this makes efficiency comparisons difficult.
== Skywarp Method <skywarp-method>
=== Skywarp Architectures <skywarp-model>
We have investigated a number of attention-based models and compared their performance to a CNN. We investigated a fully attention-based model utilizing a dense embedding, @skywarp_pure_attention, as well as a combined convolutional-attention model utilizing a convolutional embedding, @skywarp_conv_attention. In response to Jiang _et al._ @detection_conv_transformer, who found the best performance with a single attention block, a final model with only one transformer block was also created. For the pure CNN model, we adapted the model from Gabbard _et al._ @gabbard_messenger_cnn, with the architecture illustrated in @gabbard_diagram. We used this model as a reference CNN against which to compare performance; this particular model was chosen as it is often used throughout the literature as a baseline CNN.
When transformers are utilised for Natural Language Processing (NLP) tasks, the input strings of natural language are first tokenised into discrete tokens before those tokens are fed into an embedding layer that converts them into continuous vectors the network can ingest @tokenisation_review @attention_is_all_you_need. When adapting the architecture for use on time-series data, some design decisions must be made. Tokenization, although still possible, is no longer required, as the input data is already in continuous form. However, there are several options for how to feed the series into the transformer. Although it is possible to feed an attention block with length-one vectors drawn from the input time series, one per sample, it was found that this naive approach eliminated much of the transformer's potential for element-wise comparison. To resolve this, the method used by the vision transformer can be adopted: the input data can be segmented into $N$ segments before being fed into the network. In addition to, or in place of, such a segmentation, an embedding layer can be employed to increase the dimensionality of the segments.
In the pure attention model, we reshaped the input time series (#box("1.0" + h(1.5pt) + "s") at #box("8192.0" + h(1.5pt) + "Hz")) into 512 segments, each consisting of 16 samples; these segments were then encoded into larger vectors of 128 elements by a single convolutional layer with a filter size of 1. This embedding was performed to allow sufficient size for the positional encoding to be added to each vector. This solution was found after trialling several variations. See @skywarp_pure_attention for more detailed information on the network.
#figure(
image("skywarp_pure_attention.png", width: 100%),
caption: [Skywarp pure attention model with dense embedding. This model architecture was used to test the purest application of attention layers to the detection problem by removing all convolutional layers from within the model. The single convolutional layer was employed only to increase the dimensionality of the sequence elements from 16 to 128; this was necessary in order to add positional encoding of the appropriate size. Without positional encoding, models were almost impossible to train. The other hyperparameters were obtained using a rough trial-and-error search of the parameter space. A more sophisticated hyperparameter search, though a desired goal, proved difficult due to the increased computational requirements of attention-based models over CNNs. (An attention layer's memory usage scales approximately quadratically with input sequence length, $N$: $O(N^2)$ @transformer_memory_usage, similar to dense layers, as opposed to CNNs, which scale linearly with input length, $N$, and the number of filters, $F$, in the layers: $O(N times F)$ @cnn_memory_usage; consequently, we encountered more difficulty fitting attention models in memory than CNNs.)]
) <skywarp_pure_attention>
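The segmentation and embedding step described above amounts to the reshaping sketched below; the sizes match the text (8192 samples into 512 segments of 16, embedded to 128 elements), but the code is an illustrative PyTorch sketch rather than the actual Skywarp implementation.

```python
import torch
import torch.nn as nn

batch = torch.randn(32, 8192)            # 1.0 s of strain at 8192 Hz
segments = batch.reshape(32, 512, 16)    # 512 segments of 16 samples each

# A kernel-size-1 convolution acts as a per-segment dense embedding,
# lifting each 16-sample segment to a 128-element vector.
embed = nn.Conv1d(in_channels=16, out_channels=128, kernel_size=1)
tokens = embed(segments.transpose(1, 2)).transpose(1, 2)
print(tokens.shape)                      # torch.Size([32, 512, 128])
# Positional encodings of size 128 would be added to `tokens` here.
```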
During testing, we found that the pure attention model did not perform as well as the CNN model. The transformer model could much more easily overfit the training data, even with large training datasets. To combat this, a combined convolutional-attention model was introduced. This model, described in @skywarp_conv_attention, feeds the output of the convolutional layers from the CNN described in @gabbard_diagram into the attention blocks described in @skywarp_pure_attention, in an attempt to gain the benefits of both methods. The single-layer model shares the same architecture as @skywarp_conv_attention but with only a single attention block.
#figure(
image("skywarp_conv_attention.png", width: 100%),
caption: [Skywarp convolutional-attention model with convolutional embedding. This model architecture was employed to test a convolutional embedding scheme, using the proven CNN architecture of Gabbard _et al._ @gabbard_messenger_cnn to embed the input into feature maps which could then be fed into attention layers. We had the greatest success with this model variety. Again, hyperparameters were manually tuned, so it is expected that a superior model could be found with a more thorough hyperparameter investigation. ]
) <skywarp_conv_attention>
=== Skywarp Training, Testing, and Validation Data <skywarp-data>
The training dataset was very similar to the datasets used in previous sections. IMRPhenomD waveforms @imrphenom_d were generated using cuPhenom @cuphenom_ref and injected into real background noise taken from the LIGO Livingston detector during the 3#super("rd") LIGO-Virgo joint observing run, using GravyFlow @gwflow_ref. The waveforms were generated with component masses drawn from a uniform distribution between #box("5.0" + h(1.5pt) + $M_dot.circle$) and #box("95.0" + h(1.5pt) + $M_dot.circle$) for both companions, and with dimensionless spin components drawn uniformly between -0.5 and 0.5. A difference to note is that these waveforms were scaled with optimal SNRs drawn uniformly between 8.0 and 20.0, rather than between 8.0 and 15.0. There is no specific reason for this difference, and were this experiment to be repeated, these values would be standardised. Also note that these experiments were performed with an earlier version of the GravyFlow data pipeline @gwflow_ref, so there may be some small inconsistencies, although both cuPhenom and the data acquisition functionality should operate very similarly to more contemporary versions. Another arbitrary difference is that real noise data was collected in batches of length #box("3600.0" + h(1.5pt) + "s") rather than #box("2048.0" + h(1.5pt) + "s"); this was changed after this work due to stability improvements when working with power-of-two-length data.
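Scaling an injection to a target optimal SNR relies on the fact that optimal SNR is linear in the waveform amplitude, so a single multiplicative factor suffices; a minimal sketch of this rescaling, assuming the current optimal SNR has already been computed against the noise power spectral density, is given below.

```python
import numpy as np

rng = np.random.default_rng(0)

def rescale_injection(waveform, current_optimal_snr):
    # Optimal SNR is linear in waveform amplitude, so one multiplicative
    # factor moves an injection to any target SNR.
    target_snr = rng.uniform(8.0, 20.0)   # the range used in this work
    scale = target_snr / current_optimal_snr
    return waveform * scale, target_snr
```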
#figure(
table(
columns: (auto, auto),
inset: 10pt,
align: horizon,
[*Hyperparameter*], [*Value*],
[Batch Size], [32],
[Learning Rate], [10#super("-4")],
[Optimiser], [ Adam ],
[Scaling Method], [SNR],
[Minimum SNR], [8.0],
[Maximum SNR], [20.0],
[SNR Distribution], [Uniform],
[Data Acquisition Batch Duration], [ #box("3600.0" + h(1.5pt) + "s") ],
[Sample Rate], [ #box("2048.0" + h(1.5pt) + "Hz")],
[On-source Duration], [ #box("1.0" + h(1.5pt) + "s")],
[Off-source Duration], [ #box("16.0" + h(1.5pt) + "s")],
[Scale Factor], [10#super("21") ],
),
caption: [The training and dataset hyperparameters used in the Skywarp experiments. These are very similar to the settings used for the burst and perceptron experiments seen in @perceptron-results. Note that the scale factor here refers to the factor used to upscale the CBC waveforms and real interferometer noise from their extremely small natural values to artificial-neuron-friendly magnitudes. This is done both to ensure that the input values work well with the network activation functions and learning rates, which are tuned around values near one, and to reduce precision errors in areas of the code that use 32-bit precision, employed to reduce memory overhead, computational cost, and duration. Data acquisition batch duration is a parameter of the GravyFlow data acquisition module. For speed, the GravyFlow @gwflow_ref data acquisition system downloads data in larger segments than is required for each training batch, then randomly samples examples from this larger segment to assemble each training batch. The data acquisition batch duration determines how long this larger segment is. Smaller values will result in a more evenly mixed training dataset and a lower overall GPU memory overhead, but will make the training process more time-consuming. ]
) <skywarp-training-parameters>
=== Training Procedure <skywarp-training>
The training procedure is also very similar to that of previous experiments, using a learning rate of 10#super("-4"), a batch size of 32, and the Adam optimizer, all widely used industry standards, though it is possible that some of these hyperparameter choices could be improved with tuning. Training was performed in epochs of $10^6$ rather than $10^5$ examples, though the validation pool remained at $10^4$ examples.
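For completeness, a minimal training-step fragment consistent with these hyperparameters is sketched below; the model and data are random stand-ins, not the Skywarp networks or the GravyFlow pipeline.

```python
import torch

# Hypothetical stand-in model; hyperparameters match the text.
model = torch.nn.Sequential(torch.nn.Linear(2048, 1), torch.nn.Sigmoid())
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
loss_fn = torch.nn.BCELoss()

# One illustrative step on random stand-in data (batch size 32).
onsource = torch.randn(32, 2048)
labels = torch.randint(0, 2, (32, 1)).float()

optimizer.zero_grad()
loss = loss_fn(model(onsource), labels)
loss.backward()                           # backpropagate
optimizer.step()                          # Adam parameter update
```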
== Skywarp Results <skywarp-results>
To profile the performance of Skywarp, we compare it against a Convolutional Neural Network (CNN) with the architecture taken from the early paper by Gabbard _et al._ @gabbard_messenger_cnn at different false alarm rates. As is the usual procedure, we first calculate the model score thresholds required to achieve various False Alarm Rates; see @skywarp_far_curve.
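The threshold calculation is conceptually simple: score a large pool of noise-only examples and find the score exceeded at the desired rate. A minimal sketch, assuming each example represents 1.0 s of data and using synthetic stand-in scores:

```python
import numpy as np

def threshold_for_far(noise_scores, far_hz, seconds_per_example=1.0):
    # far_hz * seconds_per_example = allowed false-positive fraction.
    fraction = far_hz * seconds_per_example
    # The score exceeded by exactly that fraction of noise-only examples.
    return np.quantile(noise_scores, 1.0 - fraction)

rng = np.random.default_rng(0)
noise_scores = rng.beta(2, 8, size=1_000_000)   # stand-in model scores
print(threshold_for_far(noise_scores, 1e-3))
```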
#figure(
image("skywarp_far_curve.png", width: 100%),
caption: [Model False Alarm Rate (FAR) vs the score threshold required to achieve that false alarm rate for the three Skywarp models and the recreated CNN model from Gabbard _et al._ @gabbard_messenger_cnn The four models display notably different FAR curve profiles, though it is important to note that a lower curve on this plot will not necessarily translate to better model performance, as it says nothing about the True Positive Rate; a classifier that labels everything as noise, for example, would be entirely flat on this plot but would remain useless. Still, there is a notable difference between the curves: the pure attention model consistently requires a much higher score threshold than the other three models, which will be reflected in its poor efficiency performance.]
) <skywarp_far_curve>
When we examine the model efficiency curves in @skywarp_efficiency_curves, with key results extracted into @skywarp-results-table for easier reading of the small differences, the most evident takeaway is that the pure attention model utilizing dense embedding has by far the weakest performance; we can easily discount this method as ineffectual. It is not clear, however, whether this poor performance is due to the choice of embedding or because attention layers alone are not adequate for gravitational-wave classification. We suggest that it is the former, since there have been very successful attention-based image @vision_transformers and audio @audio_transformer models in other domains that do not utilize a convolutional head, and because, in theory, an attention layer is general enough to mimic any convolution. We theorize either that the dimensionality increase performed to upscale the chunks enough to add positional encoding is not adequate to convert the segments into a digestible feature space, or that the chunking is ineffectual because it splits the data without knowledge of the signal offset, creating sequence vectors which do not have consistent signal elements.
The other three models perform much more consistently. There is no clear advantage at a FAR of #box($0.1$ + h(1.5pt) + "Hz") or #box($0.01$ + h(1.5pt) + "Hz"), although the attention-convolution hybrids consistently score higher, with the larger eight-layer model's advantage increasing at lower FARs. In many cases, however, this advantage is small, ranging from one to five percentage points. At 0.001 Hz, the lowest power-of-ten FAR at which all models still have classification ability, the combined convolutional-attention network appears to have a more notable advantage over the other two competitive networks. However, this gap remains small enough to be somewhat unconvincing, and is perhaps down to statistical variance in model training, which is not a perfectly repeatable process and can fluctuate depending on dataset order and parameter initialization. What can be said is that the advantage of a single attention layer over a larger eight-layer model suggested by Jiang _et al._ @detection_conv_transformer was presumably down to their use of the area under the ROC curve as a performance metric, since this metric is dominated by efficiencies at higher FARs. At the lowest FAR presented, #box($0.0001$ + h(1.5pt) + "Hz"), both the pure attention network and the single-layer attention model ceased to have any classification ability, since their score thresholds had reached one. At this FAR, the combined convolutional-attention model appears to greatly outperform the CNN. However, this is operating very close to the 32-bit precision limit ($"threshold" > 0.999$), which would probably be uncomfortable for use in a transient search, as small statistical variations could have a large effect on model performance. The efficiencies are also quite low at this threshold, perhaps lower than one would wish in order to remain competitive with other detection methods.
#figure(
table(
fill: (_, row) => if calc.odd(calc.floor((row - 1)/4)) { luma(220) } else { white },
columns: (auto, auto, auto, auto, auto),
inset: 10pt,
align: horizon,
[*Model*], [*FAR (Hz)*], [*Accuracy (%), SNR 6*], [*Accuracy (%), SNR 8*], [*Accuracy (%), SNR 10*],
[Gabbard _et al._], [$10^(-1)$], [57.4], [91.5], [#text(red)[*99.4*]],
[Skywarp Pure Attention], [$10^(-1)$], [41.5], [72.9], [91.2],
[Skywarp Conv Attention Single], [$10^(-1)$], [#text(red)[*59.2*]], [#text(red)[*91.7*]], [99.3],
[Skywarp Conv Attention], [$10^(-1)$], [57.2], [91.4], [#text(red)[*99.4*]],
[Gabbard _et al._], [$10^(-2)$], [29.6], [76.6], [96.8],
[Skywarp Pure Attention], [$10^(-2)$], [17.3], [50.0], [80.2],
[Skywarp Conv Attention Single], [$10^(-2)$], [#text(red)[*32.5*]], [78.6], [96.9],
[Skywarp Conv Attention], [$10^(-2)$], [31.9], [#text(red)[*79.2*]], [#text(red)[*97.4*]],
[Gabbard _et al._], [$10^(-3)$], [13.5], [57.6], [91.2],
[Skywarp Pure Attention], [$10^(-3)$], [4.4], [22.0], [51.8],
[Skywarp Conv Attention Single], [$10^(-3)$], [13.3], [56.7], [90.2],
[Skywarp Conv Attention], [$10^(-3)$], [#text(red)[*16.9*]], [#text(red)[*63.3*]], [#text(red)[*93.0*]],
[Gabbard _et al._], [$10^(-4)$], [0.4], [8.7], [46.0],
[Skywarp Pure Attention], [$10^(-4)$], [-], [-], [-],
[Skywarp Conv Attention Single], [$10^(-4)$], [-], [-], [-],
[Skywarp Conv Attention], [$10^(-4)$], [#text(red)[*5.4*]], [#text(red)[*38.4*]], [#text(red)[*81.2*]],
),
caption: [Accuracy results at different optimal SNRs for the four models tested, at different FAR thresholds. Scores in red are the highest results for that SNR at that FAR; in the one case where there is a tie, both scores are highlighted. With a very marginal lead, the single-layer attention-convolution hybrid appears to be the best model at a FAR of #box($10^(-1)$ + h(1.5pt) + "Hz"), only losing out to the CNN and the deeper hybrid model by 0.1 percentage points at the highest SNR of 10. This is not a particularly useful FAR range, however, and as we decrease the FAR, the deeper attention model seems to win out, though again the lead is quite small. This appears to show that the convolutional-attention model may have a slight advantage at lower FARs. At the lowest FAR presented, #box($10^(-4)$ + h(1.5pt) + "Hz"), the required score thresholds for both the convolutional-attention hybrid with a single attention layer and the pure attention model have reached one, and these models therefore lack any classification ability. For the remaining models, the required model score threshold is greater than 0.999; although at this FAR the deep attention hybrid appears to be the clear winner, it is unclear whether victory at such a high score threshold is meaningful or simply due to statistical variance in model training and the validation dataset. It should be noted, though, that the lead is similar in all SNR bins, which were created independently, so if it is statistical variance, it probably originates in the training procedure.]
) <skywarp-results-table>
It should be reiterated that these are single-detector results; in a real multi-detector search pipeline, the effective FAR would improve significantly. Assuming a detection pipeline with no overlap, #box($1.0$ + h(1.5pt) + "s") example duration, and two detectors, the network FAR is given by $2 times "FAR"_1 times "FAR"_2 times t$, where the factor of two arises because a coincidence between the two detectors can overlap in either direction. Assuming $"FAR"_1 = "FAR"_2$ and $t = 1.0 " s"$, the network FAR is $2 times "FAR"^2 times 1.0 " s"$, which turns the presented FARs of #box($10^(-1)$ + h(1.5pt) + "Hz"), #box($10^(-2)$ + h(1.5pt) + "Hz"), #box($10^(-3)$ + h(1.5pt) + "Hz"), and #box($10^(-4)$ + h(1.5pt) + "Hz") into #box($2 times 10^(-2)$ + h(1.5pt) + "Hz"), #box($2 times 10^(-4)$ + h(1.5pt) + "Hz"), #box($2 times 10^(-6)$ + h(1.5pt) + "Hz"), and #box($2 times 10^(-8)$ + h(1.5pt) + "Hz") respectively. The latter two begin to approach the regime of other detection pipelines; the threshold for significant detections used by the LVK collaboration is #box($3.9 times 10^(-7)$ + h(1.5pt) + "Hz") (once per month) for CBCs and #box($3.2 times 10^(-8)$ + h(1.5pt) + "Hz") (once per year) for bursts @ligo_far_threshold. In practice, however, it is expected that some overlap would be employed to reduce the chance of a signal falling at a segment boundary. As well as efficiency curves, ROC curves are also presented for comparison with other results; see @skywarp_roc_curves. Due to the nature of the current calculation of the ROC curves, their maximum FAR resolution is considerably lower than what is presented in @skywarp_efficiency_curves, owing to the lower number of validation examples, so these plots cannot be used as effectively to compare performance at different FARs, but they can give a general impression of model performance at higher FARs.
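As a quick check of the coincidence arithmetic above (assuming independent detectors with equal single-detector FARs and a 1.0 s window):

```python
def network_far(single_far_hz, window_s=1.0, n_orderings=2):
    # Two detectors, coincidence in either order: 2 * FAR^2 * t.
    return n_orderings * single_far_hz**2 * window_s

for far in (1e-1, 1e-2, 1e-3, 1e-4):
    print(far, "->", network_far(far))   # 2e-2, 2e-4, 2e-6, 2e-8 Hz
```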
#figure(
grid(
columns: 1,
rows: 2,
gutter: 1em,
[ #image("skywarp_efficiency_0_1.png", width: 100%) ],
[ #image("skywarp_efficiency_0_01.png", width: 100%) ],
[ #image("skywarp_efficiency_0_001.png", width: 100%) ],
[ #image("skywarp_efficiency_0_0001.png", width: 100%) ],
),
caption: [Efficiency curves of the Skywarp models and the recreated model from Gabbard _et al._ at different False Alarm Rates. For each of the four models trained, 61 efficiency tests were performed at evenly spaced optimal SNR values between 0 and 15. For each test, 16384 examples containing signals of the relevant SNR were examined by the model. The most evident distinction is between the pure attention model utilizing dense embedding and the other models, which are either purely convolutional or have a convolutional head. There is considerably less distinction between the latter three models, which appear statistically indistinguishable at FARs of $10^(-1)$ Hz and $10^(-2)$ Hz. A slight advantage of the combined attention-convolution model over the other two competitive models may arise at $10^(-3)$ Hz, which is perhaps the strongest evidence of an advantage, but this difference is still too small to draw any definite conclusions of improved efficacy. At $10^(-4)$ Hz, the difference is much more apparent, but we are approaching 32-bit precision limits, so it is unclear exactly how seriously we should take these results. The efficiencies at this low FAR are also considerably reduced, lessening the appeal of using these models in this regime. _First:_ Efficiency curves at a FAR of #box($10^(-1)$ + h(1.5pt) + "Hz"). _Second:_ Efficiency curves at a FAR of #box($10^(-2)$ + h(1.5pt) + "Hz"). _Third:_ Efficiency curves at a FAR of #box($10^(-3)$ + h(1.5pt) + "Hz"). _Fourth:_ Efficiency curves at a FAR of #box($10^(-4)$ + h(1.5pt) + "Hz").]
) <skywarp_efficiency_curves>
#figure(
grid(
columns: 1,
rows: 2,
gutter: 1em,
[ #image("skywarp_roc_8_20.png", width: 100%) ],
[ #image("skywarp_roc_12.png", width: 100%) ],
[ #image("skywarp_roc_10.png", width: 100%) ],
[ #image("skywarp_roc_8.png", width: 100%) ],
[ #image("skywarp_roc_6.png", width: 100%) ],
),
caption: [Receiver Operating Characteristic (ROC) curves generated for each of the four Skywarp models for a variety of SNR pools. The story these plots tell is very similar to that shown by the efficiency curves, @skywarp_efficiency_curves, albeit with less granularity. The pure attention model performs considerably worse than the other three models, which are nearly indistinguishable. The pure convolutional model has a slightly higher area under the ROC curve, primarily boosted by higher efficiencies at higher false alarm rates, while the combined convolutional-attention network achieves higher accuracies in the lower FAR ranges, with the single-layer convolutional-attention network somewhere between the two. _First:_ ROC generated with a pool of signals with optimal SNRs drawn uniformly between 8 and 20. _Second:_ ROC generated with a pool of signals with optimal SNRs of 12. _Third:_ ROC generated with a pool of signals with optimal SNRs of 10. _Fourth:_ ROC generated with a pool of signals with optimal SNRs of 8. _Fifth:_ ROC generated with a pool of signals with optimal SNRs of 6.]
) <skywarp_roc_curves>
== Discussion <skywarp-discussion>
The use of attention layers in CBC detection models shows promise. Although these results fail to prove a decisive advantage, they demonstrate that, even with rudimentary parameter optimization, a model can be generated that beats a CNN from the literature which is often used as a standard. One firm conclusion that can be drawn is that the pure attention model, with the dense embedding presented, performs significantly worse than the other methods presented.
Although there does not seem to be a very significant advantage to using the Skywarp transformer architecture presented here over the more traditional CNN model, it does appear that attention layers are better adapted to work in low-FAR regimes. A more complete hyperparameter optimization procedure, a more thorough investigation of signal embedding methods, and experiments with multi-detector inputs utilizing inter-detector cross-attention may yield more significant performance benefits.
|
https://github.com/jeffa5/typst-todo | https://raw.githubusercontent.com/jeffa5/typst-todo/main/doc.typ | typst | Apache License 2.0 | #align(center, heading([Typst Todo], outlined: false))
#set heading(numbering: "1.")
#outline(indent: true)
#import "todo.typ": todo, missing_figure, list_of_todos
#let itodo = todo.with(inline:true)
#list_of_todos(numbers:"1.1:")
= On some content
#todo[Add some content]
== Todo with note text
#todo(note: [test])[some text that needs fixing]
#todo(note: [bad header])[#heading([todonotes are ok, I guess], level:2)]
== Works with columns too
#box(height:5em)[
#columns(2)[
#lorem(10)
#todo(note: [left column])[this text is in the left column]
#lorem(10)
#todo(note: [right column])[this text is in the right column]
#lorem(10)
]]
= Inline
#todo(inline: true)[Add some content]
== Using a shortcut, and with colour
#itodo(fill:blue)[an itodo]
== And with some numbers
#itodo(numbers: "1.1")[a numbered inline todo]
== But can't specify a note
```typst
#todo(inline: true, note: "test note")[broken] // error
```
== A very long todo, only the first line is shown in the list_of_todos
#itodo(numbers: "1.1")[#lorem(50)]
#itodo(numbers: "1.1")[Something big is coming
#lorem(50)]
= Todo figures too
#missing_figure[my pretty graph]
|
https://github.com/MichaelFraiman/TAU_template | https://raw.githubusercontent.com/MichaelFraiman/TAU_template/main/mfraiman-boxes.typ | typst | #let slantedBackground(
color: black,
text_color: white,
font: none,
body
) = {
set text(
fill: text_color,
weight: 700,
)
if font != none {set text(font: font)}
style(styles => {
let size = measure(body, styles)
let inset = 8pt
[#block()[
#polygon(
fill: color,
(0pt, 0pt),
(0pt, size.height + (2*inset)),
(size.width + (2*inset), size.height + (2*inset)),
(size.width + (2*inset) + 6pt, 0cm)
)
#place(center + top, dy: size.height, dx: -3pt)[#body]
]]
})
}
#let slantedColorbox(
title: "title",
col_back: yellow,
col_stroke: purple,
col_title: orange,
col_text_title: black,
col_text_body: black,
radius: 0pt,
width: auto,
font_body: none,
font_title: none,
font_emoji: none,
emoji: "",
body
) = {
box(
fill: col_back,
stroke: 2pt + col_stroke,
radius: radius,
width: width
)[
#slantedBackground(
color: col_title,
text_color: col_text_title,
)[
      #text(size: 0.8em)[#{
        // Fall back to the default font so the emoji is not dropped.
        if font_emoji != none {
          text(font: font_emoji)[#emoji]
        } else {
          emoji
        }
      }]
      #text(fill: col_text_title)[#{
        // Fall back to the default font so the title is not dropped.
        if font_title != none {
          text(font: font_title)[#title]
        } else {
          title
        }
      }]
]
#block(
width: 100%,
inset: (top: -2pt, x: 10pt, bottom: 10pt)
)[
#set text(fill: col_text_body)
      // Fall back to the default font so the body is not dropped.
      #if font_body != none {text(font: font_body)[#body]} else {body}
]
]
} |
|
https://github.com/supersurviveur/typst-math | https://raw.githubusercontent.com/supersurviveur/typst-math/main/typst-math-rust/README.md | markdown | MIT License | # Typst math rust
The Rust part of the extension, compiled to WebAssembly. It is used to access the typst-syntax crate to parse the AST.
## Building
See the [CONTRIBUTING.md](../CONTRIBUTING.md) file for instructions on how to build the project. |
https://github.com/ukihot/igonna | https://raw.githubusercontent.com/ukihot/igonna/main/articles/abstract.typ | typst | #import "@preview/droplet:0.2.0": dropcap
#import "../const.typ"
#dropcap(
height: 4,
justify: true,
gap: 4pt,
hanging-indent: 1em,
overhang: 8pt,
font: "Curlz MT",
)[
The primary objective of this document is to facilitate a conceptual comprehension of "What constitutes system development" for individuals lacking IT skills and industry experience. This endeavor is not aimed at presenting efficient methodologies or challenging widely accepted norms; rather, it seeks to methodically consolidate information for those desiring a fundamental grasp of essential points.
] |
|
https://github.com/jgm/typst-hs | https://raw.githubusercontent.com/jgm/typst-hs/main/test/typ/compiler/spread-06.typ | typst | Other | // Error: 13-19 cannot spread string
#calc.min(.."nope")
|
https://github.com/lebinyu/typst-thesis-template | https://raw.githubusercontent.com/lebinyu/typst-thesis-template/main/template/abstract_style.typ | typst | Apache License 2.0 | // import heading style
#import "global_style.typ": *
// make abstract page
#let abstractpage(
mainbody: "",
) = {
set page(
numbering: "I",
)
counter(page).update(1)
show: styleheading_head("Abstract", mainbody)
set page(
margin: (x: 3cm, y:2.5cm),
header: none
)
}
// #set page(
// numbering: "I",
// margin: (x: 3cm, y:2.5cm),
// header: none
// )
// #counter(page).update(1)
// #styleheading1([Abstract])
// #include "./abstract.typ"
|
https://github.com/Personal-Data-Acquisition/PDA_paperwork | https://raw.githubusercontent.com/Personal-Data-Acquisition/PDA_paperwork/main/scope_and_vision/src/jake_retrospective.typ | typst | = Retrospective
#label("retrospective")
#strong[Author:] <NAME> #strong[Class:] CS461 #strong[DATE:] 2023
== Start-Stop-Continue
#label("start-stop-continue")
=== Start
#label("start")
So there are a few things I think would probably be useful for our
team moving forwards.
- More people making use of the github issues/milestones.
- More work on writing unit tests.
- Start writing comprehensive documentation for each team's
deliverables.
=== Stop
#label("stop")
- Trying to commit code directly to repos.
- Using unprotected github branches.
=== Continue
#label("continue")
- Weekly team meetings.
- Working together on assignments using our VCS (version control
system).
- Completing github issues and milestones.
- Committing to forks before submitting pull requests.
- Posting updates in discord/teams on the project's status.
== Action Items
#label("action-items")
=== Unit tests
#label("unit-tests")
This is the big one for me, my priority item. If we can commit to
making use of TDD, we can remove a lot of the possible issues or
misunderstandings up front.
This becomes even more important for the hardware/firmware team because
of how much more time-intensive it can be to debug issues on an
embedded system versus a regular laptop or desktop machine that runs
a full operating system with system logging, such as FreeBSD or Linux.
#strong[TimeLine:] I’m going to self-assign the project of setting up
the testing framework/harness for our stm32F103C8TX MCU. Hopefully I can
start to learn more about how TDD is done with Rust compared to using
Cmocka or Unity for C and C++.
|
|
https://github.com/Amelia-Mowers/typst-tabut | https://raw.githubusercontent.com/Amelia-Mowers/typst-tabut/main/doc/example-snippets/import-csv-easy.typ | typst | MIT License | #import "@preview/tabut:<<VERSION>>": records-from-csv
#let titanic = records-from-csv(csv("example-data/titanic.csv")); |
https://github.com/cadojo/correspondence | https://raw.githubusercontent.com/cadojo/correspondence/main/src/hermes/hermes.typ | typst | MIT License | #import "src/article.typ": *
#import "src/report.typ": *
|
https://github.com/Dav1com/minerva-report-fcfm | https://raw.githubusercontent.com/Dav1com/minerva-report-fcfm/master/README.md | markdown | MIT No Attribution | # Minerva Report FCFM
Template for assignments, reports, and papers, for students and academics of the Facultad de Ciencias Físicas y Matemáticas of the Universidad de Chile who have used similar templates in LaTeX.
## Quick Start
### [Webapp](https://typst.app)
If you use the Typst webapp, you can press "Start from template" and search for "minerva-report-fcfm" to create a new project with this template.
### Typst CLI
With the CLI at version 0.11.1 or later, you can run:
```sh
typst init @preview/minerva-report-fcfm:0.3.0
```
This will download the template into the typst cache and then initialize the project in the current directory.
## Configuration
Most of the configuration is done through the `meta.typ` file,
where you can choose a title and specify the authors, the teaching staff, and other settings.
The `autores` field can only be a `string` or an `array` of strings.
The `departamento` setting can be customized to any organization by passing it a dictionary of this form:
```typ
#let departamento = (
  nombre: (
    "Universidad Técnica Federico Santa María",
    "Facultad"
  )
)
```
The remaining settings can be arbitrary `content`, or a `string`.
## Extensive Guide
More information can be found in the guide
|
https://github.com/nathanielknight/tsot | https://raw.githubusercontent.com/nathanielknight/tsot/main/src/phase_encounter.typ | typst | #import("utils_phase.typst")
#import("util.typst")
#import("theme.typst")
#show: theme.common
#show: theme.phase
#utils_phase.title("The Encounter")
#let tension_roll(players, tension) = {
let penalty = calc.round(calc.sqrt(tension * 2))
if penalty == 0 [
#(players)d6
] else [
#(players + penalty)d6
]
}
= Instructions
In this phase you'll start to encounter the threat.
- Roll a pool of dice (one per player plus one)
- On your turn:
+ Choose a die from the pool
+ Make a corresponding move from your Playbook as this sheet (as in the Prelude)
-
- A move will tell you when to advance to the Struggle
- Once everyone has made a move, re-roll the (possibly modified) dice pool
= Moves
TODO
= Tension
|
|
https://github.com/antran22/typst-cv-builder | https://raw.githubusercontent.com/antran22/typst-cv-builder/main/lib/resume/components.typ | typst | MIT License | #import "../common.typ": *
/// The base item for resume entries.
/// This formats the item for the resume entries. Typically your body would be a bullet list of items. Could be your responsibilities at a company or your academic achievements in an educational background section.
/// - body (content): The body of the resume entry
#let ResumeItem(body) = {
set text(
size: 10pt,
style: "normal",
weight: "light",
fill: color-darknight,
)
show link:underline
show link: set text(accent-color)
set par(leading: 0.65em)
body
}
/// The heading for a resume entry. This formats the title and subtitle lines of an entry; pair it with `ResumeItem` for the body content.
/// - title (string): The title of the resume entry
/// - title-r (string): Content aligned to the right of the title (e.g. a location)
/// - subtitle (string): The subtitle of the resume entry
/// - subtitle-r (string): Content aligned to the right of the subtitle (e.g. a date range)
#let ResumeEntry(
title: none,
title-r: "",
subtitle: "",
subtitle-r: "",
) = {
pad(bottom: 8pt)[
#justified-header(title, title-r)
#secondary-justified-header(subtitle, subtitle-r)
]
}
/// Show cumulative GPA.
/// *Example:*
/// #example(`resume.resume-gpa("3.5", "4.0")`)
#let resume-gpa(numerator, denominator) = {
set text(
size: 12pt,
style: "italic",
weight: "light",
)
text[Cumulative GPA: #box[#strong[#numerator] / #denominator]]
}
|
https://github.com/jgm/typst-hs | https://raw.githubusercontent.com/jgm/typst-hs/main/test/typ/layout/enum-numbering-06.typ | typst | Other | // Error: 22-28 invalid numbering pattern
#set enum(numbering: "(())")
|
https://github.com/Area-53-Robotics/53E-Notebook-Over-Under-2023-2024 | https://raw.githubusercontent.com/Area-53-Robotics/53E-Notebook-Over-Under-2023-2024/giga-notebook/entries/test-drivetrain-again/entry.typ | typst | Creative Commons Attribution Share Alike 4.0 International | #import "/packages.typ": notebookinator
#import notebookinator: *
#import themes.radial.components: *
#show: create-body-entry.with(
title: "Test: Drivetrain Power Usage",
type: "test",
date: datetime(year: 2023, month: 8, day: 11),
author: "<NAME>",
witness: "Violet Ridge",
)
We retested the power draw of our drivetrain to check whether our friction fixes were
successful. The testing steps are the same as those in the first drivetrain test,
omitting the temperature measurement.
#grid(
columns: (1fr, 2fr),
gutter: 20pt,
[
The left side motors are using around half the power they were using before,
with the third motor going down from approximately 2W down to 1W. The other two
motors are consuming almost no power whatsoever.
],
image("./left.png"),
[
The right side is also within the acceptable 1W. The first motor consumes the
most power, using around 0.8W. The third motor consumes approximately 0.35W of
power, and the second motor consumes almost none.
],
image("./right.png"),
)
Overall these results are very promising, and show that our drivetrain is
performing within expectations. It consumes very little power, and therefore has
very little friction.
|
https://github.com/j10ccc/algorithm-analysis-homework-template-typst | https://raw.githubusercontent.com/j10ccc/algorithm-analysis-homework-template-typst/main/layout/headers/identification.typ | typst | #import "../../config.typ": frontmatter
#let identification(time_consume) = [
  *Name:* #frontmatter.name \
  *Student number:* #frontmatter.student_number \
  *Total time spent:* #time_consume
]
|
|
https://github.com/InternetFreedomFoundation/ghost-typst | https://raw.githubusercontent.com/InternetFreedomFoundation/ghost-typst/main/README.md | markdown | # Ghost Typst
A [Typst](https://typst.app) template which generates a PDF by consuming data from a Ghost JSON export.
Accompanying blog post can be found [here](https://djoker.tech/posts/programatically-generating-pdf/).
## Usage
Place the Ghost JSON export in the root directory of the project, rename the JSON file to `ghost.json`, and run `typst compile main.typ`.
## Screenshots
Here's the title page and an example page with a blog post:

|
|
https://github.com/ckunte/m-one | https://raw.githubusercontent.com/ckunte/m-one/master/inc/sncurves.typ | typst | = S-N curves
In order to set suitable design criteria, I am looking to compare two classes of S-N curves for a fatigue design, viz., E and F2, and I cannot find a handy plot to refer to; it is frustrating when standards fail to include one. So, I channel the frustration into writing some code to roll my own:
#figure(
image("/img/sncurves.svg", width: 100%),
caption: [
S-N curves in seawater with cathodic protection based on ISO 19902
]
) <snci>
The basic S-N curve equation is as follows, which one may recognise as coming from the Paris-Erdogan law (fracture mechanics) @paris_erdogan:
#figure(
image("/img/sncurves-table.png", width: 100%),
caption: [
Basic representative S-N curves, ISO 19902
]
) <snt>
$ N = k_1 dot S^(-m) $
The standard does describe it in its logarithmic form, which is as follows:
$ log N = log k_1 - m dot log S $
and then it goes on to furnish its two sets of key components that form parts of the equation --- highlighted below. For graphing purposes, the above can also be written as:
$ S = (N / k_1)^(-1 / m) $
For example, and I am writing this for myself since I struggle with logarithms, if
$ log_10 k_1 = 12.18 $
then,
$ k_1 = 10^(12.18) $
Code for plotting hotspot stresses versus number of cycles is as follows.
#let sncurves = read("/src/sncurves.py")
#{linebreak();raw(sncurves, lang: "python")}
== Comparison of S-N curves between standards
+ See #link("http://dx.doi.org/10.13140/RG.2.2.14995.20006")[ISO 19902 and DNV-RP-C203]
+ See #link("http://dx.doi.org/10.13140/RG.2.2.28416.97289")[BS 7608 and DNV-RP-C203]
$ - * - $
|
|
https://github.com/Skimmeroni/Appunti | https://raw.githubusercontent.com/Skimmeroni/Appunti/main/Metodi%20Algebrici/Insiemi/Funzioni.typ | typst | Creative Commons Zero v1.0 Universal | #import "../Metodi_defs.typ": *
Let $A$ and $B$ be two sets. A *function* (or *map*) from $A$
to $B$ is a rule $f$ that associates to each element of $A$ one and
only one element of $B$:
$ f : A |-> B, f(a) = b $
Here $A$ is called the *domain* of $f$ and $B$ is called the *codomain* of $f$.
In fact, a function $f$ from $A$ to $B$ is a special case of a
correspondence $cal(R)_(f)$ from $A$ to $B$ in which the second term of
each ordered pair composing it is always unique:
$ f : A |-> B " is equivalent to " cal(R)_(f) : forall a in A,
exists ! b = f(a) in B : (a, b) in cal(R)_(f) $
For each $a in A$, its "counterpart" in $B$, namely $b = f(a)$, is
called the *image* of $a$. The set containing the image of each
element of the set $A$, namely $f(A) = {f(a): a in A}$, is called the
*image* of $f$, also denoted simply by $Im(f)$.
For each $b in B$, an element $a$ of $A$ of which $b$ is the
"counterpart", namely $a: f(a) = b$, is called a *preimage* of
$b$. The set containing the preimages of each element of
the set $B$, namely ${a in A: f(a) in B}$, is called the
*preimage* of $f$, also denoted simply by $Im^(-1) (f)$.
#example[
- The rule that associates to each rational number $frac(a, b)$
the integer $a + b$ is not a function. This is because $frac(a, b)
= frac(h a, h b) forall h != 0$, so each $frac(a, b)$ is associated
with a multitude of values, not just one. For example, the fraction
$frac(2, 3)$ is associated with both $5$ and $10$;
- The rule $f : ZZ -> ZZ, f(z) = z^(2)$, which associates to each
integer its square, is a function;
- The subset ${(z, 7), z in ZZ} subset.eq ZZ times ZZ$, that is,
the set of all ordered pairs of the Cartesian product of $ZZ$
with itself having 7 as their second element, is a function. This
subset can be written more explicitly in the form of a rule as
$f: ZZ -> ZZ, f(z) = 7$.
]
Let $A$ and $B$ be two sets and let $f : A |-> B$ be a function defined
on them. We say that $f$ is *injective* if distinct elements of $A$ are
always associated with distinct elements of $B$:
$ a_(1), a_(2) in A : a_(1) != a_(2) ==> f(a_(1)) != f(a_(2)) $
We say that $f$ is *surjective* if the codomain $B$ and the set $f(A)$
coincide, that is, if every element of $B$ has at least one preimage:
$ forall b in B, exists a in A : f(a) = b $
We say that $f$ is *bijective*, or a *one-to-one correspondence*, if it
is both injective and surjective. In other words, $f$ is bijective if
distinct elements of $A$ are associated with distinct elements of $B$
and every element of $B$ always has a preimage:
$ forall b in B, exists ! a in A : f(a) = b $
#example[
- The function $f: ZZ -> ZZ, f(z) = 0$ is not injective, because every
element of $ZZ$ is always associated with the same element of
$ZZ$ (namely $0$). Moreover, it is not surjective, because all
the elements of the codomain other than $0$ have no
preimage;
- The function $f: ZZ |-> ZZ, f(z) = z^(2)$ is not injective, because
if $b = f(a)$ holds for some $a in ZZ$, then $b = f(-a)$ also holds
for $-a in ZZ$. For example, $f(4) = f(-4) = 16$. Moreover, it is not
surjective, because all the elements of $ZZ$ that are not perfect
squares have no preimage. For example, there is no $a in
ZZ$ such that $f(a) = 13$. Indeed, although there are two values of $a$
such that $f(a) = 13$, namely $plus.minus sqrt(13)$, these are not
integers, and therefore do not belong to the domain;
- The function $f: NN |-> ZZ, f(z) = z^(2)$ is injective, because
each element of $NN$ is associated with a distinct element of
$ZZ$. It is not surjective, however, because all the elements of $ZZ$
that are not perfect squares have no preimage;
- The function $f: ZZ |-> ZZ, f(z) = z + 1$ is injective, because
for every integer there is one and only one integer obtained by
adding one to it. It is also surjective, because for every integer
it is always possible to find another integer from which it is
obtained by adding one. Therefore, it is a
bijective function.
]
For any non-empty set $A$ it is possible to construct the *identity
function* $i_(A)$ as the function that associates each element of $A$
with itself. Formally:
$ i_(A): A |-> A, space i_(A)(a) = a space forall a in A $
Let $A$, $B$, $C$ and $D$ be four sets. Let $f: A |-> B$ and
$g: C |-> D$ be two functions, with $Im(f) subset.eq C$. The *composite
function* of $f$ and $g$ is the function obtained by applying the
function $g$ to the result of the function $f$, that is:
$ g compose f : A |-> D, space (g compose f)(a) = g(f(a)) space forall a in A $
#theorem[
The composition of functions is associative. That is, let $A$, $B$,
$C$, $D$, $E$ and $F$ be six sets, and let $f: A |-> B$,
$g: C |-> D$ and $h: E |-> F$ be three functions, with $Im(f) subset.eq C$
and $Im(g) subset.eq E$. Then $h compose (g compose f) = (h compose g)
compose f$.
] <Composition-is-associative>
// #proof[
// Provable, to be added
// ]
#lemma[
Let $A$ and $B$ be two sets, and let $f: A |-> B$ be a function defined
on them. Then, for any $f$, $i_(B) compose f = f$ and $f compose
i_(A) = f$.
] <Composition-identity>
// #proof[
// Provable, to be added
// ]
#corollary[
Let $A$ be a set and let $f: A |-> A$ be a function. Composition of
functions has the identity function as its neutral element with respect
to the set $A^(A)$.
] <Composition-identity-is-neutral>
#proof[
By @Composition-identity, if $f: A |-> B$ is a function from a
set $A$ to a set $B$, then $i_(B) compose f = f$ and $f compose
i_(A) = f$. In the particular case where $A = B$, we have $f compose i_(A)
= i_(A) compose f = f$.
]
#theorem[
Let $f: A |-> B$ and $g: B |-> C$ be two functions, and let $g compose f$
be their composite function. Then:
+ If $f$ and $g$ are injective, then $g compose f$ is injective;
+ If $f$ and $g$ are surjective, then $g compose f$ is surjective;
+ If $f$ and $g$ are bijective, then $g compose f$ is bijective.
] <Composition-preserves-jection>
// #proof[
// Provable, to be added
// ]
Given two sets $A$ and $B$ and a function $f: A |-> B$, the *inverse
function* of $f$ is the function $f^(-1)$ such that, for each element $b in B$,
$f^(-1)(b)$ is the unique $a in A$ for which $f(a) = b$. If for a
function $f$ it is possible to construct the inverse function $f^(-1)$, we
say that $f$ is *invertible*.
#theorem[
A function $f: A |-> B$ is invertible if and only if it is bijective.
]
// #proof[
// Provable, to be added
// ]
#lemma[
Let $f: A |-> B$ be an invertible function and let $g: B |-> A$ be its
inverse. Then $g compose f = i_(A)$ and $f compose g = i_(B)$. In the
particular case where $A = B$, we have $f compose g = g compose f
= i_(A)$.
] <Composition-inverse>
// #proof[
// Provable, to be added
// ]
#corollary[
Let $f: A |-> A$ be an invertible function and let $g: A |-> A$ be its
inverse. Composition of functions has the inverse function as the inverse
with respect to the set $A^(A)$.
] <Composition-inverse-is-inverse>
#proof[
By @Composition-inverse, if $f: A |-> B$ is an invertible function and
$g: B |-> A$ is its inverse, then $g compose f = i_(A)$ and $f compose g
= i_(B)$. In the particular case where $A = B$, we have $f compose g = g
compose f = i_(A)$.
]
Bijective functions make it possible to extend the notion of equipotence
to sets of infinite cardinality. Indeed, while to compare the cardinality
of sets of finite cardinality it suffices to "count" how many elements
each set has and compare the two (natural) numbers so obtained, for
sets of infinite cardinality this is not possible.
In particular, let $A$ and $B$ be two sets. These sets are equipotent
if and only if there exists (at least) one bijective function having $A$
as its domain and $B$ as its codomain.
#example[
Let $f: NN |-> ZZ$ be the function defined as:
$ f(n) = cases(
frac(n, 2) & "if" 2 | n,
frac(-n -1, 2) & "if" 2 divides.not n
) $
This function is bijective, hence $NN$ and $ZZ$ are equipotent.
]
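The behaviour of this function is easy to check numerically for small values; the following short sketch is an illustrative addition, not part of the original notes:

```python
def f(n):
    # The map f : N -> Z from the example above.
    return n // 2 if n % 2 == 0 else (-n - 1) // 2

values = [f(n) for n in range(10)]
print(values)                            # [0, -1, 1, -2, 2, -3, 3, -4, 4, -5]
assert len(set(values)) == len(values)   # distinct outputs on this range
```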
|
https://github.com/wildfire322/typst-x86_64-pc-windows-msvc | https://raw.githubusercontent.com/wildfire322/typst-x86_64-pc-windows-msvc/main/typst-x86_64-pc-windows-msvc/README.md | markdown | <h1 align="center">
<img alt="Typst" src="https://user-images.githubusercontent.com/17899797/226108480-722b770e-6313-40d7-84f2-26bebb55a281.png">
</h1>
<p align="center">
<a href="https://typst.app/docs/">
<img alt="Documentation" src="https://img.shields.io/website?down_message=offline&label=docs&up_color=007aff&up_message=online&url=https%3A%2F%2Ftypst.app%2Fdocs"
/></a>
<a href="https://typst.app/">
<img alt="Typst App" src="https://img.shields.io/website?down_message=offline&label=typst.app&up_color=239dad&up_message=online&url=https%3A%2F%2Ftypst.app"
/></a>
<a href="https://discord.gg/2uDybryKPe">
<img alt="Discord Server" src="https://img.shields.io/discord/1054443721975922748?color=5865F2&label=discord&labelColor=555"
/></a>
<a href="https://github.com/typst/typst/blob/main/LICENSE">
<img alt="Apache-2 License" src="https://img.shields.io/badge/license-Apache%202-brightgreen"
/></a>
<a href="https://typst.app/jobs/">
<img alt="Jobs at Typst" src="https://img.shields.io/badge/dynamic/json?url=https%3A%2F%2Ftypst.app%2Fassets%2Fdata%2Fshields.json&query=%24.jobs.text&label=jobs&color=%23A561FF&cacheSeconds=1800"
/></a>
</p>
Typst is a new markup-based typesetting system that is designed to be as powerful
as LaTeX while being much easier to learn and use. Typst has:
- Built-in markup for the most common formatting tasks
- Flexible functions for everything else
- A tightly integrated scripting system
- Math typesetting, bibliography management, and more
- Fast compile times thanks to incremental compilation
- Friendly error messages in case something goes wrong
This repository contains the Typst compiler and its CLI, which is everything you
need to compile Typst documents locally. For the best writing experience,
consider signing up to our [collaborative online editor][app] for free. It is
currently in public beta.
## Example
A [gentle introduction][tutorial] to Typst is available in our documentation.
However, if you want to see the power of Typst encapsulated in one image, here
it is:
<p align="center">
<img alt="Example" width="900" src="https://user-images.githubusercontent.com/17899797/228031796-ced0e452-fcee-4ae9-92da-b9287764ff25.png"/>
</p>
Let's dissect what's going on:
- We use _set rules_ to configure element properties like the size of pages or
the numbering of headings. By setting the page height to `auto`, it scales to
fit the content. Set rules accommodate the most common configurations. If you
need full control, you can also use [show rules][show] to completely redefine
the appearance of an element.
- We insert a heading with the `= Heading` syntax. One equals sign creates a top
level heading, two create a subheading and so on. Typst has more lightweight
markup like this, see the [syntax] reference for a full list.
- [Mathematical equations][math] are enclosed in dollar signs. By adding extra
spaces around the contents of an equation, we can put it into a separate block.
Multi-letter identifiers are interpreted as Typst definitions and functions
unless put into quotes. This way, we don't need backslashes for things like
`floor` and `sqrt`. And `phi.alt` applies the `alt` modifier to the `phi` to
select a particular symbol variant.
- Now, we get to some [scripting]. To input code into a Typst document, we can
write a hash followed by an expression. We define two variables and a
recursive function to compute the n-th fibonacci number. Then, we display the
results in a center-aligned table. The table function takes its cells
row-by-row. Therefore, we first pass the formulas `$F_1$` to `$F_8$` and then
the computed fibonacci numbers. We apply the spreading operator (`..`) to both
because they are arrays and we want to pass the arrays' items as individual
arguments.
<details>
<summary>Text version of the code example.</summary>
```typst
#set page(width: 10cm, height: auto)
#set heading(numbering: "1.")
= Fibonacci sequence
The Fibonacci sequence is defined through the
recurrence relation $F_n = F_(n-1) + F_(n-2)$.
It can also be expressed in _closed form:_
$ F_n = round(1 / sqrt(5) phi.alt^n), quad
phi.alt = (1 + sqrt(5)) / 2 $
#let count = 8
#let nums = range(1, count + 1)
#let fib(n) = (
if n <= 2 { 1 }
else { fib(n - 1) + fib(n - 2) }
)
The first #count numbers of the sequence are:
#align(center, table(
columns: count,
..nums.map(n => $F_#n$),
..nums.map(n => str(fib(n))),
))
```
</details>
## Installation
Typst's CLI is available from different sources:
- You can get sources and pre-built binaries for the latest release of Typst
from the [releases page][releases]. Download the archive for your platform and
place it in a directory that is in your `PATH`. To stay up to date with future
releases, you can simply run `typst update`.
- You can install Typst through different package managers. Note that the
versions in the package managers might lag behind the latest release.
- Linux: View [Typst on Repology][repology]
- macOS: `brew install typst`
- Windows: `winget install --id Typst.Typst`
- If you have a [Rust][rust] toolchain installed, you can also install the
latest development version with
`cargo install --git https://github.com/typst/typst --locked typst-cli`. Note that this
will be a "nightly" version that may be broken or not yet properly documented.
- Nix users can use the `typst` package with `nix-shell -p typst` or build and
run the bleeding edge version with `nix run github:typst/typst -- --version`.
- Docker users can run a prebuilt image with
`docker run -it ghcr.io/typst/typst:latest`.
## Usage
Once you have installed Typst, you can use it like this:
```sh
# Creates `file.pdf` in working directory.
typst compile file.typ
# Creates PDF file at the desired path.
typst compile path/to/source.typ path/to/output.pdf
```
You can also watch source files and automatically recompile on changes. This is
faster than compiling from scratch each time because Typst has incremental
compilation.
```sh
# Watches source files and recompiles on changes.
typst watch file.typ
```
Typst further allows you to add custom font paths for your project and list all
of the fonts it discovered:
```sh
# Adds additional directories to search for fonts.
typst compile --font-path path/to/fonts file.typ
# Lists all of the discovered fonts in the system and the given directory.
typst fonts --font-path path/to/fonts
# Or via environment variable (Linux syntax).
TYPST_FONT_PATHS=path/to/fonts typst fonts
```
For other CLI subcommands and options, see below:
```sh
# Prints available subcommands and options.
typst help
# Prints detailed usage of a subcommand.
typst help watch
```
If you prefer an integrated IDE-like experience with autocompletion and instant
preview, you can also check out the [Typst web app][app], which is currently in
public beta.
## Community
The main place where the community gathers is our [Discord server][discord].
Feel free to join there to ask questions, help out others, share cool things
you created with Typst, or just to chat.
Aside from that there are a few places where you can find things built by
the community:
- The official [package list](https://typst.app/docs/packages)
- The [Awesome Typst](https://github.com/qjcg/awesome-typst) repository
If you had a bad experience in our community, please [reach out to us][contact].
## Contributing
We would love to see contributions from the community. If you experience bugs,
feel free to open an issue. If you would like to implement a new feature or bug
fix, please follow the steps outlined in the [contribution guide][contributing].
To build Typst yourself, first ensure that you have the
[latest stable Rust][rust] installed. Then, clone this repository and build the
CLI with the following commands:
```sh
git clone https://github.com/typst/typst
cd typst
cargo build --release
```
The optimized binary will be stored in `target/release/`.
Another good way to contribute is by [sharing packages][packages] with the
community.
## Pronunciation and Spelling
IPA: /taɪpst/. "Ty" like in **Ty**pesetting and "pst" like in Hi**pst**er. When
writing about Typst, capitalize its name as a proper noun, with a capital "T".
## Design Principles
All of Typst has been designed with three key goals in mind: Power,
simplicity, and performance. We think it's time for a system that matches the
power of LaTeX, is easy to learn and use, all while being fast enough to realize
instant preview. To achieve these goals, we follow three core design principles:
- **Simplicity through Consistency:**
If you know how to do one thing in Typst, you should be able to transfer that
knowledge to other things. If there are multiple ways to do the same thing,
one of them should be at a different level of abstraction than the other. E.g.
it's okay that `= Introduction` and `#heading[Introduction]` do the same thing
because the former is just syntax sugar for the latter.
- **Power through Composability:**
There are two ways to make something flexible: Have a knob for everything or
have a few knobs that you can combine in many ways. Typst is designed with the
second way in mind. We provide systems that you can compose in ways we've
never even thought of. TeX is also in the second category, but it's a bit
low-level and therefore people use LaTeX instead. But there, we don't really
have that much composability. Instead, there's a package for everything
(`\usepackage{knob}`).
- **Performance through Incrementality:**
  All Typst language features must accommodate incremental compilation.
Luckily we have [`comemo`], a system for incremental compilation which does
most of the hard work in the background.
[docs]: https://typst.app/docs/
[app]: https://typst.app/
[discord]: https://discord.gg/2uDybryKPe
[tutorial]: https://typst.app/docs/tutorial/
[show]: https://typst.app/docs/reference/styling/#show-rules
[math]: https://typst.app/docs/reference/math/
[syntax]: https://typst.app/docs/reference/syntax/
[scripting]: https://typst.app/docs/reference/scripting/
[rust]: https://rustup.rs/
[releases]: https://github.com/typst/typst/releases/
[repology]: https://repology.org/project/typst/versions
[contact]: https://typst.app/contact
[architecture]: https://github.com/typst/typst/blob/main/docs/dev/architecture.md
[contributing]: https://github.com/typst/typst/blob/main/CONTRIBUTING.md
[packages]: https://github.com/typst/packages/
[`comemo`]: https://github.com/typst/comemo/
|
|
https://github.com/maucejo/elsearticle | https://raw.githubusercontent.com/maucejo/elsearticle/main/docs/manual.typ | typst | MIT License | #import "@preview/mantys:0.1.4": *
#import "@preview/cheq:0.1.0": *
#show: checklist.with(fill: eastern.lighten(95%), stroke: eastern, radius: .2em)
#let TeX = style(styles => {
set text(font: "New Computer Modern")
let e = measure("E", styles)
let T = "T"
let E = text(1em, baseline: e.height * 0.31, "E")
let X = "X"
box(T + h(-0.15em) + E + h(-0.125em) + X)
})
#let LaTeX = style(styles => {
set text(font: "New Computer Modern")
let a-size = 0.66em
let l = measure("L", styles)
let a = measure(text(a-size, "A"), styles)
let L = "L"
let A = box(scale(x: 110%, text(a-size, baseline: a.height - l.height, "A")))
box(L + h(-a.width * 0.67) + A + h(-a.width * 0.25) + TeX)
})
//-------------------------------------------------------------------------------------
#let abstract = [
  #package[Elsearticle] is a Typst template that aims to mimic the Elsevier article #LaTeX class, a.k.a. elsarticle.cls, provided by Elsevier to format manuscripts properly for submission to their journals.
]
#show: mantys.with(
name: "Elsearticle class for Typst",
version: "0.3.0",
date: datetime.today(),
license: "MIT",
authors: ("<NAME>"),
abstract: abstract
)
= About
#package[Elsearticle] is a Typst template that aims to mimic the Elsevier article #LaTeX class, a.k.a. elsarticle.cls, provided by Elsevier to format manuscripts properly for submission to their journals.
#package[Elsearticle] is designed to be as close as possible to the original class, whose specification can be found in the #link("http://assets.ctfassets.net/o78em1y1w4i4/3ro3yQff1q67JHmLi1sAqV/1348e3852f277867230fc4b84a801734/elsdoc-1.pdf")[#text("documentation", fill: eastern)]. The template is still in development and may not be fully compatible with all Elsevier journals.
This manual provides an overview of the features of the #package[Elsearticle] template and how to use it.
#wbox[The template is provided as is by the Typst community and is not affiliated with Elsevier.]
= Usage
== Using Elsearticle
To use the #package[Elsearticle] template, you need to include the following line at the beginning of your `typ` file:
#codesnippet[```typ
#import "@preview/elsearticle:0.3.0": *
```
]
== Initializing the template
After importing #package[Elsearticle], you have to initialize the template by a show rule with the #cmd[elsearticle] command. This function takes the optional named arguments described below.
#codesnippet[```typ
#show: elsearticle.with(
...
)
```
]
#cmd[elsearticle] takes the following arguments:
#command("elsearticle", ..args(
title: none,
authors: (),
abstract: none,
journal: none,
keywords: none,
format: "review",
numcol: 1,
line-numbering: false,
[body])
)[#argument("title", default: none, types: "string")[Title of the paper]
#argument("authors", default: (), types: "array")[List of the authors of the paper
Each element of the #dtype("array") is a #dtype("dict") definining an author. The author #dtype("dictionary") has the following keys:
- `name` #dtype("string"): Name of the author
- `affiliation` #dtype("string") (optional): Affiliation of the author
- `corr` #dtypes("string", none) (optional): email address of the corresponding author
- `id` #dtype("string") (optional): ID of the author
#codesnippet[```typc
authors: (
(
name: "<NAME>",
affiliation: "Laboratory 1, University 1, City 1",
corr: "<EMAIL>",
id: "a",
),
(
name: "<NAME>",
affiliation: "Laboratory 2, University 2, City 2",
id: "b"
),
(name: "<NAME>"), // J. Dupont is in the same laboratory as J. Doe
)
```]
]
#argument("abstract", default: none, types: "content")[Abstract of the paper]
#argument("journal", default: none, types: "string")[Name of the journal]
#argument("keywords", default: none, types: "array")[List of the keywords of the paper
Each element of the #dtype("array") is a #dtype("string") representing a keyword
#codesnippet[```typc
keywords: ("Keyword 1", "Keyword 2")
```]
]
#argument("format", default: "review", types: "string")[Format of the paper. Possible values are "preprint", "review", "1p", "3p" and "5p"
]
#argument("numcol", default: 1, types: "number")[Number of columns of the paper. Possible values are 1 and 2
#ibox[According to the documentation of `elsarticle.cls` (see #link("https://assets.ctfassets.net/o78em1y1w4i4/3ro3yQff1q67JHmLi1sAqV/1348e3852f277867230fc4b84a801734/elsdoc-1.pdf", "here")), the number of columns is related to the format of the paper:
- 1p: Single column only
- 3p: Single or double column possible
- 5p: Double column only
To avoid unexpected behaviors, the value of the `numcol` argument is set to 1 by default and restricted to 1 or 2.]
]
#argument("line-numbering", default: false, types: "bool")[Enable line numbering in the document]
]
== Additional features
The #package[Elsearticle] template provides additional features to help you format your document properly.
=== Appendix
The template allows you to create appendices using the #cmd[appendix] environment. The appendices are then numbered with capital letters (A, B, C, etc.). Figures, tables and equations are numbered accordingly, e.g. Eq. (A.1).
To activate the appendix environment, all you have to do is to place the following command in your document:
#codesnippet[
```typ
#show: appendix
// Appendix content here
```
]
=== Subfigures
Subfigures are not built-in features of Typst, but the #package[Elsearticle] template provides a way to handle them. It is based on the #package[subpar] package that allows you to create subfigures and properly reference them.
To create a subfigure, you can use the following syntax:
#codesnippet[
```typc
#subfigure(
figure(image("image1.png"), caption: []), <figa>,
figure(image("image2.png"), caption: []), <figb>,
columns: (1fr, 1fr),
caption: [(a) Left image and (b) Right image],
label: <fig>
)
```
]
#ibox[The #cmd("subfigure") function is a wrapper around the #cmd[subpar.grid] function. The numbering is adapted to the context of the document (normal section or appendix).]
=== Equations
The equations are numbered with the format "(1)", "(2)" in normal sections and with the format "(A.1)", "(A.2)" in appendices. In addition to these numbering patterns, the #package[Elsearticle] template provides the #cmd("nonumeq") to create unnumbered equations. The latter function can be used as follows:
#codesnippet[
```typ
#nonumeq[$
y = f(x)
$
]
```
]
= Roadmap
The #package[Elsearticle] template is still in development. Here are some of the features that are planned for future releases:
*Article format*
- [x] Preprint
- [x] Review
- [x] 1p
- [x] 3p
- [x] 5p
*Environment*
- [x] Implementation of the `appendix` environment
*Figures and tables*
- [x] Implementation of the `subfigure` environment
- [x] Proper referencing of figure, subfigures and tables w.r.t. the context
- [x] Recreation of the `link` to cross-reference figures, subfigures and tables
*Equations*
- [x] Proper referencing of equations w.r.t. the context
- [ ] Numbering each equation of a system as "(1a)" -- _Ongoing discussions at the Typst dev level_
*Other features*
- [x] Line numbering - Use the built-in `par.line` function available from Typst v0.12 |
https://github.com/alberto-lazari/computer-science | https://raw.githubusercontent.com/alberto-lazari/computer-science/main/advanced-topics-pl/presentation/sections/improvements.typ | typst | #import "/common.typ": *
#new-section("Improvements")
#slide(title: "Syntax")[
#line-by-line(mode: "transparent")[
- LaTeX inconsistent syntax for implementation reasons: \
```latex \command{...}``` vs ```latex \begin{command} ... \end{command}```
- TeX can alter and create syntax:
#{
only(1)[```$x + y\]```]
only("2-")[```latex $x + y\]```]
}
- Typst has a well-defined syntax
]
]
#slide(title: "Macros")[
- Simpler and immediate #sym.arrow more intuitive
- No scoping #sym.arrow side effects and package conflicts
]
#slide(title: "Error reporting")[
- TeX's interactive error correction
- Well-defined syntax
- Type system
]
#slide(title: [Error reporting -- interactive correction])[
#show raw: it => { set text(size: .8em); it }
#grid(columns: (2fr, 3fr), gutter: 2em,
align(center + horizon)[
LaTeX
```
$x + y
Missing $ inserted.
```
],
align(center + horizon)[
Typst
```
$x + y
^
expected closing dollar sign
```
],
)
]
#slide(title: [Error reporting -- syntax])[
#grid(columns: (1fr, 1fr), gutter: 2em,
align(center + horizon)[
LaTeX
#set text(size: .7em)
```latex
\section
Missing \endcsname inserted.
Missing \endcsname inserted.
Missing \endcsname inserted.
...
```
],
align(center + horizon)[
Typst
#example(error: "missing argument: body
┌─ /sections/improvements.typ:94:16
│
94 │ #heading()
│ ^^ ")[
```typst
#heading()
```
]
],
)
]
#slide(title: [Error reporting -- type system])[
#grid(columns: (1fr, 1fr), gutter: 2em,
align(center + horizon)[
LaTeX
#set text(size: .7em)
```latex
\baselineskip=normal
Missing number, treated as zero.
Illegal unit of measure (pt inserted).
```
],
align(center + horizon)[
Typst
#example(error: "expected length, found string
┌─ /sections/improvements.typ:70:24
│
70 │ #set par(leading: \"normal\")
│ ^^^^^^^^ ")[
```typst
#set par(leading: "normal")
```
]
],
)
]
#slide(title: "Computational foundations")[
#only(1, [No data structures in TeX #sym.arrow.double a package for everything] + grid(columns: (1fr, 1fr), gutter: 2em,
align(center + horizon)[
LaTeX
```latex
\usepackage{trimspaces}
\trim@post@space{Text }
```
],
align(center + horizon)[
Typst
```typst
#{ "Text ".trim(at: end) }
```
]
))
#show raw: it => { set text(size: .7em); it }
#only(2, grid(columns: (1fr, 1fr), gutter: 2em,
align(center + horizon)[
LaTeX
```latex
\usepackage{listofitems}
\def\tabelize#1{
\readlist\animals{#1}
\begin{table}
\textbf{Animal} \\
\foreachitem\a\in\animals{
\a \\
}
\end{table}
}
```
],
align(center + horizon)[
Typst
```typst
#let tabelize(str) = {
let animals = str.split(", ")
table([*Animal*], ..animals)
}
#tabelize("Tiger, Giraffe, Cougar")
```
]
))
#only(3, grid(columns: (1fr, 1fr), gutter: 2em,
align(center + horizon)[
LaTeX
```latex
\newcount\i \i=0
\loop
\advance \i by 1
Variable i = \the\i
\ifnum \i<5 \repeat
```
],
align(center + horizon)[
Typst
```typst
#let i = 0
#while i < 5 {
i += 1
[Variable i = #i]
}
```
]
))
]
|
|
https://github.com/dismint/docmint | https://raw.githubusercontent.com/dismint/docmint/main/compilers/quiz1.typ | typst | #import "template.typ": *
#show: template.with(
title: "Quiz 1 Review",
subtitle: "6.1100"
)
= Regular Expressions, NFA, DFA
An alphabet is represented by the symbol $Sigma$, and a *string* is a finite sequence of symbols from the alphabet. A language is then a set of strings over that alphabet.
Regular expressions, NFA, and DFA all have the same representational power. However, their use cases can vary depending on what information is being conveyed. The idea is that regular expressions describe what kind of words are in the language, whereas an automaton can determine whether a given word is in the language.
#note(
title: "Regex in Math / CS"
)[
Keep in mind that we refer to regex in the context of Math, not necessarily the regex we see in programming, which has many more features than the simple operators of mathematical regex.
]
Conversion of an NFA to a DFA results in exponential blowup - more specifically, $n$ states in the NFA lead to at most $2^n$ states in the DFA.
/ DFA: A deterministic finite automaton, which cannot contain $epsilon$ or have multiple transitions out of a state for a given token.
#define(
title: "Thompson's Construction"
)[
Thompson's defines a way to turn a regular expression into an NFA. Most rules are very simple, with the trickiest one being the star operator, which matches any number of times (including zero).
#twocol(
[
The image on the right shows an example of Thompson's rule for a harder case. From the starting node $bold(q)$, it is possible to reach the end state $bold(t)$ by either:
+ Matching nothing and taking the bottom path.
+ Matching the pattern $N(s)$ once by going straight through.
+ Matching the pattern $N(s)$ more than once by taking the top loop repeatedly.
],
bimg("img/star.png")
)
]
== Converting NFA $->$ DFA
To systematically convert an NFA to a DFA, begin with the set of all possible starting states, including those reachable by taking empty ($epsilon$) paths. For example, this node might be labeled ${1, 2, 3}$. Then, from all the states of the node, consider every possible input token, and draw the corresponding arrow to the new node which contains the set of NFA states that can be reached, as sketched below.
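#example(
  title: "Subset Construction Sketch"
)[
  A minimal Python sketch of this conversion, under the assumption that the NFA is given as dictionaries: `delta` maps `(state, symbol)` pairs and `eps` maps states to iterables of successor states (these names and this representation are just for illustration, not from lecture).
```python
from collections import deque

def epsilon_closure(states, eps):
    # All states reachable from `states` using only epsilon transitions.
    stack, closure = list(states), set(states)
    while stack:
        s = stack.pop()
        for t in eps.get(s, ()):
            if t not in closure:
                closure.add(t)
                stack.append(t)
    return frozenset(closure)

def nfa_to_dfa(start, delta, eps, alphabet):
    # Each DFA state is the set of NFA states we could currently be in.
    start_set = epsilon_closure({start}, eps)
    dfa, queue = {}, deque([start_set])
    while queue:
        S = queue.popleft()
        if S in dfa:
            continue  # this subset was already expanded
        dfa[S] = {}
        for a in alphabet:
            moved = {t for s in S for t in delta.get((s, a), ())}
            dfa[S][a] = epsilon_closure(moved, eps)
            queue.append(dfa[S][a])
    return start_set, dfa
```
  Since every reachable DFA state is a subset of the $n$ NFA states, the loop runs at most $2^n$ times, matching the blowup bound above.
]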
= Context Free Grammars and Top-Down Parsing
Context free grammars are stronger than regex, since they have the power to match certain things that regex can't. One traditional example is ${a^n b^n}$. Another example might be nested statements, which regex has a very hard time handling.
There are several common points of concern in context free grammars:
/ Ambiguity: When an expression has the possibility of multiple parse trees.
/ Left-Recursion: If an expression starts with itself, it can match and loop infinitely.
/ Operator Precedence: Order of operations.
#twocol(
bimg("img/hack.png"),
[
The original grammar shown to the left has the possibility of an ambiguity for an expression like $1 - 2 + 3$, since it can either parse to the left or to the right. With the fix seen in the grammar on the right, we force the expression to generate to the left, by matching the `Int` on the right side.
]
)
To introduce order of operations into the mix, we can split a non-terminal into *layers* of non-terminals, which allow us to define a different order of precedence, with non-terminals closer to the `Start` non-terminal being of lower precedence.
== If Ambiguity
Another common example of ambiguity arises from a grammar which contains `if` statements.
#twocol(
[
If we try to parse a statement such as\
`if x then if y then z else w`\
with the grammar to the right, then it becomes uncertain whether this is\
`if x then {if y then z} else w`\
or alternatively\
`if x then {if y then {z} else w}`
],
bimg("img/if.png")
)
== Left Factoring
A solution to the above problem involves left factoring. We want to factor out any common prefixes to prevent ambiguity in the parsing. This can be seen on the left image below. In addition, we can extend a similar idea to perform *precedence climbing* to enforce order of operations. Consider the example on the right where we can use this concept to force a distinction between ${"plus", "minus"}$ and ${"div", "mul"}$. A code sketch of this layered approach follows the figures below.
#twocol(
bimg("img/fixif.png", width: 100%),
bimg("img/oo.png", width: 100%)
)
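#example(
  title: "Precedence Climbing Sketch"
)[
  A minimal recursive-descent sketch of this layering in Python (the grammar, token set, and function names are illustrative assumptions). Each precedence layer only calls the next-higher layer, and every loop consumes a token before recursing, which avoids left-recursion.
```python
import re

def tokenize(src):
    return re.findall(r"\d+|[-+*/()]", src)

# One function per precedence layer, mirroring the layered grammar:
#   expr -> term (("+" | "-") term)*   lowest precedence, left-associative
#   term -> atom (("*" | "/") atom)*   higher precedence, left-associative
def parse_expr(tokens):
    value = parse_term(tokens)
    while tokens and tokens[0] in "+-":
        op = tokens.pop(0)  # consume a token before recursing
        rhs = parse_term(tokens)
        value = value + rhs if op == "+" else value - rhs
    return value

def parse_term(tokens):
    value = parse_atom(tokens)
    while tokens and tokens[0] in "*/":
        op = tokens.pop(0)
        rhs = parse_atom(tokens)
        value = value * rhs if op == "*" else value / rhs
    return value

def parse_atom(tokens):
    tok = tokens.pop(0)
    if tok == "(":
        value = parse_expr(tokens)
        tokens.pop(0)  # discard the matching ")"
        return value
    return int(tok)

assert parse_expr(tokenize("1 - 2 + 3 * 4")) == 11  # left-assoc, "*" binds tighter
```
]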
#example(
title: "Eliminating Left-Recursion"
)[
In order to avoid matching ourselves on the left, we should always attempt to consume some token before moving forward.
]
= High Level IR and Semantics
Symbol tables
= Unoptimized Code Generation
|
|
https://github.com/AU-Master-Thesis/thesis | https://raw.githubusercontent.com/AU-Master-Thesis/thesis/main/sections/3-methodology/study-3/expectation.typ | typst | MIT License | #import "../../../lib/mod.typ": *
=== Expectations <s.m.expectations>
As a result of the tracking factor, $f_t$, design it is expected that the variables will be pulled towards the path, $P$, in a similar way that the interrobot factor, $f_i$, pushes the variables apart. This should show itself in a lower path deviation error than before.
|
https://github.com/Skimmeroni/Appunti | https://raw.githubusercontent.com/Skimmeroni/Appunti/main/Statistica4AI/Introduction/DiscreteRV.typ | typst | Creative Commons Zero v1.0 Universal | #import "../Stats4AI_definitions.typ": *
As already stated, Kolmogorov axioms define the properties of probability
but do not offer a method for assigning them to events. The simplest
approaches, such as assigning the same probability to each event, are
far too weak to model reality. A more powerful concept, which can help
model probability, is the *random variable*.
A random variable can be conceived as a mapping from the sample space
to the real line. In other words, a random variable is a function that
assigns a real number to each possible outcome of the sample space. Given
a sample space $cal(S)$, a random variable $X$ for such sample space is
defined as $X : cal(S) |-> RR$, and the probability that such a variable
assumes a certain value $x$ is denoted as $P(X = x)$.
// ADD AN EXAMPLE OF A RANDOM VARIABLE
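For example, consider tossing a fair coin twice, so that the sample space is
$cal(S) = {"HH", "HT", "TH", "TT"}$, and let $X$ be the number of heads
obtained. Then $X("HH") = 2$, $X("HT") = X("TH") = 1$ and $X("TT") = 0$,
so that, for instance, $P(X = 1) = 1/2$.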
Random variables fall into two broad categories: *discrete* and *continuous*.
A random variable is said to be discrete if the set of values it can assume
is either finite or countably infinite. A random variable is said to be
continuous if the two following properties apply:
+ Its set of possible values consists either of all numbers in a
single (possibly infinite) interval on the real line or all numbers
in a disjoint union of such intervals;
+ The probability that the random variable assumes any specific value is
always zero.
The *probability mass function* (abbreviated as pmf) of a discrete random
variable $X$, denoted as $p(X)$, is a function that assigns a probability
to each possible value that such random variable can assume. More formally,
given a random variable $X$, for each value $x$ of its sample space the pmf
of $X$ is defined as:
$ p(x) = P(X = x) = P(omega: omega in cal(S), X(omega) = x) $
// Example goes here
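For example, let $X$ be the result of rolling a fair six-sided die. The pmf
of $X$ is $p(x) = 1/6$ for each $x in {1, dots, 6}$ and $p(x) = 0$ otherwise.
Note that the values of any pmf are non-negative and sum to $1$.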
The *cumulative distribution function* (abbreviated as cdf) of a discrete
random variable $X$, denoted as $F(X)$, is defined as the probability that
such a random variable assumes a value less than or equal to a given threshold.
More formally, given a random variable $X$, for each value $x$ of its sample
space the cdf of $X$ is defined as:
$ F(x) = P(X lt.eq x) = sum_(y: y lt.eq x) p(y) $
// Example goes here, OneNote one, with graphs and such
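Continuing the example of the fair die, the cdf of $X$ is a step function
that jumps by $1/6$ at each integer from $1$ to $6$: for instance,
$F(2.5) = p(1) + p(2) = 1/3$, while $F(x) = 0$ for $x < 1$ and $F(x) = 1$
for $x gt.eq 6$.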
Some specific random variables have been studied extensively, mostly because
they model many real-world phenomena very well. For this reason, such
random variables have proper names.
A (discrete) random variable that can only assume the values $0$ and $1$ is
called a *Bernoulli random variable*.
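Its pmf is determined by a single parameter $p in [0, 1]$, the probability
of success: $p(1) = p$ and $p(0) = 1 - p$.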
|
https://github.com/polarkac/MTG-Stories | https://raw.githubusercontent.com/polarkac/MTG-Stories/master/stories/026%20-%20Eldritch%20Moon/004_Innistrad's%20Last%20Hope.typ | typst | #import "@local/mtgstory:0.2.0": conf
#show: doc => conf(
"Innistrad's Last Hope",
set_name: "<NAME>",
story_date: datetime(day: 29, month: 06, year: 2016),
author: "<NAME>",
doc
)
#emph[Thanks to Nahiri's machinations, the Eldrazi titan Emrakul has been unleashed on Innistrad. Meanwhile, Liliana has been in a tower of Vess Manor, probing the powers—and painful repercussions—of the Chain Veil artifact. Ever since ] she quarreled with Jace#emph[, Liliana has decided she can only rely on herself to face her demons.]
#v(0.35em)
#line(length: 100%, stroke: rgb(90%, 90%, 90%))
#v(0.35em)
Thin metal wires hung from the points of the Chain Veil. <NAME> could almost see her reflection in the spectral-glass vessels where the wires led, and in the latticework of the witchbane orb on the windowsill, and in the conductive tubes that led out the window and up onto the roof. The etchings in her face were just visible through the Veil. The lines in her skin matched the menacing light of the storm clouds outside. Lightning flickered appropriately.
Two demons still needed to die. But she had to make sure she wouldn't die herself when she managed to face them. The Chain Veil was a potent weapon, but potentially deadly to its wielder. If this worked, she could use the Veil safely. She wouldn't need the help of some mind mage who persisted in chasing some wild mystery across the provinces. And she could rid the Multiverse of her creditors once and for all.
"Are we ready?" Liliana asked.
#figure(image("004_Innistrad's Last Hope/01.jpg", width: 100%), caption: [The Chain Veil | Art by Volkan Baga], supplement: none, numbering: none)
The others in the tower with her had not displayed a fraction of the smarts of Cloak Boy, but they would have to suffice. The geistmage, Dierk, listed items to himself in a micro-whisper as he adjusted a series of nozzles and tightened clamps on the orb. Dierk's assistant, Gared, stood at the window, his one big eye switching back and forth between the equipment and the lightning storm outside the tower. Gared held his hand on an appropriately sizable lever.
"The collectors are raised, madam," the geistmage said. "And the storm is reaching its peak. But I feel obligated to point out that we'll be coursing an enormous dose of spectral energy directly into the artifact..."
"You don't have to warn me," Liliana said.
"...powered by the force of a lightning storm."
"Yes."
"While you are wearing it."
"I know."
"On your #emph[face] ."
Liliana rolled her eyes. "The flow of geist energy through the orb will thus act as a kind of spectral antenna, shunting the object's counterassault away from the subject, sublimating the backlash as harmless atmospheric static, circumventing all repercussions and thereby allowing free utilization of the artifact."
Dierk glanced at Gared and tapped his mouth with gloved fingertips. "That is the theory."
"Look, Dierk," Liliana said. "My friend recommended you because she thought you knew something about spirit inhabitation. Do you or don't you?"
"Of course I do, madam," Dierk said, taken aback.
"Then—?"
"Then let us proceed." Dierk adjusted the goggles over his eyes. "I should add...this will hurt."
"Pain is temporary," said Liliana, sitting back in the chair. The wires dangled from the hanging points of the Chain Veil. "Besides, we learn nothing by testing this on Gared."
Gared grinned. His larger eye shuttered for a moment like a reptile's. Dierk nodded to him, and he slammed down the big lever.
#figure(image("004_Innistrad's Last Hope/02.jpg", width: 100%), caption: [Art by <NAME>], supplement: none, numbering: none)
The witchbane orb hummed and dials flexed. Liliana could feel the links of the Veil touching the curves of her face.
"It's activated," Dierk said. "Now all we have to do is wait for a proximal bolt of—"
Lightning.
Liliana's teeth clenched involuntarily as the surge came. Writhing lassos of energy bloomed on the wires leading from the roof collectors, and the spirits of the dead followed immediately. Geists shrieked through the tubes, filling the orb and the reinforced glass with electro-spectral screams. A spray of sparks puffed from the equipment, but the circuit held.
A blast of howling energy looped through the Veil. Liliana could feel the weight of it lift from her cheeks slightly, its links floating against the force of gravity.
She glanced at the others. Dierk had given up trying to adjust clamps and switches and pressed his back to the wall, shielding his face with his arms. Gared reached a finger out toward a thrashing curl of energy and recoiled when he touched it. Between them she could see her markings shining in the equipment, the etched diagram of her demonic contract forming a reflection-halo around her.
This was when Liliana felt most beautiful—when she was about to wield a power that made others afraid.
She grasped the arms of the chair and called on the power of the Veil.
The backlash was immediate and total. The thousands of souls that resided in the Veil filled her with power, but the power was coupled with pain, and the pain was blinding venom. Inextricable from the magic it afforded. The geist circuit had not drawn off any of the backlash.
Beakers popped and the collectors blew out.
"I'm ending it!" Dierk said, reaching for the lever.
"#emph[No] ," Liliana said, her voice a dagger. Dierk retracted his hand.
The room shook. Liliana grasped the chair, trying to hold the room still, trying to hold in the scream that wanted desperately to get out, trying to see anything but the pain. #emph[Pain is temporary] .
When she couldn't contain it anymore, she cried out. Fuses blew and the tower went dark. The spectral howling ebbed away, until Liliana only heard her own exhausted breaths.
Gared struck a match and lit a lantern. The lab was a disaster zone. The equipment was ruined. Raindrops plashed on the windowsill.
Liliana unclasped the Chain Veil and slid it off her head. Blood seeped from her etchings.
"I mentioned the risks, madam," Dierk said.
She glared at him, imagining the geistmage's skin withering away and his skeleton jawing the words "#emph[I'm sorry] ." Instead she nodded her head toward the door. "You may see yourself out. Deliver the orb back to its owner." A boom of residual thunder was her punctuation.
Dierk quickly collected the spent witchbane orb and a few other items into his bag and left. The echoes of his footsteps receded down the spiral stairs. Gared gently pushed aside a pile of broken glass with his foot, but did not leave.
Liliana stowed the Chain Veil in a skirt pocket. Innistrad's best and brightest hadn't been of any help. Tomes and grimoires of spectral remedies sat askew. Not even Olivia's premier geist expert had been able to tame the Veil.
Liliana looked out the window at the storm that boomed over the countryside of Stensia, daubing at her skin-words with a handkerchief. In the gloom, Thraben glowed like a distant candle.
She loathed relying on someone else.
But it wasn't that she needed Cloak Boy, she told herself. It was merely that she needed people to need #emph[her], so she had some warm bodies to stand between her and a couple of self-important demon lords.
If only he could #emph[owe] her somehow.
From downstairs came a man's scream. A snarling scuffle and a crash followed.
Liliana tossed her crimson-dotted handkerchief aside and spiraled down the stairs.
She heard and smelled them before she saw them—their guttural snarls and their slobbering, hungry whines. The reek of damp fur over the reek of blood.
#figure(image("004_Innistrad's Last Hope/03.jpg", width: 100%), caption: [Art by <NAME>], supplement: none, numbering: none)
Werewolves. Liliana's entire throne room was overrun.
And they looked—not sick, exactly, but warped, as if their flesh and bones had been putty in the hands of some unnatural mutating force. Their extremities bent in odd ways, folding and crinkling like mats of kelp.
But they were still werewolves, and they still had claws. Dierk lay on the floor, his chest raked open. The contents of his bag and his ribcage were both spilled out over the floor. His face was pale, locked in a stare of surprise, and he was exhaling his last breath like a flattening balloon.
The werewolves turned to Liliana, sniffing. One of them roared, and it had eyes where its tongue should be.
A suite of spells, deadly ones, each tailored to one of the werewolves in front of her—that's what this called for. Just enough power to dispense with each one, for just enough of them to clear a path to the door of the manor.
"Gared!" Liliana shouted over her shoulder. "Get your coat."
The Chain Veil did not budge from her pocket.
#v(0.35em)
#line(length: 100%, stroke: rgb(90%, 90%, 90%))
#v(0.35em)
Hours later, the storm had subsided, but the countryside of Stensia had become a twisted zoo. Liliana noted that every passerby had something #emph[reshaped] about them. The bodies of roving vampires had the wrong silhouettes, always with too few of something, or too many. Anatomically improbable travelers raved prophecies of stone and sea at them as they staggered in diagonals.
Finally, Liliana, Gared, and—haltingly—Dierk, arrived at the monumental door.
Lurenbraum Fortress soared above them, a stark cliff with a citadel that protruded directly from the rock face. Higher up, the utilitarian architecture softened and elongated into tiers of ornate leaded windows, each one with its own floating chandelier of twinkling candles. In many of the windows, vampires peered down at them, wearing gleaming ancestral armor.
Liliana gestured for Gared to knock.
Gared gawked at the door's height. "You really know the lady of the house?" he asked.
Dierk, for his part, made a gurgling noise. The man's neck was broken, so his head rested at a weird angle and his throat looked lumpy. But at least his legs had gotten him here, and at least his arms had been capable of carrying the spent witchbane orb. Gared's long coat was strapped tight around Dierk's midsection, doing its best to hold the remainder of the dead man's insides in. Liliana raised her hand slightly, and Dierk squared his shoulders, but his head still dangled to one side. The desiccated tongue wouldn't stay completely inside his mouth, contributing to the gurgle. Liliana shrugged.
"I make it my business to know those who wield power," Liliana said. "As does she."
Gared banged on the door and stood back.
The door opened, and an imposing woman in an ornate gown—or possibly an ornate woman in an imposing gown—appeared. She held forth a priest's staff that radiated like hot embers in Liliana's face.
"She is not receiving human visitors," said the woman, flashing her fangs as she spoke. Her irises were black pits that seemed to smolder.
#figure(image("004_Innistrad's Last Hope/04.jpg", width: 100%), caption: [Bloodhall Priest | Art by <NAME>], supplement: none, numbering: none)
"I'm returning something that belongs to her," said Liliana.
The woman paused, visually inspecting Dierk and the spent witchbane orb he carried. "Leave it here. Then begone from this estate, before I call an invocation down upon you."
Gared made a movement to confront the vampiric priest, but Liliana stopped him with a touch. In a citadel full of vampires, one did not fight when there was still a chance to cajole. "I'd speak with Olivia directly, please. Tell her Liliana Vess would see her."
"I told you, she's not receiving mortals."
"Mortals!" Liliana laughed. "Bless your bloodless heart."
#figure(image("004_Innistrad's Last Hope/05.png", height: 40%), caption: [], supplement: none, numbering: none)
The vampire priest held her staff high, the jagged symbol at the tip warping the air with heat.
"Oh Liliana, my dear one!" <NAME> appeared in the door suddenly, dismissing the priest with a brief but vicious hiss. The priest stood to one side, bowing her head, but following Liliana with her eyes.
Olivia was glorious in black segmented armor. As usual, her feet didn't touch the floor. "Have you come to celebrate the good news?" she asked, ushering in her guests. "Come, come!"
"Just returning your orb," said Liliana. "And your geistmage. And hoping you might know the whereabouts of an acquaintance of mine." She smiled pleasantly at the priest as she passed by. "What exactly are we celebrating?"
Olivia took Liliana's arm, floating next to her and drawing her deeper into the citadel. "Why, the long wait is over! You haven't heard?"
They entered a broad gallery where elegant vampires stood or hovered at every staircase, every landing. Hundreds of eyes watched Liliana and her attendants as Olivia led them through the lower halls of the fortress. Every vampire that had ever held the name Voldaren seemed to be in the building, glowering in unison.
#figure(image("004_Innistrad's Last Hope/06.jpg", width: 100%), caption: [Indulgent Aristocrat | Art by <NAME>], supplement: none, numbering: none)
Liliana made a furtive motion with one hand. The corpse of Dierk the geistmage dragged itself over to an antique gilded chair, slumped into it, and went limp with the orb in its lap. The coat around his middle squished, holding its contents as best it could.
Olivia leaned over conspiratorially, squeezing Liliana's arm. "It's the archangel! #emph[Poof]!" She cackled. "A smear on the floor of Thraben Cathedral. Oh, but it's simply #emph[too] good."
"Avacyn is dead?" A small thought of Jace descended on her, like a moth landing in her hair. He had been on Avacyn's trail when they last spoke.
Olivia made an expansive sweep with her arm. "We of the night can rejoice, for the world is ours again! I #emph[was] quite cross when I had heard that she'd been freed from her little trap."
Liliana raised her eyebrows a millimeter.
"But Sorin has come to his senses and put #emph[down] that thing of his. And now, I must say, it's all worked out rather well, hasn't it?" Olivia laughed. She led Liliana on, through gallery after gallery. Gared disappeared into the maze.
Liliana kept up with Olivia. "And now you're raising an army."
#figure(image("004_Innistrad's Last Hope/07.jpg", width: 100%), caption: [Call the Bloodline | Art by Lake Hurwitz], supplement: none, numbering: none)
"Well, my dear, it turns out that whoever opened the Helvault—"
Liliana kept her face correctly polite.
"—set loose more than just the archangel," Olivia continued. "And more than just...those demon friends of yours. They released that #emph[other] one, as well. Drink?" She signaled to a nearby vampire. "You there, bring our guest a drink."
A vampire pushed a glass of wine into Liliana's hand—actual wine—and clanked away in his suit of ornate ancestral armor.
It was Liliana herself, of course, who had caused the Helvault to break open and spill its contents all over Innistrad. She had slain the demon Griselbrand, and the other consequences of its opening were of no importance to her. She hadn't seen any reason to let her vampire social acquaintances know any of this.
"And she seems quite miffed, now that she's free," Olivia went on. "Can't say that I blame her. As I said, I was cross before, but now I should #emph[adore] to know who set them all free, just to express my utter gratitude!"
Liliana didn't know who else might have escaped the Helvault, who was so important to Olivia. But she had an intuition it was connected to the changes she had seen all around Innistrad. The warped werewolves in her manor. The countryside of twisted vampires and raving doomsayers.
This was the kind of thing that fascinated Cloak Boy. Liliana just wanted some demons to die. But maybe the two could be linked after all.
They emerged into a broad, thickly carpeted drawing room. A tall, white-haired vampire in a long jacket stood with his back to them, looking out the tall windows into the night.
Liliana felt claws dig into her arm. "We know it was you," Olivia hissed, suddenly hovering just by her ear. "We know you freed them." She added, brightly, "Isn't that right, Sorin?"
<NAME> turned around to face them. He wore hatred like a flamboyant suit.
#figure(image("004_Innistrad's Last Hope/08.jpg", width: 100%), caption: [Sorin's Thirst | Art by <NAME>], supplement: none, numbering: none)
"#emph[You] ," he said.
"Look who's paid us a visit," Olivia said, her voice all dainty politeness once again. "Sorin, I believe you know <NAME>?"
"You did this," said Sorin. "You released the Lithomancer and brought this upon us."
Liliana wrenched her arm away from Olivia and gathered herself. She walked up to Sorin and looked him up and down. Finally she chuckled, picking a mote of dust off of Sorin's lapel. "I had business to attend to," she said. "Not my fault if your closet was full of skeletons."
"You had no right," Sorin said, each word like a blade on a whetstone.
"Sorin, you and I have another matter to attend to," said Olivia, floating around them. "But I'd be remiss if I didn't allow you two the opportunity to catch up, wouldn't I?"
Sorin brought his face close to Liliana's. "All this is because of you. The Lithomancer is free, and now we must face her."
"You have quite the vampiric army assembled," Liliana said. She smirked at him. "Or—let me guess—is it more of a #emph[defense] force? You slighted #emph[her] , didn't you?"
Sorin's fangs flared. "I told you when you came here as a #emph[pup] . Innistrad is mine. You meddle in my affairs, you die."
Liliana looked him in the eye, her fingers reaching down to touch the rings of the Chain Veil at her waist. The etchings began to glow on her skin, and her hair floated slightly. "Innistrad may be your domain, Sorin," she whispered. She patted him on the arm. "But #emph[death is mine] ."
Sorin snarled, whipping his arm away and pressing his forehead into hers. His eyes snapped just briefly to her neck.
"Now, my friends!" Olivia laughed lightly, putting herself between them. "As giddy as I'd be to see you two tear each other apart, all over my drawing room...Sorin, it looks like the time has come. Join me outside. Nahiri awaits." She gestured toward the tall windows, into the night.
Liliana was struck by what she saw through the glass. What had been the remnants of the lightning storm was now a swollen cluster of clouds churning over the coast of Nephalia. Tendrils of mist reached out in all directions. It was not just a few werewolves or vampires that were being warped. Whatever force had arrived—it threatened to tear apart all of Innistrad.
Olivia slid a sword from its sheath. "Liliana, dear one, I'm afraid you've exhausted my supply of geist experts and spectral toys. But perhaps you'd like to join us? You were the one who released Nahiri, after all. She may even want to thank you."
Liliana just watched the clouds. This was deep, ancient magic, world-altering and vindictive. "She caused this?"
"The petty act of a petty mage," Sorin murmured. "With a misguided sense of justice."
"So it was you who caused all this," Liliana said. "You wronged her!"
"And now we're off to wrong her again," Olivia said with a fanged grin.
Framed in the windows of the fortress, the atmospheric mass shifted slowly from its origin over the Nephalia coast, tipping toward Gavony province and the brightly lit High City. The sky seemed wrinkled and torn, Liliana thought, like those werewolves. It was as if the whole plane—the whole of Sorin's home world—had been tainted on purpose, warped from horizon to horizon, just because Sorin cared about it. Whoever Nahiri was, Liliana had to admit—she didn't do things halfway.
"Aren't you the least bit concerned about what her vengeance is doing to Innistrad?" Liliana asked. "Jace is"—she straightened herself—"there are thousands of people out there."
"This world is ruined," Sorin said. "She has made sure of that. And your Jace will die at Thraben with the rest of them."
"What Sorin means," Olivia said brightly, "is that stopping Nahiri will surely stop the unpleasantness she has wrought. We're on a heroic mission!"
Liliana glanced outside, then looked back at Olivia, now with a dreadful tenderness. "Oh, you sweet child."
Sorin slid his sword out of its scabbard, lazily, like an afterthought. "Let's go, Olivia." He turned and stalked out of the drawing room and out of the mansion without another word.
Olivia floated after him, and ranks of Voldaren vampires followed after, their armor echoing through the halls.
Liliana followed them out. As she saw Gared again, she said, "Gared, get your coat."
Gared looked sadly at his coat and began the task of extricating it from Dierk.
#v(0.35em)
#line(length: 100%, stroke: rgb(90%, 90%, 90%))
#v(0.35em)
They emerged out into the night. The wind howled now, great cones of suction thrashing the sky. A ruddy, otherworldly glow floated along the distended bellies of the clouds.
Liliana pushed her hair out of her face as it whipped side to side. She looked toward the distant hills of Gavony as great shadows coalesced over it. This is what Jace is trying to stop, she thought.
Sorin barely glanced back over his shoulder as he and the vampires assembled. Sorin pointed with his sword. "Come, Olivia," he intoned over the wind. "It's time for you to fulfill your end of the bargain."
Olivia smiled gaily and swept into the air. The vampire army marched off down the hill, swords and pikes and red-hot priest symbols held high—off into the mists, off to battle Nahiri.
Not to battle the horrors that Nahiri had wrought upon this world. Not to help out-of-his-mind Jace.
This world was destined to die, then, Liliana thought. Its protectors had all forsaken it. It was time to say goodbye. "Goodbye, <NAME>."
The sky uttered an unfathomable sound that shook Liliana's bones. In the distance, Thraben glittered like a fallen star resting on the horizon. "Goodbye, Cloak Boy."
But she found herself walking down the hill, on a different path from the vampires. She found herself on the road. She found herself passing a noosegraf, where the criminals lay in their graves, waiting out the eternal part of their sentence. She found herself reaching out. Corpses crawled up out of the earth. She kept walking. The corpses followed her.
She found herself passing another cemetery, and another. A little roadside shrine, a cursed iron-fenced diregraf, a mausoleum of honored cathar dead. Each time, she reached out. Each time, the dead obeyed her, wriggling themselves free from their rest and lurching along behind her.
#figure(image("004_Innistrad's Last Hope/09.jpg", width: 100%), caption: [Art by <NAME>], supplement: none, numbering: none)
As she walked in the direction of Thraben, she reached down to her waist. She could almost hear the scores of spectral essences sneering at her, #emph[chanting] at her from within the Chain Veil—over the sound of the zombies dutifully lurching and foot-dragging their way down the road behind her.
Sorin and Olivia weren't going to do anything about the crisis Nahiri had caused. And the only person she could count on to understand—he and his broken, irritating, fathomless brain—was following his curiosity directly into messy, twisted, and almost certainly inevitable death.
It wasn't that she needed him. It was simply that she needed someone to need #emph[her].
"Well, Gared," she said loudly into the wind.
She raised her arms, feeling the etchings like hot blood vessels in her skin.
"Looks as though I'm..."
A dozen more zombies lurched out of the ground, compelled to follow in her wake of necromantic power.
"...this world's..."
The corpses did not seem warped—at least, no more warped than their disheveled bones had already become by their years in the ground. The restless dead seemed to shrug off the effects. Liliana smirked.
"...last hope."
#figure(image("004_Innistrad's Last Hope/10.jpg", width: 100%), caption: [Art by <NAME>], supplement: none, numbering: none)
|
|
https://github.com/mitinarseny/invoice | https://raw.githubusercontent.com/mitinarseny/invoice/main/src/utils.typ | typst | #let parse_date(s) = {
let (year, month, day) = s.split("-").map(int)
datetime(year: year, month: month, day: day)
}
#let format_date(d) = d.display("[day] [month repr:short] [year]")
#let start_of_month(d) = datetime(
year: d.year(),
month: d.month(),
day: 1,
)
#let end_of_month(d) = {
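  // Walk forward one day at a time until the month changes,
  // then step back one day to land on the last day of the month.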
let month = d.month()
while d.month() == month {
d += duration(days: 1)
}
d - duration(days: 1)
}
#let mailto(addr) = link("mailto:" + addr)
#let tel(number) = link("tel:" + number)
#let explorer_address(address, template) = template.replace("{}", address)
#let explorer_address_link(address, template) = link(explorer_address(address, template), address)
#let token_address(token, url_template: none) = {
if url_template != none {
link(
explorer_address(token.address, url_template),
[#token.name (#token.symbol)]
)
} else {
[#token.name (#token.symbol)]
}
} |
|
https://github.com/Tiggax/zakljucna_naloga | https://raw.githubusercontent.com/Tiggax/zakljucna_naloga/main/src/figures/mod.typ | typst | #import "rk4.typ": plot as rk4_plot
#import "rk4.typ": comparison_table as compare_e_v_rk_table
#import "rk4.typ": rk4_compare as compare_e_v_rk
#import "monod.typ": example as monod_plot
#import "monod.typ": function as monod_function
#import "bioreactor.typ": reactor, batch_reactor
#import "pid.typ": pid_graph
#import "pid.typ": pid_fn as pid_figure
#import "simulation.typ": constants as constants_fig
#import "simulation.typ": constants_diff as constants_diff_fig
#import "simulation.typ": get_plot as sim_plot
#import "neldermead.typ": simplex as simplex_table
#import "neldermead.typ": graph as nedel_graph |
|
https://github.com/typst/packages | https://raw.githubusercontent.com/typst/packages/main/packages/preview/fruitify/0.1.0/example-documentation.typ | typst | Apache License 2.0 | #set page(margin: (x: 2cm, y: 1cm))
#set raw(lang: "typst")
#import "fruitify.typ": fruitify-setup, fruitify, reset-symbols, vegetables
#let shown-imports = (
fruitify-setup: fruitify-setup,
fruitify: fruitify,
reset-symbols: reset-symbols,
vegetables: vegetables
)
#raw(block: true, `#import "@preview/fruitify:0.1.0": `.text + shown-imports.keys().join(", "))
#let showeval(block: true, it) = {
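  // Show the snippet verbatim, but strip a trailing period or comma before
  // evaluating it, so an example can double as the end of a prose sentence.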
let eval-text = if it.text.ends-with(regex("[.,]")) {
it.text.slice(0, -1)
} else {
it.text
}
raw(block: block, it.text)
eval(eval-text, mode: "markup", scope: shown-imports)
}
=== Basic usage
```
#show math.equation: fruitify
```
#show math.equation: fruitify
By default, it will look like this (the symbol order is predetermined):
$ x + 2 y = z sin(2 theta.alt) $
$ a^2 + b^2 = c^2 $
Each single letter in equations is replaced with a symbol.
This is done by index of occurrence, so *which* letters you use doesn't affect the output,
only *when* you first used them, relative to each other.
The package will produce an error when you run out of symbols:
```
$ #"qwertyuiopasdfghjklzxcvbnm".codepoints().join(" ") $
// => error: assertion failed: equation has more characters than possible symbols
```
=== Keep symbol map across equations
By default, different equations are isolated from each other
in order to avoid using up all available symbols.
You can disable this with
#showeval[`#fruitify-setup(reuse: true).`]
Now, the letter$->$symbol map will be kept across the whole document.
$ x + 2 y = z sin(2 theta.alt) $
$ a^2 + b^2 = c^2 $
$ y - x = 0 $
Manually reset it with:
#showeval[`#reset-symbols()`]
=== Randomize symbol order
You can choose a random seed that determines a different symbol order.
This is implemented using pseudo-random number generation,
but it is fully deterministic. The same seed will always give you the same order.
The seed can be an integer or an array of 16 integers, each of which has to be in the interval $[0, 255]$.
#showeval[`#fruitify-setup(random-seed: 2)`]
$ x + 2 y = z sin(2 theta.alt) $
$ a^2 + b^2 = c^2 $
The default (no PRNG, just take the symbols in their hard-coded order) is reachable via
#showeval[`#fruitify-setup(random-seed: none).`]
=== Use other symbols
#let predefined = ([`fruits` (the default)], "vegetables")
The `symbols` option takes an array of strings. This can be anything,
but the following #predefined.len() are predefined:
#list(..predefined.map(x => if type(x) == str { raw(x) } else { x }))
#showeval[`#fruitify-setup(symbols: vegetables)`]
(Don't forget to #showeval(block: false)[`#reset-symbols()`]
when using `reuse: true`, otherwise this won't affect any already-used letters.)
$ x + 2 y = z sin(2 theta.alt) $
$ a^2 + b^2 = c^2 $ |
https://github.com/Gekkio/gb-ctr | https://raw.githubusercontent.com/Gekkio/gb-ctr/main/chapter/console/intro.typ | typst | Creative Commons Attribution Share Alike 4.0 International | #import "../../common.typ": *
== Introduction
The original Game Boy and its successors were the most popular and financially successful handheld consoles in the 1990s and early 2000s with several million units sold and a large catalogue of officially published games. Unlike many older consoles, Game Boys use only a single integrated System-on-a-Chip (SoC) for almost everything, and this SoC includes the processor (CPU) core, some memories, and various peripherals.
#caveat[
The Game Boy SoC is sometimes called the "CPU", even though it has a large amount of other peripherals as well. For example, the Game Boy Pocket SoC literally has the text "CPU MGB" on it, even though the CPU core takes only a small fraction of the entire chip area. This terminology is therefore misleading, and is like calling a computer motherboard and all connected expansion cards and storage devices the "CPU".
  This document always makes a clear distinction between the entire chip (SoC) and the processor inside it (the CPU _core_).
]
Most Game Boy consoles are handhelds, starting from the original Game Boy in 1989, ending with the Game Boy Micro in 2005. In addition to handheld devices, Game Boy SoCs are also used in some accessories meant for other consoles, such as the Super Game Boy for the SNES/SFC.
Game Boy consoles and their SoCs can be categorized based on three supported technical architectures:
- GB: the original Game Boy architecture with a Sharp SM83 CPU core and 4-level grayscale graphics
- GBC: a mostly backwards compatible extension to the GB architecture that adds color graphics and small improvements
- GBA: a completely different architecture based on the ARM processor instruction set and a completely redesigned set of peripherals. *This document does not cover GBA architecture, because it has little in common with GB/GBC*. GBA-based consoles and chips are only mentioned for their backwards compatibility with GB/GBC architectures.
@console-summary lists all officially released Game Boy consoles, including handhelds and accessories for other consoles. Every model has an internal codename, such as original Game Boy's codename Dot Matrix Game (DMG), that is also present on the mainboard.
#caveat[
This document refers to different console models usually by their unique codename to prevent confusion. For example, using the abbreviation GBP could refer to either Game Boy Pocket or Game Boy Player, but there's no confusion when MGB and GBS are used instead.
In this document GBC refers to the technical architecture, while CGB refers to Game Boy Color consoles specifically. Likewise, GBA refers to the architecture and AGB to exactly one console model.
]
#figure(
table(
columns: 6,
align: left,
table.header(
[*Console name*], [*Codename*], [*SoC type*], [*GB*], [*GBC*], [*GBA*],
),
table.cell(colspan: 6)[_Handhelds_],
[Game Boy], [DMG], [DMG-CPU], [✓], [], [],
[Game Boy Pocket], [MGB], [CPU MGB], [✓], [], [],
[Game Boy Light], [MGL], [CPU MGB], [✓], [], [],
[Game Boy Color], [CGB], [CPU CGB], [✓], [✓], [],
[Game Boy Advance], [AGB], [CPU AGB], [✓], [✓], [✓],
[Game Boy Advance SP], [AGS], [CPU AGB], [✓], [✓], [✓],
[Game Boy Micro], [OXY], [CPU AGB], [], [], [✓],
table.cell(colspan: 6)[_Accessories_],
[Super Game Boy], [SGB], [SGB-CPU], [✓], [], [],
[Super Game Boy 2], [SGB2], [CPU SGB2], [✓], [], [],
[Game Boy Player], [GBS], [CPU AGB], [✓], [✓], [✓],
),
caption: "Summary of Game Boy consoles"
) <console-summary>
|
https://github.com/AxiomOfChoices/Typst | https://raw.githubusercontent.com/AxiomOfChoices/Typst/master/Courses/Math%2018_745%20-%20Lie%20Groups%20and%20Lie%20Algebras/Assignments/Assignment%203.typ | typst | #import "/Templates/generic.typ": latex, header
#import "@preview/ctheorems:1.1.0": *
#import "/Templates/math.typ": *
#import "/Templates/assignment.typ": *
#show: doc => header(title: "Assignment 3", name: "<NAME>", doc)
#let lecture = counter("lecture")
#lecture.step()
#let update_lecture = () => {
lecture.step()
counter(heading).update(0)
}
#let bonus_problem = {
pagebreak()
block(text([*Bonus Exercise*], size: 17pt))
}
#show: latex
#let NumberingAfter(doc) = {
let level = 1
set heading(
numbering: (..numbers) => if numbers.pos().len() <= level {
return context numbering(
"1.1",
lecture.get().first(),
..numbers,
)
},
supplement: "Exercise",
)
show heading: it => {
if (it.numbering == none) {
return it
}
if (it.level > 1) {
return text(it, size: 14pt)
}
let numbers = counter(heading).at(it.location())
let display-number = numbering(it.numbering, ..numbers)
let body = it.body
// if (numbers.last() > 1) {
pagebreak(weak: true)
// }
block(text([*#body #display-number*], size: 17pt))
}
doc
};
#show: thmrules
#let col(x, clr) = text(fill: clr)[$#x$]
#let bar(el) = $overline(#el)$
#show: NumberingAfter
#set enum(numbering: "(a)")
*Sources consulted* \
Classmates: <NAME>. \
Texts: Class Notes.
#update_lecture()
#update_lecture()
#update_lecture()
#update_lecture()
#update_lecture()
= Exercise
== Statement
Show that any non-abelian 3-dimensional nilpotent Lie Algebra is isomorphic to $heis_3$.
== Solution
As we saw in class, any nilpotent Lie Algebra has non-trivial center, so let $Z(frak(g))$ be that center.
Now since $frak(g)$ is non-abelian the center is not the whole of $frak(g)$, and so it has either dimension $1$ or $2$.
Now from a previous assignment we know that we cannot have $dim(Z(frak(g))) = n - 1 = 2$, so we have $dim(Z(frak(g))) = 1$. Since then $dim(Z(frak(g))) = n - 2$, again by the last assignment we have either $frak(g) = heis_3$ or $frak(g) = frak(h) plus.circle Z(frak(g))$, where $frak(h)$ is the canonical non-abelian Lie algebra of dimension 2. Aiming for a contradiction, we assume the second case holds and take a basis $a, b, c$, with $a, b$ spanning $frak(h)$, $c$ spanning $Z(frak(g))$, and
$
[a,b] = -[b,a] = a
$
being the only non-trivial bracket. We then have $[frak(g), frak(g)] = span(a)$ and so $frak(g)^n = span(a)$ for all $n >= 1$. Thus in this case it is not a nilpotent algebra so we are done.
= Exercise
== Statement
Let $FF$ be a field of characteristic $2$, and $V = FF[x] quo x^2 FF[x]$ be the representation of $heis_3$, given by $p |-> diff/(diff x), q |-> x, c |-> I$. Show that $V = V_((lambda))^(heis_3)$, but $lambda$ is not a linear function on $heis_3$. Compute $lambda$.
== Solution
Let $a_0 + a_1 x$ be any element of $V$ and $b_0 p + b_1 q + b_2 c$ be any element of $heis_3$. We have
$
(b_0 p + b_1 q + b_2 c) (a_0 + a_1 x)
&= b_0 a_1 + b_1 a_0 x + b_2 a_0 + b_2 a_1 x
\ &= (b_0 a_1 + b_2 a_0) + (b_1 a_0 + b_2 a_1) x
$
so we can say that $b_0 p + b_1 q + b_2 c$ acts as
$
mat(b_2, b_0; b_1, b_2)
$
the characteristic polynomial is
$
(b_2 - lambda)^2 - b_0 b_1
=
lambda^2 + b_2^2 + b_0 b_1
=
lambda^2 + det mat(b_2, b_0; b_1, b_2).
$
now if we have a solution $rho$ then we have
$
(lambda - rho)^2 = lambda^2 + det mat(b_2, b_0; b_1, b_2)
$
so it has no eigenvalues other than $rho$, hence $V = V_((lambda))$ (assuming the field is perfect, as I do not think this holds otherwise). We thus have that
$
lambda = sqrt(b_2^2 + b_0 b_1),
$
this is clearly not linear because
$
lambda(p + q) = sqrt(1) = 1 != 0 + 0 = lambda(p) + lambda(q)
$
= Exercise
== Statement
Using the example of the adjoint representation of the 2-dimensional non-abelian solvable Lie algebra, show that the generalized weight space decomposition fails for solvable Lie algebras that are not nilpotent.
== Solution
Let ${a,b}$ be a basis for $frak(h)$ with $[a,b] = a$, then we have
$
ad a = mat(0,1;0,0), quad ad b = mat(-1,0;0,0).
$
Now $ad b$ has two distinct eigenvalues, so if there were a generalized weight space decomposition then $ad a$ would have to be diagonal in the same basis; but it is not diagonal, so no such decomposition exists.
= Exercise
== Statement
Let $frak(g) = gl_n (FF)$ and $frak(h) = {"diagonal matrices"}$. Find the generalized weight space decomposition for the tautological and adjoint representation of $frak(g)$ restricted to $frak(h)$ and show that
$
pi(frak(g)_((alpha))^(frak(h))) V_((lambda))^(frak(h)) seq V_((lambda + alpha))^(frak(h))
$
and that
$
[frak(g)_((alpha))^(frak(h)), frak(g)_((beta))^(frak(h))] seq frak(g)_((alpha + beta))^(frak(h))
$
== Solution
For $V = FF^n$ we have
$
V = plus.big_(i=1)^n span(e_i)
$
and we have
$
span(e_i) = V_(lambda_i)^(frak(h))
$
where $lambda_i$ sends a diagonal matrix to its $i$-th diagonal entry.
Now for the adjoint representation, we write any matrix $M in gl_n (FF)$ as
$
M = sum_(i,j <= n) a^(i j) e_(i j)
$
then we have,
$
span(e_(i j)) seq frak(g)^(frak(h))_(lambda_i - lambda_j)
$
which holds because we can compute
$
[e_(i i), e_(i j)] = e_(i i) e_(i j) - e_(i j) e_(i i) = e_(i j)\
[e_(j j), e_(i j)] = e_(j j) e_(i j) - e_(i j) e_(j j) = -e_(i j).
$
And clearly
$
V = plus.big_(i,j=1)^n span(e_(i j)),
$
so this is the weight decomposition.
We now fix $i,j$ and set $alpha = lambda_i - lambda_j$, this gives us
$
pi(frak(g)_((alpha))^frak(h))
V_((lambda_k))^frak(h)
=
span(e_(i j))
span(e_k)
=
cases(span(e_i) : j = k, 0 : "otherwise")
$
but notice that $alpha + lambda_k = lambda_i$ exactly when $j = k$.
Now fix $i,j$ and $k,ell$ then we have
$
[span(e_(i j)), span(e_(k ell))] =
span(e_(i j) e_(k ell) - e_(k ell) e_(i j)) =
cases(span(e_(i ell)) : j = k\, i != ell, span(e_(k j)) : i = ell\, j != k, 0 : "otherwise")
$
which is exactly equal to $V_(lambda_(i) - lambda_(j) + lambda_k - lambda_ell)^frak(h)$.
#update_lecture()
= Exercise
== Statement
Show that the Zariski topology is indeed a topology.
== Solution
First we check that $nothing$ and $FF^n$ are closed: we have $F({1}) = nothing$, since the constant polynomial $1$ has no zeros, and $F({0}) = FF^n$, since the zero polynomial vanishes everywhere.
Let $A$ and $B$ be two closed sets, then we have $A = F(S_A)$ and $B = F(S_B)$ for some sets of polynomials $S_A, S_B$. We set
$
S := { p(x) dot q(x) : p in S_A, q in S_B }
$
and find that
$
F(S) = F(S_A) union F(S_B) = A union B
$
so their union is closed.
Finally let ${S_alpha}_(alpha in I)$ be any collection of closed sets. Then we have
$
F(union.big_(alpha in I) S_alpha) = sect.big_(alpha in I) F(S_alpha)
$
and so the intersection of any family of closed sets is also a closed set.
= Exercise
== Statement
Let
$
det(ad a - lambda I) = (-lambda)^d + c_(d-1) (a) (-lambda)^(d-1) + ... + c_0 (a)
$
show that $c_(d - j) (a)$ is a homogeneous polynomial of degree $d - j$.
== Solution
We consider $ad (t a)$, we have
$
det(ad (t a) - lambda I) &= t^d det(ad a - lambda/t I) = t^d ((-lambda/t)^d + c_(d-1) (a) (-lambda/t)^(d-1) + ... + c_0 (a))
\ &= t^d (1/t^d (-lambda)^d + 1/t^(d-1) c_(d-1) (a) (-lambda)^(d-1) + ... + c_0 (a))
\ &= (-lambda)^d + t c_(d-1) (a) (-lambda)^(d-1) + ... + c_0 (a) t^d.
$
At the same time we also have
$
det(ad (t a) - lambda I)
= (-lambda)^d + c_(d-1) (t a) (-lambda)^(d-1) + ... + c_0 (t a).
$
Comparing coefficients gives $c_j (t a) = t^(d - j) c_j (a)$, so $c_j$ is homogeneous of degree $d - j$.
= Exercise
== Statement
+ Show that the Jordan decomposition of $ad a$ in $gl(gl_n (FF))$ is given by
$
ad a = (ad a_s) + (ad a_n)
$
where $a = a_s + a_n$ is the Jordan decomposition of $a in gl_n (FF)$.
+ If $lambda_1,...,lambda_n$ are the eigenvalues of $a_s$ then $lambda_i - lambda_j$ are the eigenvalues of $ad a_s$.
+ $ad a_s$ has the same eigenvalues of $ad a$.
== Solution
+ Up to a change of basis we may assume that $a_s$ is diagonal, and so $a_s in frak(h)$ and thus $gl_n (FF)$ decomposes as $product_lambda frak(g)_(lambda)^(frak(h))$ so by definition any element $x in frak(h)$ acts with eigenvalues $lambda(x)$ on $gl_n (FF)$, so $ad a_s$ is diagonal. On the other hand, $a_n$ is nilpotent and so we saw in class that $ad a_n$ is also nilpotent. It is thus enough to check that $ad a_s$ and $ad a_n$ commute. But we have
$
[ad a_s, ad a_n] b = [a_s, [a_n, b]] - [a_n, [a_s, b]] = -[[a_s,a_n],b],
$
but $[a_s,a_n]$ is zero since they commute by assumption.
+ As we saw in the previous question the weights of $frak(h)$ on $gl_n (FF)$ are $lambda_i - lambda_j$ where $lambda_i$ is the functional taking an element of $frak(h)$ to its $i$-th diagonal entry. Now this diagonal entry is also exactly the $i$-th eigenvalue of that element. Hence by applying this to $a_s$ we get exactly the desired result.
+ This is immediate from part a since this is the Jordan decomposition.
= Exercise
== Statement
Show that
+ $rank(gl_n (FF)) = n$.
+ The discriminant of $gl_n (FF)$ is given by #h(1fr)
$
c_n (a) = product_(i != j) (lambda_i - lambda_j)
$
where $lambda_i$'s are all eigenvalues of $a in gl_n (FF)$ (over $ov(FF)$), taken with their multiplicities.
+ Compute the discriminant $c_2 (a)$ in $gl_2 (FF)$ in terms of the matrix coefficients of $a$:
$
c_2 (a) = 4 det a - (tr a)^2
$
== Solution
+ We know that $ad a$ has eigenvalues $lambda_i - lambda_j$ so these are all the roots of the characteristic polynomial. Hence we have
$
det(ad a - lambda I) = product_(i,j=1)^n (lambda_i - lambda_j - lambda)
$
now exactly $n$ of these terms are of the form $lambda_i - lambda_i - lambda = - lambda$ so we have $c_j (a) = 0$ for all $j <= n - 1$. On the other hand if $a$ has $n$ different eigenvalues then we have
$
c_n (a) = product_(i != j) (lambda_i - lambda_j)
$
which is then non-zero.
+ This follows immediately from the previous discussion.
+ We have for $a = mat(lambda_1, 0; 0, lambda_2)$ that
$
product_(i != j) (lambda_i - lambda_j) =
- (lambda_1 - lambda_2)^2 = 2 lambda_1 lambda_2 - lambda_1^2 - lambda_2^2
= 4 lambda_1 lambda_2 - (lambda_1 + lambda_2)^2
\ = 4 det(a) - tr(a)^2
$
|
|
https://github.com/Ttajika/class | https://raw.githubusercontent.com/Ttajika/class/main/seminar/lib/bxbibwrite.typ | typst | #let (
bibliography-list,
bib-item,
use-bib-item-ref,
) = {
let title-default = "Bibliography"
let numbering-default = "[1]"
let figure-kind = "bx-bib-item"
let end-mark = <bx-bib-end>
let bib-counter = counter(figure.where(kind: figure-kind))
let max-key-width = state("bx-max-key-width", 0pt)
let get-numbering(list-num, fig-num) = {
// When the numbering for listing is given, it is used.
// Otherwise the figure.numbering is applied,
// but adjust it if it is at default.
if list-num != auto { list-num }
else if fig-num == "1" { "[1]" }
else { fig-num }
}
let get-auto-key-width() = {
let marks = query(selector(end-mark).after(here()))
if marks.len() == 0 { return max-key-width.final() }
max-key-width.at(marks.first().location())
}
// Makes the key string.
let format-key(num, count, key, supplement) = {
// If key is not given, count should be shown.
let val = if key != none { key } else { count }
// Delegate if numbering is a function.
if type(num) == function {
return num(val, supplement)
}
// Parse the pattern.
let pos = none
let cs = num.codepoints()
let pos = cs.position(c => {
c in "1aAiI" or not ("2" in numbering(c + "1", 2, 1))
})
if pos == none { return num } // abnormal
// Replace the counter symbol with the real value.
if type(val) == int {
val = numbering(cs.at(pos), val)
}
if supplement != none {
val = { val; ", "; supplement }
}
cs.at(pos) = val
cs.join("")
}
// Prints the bibliography list.
let bibliography-list(
title: title-default,
heading-level: 1,
numbering: auto,
key-width: auto,
body-indent: 0.65em,
spacing: auto,
adjust-spacing: auto,
body,
) = {
let use-adjuster = (
if adjust-spacing != auto { adjust-spacing == true }
else { spacing == auto }
)
// Print the heading.
if heading-level != none {
let head = heading(level: heading-level, numbering: none, title)
if use-adjuster {
block(below: 0pt, head)
} else {
head
}
}
// The figure (i.e. bib-item) is laid out here.
show figure.where(kind: figure-kind): it => {
assert(it.body.func() == metadata)
let width = (
if key-width != auto { key-width }
else { get-auto-key-width() }
)
let count = it.counter.get().at(0)
let data = it.body.value
let numbering = get-numbering(numbering, it.numbering)
let key = format-key(numbering, count, data.key, none)
let kbox = box(width: width, {key; h(1fr)})
let entry = enum(
numbering: _ => kbox, // simply show the result
body-indent: body-indent,
tight: true,
enum.item(count, data.body),
)
if spacing == auto {
block(entry)
} else {
block(entry, above: spacing, below: spacing)
}
let metric = measure(key)
max-key-width.update(val => calc.max(val, metric.width))
}
if use-adjuster {
block(height: 0pt)
}
max-key-width.update(0pt)
body
[#metadata(none)#end-mark]
}
// The helper for bib-item.
let do-bib-item(key, label, body) = {
// Make a figure with the data.
let fig = figure(
kind: figure-kind,
supplement: key,
outlined: false,
metadata((
key: key,
body: body,
)),
)
// Then output with the label.
[#fig#label]
if key != none {
bib-counter.update(v => v - 1) // no increment
}
}
// A Bibliography entry.
let bib-item(
key: none,
..args, // [label,] body
) = {
args = args.pos()
assert(
args.len() >= 1,
message: "missing argument in 'bib-item': body",
)
assert(
args.len() <= 2,
message: "unexpected argument in 'bib-item'",
)
let (lbl, body) = if (args.len() == 1) {
(none,) + args
} else { args }
assert(
lbl == none or type(lbl) == label,
message: "'label' must be none or label, found " + str(type(lbl)),
)
do-bib-item(key, lbl, body)
}
let use-bib-item-ref(
numbering: numbering-default,
body,
) = {
show figure.where(kind: figure-kind): set figure(
numbering: numbering,
)
show ref: it => {
let el = it.element
if not (el != none and
el.func() == figure and el.kind == figure-kind) {
return it
}
let count = bib-counter.at(el.location()).at(0)
let sup = it.supplement
if sup == auto { sup = none }
format-key(el.numbering, count, el.supplement, sup)
}
body
}
( // export
bibliography-list,
bib-item,
use-bib-item-ref,
)
}
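// Usage sketch (illustrative only; the import path and the cited entry below
// are assumptions, not part of this library):
//
// #import "bxbibwrite.typ": bibliography-list, bib-item, use-bib-item-ref
// #show: use-bib-item-ref
//
// As shown in @knuth, ...
//
// #bibliography-list(title: "References")[
//   #bib-item(<knuth>)[D. E. Knuth. The TeXbook. Addison-Wesley, 1986.]
// ]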
|
|
https://github.com/jamesrswift/typst-chem-par | https://raw.githubusercontent.com/jamesrswift/typst-chem-par/main/tests/test1/test.typ | typst | MIT License | #import "/src/lib.typ": *
#set page(width: 30em, height: auto, margin: 1em)
#show: chem-style
H2O, H2SO4, K2Cr2O7
Cl-, O2-, H+, Mg2+
tert-butyl lithium, o-xylene, trans-anethole
N,N-dimethyltryptamine,
N,N′-Dicyclohexylcarbodiimide
syn-, anti-, exo-, endo-
alpha-pinene, beta-hydride
D2O, Et2O, EtOAc2, OPR3
D-glucose, L-limonene
Acetone-d6. |
https://github.com/JonathanPlasse/modern-cv | https://raw.githubusercontent.com/JonathanPlasse/modern-cv/main/resume.typ | typst | #import "@preview/modern-cv:0.6.0": *
#show: resume.with(
author: (
firstname: "Jonathan",
lastname: "Plasse",
email: "<EMAIL>",
// homepage: "",
phone: "(+33) 7 81 32 02 04",
github: "JonathanPlasse",
// twitter: "",
// scholar: "",
// orcid: "0000-0000-0000-000X",
// birth: "",
linkedin: "jonathan-plasse",
// address: "",
positions: (
"Ingénieur Logiciel",
// "Software Architect",
// "Developer",
),
),
date: datetime.today().display(),
language: "fr",
colored-headers: true,
show-footer: false,
)
#let small-github-link(github-path) = {
[
#fa-icon("github", fill: color-darkgray) #link(
"https://github.com/" + github-path,
github-path,
)
]
}
= Experience
#resume-entry(
title: "Responsable Projet Logiciel Fullstack - Drone Light Show",
location: "Avignonet-Lauragais",
date: "Décembre 2023 - Août 2024",
description: "Drotek",
)
#resume-item[
  - Led the team developing the drone fleet control interface and the internal ERP
  - Developed live control of a drone swarm
  - Integrated timecode-triggered show start for synchronization with sound and lighting desks
  - Developed a fast, collision-free auto-RTL (return to launch) method
  - Added procedural color management with Geometry Nodes for show creation
  - Integrated the drone show creation software with FWsim and Finale 3D
  - Set up Continuous Integration
]
#resume-entry(
title: "Ingénieur Logiciel - Drone Light Show",
location: "Avignonet-Lauragais",
date: "Septembre 2020 - Novembre 2023",
description: "Drotek",
)
#resume-item[
  - Developed the Blender add-on for creating light shows
  - Developed the drone control interface
  - Built a drone simulator to virtually test the system with more than 1000 drones
  - Developed a pathfinding algorithm for collision-free shape transitions of drone swarms
]
#resume-entry(
title: "Stage de fin d'étude - Drone Light Show",
location: "Avignonet-Lauragais",
date: "Mars 2020 - Août 2023",
description: "Drotek",
)
#resume-item[
  - Studied and implemented safety features on the PX4 autopilot for drone fleets
  - Developed a method to compensate for motor noise on the drones' magnetometer
  - Developed the synchronized-swarm flight mode for light drones
]
= Skills
#resume-skill-item(
"Lang. de prog.",
(strong("Python"), strong("Rust"), strong("C/C++"), "TypeScript", "SQL", "OCaml"),
)
#resume-skill-item(
"Technologie",
(strong("Git"), strong("Linux"), "Blender", "Yocto", "Github Actions", "LaTeX/Typst"),
)
#resume-skill-item("Langues", (strong("Anglais"), "Allemand", "Japonais"))
= Education
#resume-entry(
title: "Télécom Physique Strasbourg",
location: "Illkirch-Graffenstaden",
date: "2017 - 2020",
description: "Diplôme d'ingénieur généraliste, Ingénierie des Systèmes, Automatique et Vision",
)
#resume-item[
  - Activities and societies: Secretary of the Association Robot Télécom Strasbourg
  - Physique Strasbourg Ingénierie
]
= Hobbies
#resume-entry(
title: "Contributions à l'Open Source"
)
#resume-item[
- #small-github-link("astral-sh/ruff"), un linter et formatteur Python écrit en Rust, 107+ contributions
- #small-github-link("empicano/aiomqtt"), un client MQTT Python async/await, 16+ contributions
- #small-github-link("nutti/fake-bpy-module"), un project ajoutant la complétion et type-checking de l'API Python de Blender, 25+ contributions
- #small-github-link("HypothesisWorks/hypothesis"), une bibliothèque Python pour le Property-based Testing, 5+ contributions
]
#resume-entry(
title: "Jonglage à 5 balles"
)
#resume-entry(
title: "Meditation"
)
#resume-entry(
title: "Piano, 7 ans"
) |
|
https://github.com/chubetho/Bachelor_Thesis | https://raw.githubusercontent.com/chubetho/Bachelor_Thesis/main/chapters/introduction.typ | typst | #import "@preview/glossarium:0.4.1": glspl
= Introduction
In the early days of the World Wide Web, the frontend was relatively simple. The first website, published in 1991 by British scientist <NAME> while working at the European Organization for Nuclear Research (CERN), was a basic, static webpage created using @html @_BirthWebCERN_. These early static websites were hosted on servers with content that remained unchanged unless manually updated by a webmaster. The level of interactivity was minimal, primarily focused on displaying information in a straightforward, text-based manner. As the web evolved, the complexity and expectations of web applications grew, leading to more sophisticated and interactive frontend designs.
Today, various modern methodologies have been developed to meet the diverse needs of different projects. One of the most notable approaches is @spa, which offers a highly interactive user experience characterized by dynamic content loading and smooth navigation. Despite these benefits, @spa often falls short in terms of performance compared to traditional server-side rendering, particularly concerning speed and @seo. A hybrid approach that effectively combines the strengths of both methodologies is universal rendering. This approach starts by rendering the application on the server, which results in faster load times and improved @seo. Following this initial load, the client side manages subsequent interactions, thereby maintaining the high level of interactivity characteristic of #glspl("spa").
Despite these advancements, as the functionality of a frontend application grows, it becomes a large and complex structure. This growth leads to challenges in scaling and maintaining large-scale applications, especially when multiple teams are involved in development. This issue stands in contrast to the backend, which has increasingly adopted the microservices architecture over the past decade. In this architectural style, the backend is divided into smaller, independently deployable services, enhancing flexibility, scalability, and ease of maintenance.
The frontend of a web application functions as the presentation layer that users initially see and interact with, including everything users experience visually and navigate through such as layouts, buttons, images, and forms. A well-designed frontend not only ensures that the application is visually appealing but also guarantees responsiveness, reliability, and simplicity. Because of its direct effects on user experience and satisfaction, frontend development has become a critical and complex task that companies must approach with thorough attention.
== Motivation
The motivation behind this study arises from the challenges faced by the @dklb website, a platform for online lottery services, allowing users to participate in various lottery games and access related information provided by LOTTO Berlin @_LOTTOBerlin_. This web application, originally developed in 2014, is based on an outdated monolithic architecture. Although this architectural design was effective and sufficient at that time, it has since fallen behind modern standards, leading to a series of critical issues that demand consideration.
One of the primary issues is the lack of flexibility. Since all components of the system are tightly interconnected, updating or modifying any individual part requires redeployment of the entire application. This process is time-consuming and introduces the risk of potential downtime, which negatively affects user experience and business operations.
Maintainability is another major drawback. As this legacy system ages, maintaining it has become increasingly complex and error-prone. Extensive testing and coordination are required to ensure changes do not cause new problems elsewhere in the application. Furthermore, the reliance on old technologies and a large codebase makes it challenging to attract skilled developers and poses difficulties for new junior developers, who may struggle to explore and understand the codebase. These factors collectively slow down development cycles and delay the release of new features.
Additionally, the current frontend architecture of the @dklb project is misaligned with agile development practices. The tight coupling of components prevents parallel development efforts and continuous integration, as teams are unable to work independently on different parts of the application. This misalignment further underscores the urgent need for a modern architectural approach that can address the limitations of the current system.
#pagebreak()
== Objective <section_objective>
In response to the challenges faced by the current application, MULTA MEDIO, the company responsible for its development and maintenance, has decided to undertake a complete rewrite of the application. A central concern in this process is the selection of an appropriate frontend architecture. With the growing interest in micro frontend architecture and its benefits, as demonstrated by many large corporations, this study aims to explore the feasibility and potential advantages of adopting this approach for the @dklb application. The results of this investigation are intended to provide in-depth insights and a thorough analysis, which inform the decision-making process regarding the architectural transition.
This study is guided by the following primary research questions (RQs):
- RQ1: How does adopting micro frontend architecture specifically affect the flexibility, maintainability, scalability, and performance of a web application?
- RQ2: Can the micro frontend approach effectively mitigate the specific challenges and limitations inherent in the current monolithic architecture of the @dklb project?
To address these questions, a comprehensive experiment, including multiple stages from planning to deployment, will be conducted. This experiment will serve as a proof of concept, designed to replicate a real-world project scenario to evaluate the impact of the proposed micro frontend architecture on the identified aspects.
== Structure
This study on micro frontend architecture is organized into eight chapters, each building progressively to provide a thorough exploration of the topic. The first chapter, which concludes here, introduces the study and defines its objectives. The second chapter examines the monolithic architecture and the shift towards microservices in the backend, illustrating how this evolution has influenced trends in frontend development. It also introduces the concept of Domain-Driven Design, particularly in relation to micro frontends. With this foundational knowledge in place, chapter three presents a review of the current state of the art, exploring the motivations for adopting micro frontend architecture through both academic research and practical case studies.
In chapter four, a decision framework is used to guide the selection of the most suitable approaches for implementing micro frontends, which are explored in greater detail in chapter five. Chapter six presents an in-depth experiment, demonstrating the multiple stages of implementing micro frontend architecture within the context of the @dklb project. The evaluation of this experiment, including comparisons with the monolithic @spa architecture, and a discussion of the advantages, disadvantages, and limitations of the study, is covered in chapter seven. Finally, chapter eight summarizes the findings and offers suggestions for future research directions.
#pagebreak(weak: true)
|
|
https://github.com/lucannez64/Notes | https://raw.githubusercontent.com/lucannez64/Notes/master/Maths_Exercices_21_05_2024.typ | typst | #import "@preview/bubble:0.1.0": *
#import "@preview/fletcher:0.4.3" as fletcher: diagram, node, edge
#import "@preview/cetz:0.2.2": canvas, draw, tree
#import "@preview/cheq:0.1.0": checklist
#import "@preview/typpuccino:0.1.0": macchiato
#import "@preview/wordometer:0.1.1": *
#import "@preview/tablem:0.1.0": tablem
#show: bubble.with(
title: "Maths",
subtitle: "21/05/2024",
author: "<NAME>",
affiliation: "LHB",
year: "2023/2024",
class: "101",
logo: image("JOJO_magazine_Spring_2022_cover-min-modified.png"),
)
#set page(footer: context [
#set text(8pt)
#set align(center)
#text("page "+ counter(page).display())
]
)
#set heading(numbering: "1.1")
#show: checklist.with(fill: luma(95%), stroke: blue, radius: .2em)
= Exercise 53, p. 460
== Part A
===
#tablem[
|$x_i$|$1$|$2$|$3$|$4$|$5$|$6$|Total|
|-----|-|-|-|-|-|-|-----|
|$p_i$|$1/6$|$1/6$|$1/6$|$1/6$|$1/6$|$1/6$|$1$|
|$p_i x_i$|$1/6$|$1/3$|$1/2$|$2/3$|$5/6$|$1$|$7/2$|
]
#tablem[
|$x_i$|$1$|$2$|$3$|$4$|$5$|$6$|$7$|$8$|$9$|$10$|$11$|$12$|Total|
|-----|-|-|-|-|-|-|-|-|-|--|--|--|----|
|$p_i$|$1/12$|$1/12$|$1/12$|$1/12$|$1/12$|$1/12$|$1/12$|$1/12$|$1/12$|$1/12$|$1/12$|$1/12$|$1$|
|$p_i x_i$|$1/12$|$1/6$|$1/4$|$1/3$|$5/12$|$1/2$|$7/12$|$2/3$|$3/4$|$10/12$|$11/12$|$1$|$13/2$|
]
===
$Z = X + Y$
Thus $E(Z) = E(X) + E(Y) = 13/2 + 7/2 = 10$
===
The two rolls are independent because two different dice are rolled
===
Since the two variables are independent, $V(Z) = V(X) + V(Y) = 35/12 + 143/12 = 89/6$
== Part B
===
There are $12 times 6 = 72$ possible outcomes, and among them $6$ give a total strictly greater than 15:
$(6; 10), (5;11) , (6;11), (4;12), (5;12) , (6;12)$
Thus the probability of obtaining this bonus is $6/72 = 1/12$
===
As noted above, two rolls are independent. Let $B$ be the random variable equal to 1 if a player obtained the bonus and 0 otherwise, and let $S_n$ be the number of bonuses obtained over $n$ rolls of the two dice. Then $S_n$ follows a binomial distribution with parameters $n$ and $p = 1/12$, so
$E(S_n) = n times 1/12 = n/12$
and $V(S_n) = n times 1/12 times 11/12 = (11 n)/144$
===
Let $Z_n$ be the total score over $n$ rolls, so that $E(Z_n) = n E(Z)$. Then
$E(Z_n + S_n) = E(Z_n) + E(S_n) = n E(Z)+ n/12$
$= 10n + n/12 = (121 n)/12$
===
We look for the smallest $n$ such that $E(Z_n + S_n) gt.eq 300$
$ (121 n)/12 gt.eq 300 $
$ n gt.eq 3600/121 approx 29.75 $
Thus, on average, $30$ rolls are needed to finish the game.
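As a quick check of this threshold (our own arithmetic, not part of the original exercise): for $n = 30$ we get $E(Z_n + S_n) = (121 times 30)/12 = 302.5 gt.eq 300$, while for $n = 29$ we get $(121 times 29)/12 approx 292.4 < 300$.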
|
|
https://github.com/antonWetzel/Masterarbeit | https://raw.githubusercontent.com/antonWetzel/Masterarbeit/main/packages/placeholder.typ | typst | #import "todo.typ": *
#let placeholder = (
location,
width: 100%,
height: auto,
alt: none,
fit: "cover",
) => {
let fill = orange.lighten(30%)
let location = align(center + horizon, todo(location))
if height == auto {
layout(size => {
box(width: size.width * width, height: size.width * width * 9 / 16, fill: fill, stroke: black, radius: 5pt, location)
})
} else {
box(width: width, height: height, fill: fill, stroke: black, radius: 5pt, location)
}
}
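// Usage sketch (illustrative; the import path and the captions are
// assumptions):
//
// #import "placeholder.typ": placeholder
//
// Reserves a 16:9 box scaled to 80% of the layout width:
// #placeholder([architecture diagram], width: 80%)
//
// Reserves a fixed-height box:
// #placeholder([timing plot], height: 4cm)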
|
|
https://github.com/exAClior/CPSQuantum2024 | https://raw.githubusercontent.com/exAClior/CPSQuantum2024/master/main.typ | typst | #import "@preview/touying:0.4.2": *
#import "@preview/touying-simpl-hkustgz:0.1.0" as hkustgz-theme
#let s = hkustgz-theme.register()
// Global information configuration
#let s = (s.methods.info)(
self: s,
title: [CPS Quantum Computing],
subtitle: [Talk Excerpts],
author: [<NAME>],
date: datetime.today(),
institution: [HKUST(GZ)],
)
// Extract methods
#let (init, slides) = utils.methods(s)
#show: init
// Extract slide functions
#let (slide, empty-slide, title-slide, outline-slide, new-section-slide, ending-slide) = utils.slides(s)
#show: slides.with()
#outline-slide()
= Correlated Decoding and Algorithmic Fault-Tolerance in Quantum Computing
== Resource Estimation @Beverland2022AssessingRT
- Fault tolerance requires too many resources
#figure(image("pics/resourceestimation.png", width: 70%),caption: [Resource Estimation for Various Application and Hardware Quality.])
== Correlated Decoding @Cain2024CorrelatedDO
- For transversal entangling gates "errors detected on one logical qubit can contain information about which errors occurred on other logical qubits"
#figure(image("pics/correlated_threshold.png",width: 40%), caption: [In Simulation with Correlated Error, threshold is improved from 5.1% to 10.3%])
- "if we can perform fewer than d rounds between transversal CNOTs, by leveraging the deterministic propagation of stabilizer measurement errors through CNOTs to verify stabilizer measurements using surrounding rounds of syndrome extraction"
- "stabilizer measurement errors near transversal CNOTs generate hyperedges that make conventional decoding approaches challenging"
#figure(image("pics/optimal_senumber.png", width: 50%))
== Algorithmic Fault-Tolerance @Zhou2024AlgorithmicFT
- "a key component of many schemes for achieving universality is magic state teleportation, which crucially relies on the ability to realize feed-forward operations."
- "such feed-forward operations require on-the-fly interpretation of logical measurements, followed by a subsequent conditional gate, when only a subset of the logical qubits have been measured"
- "Surprisingly, we find that these inconsistencies can be accounted for in classical processing, with a reinterpretation of subsequent measurement results"
#figure(image("pics/algorithmic_ft.png"))
== Summary
#figure(image("pics/aftsummary.jpg",width: 100%))
= Quantum Computing At the Physical Layer
== Summary @Huang2021QuantumIS
- "a quantum processor with such an instruction set, designing an instruction by replacing iSWAP with its matrix square root SQiSW can both reduce the gate error and improve compilation capabilities substantially"
- "taking only roughly half of the time of iSWAP, the SQiSW gate is expected to be implemented with much higher fidelity. Moreover, it has superior compilation capabilities than iSWAP in the task of compiling arbitrary two-qubit gates. An iFRB experiment, which can benchmark non-Clifford gates, on our capacitively coupled fluxonium quantum processor shows the gate error is reduced by 41% and the Haar random two-qubit gate error is reduced by 50% compared to iSWAP on the same chip"
- #link("https://ymsc.tsinghua.edu.cn/info/1057/3741.htm")[Online Recoding of Talk]
= The Ultimate Boundaries of Quantum Causality
== Picture
#figure(image("pics/IMG20240819094006.jpg"))
#figure(image("pics/IMG20240819094104.jpg"))
#figure(image("pics/IMG20240819094219.jpg"))
#figure(image("pics/IMG20240819094243.jpg"))
#figure(image("pics/IMG20240819095319.jpg"))
#figure(image("pics/IMG20240819095414.jpg"))
#figure(image("pics/IMG20240819095632.jpg"))
== Notes
- Indefinite Causal order used to improve metrology with continuous variable system https://www.nature.com/articles/s41567-023-02046-y
- Indefinite causal order: wasn’t it just a relaxation of the quantum channel between two parties, of-course it could violated some CHSH like inequality (ref Porf Xin Wang’s course)
- Def Indefinite Causal Order: If you cannot represent the process of two parties as a probabilitistic combination of two processes in one where A happens before B and the other otherwise.
- What is the boundary of violation of all possible experiment and all possible order….
- Quantum mechanics + causal order violates some inequality but not maximal (algebraically)
= Constant-Overhead Fault-Tolerant Quantum Computing With Reconfigurable Atom Arrays
== Summary
- "Practical implementation of product qLDPC code by (globally) rearranging atoms"
- "High-threshold, constant-space-overhead memory under circuit-level fault-tolerant design"
- "Fault-tolerant commputation via teleportation to topological codes"
- "qLDPC-based FT quantum computer"
== qLDPC is necessary
- For surface code $[[n,k,d]]$, "$n/k = d^2$" (a worked overhead estimate follows this list)
- "Connectivity constrains quantum code" @Bravyi2009TradeoffsFR
- "High-rate qLDPC code: constant space overhead" $[[n,Theta(n),Theta(n^(alpha>=1/2))]]$ @[email protected]@Panteleev2021AsymptoticallyGQ
- Con of qLDPC
- "requires non-local and complicated connection"
- "Large code size & low threshold"
- #link("https://www.youtube.com/watch?v=5tfj4ArAJa0")[QIP2024 Talk]
== Example of qLDPC code: Hypergraph product Code @Xu2023ConstantoverheadFQ
- "The HGP code is constructed from two classical LDPC codes"
#figure(image("pics/hypergraph_product_code.png", width: 50%))
#figure(image("pics/qldpc_performance.png",width: 50%))
== Logical Operation with qLDPC code @Xu2023ConstantoverheadFQ
- "performingfault-tolerant logical operations and perform the first numerical simulation of logical gate performance on qLDPC codes"
- "We teleport the logical information between the qLDPC memory and ancillary topological codes using a measurement-based circuit"
- "prescribed logical measurements are implemented using lattice surgery"
- "Universal logical operations can then be performed in the topological codes using standard techniques"
- "To use these LDPC codes for quantum computation,
one must be able to fault-tolerantly implement a universal set of protected logic gates. While Ref. [32] establishes a method to perform quantum computation using fault-tolerant gate teleportation [39], the cost associated with the distillation of the requisite resource state [40] is not understood well in the practical regime of interest" @Cohen2021LowoverheadFQ
== Recording of Talk
- #link("https://www.youtube.com/watch?v=5tfj4ArAJa0")[QIP2024 Talk]
- #link("https://www.youtube.com/watch?v=rBuKW87xrag")[QEC2023 Talk]
= Entanglement Renormalization and Tensor Network Representation of Chern Insulator
== Summary
- Question: can the tensor network formalism represent physically interesting materials, e.g. free-fermion Chern insulators?
- Chirality and gappedness cannot co-exist in tensor network representation of Chern insulator
- Zipper: separate long-range entangled state from the short range entanglement by some BASIS CHANGE @Wong2022ZipperER
- Catch: the bond dimension of the PEPS grows polynomially with the size of the system.
== Chern Insulator: the "trouble maker"
- locally describable
- dressed product states
- Atomic Insulator
- Quasi-1D Description (e.g. quantum Hall)
  - Non-chiral topological order (e.g. toric code)
- Physically Interesting
- Chiral states
- Semi-metals
- Critical states
- Metals
- ...
== Difficulties with Chiral Tensor Networks
- General picture: PEPS with finite bond dimensions can be chiral but @[email protected]
- No gapped, local parent Hamiltonian
- Algebraically decaying correlation (which might approximate the short-range part well)
- No-go proven for free fermions w/ translation invariance @PhysRevB.92.205307
- Interaction not as rigorously studied but the overall picture still holds @[email protected]@Weerda2023FractionalQH
== Photos
#figure(image("pics/ap1.png",width: 50%))
#figure(image("pics/zipper.png", width:50%))
#figure(image("pics/zipper2.png", width: 50%))
= References
#bibliography("refs.bib",style: "american-physics-society")
|
|
https://github.com/Ttajika/class | https://raw.githubusercontent.com/Ttajika/class/main/seminar/lib/setting.typ | typst | #import "useful_functions.typ": *
#import "useful_package.typ": *
// Wrap content in the given font; block content additionally gets the given
// leading.
#let par_block(it, font, leading: 0.5em) = {
  if it.block == true {
    return par(leading: leading)[#text(font: font)[#it]]
  } else {
    return text(font: font)[#it]
  }
}
#let sans-font = ("TeX Gyre Heros","<NAME>")
#let project(
title: "",
abstract: none,
authors: (),
institutions: (),
notes: (),
date: Today(),
body-font: ("TeX G<NAME>","<NAME>"),
sans-font: ("TeX G<NAME>","<NAME>"),
math-font: ("TeX Gyre Termes Math","<NAME>"),
leading: 1.0em,
lang: "en",
font-size: 12pt,
style: "report",
body,
) = {
// Set the document's basic properties.
// set document(author: authors, title: title)
set page(numbering: "1", number-align: center, margin: 1in)
// Set body font family.
set text(font: body-font, lang:lang)
show heading: set text(font: sans-font)
show figure.where(kind:image): set figure(supplement: "図")
show figure.where(kind:table): set figure(supplement: "表")
show par: it => {
v(0.1em)
it
v(0.1em)
}
set heading(numbering: "1.1.", supplement: [Section])
set footnote(numbering: "*")
// Title row.
if style == "report" {
align(center)[
#text(font: sans-font, weight: 700, 1.75em)[#title]
]
// Author information.
let author_note = authors.zip(notes)
pad(
top: 0.5em,
bottom: 0.5em,
x: 2em,
grid(
columns: (1fr,) * calc.min(3, authors.len()),
gutter: 1em,
..authors.map(author => align(center, [#text(size:1.2em)[#author]])),
..institutions.map(institutions => align(center, strong(institutions)))
),
)
align(center)[#text(font: body-font, weight: 500, 1em)[#date]]
}
if style == "dissertation" {
page(numbering: none)[
#align(center+horizon)[
#text(font: sans-font, weight: 700, 2.75em)[#title]
]
#v(4cm)
// Author information.
#let author_note = authors.zip(notes)
#pad(
top: 0.5em,
bottom: 0.5em,
x: 2em,
grid(
columns: (1fr,) * calc.min(3, authors.len()),
gutter: 1em,
..authors.map(author => align(center, [#text(size:1.2em)[#author]])),
..institutions.map(institutions => align(center, strong(institutions)))
),
)
#align(center+horizon)[#text(font: body-font, weight: 500, 1.2em)[提出日:#date]]
]
counter(page).update(1)
outline(title:"目次")
pagebreak()
}
  // Main body: basic settings.
  // Paragraph settings: first-line indent of 1em, with the given leading.
set par(justify: true, first-line-indent: 1em, leading: leading)
set block(spacing: leading)
  // Font settings.
set text(font: body-font, size:font-size)
set text(cjk-latin-spacing: auto)
  // Math font settings.
show math.equation: it => {
if it.block == true {par_block(it,math-font,leading:leading)
v(-leading)
h(1em)
}
else {par_block(it,math-font)}
}
  // Footnote settings.
set footnote(numbering: "1")
counter(footnote).update(0)
show emph: set text(font: body-font)
show strong: set text(font: sans-font)
set enum(numbering: "1.a.")
show ref: it => eq_refstyle(it,lang:lang)
  // Theorem environments and related settings.
show figure: it => {
let c_eq = counter_body(it)
let thenumber = numbering(
it.numbering,
..c_eq.at(it.location()))
if it.kind in theo_list{
let name = cap_body(it.caption)
my_thm_style(trans.at(lang).at(it.kind), name, thenumber, it.body)
}
else if it.kind in defi_list{
let name = cap_body(it.caption)
my_defi_style(trans.at(lang).at(it.kind), name, thenumber, it.body)
}
else if it.kind == "Proof" {
let name = cap_body(it.caption)
my_proof_style(trans.at(lang).at(it.kind), name, it.numbering, it.body,lang)}
else {it}
}
if abstract != none {
align(center)[#text(1em, strong[#abstract_name.at(lang)])]
block(inset: (left:6%, right:6%))[
#text(0.95em)[#par( leading: 0.8*leading)[#abstract]]]}
body
}
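// Usage sketch (illustrative; the import path, names, and institution below
// are assumptions, not part of this template):
//
// #import "setting.typ": project
// #show: doc => project(
//   title: "Sample Seminar Paper",
//   authors: ("Taro Yamada",),
//   institutions: ("Some University",),
//   notes: ("",),
//   lang: "ja",
//   doc,
// )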
|
|
https://github.com/Error-418-SWE/Documenti | https://raw.githubusercontent.com/Error-418-SWE/Documenti/src/2%20-%20RTB/Documentazione%20interna/Verbali/24-02-04/24-02-04.typ | typst | #import "/template.typ": *
#show: project.with(
date: "04/02/24",
subTitle: "Meeting di retrospettiva e pianificazione",
docType: "verbale",
authors: (
"<NAME>",
),
timeStart: "15:00",
timeEnd: "15:40",
);
= Agenda
- Discussion of the RTB outcome with Professor Cardin;
- Discussion of the progress status and the RTB with Professor Vardanega;
- Sprint planning, roles, and next objectives.
== Evaluation of the RTB outcome
The team opened the meeting by discussing the outcome of the first part of the RTB review held with Professor Cardin. In particular, the issues found in the `Analisi dei Requisiti` document were discussed, which can be summarized as:
- some modeling errors;
- insufficient specificity of the UCs;
- missing use case/requirements traceability.
== Planning for the second part of the RTB
The team examined the current situation and defined "long-term" objectives to be completed by the second part of the RTB review with Professor Vardanega.
In particular:
- updating the `Piano di Progetto` following the "big-bang";
- revising some `Piano di Qualifica` metrics that are incompatible with the development model chosen by the team;
- the need to prepare the slides for the meeting with the professor;
- priority given to: `Analisi dei Requisiti`, `Piano di Qualifica`, and `Norme di Progetto`.
== Planning
Planning for the next sprint focused on:
- assigning roles and tasks;
- revising the planned role assignments, since the RTB evaluation took longer than expected;
- removing Programmers and reinstating Analysts among the sprint roles, in view of the work on the `Analisi dei Requisiti`.
|
|
https://github.com/DrakeAxelrod/cvss.typ | https://raw.githubusercontent.com/DrakeAxelrod/cvss.typ/main/cvss/0.1.0/src/tests.typ | typst | MIT License |
#let _cvss = plugin("cvss.wasm")
#let get-result(
got,
expected,
) = {
let lower-end = expected - 0.11
let upper-end = expected + 0.11
if got >= lower-end and got <= upper-end {
text(fill: green.darken(30%), weight: 900, "PASS")
} else {
text(fill: red, weight: 900, "FAIL")
}
}
#let test(
vec,
expected,
) = {
let score = float(str(_cvss.score(bytes(vec))))
let severity = str(_cvss.severity(bytes(vec)))
// assert(score == expected, message: "expected score " + str(expected) + " but got " + str(score))
block(
breakable: false,
inset: 0.5em,
stroke: 1pt + black,
radius: 0.25em,
width: 100%,
fill: gray.lighten(50%),
[
// === #vec
// / Score: #score
// / Expected: #expected
// / Severity: #severity
// / Result: #get-result(score, expected)
#align(center, heading(level: 3, vec))
#v(-0.5em)
#table(
columns: (1fr, 1fr, 1fr, 1fr),
align: center,
stroke: none,
[*Score*], [*Severity*], [*Expected*], [*Result*],
[#score], severity, [#expected], get-result(score, expected)
)
])
}
== CVSS v2.0 tests
CVSS v2.0 Severity steps
- 0.0 - None
- 0.1 - 3.9 Low
- 4.0 - 6.9 Medium
- 7.0 - 10.0 High
// CVSS v2.0 tests
#test("CVSS:2.0/AV:L/AC:L/Au:M/C:N/I:N/A:N", 0)
#test("CVSS:2.0/AV:L/AC:H/Au:M/C:N/I:N/A:P", 0.9)
#test("CVSS:2.0/AV:L/AC:H/Au:M/C:N/I:N/A:C", 3.7)
#test("CVSS:2.0/AV:L/AC:H/Au:M/C:N/I:P/A:P", 2.4)
#test("CVSS:2.0/AV:L/AC:H/Au:M/C:N/I:P/A:C", 4.4)
#test("CVSS:2.0/AV:L/AC:H/Au:M/C:N/I:C/A:C", 5.3)
#test("CVSS:2.0/AV:L/AC:H/Au:M/C:P/I:C/A:C", 5.6)
#test("CVSS:2.0/AV:L/AC:H/Au:N/C:P/I:C/A:C", 5.9)
#test("CVSS:2.0/AV:L/AC:L/Au:N/C:P/I:C/A:C", 6.8)
#test("CVSS:2.0/AV:N/AC:L/Au:N/C:P/I:C/A:C", 9.7)
== CVSS v3.0 tests
CVSS v3.0 Severity steps
- 0.0 - None
- 0.1 - 3.9 Low
- 4.0 - 6.9 Medium
- 7.0 - 8.9 High
- 9.0 - 10.0 Critical
#test("CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:N", 0)
#test("CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:L", 5.3)
#test("CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H", 7.5)
#test("CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:L/A:L", 6.5)
#test("CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:L/I:L/A:H", 8.6)
#test("CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H", 9.8)
#test("CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:C/C:L/I:L/A:N", 7.2)
== CVSS v3.1 tests
CVSS v3.1 Severity steps
- 0.0 - None
- 0.1 - 3.9 Low
- 4.0 - 6.9 Medium
- 7.0 - 8.9 High
- 9.0 - 10.0 Critical
#test("CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:N", 0)
#test("CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:L", 5.3)
#test("CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H", 7.5)
#test("CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:L/A:L", 6.5)
#test("CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:L/I:L/A:H", 8.6)
#test("CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H", 9.8)
#test("CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:C/C:L/I:L/A:N", 7.2)
== CVSS v4.0 tests
#test(
"CVSS:4.0/AV:L/AC:H/AT:N/PR:L/UI:A/VC:N/VI:L/VA:L/SC:N/SI:N/SA:N",
1.0
)
#test(
"CVSS:4.0/AV:A/AC:H/AT:P/PR:H/UI:A/VC:H/VI:H/VA:L/SC:L/SI:L/SA:L",
5.3
)
#test(
"CVSS:4.0/AV:N/AC:H/AT:P/PR:L/UI:P/VC:H/VI:H/VA:L/SC:L/SI:L/SA:L",
7.5
)
#test(
"CVSS:4.0/AV:A/AC:H/AT:P/PR:L/UI:P/VC:H/VI:H/VA:L/SC:L/SI:L/SA:L",
5.3
)
#test(
"CVSS:4.0/AV:A/AC:L/AT:N/PR:L/UI:P/VC:H/VI:H/VA:L/SC:L/SI:L/SA:L",
6.9
)
#test(
"CVSS:4.0/AV:A/AC:L/AT:N/PR:N/UI:P/VC:H/VI:H/VA:L/SC:L/SI:L/SA:L",
8.6
)
#test(
"CVSS:4.0/AV:A/AC:L/AT:N/PR:N/UI:N/VC:H/VI:H/VA:L/SC:L/SI:L/SA:L",
8.7
)
#test(
"CVSS:4.0/AV:L/AC:L/AT:N/PR:N/UI:N/VC:H/VI:H/VA:L/SC:L/SI:L/SA:L",
8.6
)
#test(
"CVSS:4.0/AV:L/AC:L/AT:N/PR:H/UI:N/VC:H/VI:H/VA:L/SC:L/SI:L/SA:L",
8.3
)
#test(
"CVSS:4.0/AV:L/AC:L/AT:N/PR:H/UI:N/VC:H/VI:H/VA:L/SC:H/SI:L/SA:L",
9.2
)
#test(
"CVSS:4.0/AV:L/AC:L/AT:N/PR:H/UI:A/VC:N/VI:N/VA:L/SC:H/SI:L/SA:L",
4.4
)
#test(
"CVSS:4.0/AV:L/AC:L/AT:P/PR:H/UI:A/VC:N/VI:N/VA:N/SC:H/SI:L/SA:L",
2.1
)
#test(
"CVSS:4.0/AV:N/AC:L/AT:N/PR:N/UI:N/VC:H/VI:H/VA:H/SC:H/SI:H/SA:H",
10.0
)
// currently not working with M+METRIC scores
#test(
"CVSS:4.0/AV:L/AC:L/AT:P/PR:H/UI:A/VC:N/VI:N/VA:N/SC:H/SI:L/SA:L/MAV:N",
4.5
)
#test(
"CVSS:4.0/AV:N/AC:L/AT:N/PR:N/UI:N/VC:N/VI:N/VA:N/SC:N/SI:N/SA:N/E:A/CR:H/IR:M/AR:L/MSC:L/MSI:H/MSA:L",
7.8
)
#test(
"CVSS:4.0/AV:N/AC:L/AT:N/PR:N/UI:N/VC:N/VI:N/VA:N/SC:N/SI:N/SA:N/E:A/CR:H/IR:M/AR:L/MAC:L/MAT:P/MUI:N/MSC:L/MSI:H/MSA:L",
7.0
)
#test(
"CVSS:4.0/AV:N/AC:L/AT:N/PR:N/UI:N/VC:N/VI:L/VA:L/SC:N/SI:H/SA:N/E:A/CR:H/IR:M/AR:L/MAC:L/MAT:P/MUI:N/MSC:L/MSI:H/MSA:L/S:P/AU:N/R:U/V:C/RE:L",
7.0
)
#test(
"CVSS:4.0/AV:L/AC:H/AT:N/PR:L/UI:A/VC:N/VI:L/VA:L/SC:N/SI:H/SA:N/E:A/CR:H/IR:M/AR:L/MAC:L/MAT:P/MUI:N/MSC:L/MSI:H/MSA:L/S:P/AU:N/R:U/V:C/RE:L",
4.8
)
|
https://github.com/iceghost/typst-at-hcmut | https://raw.githubusercontent.com/iceghost/typst-at-hcmut/main/contents/02.typ | typst | Apache License 2.0 | = Installation
Below is a guide to installing Typst if you use Typst
on your own machine instead of the #link("https://typst.app", [web app]).
== Typst
On Windows, use `winget`:
```console
winget install Typst.Typst
```
On macOS, if you use `brew`:
```console
brew install typst
```
Otherwise, download the executable yourself
from the #link("https://github.com/typst/typst/releases/latest", [GitHub]) releases page
and put it somewhere in your `PATH`, for example `/usr/bin`.
== Visual Studio Code extensions
If you use VSCode, the file `.vscode/extensions.json`
lists useful extensions for working with Typst.
For the `typst-lsp` extension, automatic PDF generation is turned off
because it clutters your directory quite a bit.
You can try turning it back on; you might like it.
== LaTeX fonts
You can use the following two links to download the fonts
used in LaTeX documents:
- Latin Modern Roman: https://www.fontsquirrel.com/fonts/download/Latin-Modern-Roman
- Latin Modern Mono: https://www.fontsquirrel.com/fonts/download/Latin-Modern-Mono
You can learn more about these fonts at https://fontsquirrel.com
|
https://github.com/juicebox-systems/ceremony | https://raw.githubusercontent.com/juicebox-systems/ceremony/main/instructions/model.typ | typst | MIT License | // The model represents the state of the real world as it changes during the
// ceremony.
//
// The variables that are tracked in the model are useful to catch errors in
// the instructions. Many other things that may seem important are not tracked.
// For example, tracking which HSM is active wouldn't be valuable because the
// HSMs are used sequentially.
//
// Due to Typst's execution model, most of the functions in this module return
// content that must be displayed. Often the content is empty, but it must
// still go into the document. This will happen "by default" in most contexts.
// See https://typst.app/docs/reference/meta/state/#definitions-update for more
// info.
#import "debug.typ": debug_level, debug_text, error
//////// Model Variables and Values
// A state variable tracking which smartcard is in the smartcard reader, if
// any.
#let card_reader = state("card_reader", none)
#let card_reader_values = (none, "ACS", "OCS")
// A boolean state variable tracking whether the smartcard reader is connected
// to the current HSM.
#let card_reader_connected = state("card_reader_connected", false)
// A container of boolean state variables that track whether a particular
// software component or files have been created/installed/loaded.
#let components_loaded = state("components_loaded", (
// `ceremony vendor install codesafe` is done.
codesafe: false,
// The `entrust_init` executable is on the filesystem (either built from
// source or restored from the realm DVD).
entrust_init: false,
// `entrust_hsm.sar` and `userdata.sar` are on the filesystem (either just
// signed or restored from the realm DVD).
sar_files: false,
// `ceremony vendor install secworld` is done.
secworld: false,
// The MAC, noise, and record key blobs, along with the 'world' file, are on
// the filesystem (either just generated or restored from the realm DVD).
simple_keys: false,
))
// A boolean state variable tracking whether the computer is turned on.
#let computer_on = state("computer_on", false)
// A boolean state variable tracking whether the computer power supply is
// plugged in to power.
#let computer_plugged_in = state("computer_plugged_in", false)
// A state variable tracking which DVD is in the computer's DVD burner, if any.
#let dvd_drive = state("dvd_drive", none)
#let boot_dvd = "boot DVD"
#let boot_dvd_title = "Boot DVD"
#let realm_dvd = "realm DVD"
#let realm_dvd_title = "Realm DVD"
#let vendor_dvd = "vendor DVD"
#let vendor_dvd_title = "Vendor DVD"
#let dvd_drive_values = (none, boot_dvd, realm_dvd, vendor_dvd)
// A boolean state variable tracking whether the current HSM has been inserted
// into the computer's PCIe slot.
#let hsm_installed = state("hsm_installed", false)
// A state variable tracking the operational mode of the HSM. `none` can
// indicate that the HSM is not installed, the computer is off, or the mode is
// unknown.
#let hsm_mode = state("hsm_mode", none)
#let hsm_mode_values = (none, "initialization", "maintenance", "operational")
// A boolean state variable tracking whether the antistatic wrist strap is
// connecting the operator to the computer chassis.
#let wrist_strap_connected = state("wrist_strap_connected", false)
//////// Debug functions
// Returns a dictionary of all model variables, mapping from their names to
// content representing their current values.
#let debug_model() = (
card_reader: card_reader.display(repr),
card_reader_connected: card_reader_connected.display(repr),
computer_on: computer_on.display(repr),
computer_plugged_in: computer_plugged_in.display(repr),
components_loaded: components_loaded.display(c => table(
columns: 2,
..c.pairs().map(((component, is_loaded)) =>
(component, repr(is_loaded))
).flatten(),
)),
dvd_drive: dvd_drive.display(repr),
hsm_installed: hsm_installed.display(repr),
hsm_mode: hsm_mode.display(repr),
wrist_strap_connected: wrist_strap_connected.display(repr),
)
//////// Helper functions
#let is_boolean(v) = v == false or v == true
// Test for is_boolean.
{
#assert(is_boolean(true))
#assert(is_boolean(false))
#assert(not is_boolean(1))
#assert(not is_boolean(0))
#assert(not is_boolean(none))
#assert(not is_boolean("true"))
}
// Returns the name of a state variable. This is a convenient hack used for
// better debug messages.
#let state_key(state_var) = {
assert(type(state_var) == state, message: repr(state_var))
repr(state_var).match(regex("^state\(\"(.+)\",")).captures.at(0)
}
// Test for `state_key`.
#assert(
state_key(card_reader) == "card_reader",
message: "got " + repr(state_key(card_reader)),
)
// Helper for `require_*` functions.
#let join_messages(..messages) = {
let nonempty = messages.pos()
.filter((m) => m != none and m != "")
if nonempty == () {
""
} else {
nonempty.join(": ")
}
}
// Test for `join_messages`.
#for (input, expected) in (
((), ""),
(("", none), ""),
((none, "a", ""), "a"),
(("a", none, "", "b"), "a: b"),
) {
let actual = join_messages(..input)
assert(
actual == expected,
message: "expected " + repr(expected) + " but got " + repr(actual),
)
}
// Asserts that a state variable has the given value.
#let require_eq(state, required, message: none) = {
if debug_level >= 2 {
block(debug_text[
Requires #state_key(state) is #repr(required).
])
}
state.display((actual) => {
if actual != required {
error(join_messages(
message,
(
state_key(state) +
" must be " +
repr(required) +
" but found " +
repr(actual)
),
))
}
})
}
// Asserts that a dictionary state variable has a given key set to the given
// value.
#let require_key_eq(state, key, value, message: none) = {
assert(is_boolean(value))
if debug_level >= 2 {
block(debug_text[
Requires #state_key(state) has #repr(key) set to #repr(value).
])
}
state.display((actual) => {
if actual.at(key) != value {
error(join_messages(
message,
state_key(state) + " must have " + repr(key) + " set to " + repr(value)
))
}
})
}
//////// State assertion functions
#let assert_card_reader(card, message: none) = {
require_eq(card_reader, card, message: message)
}
#let assert_card_reader_connected(value, message: none) = {
require_eq(card_reader_connected, value, message: message)
}
#let assert_component_loaded(component, is_loaded, message: none) = {
require_key_eq(components_loaded, component, is_loaded, message: message)
}
#let assert_computer_on(value, message: none) = {
require_eq(computer_on, value, message: message)
}
#let assert_computer_plugged_in(value, message: none) = {
require_eq(computer_plugged_in, value, message: message)
}
#let assert_dvd_drive(disc, message: none) = {
require_eq(dvd_drive, disc, message: message)
}
#let assert_hsm_installed(value, message: none) = {
require_eq(hsm_installed, value, message: message)
}
#let assert_hsm_mode(mode, message: none) = {
require_eq(hsm_mode, mode, message: message)
}
#let assert_wrist_strap_connected(value, message: none) = {
require_eq(wrist_strap_connected, value, message: message)
}
// Requires that `card` is in the smartcard reader and the smartcard reader is
// connected to the HSM. `card` must not be none.
#let assert_card(card, message: none) = {
assert(card != none)
assert_card_reader(card, message: message)
assert_card_reader_connected(
true,
message: "card reader must be connected to access " + card
)
}
//////// State manipulation functions
#let set_card_reader(from: -1, to: -1) = {
assert_card_reader(from)
assert(
card_reader_values.contains(to),
message: "set_card_reader: to is invalid smartcard/none",
)
assert(
from != to,
message: "set_card_reader: no change",
)
card_reader.update(to)
}
#let set_card_reader_connected(value) = {
assert(
is_boolean(value),
message: "invalid card_reader_connected value",
)
assert_card_reader_connected(not value)
// Since the PCI bracket isn't on the HSM, there's a good chance that
// plugging/unplugging the card reader would knock the HSM out of the PCI
// slot. That might be bad for the HSM or set off tamper alarms.
assert_computer_plugged_in(
false,
message: (
"power must be off when plugging in card reader, since it may knock " +
"the HSM out of the PCI slot"
),
)
card_reader_connected.update(value)
}
#let clear_all_components_loaded() = {
components_loaded.update((old) => {
for component in old.keys() {
old.insert(component, false)
old
}
})
}
#let set_component_loaded(component, is_loaded) = {
assert(is_boolean(is_loaded))
assert_component_loaded(component, not is_loaded)
assert_computer_on(
true,
message: "computer must be on to (un)load components",
)
components_loaded.update((old) => {
old.insert(component, is_loaded)
old
})
}
#let set_computer_on(value) = {
assert(
is_boolean(value),
message: "invalid computer_on value"
)
assert_computer_on(not value)
if value {
assert_computer_plugged_in(
true,
message: "computer must be plugged in to power on",
)
hsm_installed.display(installed => if installed {
// The HSM boots back up into operational mode because that's what the
// hardware switch is set to.
hsm_mode.update("operational")
})
} else {
hsm_installed.display(installed => if installed {
      // Powering off clears the HSM mode; it is unknown until the computer
      // boots again.
      hsm_mode.update(none)
})
}
clear_all_components_loaded()
computer_on.update(value)
}
#let set_computer_plugged_in(value) = {
assert(
is_boolean(value),
message: "invalid computer_plugged_in value"
)
assert_computer_plugged_in(not value)
// This computer seems to power on when plugged in if it was powered on when
// last unplugged. The ceremony instructions always power off the computer
// before unplugging it.
assert_computer_on(
false,
message: "computer must be off when unplugging or plugging in power",
)
computer_plugged_in.update(value)
}
#let set_dvd_drive(from: -1, to: -1) = {
assert_dvd_drive(from)
assert(
dvd_drive_values.contains(to),
message: "set_dvd_drive: to is invalid disc/none",
)
assert(
from != to,
message: "set_dvd_drive: no change",
)
assert_computer_on(
true,
message: "computer must be on to eject DVD tray",
)
dvd_drive.update(to)
}
#let set_hsm_installed(value) = {
assert(
is_boolean(value),
message: "invalid hsm_installed value"
)
assert_hsm_installed(not value)
assert_computer_plugged_in(
false,
message: "power must be disconnected to (un)install HSM " +
"(the computer doesn't have a power switch)",
)
assert_wrist_strap_connected(
true,
message: "wrist strap needed to (un)install HSM"
)
assert_card_reader_connected(
false,
message: "card reader must be unplugged to (un)install HSM"
)
hsm_mode.update(none)
hsm_installed.update(value)
}
#let set_hsm_mode(mode) = {
assert(
hsm_mode_values.contains(mode),
message: "invalid HSM mode/none",
)
assert_computer_on(
true,
message: "computer must be on to set HSM mode",
)
assert_hsm_installed(
true,
message: "HSM must be installed to set HSM mode",
)
assert_component_loaded(
"secworld",
true,
message: "secworld must be loaded to set HSM mode"
)
hsm_mode.update(mode)
}
#let set_wrist_strap_connected(value) = {
assert(
is_boolean(value),
message: "invalid wrist_strap_connected value",
)
assert_wrist_strap_connected(not value)
wrist_strap_connected.update(value)
}
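
// A minimal usage sketch (added for illustration; this exact sequence is not
// taken from the original ceremony script -- "secworld" is the only component
// name visible in this file, and every setter enforces its preconditions via
// the asserts above):
//
//   set_computer_plugged_in(true)            // requires: off and unplugged
//   set_computer_on(true)                    // requires: plugged in
//   set_component_loaded("secworld", true)   // requires: computer on
//   set_hsm_mode("operational")              // requires: HSM installed, secworld loaded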
|
https://github.com/EpicEricEE/typst-equate | https://raw.githubusercontent.com/EpicEricEE/typst-equate/master/tests/template.typ | typst | MIT License | #import "/src/lib.typ": equate
#set page(width: 6cm, height: auto, margin: 1em)
#show: equate
|
https://github.com/Skimmeroni/Appunti | https://raw.githubusercontent.com/Skimmeroni/Appunti/main/Metodi%20Algebrici/Insiemi/Definizione.typ | typst | Creative Commons Zero v1.0 Universal | #import "../Metodi_defs.typ": *
A *set* is any collection of objects, called _elements_ or
_members_ of the set. Sets are usually denoted by uppercase letters of the
Latin alphabet, and their elements by lowercase ones. To state that the
object $a$ is a member of the set $A$, the notation $a in A$ is used, and
one says that $a$ _belongs_ to $A$.

The elements belonging to a set can be presented either *extensionally*,
that is, by simply listing them, or *intensionally*, that is, by stating a
certain property possessed by all and only the elements of that set.
Formally, the following notation is used:

$ underbracket(A = {a_(1)\, a_(2)\, a_(3)\, ...}, "extensional form")
space space space underbracket(A = {a : a "possesses the characteristic
property of" A}, "intensional form") $
#example[
	Let $A$ be the set containing the colors that can appear in a pixel.
	$A$ can be described equivalently in the two ways:

	$ A = {"red", "green", "blue"} space space space
	A = {a : a "is one of the colors present in a pixel"} $
]
Note that a set can in turn be treated as an object, and therefore be a
member of another set. Moreover, a set is not allowed to contain multiple
"copies" of the same object. Finally, the order in which the elements of a
set are arranged is irrelevant.

Let $A$ and $B$ be two sets. $B$ is said to be a *subset* of $A$ if every
member of $B$ is also a member of $A$, written $B subset.eq A$.
Equivalently, $A$ is said to be a *superset* of $B$ if every member of $B$
is also a member of $A$, written $A supset.eq B$. Formally:

$ B subset.eq A "if and only if" forall x in B, x in A
space space space
A supset.eq B "if and only if" forall x in B, x in A $
Two sets $A$ and $B$ are *equal* if they contain the same elements, that
is, if $A subset.eq B$ and $B subset.eq A$, written $A = B$. Two sets $A$
and $B$ are distinct if there is at least one element of $A$ not contained
in $B$, or at least one element of $B$ not contained in $A$, written
$A != B$. Note that two sets cannot be both equal and distinct. That is,
if $A = B$ holds for two sets $A$ and $B$, then $A$ and $B$ are the same
set.

Let $A$ and $B$ be two sets. If $B$ is a subset of $A$ and at the same time
is not equal to $A$, then $B$ is said to be a *proper subset* of $A$,
written $B subset A$. Equivalently, if $A$ is a superset of $B$ and at the
same time is not equal to $B$, then $A$ is said to be a *proper superset*
of $B$, written $A supset B$. Formally:

$ B subset A "if and only if" forall x in B, x in A " and " B != A
space space space
A supset B "if and only if" forall x in B, x in A " and " B != A $

To state that the set $B$ is _not_ a subset of $A$, the notation
$B subset.eq.not A$ is used, while $B subset.not A$ states that $B$ is
_not_ a proper subset of $A$. Similarly, $A supset.eq.not B$ states that
$A$ is _not_ a superset of $B$, and $A supset.not B$ states that $A$ is
_not_ a proper superset of $B$.
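
A small illustration (with sets invented here for concreteness) may help:

#example[
	Let $A = {1, 2, 3}$ and $B = {1, 2}$. Then $B subset.eq A$ and, since
	$B != A$, also $B subset A$. Conversely, $A subset.eq.not B$, because
	$3 in.not B$.
]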
#lemma[
	For any set $A$ the following hold: $A subset.eq A$, $A supset.eq A$,
	$A = A$, $A subset.not A$, $A supset.not A$.
] <A-subsupset-itself>

#proof[
	+ By definition, $A subset.eq A "if and only if" forall x in A, x in A$.
	  Since $forall x in A, x in A$ is a tautology, $A subset.eq A$ holds;
	+ Analogous to the previous point;
	+ Since $A subset.eq A$ and $A supset.eq A$, we have $A = A$;
	+ Since $A subset.eq A$ and $A = A$, we have $A subset.not A$;
	+ Analogous to the previous point.
]
The set containing no elements is called the *empty set*, denoted by
$emptyset$ or by ${}$.

#lemma[
	The empty set is a subset of every set (including itself).
] <emptyset-subset-every-set>

#proof[
	Given any set $A$, $emptyset$ is a subset of $A$ if every member of
	$emptyset$ is also a member of $A$. Since $emptyset$ is the set with
	no members, it satisfies this definition vacuously, even in the case
	$A = emptyset$.
]
From a set $A$ one can build the *power set* of $A$, also called the
*set of parts* of $A$, as the set containing all the subsets of $A$. The
power set of $A$ is denoted by $cal(P) (A)$.

#lemma[
	For any set $A$ (including $emptyset$), both $emptyset in
	cal(P) (A)$ and $A in cal(P) (A)$ hold.
]

#proof[
	From @emptyset-subset-every-set we have $emptyset subset.eq A$, while
	from @A-subsupset-itself we have $A subset.eq A$. Since $cal(P) (A)$
	is defined as the set containing all the subsets of $A$, it certainly
	contains (at least) these two.
]
#example[
	Let $A = {"red", "green", "blue"}$. Then:

	$ cal(P) (A) = {emptyset, {"red"}, {"green"}, {"blue"},
	{"red", "green"}, {"red", "blue"}, {"green", "blue"},
	{"red", "green", "blue"}} $
]
Given two sets $A$ and $B$, the *union* of $A$ and $B$ is the set
containing all the elements lying in $A$ or in $B$, denoted by
$A union B$:

$ A union B = {x : x in A or x in B} $

Note that "$or$" is not to be understood in an exclusive sense. That is, an
element $x$ belongs to $A union B$ if it belongs to $A$, if it belongs to
$B$, or if it belongs to both.

#example[
	Let $A = {"red", "green", "blue"}$ and $B = {"green", "yellow",
	"pink", "black"}$. Then:

	$ A union B = {"red", "green", "blue", "yellow", "pink", "black"} $
]
Given two sets $A$ and $B$, the *intersection* of $A$ and $B$ is the set
containing all the elements lying both in $A$ and in $B$, denoted by
$A sect B$:

$ A sect B = {x : x in A and x in B} $

Note that "$and$" is to be understood in a conjunctive sense. That is, an
element $x$ belongs to $A sect B$ if and only if it belongs to both $A$
and $B$ at the same time.

If the intersection of two sets is the empty set, that is, if there is no
element present in both sets at once, the two sets are said to be
*disjoint*.

#example[
	Let $A = {"red", "green", "blue"}$ and $B = {"green", "yellow",
	"pink", "black"}$. Then:

	$ A sect B = {"green"} $
]
The union of $k$ sets $A_(1), A_(2), A_(3), ..., A_(k)$ can be generalized
as the set containing all the $x$ that appear in at least one of the $k$
sets:

$ union.big_(i = 1)^(k) A_(i) = (... (A_(1) union (A_(2) union
(A_(3) union ...)))) union A_(k) = {x : exists i in {1, 2, ..., k} : x
in A_(i)} $

Likewise, the intersection of $k$ sets $A_(1), A_(2), A_(3), ..., A_(k)$
can be generalized as the set containing all the $x$ that appear in all
$k$ sets:

$ sect.big_(i = 1)^(k) A_(i) = (... (A_(1) sect (A_(2) sect (A_(3) sect ...))))
sect A_(k) = {x : x in A_(i) forall i in {1, 2, ..., k}} $
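
As a quick worked instance (sets chosen here purely for illustration):

#example[
	Let $A_(1) = {1, 2}$, $A_(2) = {2, 3}$ and $A_(3) = {2, 4}$. Then:

	$ union.big_(i = 1)^(3) A_(i) = {1, 2, 3, 4} space space space
	sect.big_(i = 1)^(3) A_(i) = {2} $
]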
#lemma[
	Let $A$, $B$ and $C$ be three sets. Their union and intersection
	satisfy the following properties:

	#grid(
		columns: (0.625fr, 0.875fr, 1fr),
		[
			*Commutativity*:
			- $A sect B = B sect A$;
			- $A union B = B union A$.
		],
		[
			*Associativity*:
			- $(A sect B) sect C = A sect (B sect C)$;
			- $(A union B) union C = A union (B union C)$.
		],
		[
			*Distributivity*:
			- $A sect (B union C) = (A sect B) union (A sect C)$;
			- $A union (B sect C) = (A union B) sect (A union C)$.
		]
	)

	The result generalizes to $k$ sets.
]
Given two sets $A$ and $B$, the *difference* of $A$ and $B$ is the set
containing all the elements of $A$ that are not contained in $B$, denoted
by $A - B$:

$ A - B = {x: x in A and x in.not B} $

Let $A$ and $B$ be two sets such that $B subset.eq A$. The set $A - B$ is
called the *complement* of $B$ with respect to $A$, denoted by
$overline(B)$. When it is clear from context with respect to which set a
given set is complemented, that set is omitted.

#theorem("Laws of <NAME>")[
	Let $A$ and $B$ be two subsets of some set $U$. Then:

	$ overline(A sect B) = overline(A) union overline(B)
	space space space
	overline(A union B) = overline(A) sect overline(B) $

	The result generalizes to $k$ sets.
]
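
As a quick sanity check (with sets invented here for illustration):

#example[
	Let $U = {1, 2, 3, 4}$, $A = {1, 2}$ and $B = {2, 3}$, so that
	$overline(A) = {3, 4}$ and $overline(B) = {1, 4}$. Then:

	$ overline(A sect B) = overline({2}) = {1, 3, 4} =
	{3, 4} union {1, 4} = overline(A) union overline(B) $
]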
Let $A$ and $B$ be two sets. The *Cartesian product* of $A$ and $B$ is the
set consisting of all the possible ordered pairs built from the elements
of $A$ and of $B$, denoted by $A times B$:

$ A times B = {(a, b) : a in A and b in B} $

#example[
	Let $A = {"red", "green", "blue"}$ and $B = {"green", "yellow",
	"pink", "black"}$. Then:

	$ A times B = {&("red", "green"), ("red", "yellow"),
	("red", "pink"), ("red", "black"), \ &("green", "green"),
	("green", "yellow"), ("green", "pink"), ("green", "black"), \
	&("blue", "green"), ("blue", "yellow"), ("blue", "pink"), ("blue", "black")} $
]

The Cartesian product of two sets can be generalized to $k$ sets
$A_(1), A_(2), ..., A_(k)$ as the set built from all the possible ordered
$k$-tuples of elements of each $A_(i)$ for $i = {1, ..., k}$:

$ product_(i = 1)^(k) A_(i) = A_(1) times A_(2) times ... times A_(k) =
{(a_(1), a_(2), ..., a_(k)) : a_(1) in A_(1) and a_(2) in A_(2) and ...
and a_(k) in A_(k)} $

In the special case where all $k$ sets $A_(1), A_(2), ..., A_(k)$ are
equal to some set $A$, their Cartesian product is simply written $A^(k)$.
Given a set $A$, the number of elements it contains is called its
*cardinality*, denoted by $|A|$. The cardinality of a set can be either
_finite_ or _infinite_; a set is therefore allowed to contain infinitely
many elements. Two (possibly distinct) sets with the same cardinality are
said to be *equipotent*.
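
For instance (a minimal illustration with sets invented here):

#example[
	Let $A = {"red", "green", "blue"}$ and $B = {1, 2, 3}$. Then
	$|A| = |B| = 3$, so $A$ and $B$ are equipotent even though $A != B$.
]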
|
https://github.com/catppuccin/typst | https://raw.githubusercontent.com/catppuccin/typst/main/src/flavors.typ | typst | MIT License | #import "flavors/catppuccin-latte.typ": latte as _latte
#import "flavors/catppuccin-frappe.typ": frappe as _frappe
#import "flavors/catppuccin-macchiato.typ": macchiato as _macchiato
#import "flavors/catppuccin-mocha.typ": mocha as _mocha
/// The Latte color palette.
///
/// ==== Example
/// #example(
/// ```typ
/// #let theme = themes.latte
/// #let palette = get-palette(theme)
/// Selected theme: #palette.name #palette.emoji
/// ```, ratio: 1.6)
///
/// -> flavor
#let latte = _latte
/// The Frappé color palette.
///
/// ==== Example
/// #example(
/// ```typ
/// #let theme = themes.frappe
/// #let palette = get-palette(theme)
/// Selected theme: #palette.name #palette.emoji
/// ```, ratio: 1.6)
///
/// -> flavor
#let frappe = _frappe
/// The Macchiato color palette.
///
/// ==== Example
/// #example(
/// ```typ
/// #let theme = themes.macchiato
/// #let palette = get-palette(theme)
/// Selected theme: #palette.name #palette.emoji
/// ```, ratio: 1.6)
///
/// -> flavor
#let macchiato = _macchiato
/// The Mocha color palette.
///
/// ==== Example
/// #example(
/// ```typ
/// #let theme = themes.mocha
/// #let palette = get-palette(theme)
/// Selected theme: #palette.name #palette.emoji
/// ```, ratio: 1.6)
///
/// -> flavor
#let mocha = _mocha
|
https://github.com/Ttajika/typst_template_tajika | https://raw.githubusercontent.com/Ttajika/typst_template_tajika/main/libs/my_short_hand.typ | typst |
#let cdot = $dots.c$
#let piv = $"piv"$
#let iff = $arrow.double.r.l.long$
#let sgn = $op("sgn")$
#let conv = $op("conv")$ |
|
https://github.com/astrojhgu/medical_expense_reimbursement | https://raw.githubusercontent.com/astrojhgu/medical_expense_reimbursement/master/reimbursement.typ | typst | #let name="张三"
#let dep="某部"
#let id="9527"
#let phone="13812341234"
#let total="1234567.89"
#let date="2024/2/29"
#let ninvoice="1"
// don't touch the code below; only the settings above need editing
//#let menzhen="12345.10"
//#let zhuyuan="1000.11"
#let (yyyy,mm,dd)=date.split("/")
#let mod(n, m) = {
while n >= m {
n -= m
}
return n
}
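// Note (added): for the non-negative operands used in this file, mod() is
// equivalent to the built-in calc.rem(n, m); the explicit loop is kept as
// in the original.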
// Converts a decimal number string such as "1234.56" into Chinese
// financial numerals.
#let arabic_to_chinese(num_str: "")={
let chinese_numerals = ("0": "零", "1": "壹", "2": "贰", "3": "叁", "4": "肆", "5": "伍", "6": "陆", "7": "柒", "8": "捌", "9": "玖")
let unit_list = ("", "拾", "佰", "仟")
let decimal_list = ("角", "分")
let result=""
let (integer_part, ..decimal_part)=num_str.split(".")
let n=calc.min(decimal_part.len(),2)
decimal_part=decimal_part.slice(0,n)
let integer_result = ""
  // Walk the integer digits from least to most significant, prepending each
  // Chinese digit plus its in-group place marker (拾/佰/仟).
  for (i,digit) in integer_part.rev().clusters().enumerate(){
    if digit!="0"{
      integer_result=chinese_numerals.at(digit)+unit_list.at(mod(i,4))+integer_result
    }
    else if (i==0 or mod(i,4)!=0 and integer_result.at(0)!="零"){
      // Insert a single 零 placeholder, avoiding consecutive repeats.
      integer_result="零"+integer_result
    }
    // Group markers: 亿 before every eighth digit, 万 before every fourth.
    // 亿 must be checked first, since a position such as i == 7 satisfies
    // both conditions and would otherwise get 万 instead of 亿.
    if mod(i, 8)==7{
      integer_result="亿"+integer_result
    }else if mod(i,4)==3 and integer_part.len()>4{
      integer_result="万"+integer_result
    }
  }
result+=integer_result+"元"
  if decimal_part.len()>0{
    // 角 is tenths and 分 is hundredths; a zero in the tenths place
    // becomes a single 零.
    for (i, digit) in decimal_part.at(0).clusters().enumerate(){
if digit!="0"{
result+=chinese_numerals.at(digit)+decimal_list.at(i)
}
else if i==0 {
result+="零"
}
}
}else{
result+="整"
}
result
}
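// A quick sanity check (hand-computed expectations, added for illustration;
// not part of the original file):
//   arabic_to_chinese(num_str: "42")          => 肆拾贰元整
//   arabic_to_chinese(num_str: "1234.56")     => 壹仟贰佰叁拾肆元伍角陆分
//   arabic_to_chinese(num_str: "1234567.89")  => 壹佰贰拾叁万肆仟伍佰陆拾柒元捌角玖分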
#set page(paper:"a4",flipped: true,margin:(top:75pt, bottom:0pt))
#let zh_hei = ("SimHei",)
#set align(center)
#let ts=0.3cm//title spacing
#set text(size:24pt,font: zh_hei,spacing:200%)
国#h(ts)家#h(ts)天#h(ts)文#h(ts)台#h(ts)医#h(ts)疗#h(ts)费#h(ts)报#h(ts)销#h(ts)单
#v(-0.5cm)
#line(stroke: 1.5pt, length:60%)
#set text(size:14pt,spacing:100%)
#set align(right)
#yyyy 年#mm 月#dd 日#h(5cm) 单据数#ninvoice 张#h(1cm)
#set align(left)
#grid(columns:(1.0cm,1cm,0.5cm,26cm),
[],
block()[
#set text(size:15pt,spacing:100%)
#path(fill:none, stroke: (dash:"loosely-dotted",thickness:1.5pt), closed:false, (8pt,0pt),(8pt,100pt))
装\ #v(1cm)订\ #v(1cm)线
#path(fill:none, stroke: (dash:"loosely-dotted",thickness:1.5pt), closed:false, (8pt,0pt),(8pt,100pt))
],[]
,[
#table(columns:(6cm,9cm,3.8cm,3.8cm),
align:horizon+center,
inset:(y:22pt),
[部~~~~~~~~~~~~~门],[#dep],[工~资~号],[#id],
[职~~~工~~~姓~~~名],[#name],[联系电话],[#phone],
[发~票~金~额~(大写)],[
#set text(size:13pt)
#arabic_to_chinese(num_str:total)],[报销比例],[],
[住院实报金额(大写)],[
#set text(size:13pt)
//#arabic_to_chinese(num_str:zhuyuan)
],[小写金额],[
//#zhuyuan
],
[门诊实报金额(大写)],[
#set text(size:13pt)
//#arabic_to_chinese(num_str:menzhen)
],[小写金额],[
//#menzhen
],
[医~~务~~室~~意~~见],[],[报~销~人],[],
)
#h(0.5cm)台领导#h(2.5cm)人事处#h(3.5cm)报销单位负责人#h(2.5cm)审核#h(2.5cm)出纳
]
)
|
|
https://github.com/piepert/philodidaktik-hro-phf-ifp | https://raw.githubusercontent.com/piepert/philodidaktik-hro-phf-ifp/main/src/parts/ephid/unterrichtsplanung/kurzentwurf.typ | typst | Other | #import "/src/template.typ": *
== #ix("Plan für eine Unterrichtsstunde", "Plan für eine Unterrichtsstunde", "Unterrichtsplan") <ephid-unterrichtsplanung>
In der Einführungsveranstaltung für die Philosophiedidaktik wird ein tabellarischer Aufbau zur Planung eines Unterrichtskurzentwurfes vorgeschlagen.
#align(center, block(align(left, table(columns: 6,
    stroke: none,
    column-gutter: 0.5em,
    ..([Time/Phase], [Content], [Teacher behavior\ (planned)], [Student behavior\ (expected)], [Methods\ /social form], [Media]).map(e => strong(e)),
    ..([...],)*6
))))
The columns contain the following (a hypothetical example row is shown below the list):

+ *Time/Phase:* phase of the lesson, time of day
+ *Content:* rough outline of the content
+ *Teacher behavior (planned):* task instructions and activities of the teacher
+ *Student behavior (expected):* the #ix("Erwartungshorizont") (expected responses) for the tasks
+ *Methods/social forms:* methods applied and the social form used
+ *Media:* materials and source references for the media
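
A single hypothetical row (invented here purely to show how the columns are used, not taken from an actual lesson draft) could look like this:

#align(center, block(align(left, table(columns: 6,
    stroke: none,
    column-gutter: 0.5em,
    [8:00--8:05\ opening], [thought experiment as a hook], [poses the opening question], [offer first intuitions], [plenary discussion], [blackboard]
))))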
In the standard model #ix("PEST"), the tabular short draft lists the following phases:

+ *Opening/motivation phase* (part of the problematization phase): in the motivation phase, the aim is to create intrinsic motivation in the students.
+ *Problematization phase:* the students work out the topic of the lesson and present it in its controversy.
+ *Elaboration phase:* once the problem has been brought out, the students work through it and build up competencies.
+ *Consolidation phase:* the consolidation phase lets the students compare and solidify what they have worked out.
+ *Transfer phase:* the acquired knowledge is applied to a new example to make sure that the acquired skills are transferable.
+ *Reserve/homework:* if the previous phases finish faster than planned, a reserve can hold further exercises, which may alternatively be assigned as homework.
Every rough draft includes a *topic*, a *#ix("Grobziel") (overall objective)*, and a *list of #ix("Feinzielen", "Feinziel") (fine-grained objectives)* to be achieved during the lesson. The lesson is placed in the context of a teaching unit. The chosen competencies must be adapted to the grade level and, in terms of the amount of content, to the length of the lesson.

On the following three pages you will find an example of a #ix("Plan for a single lesson", "Plan for a single lesson", "lesson plan") on the topic of #ix("epistemology") and #ix([Descartes'], "Descartes, René") #ix("cogito argument"), which formally meets the requirements of the exam.
<ephid-plan-descartes>
#include "/src/kurzentwuerfe/uebung01/main.typ" |
https://github.com/jgm/typst-hs | https://raw.githubusercontent.com/jgm/typst-hs/main/test/typ/compiler/show-text-05.typ | typst | Other | // See also: https://github.com/mTvare6/hello-world.rs
#show regex("(?i)rust"): it => [#it (🚀)]
Rust is memory-safe and blazingly fast. Let's rewrite everything in rust.
|
https://github.com/gvallinder/KTHThesis_Typst | https://raw.githubusercontent.com/gvallinder/KTHThesis_Typst/main/Chapter1.typ | typst | MIT License | #import "Template/kth_thesis.typ": chapter
#show: doc => chapter(title: "Introduction", doc)
#lorem(40) Something something footnote #footnote("Please note that this footnote is a footnote, not just a note in the footer.") something.
== Heading 2
#lorem(20)
Math is very easy to do in Typst, for example @MyEq,
$ sum_(k=0)^n k
&= 1 + ... + n \
&= (n(n+1)) / 2 $ <MyEq>
== Heading 2
#lorem(100) Here I am referring to my table, @MyTable.
#figure(
table(
columns: 2,
[*Lorem*], [567(89)],
[*Ipsum*], [0.12(34)],
align: center
),
caption: lorem(28)
) <MyTable>
#lorem(100) |
https://github.com/typst/packages | https://raw.githubusercontent.com/typst/packages/main/packages/preview/unichar/0.1.0/ucd/block-0E00.typ | typst | Apache License 2.0 | #let data = (
(),
("THAI CHARACTER KO KAI", "Lo", 0),
("THAI CHARACTER KHO KHAI", "Lo", 0),
("THAI CHARACTER KHO KHUAT", "Lo", 0),
("THAI CHARACTER KHO KHWAI", "Lo", 0),
("THAI CHARACTER KHO KHON", "Lo", 0),
("THAI CHARACTER KHO RAKHANG", "Lo", 0),
("THAI CHARACTER NGO NGU", "Lo", 0),
("THAI CHARACTER CHO CHAN", "Lo", 0),
("THAI CHARACTER CHO CHING", "Lo", 0),
("THAI CHARACTER CHO CHANG", "Lo", 0),
("THAI CHARACTER SO SO", "Lo", 0),
("THAI CHARACTER CHO CHOE", "Lo", 0),
("THAI CHARACTER YO YING", "Lo", 0),
("THAI CHARACTER DO CHADA", "Lo", 0),
("THAI CHARACTER TO PATAK", "Lo", 0),
("THAI CHARACTER THO THAN", "Lo", 0),
("THAI CHARACTER THO NANGMONTHO", "Lo", 0),
("THAI CHARACTER THO PHUTHAO", "Lo", 0),
("THAI CHARACTER NO NEN", "Lo", 0),
("THAI CHARACTER DO DEK", "Lo", 0),
("THAI CHARACTER TO TAO", "Lo", 0),
("THAI CHARACTER THO THUNG", "Lo", 0),
("THAI CHARACTER THO THAHAN", "Lo", 0),
("THAI CHARACTER THO THONG", "Lo", 0),
("THAI CHARACTER NO NU", "Lo", 0),
("THAI CHARACTER BO BAIMAI", "Lo", 0),
("THAI CHARACTER PO PLA", "Lo", 0),
("THAI CHARACTER PHO PHUNG", "Lo", 0),
("THAI CHARACTER FO FA", "Lo", 0),
("THAI CHARACTER PHO PHAN", "Lo", 0),
("THAI CHARACTER FO FAN", "Lo", 0),
("THAI CHARACTER PHO SAMPHAO", "Lo", 0),
("THAI CHARACTER MO MA", "Lo", 0),
("THAI CHARACTER YO YAK", "Lo", 0),
("THAI CHARACTER RO RUA", "Lo", 0),
("THAI CHARACTER RU", "Lo", 0),
("THAI CHARACTER LO LING", "Lo", 0),
("THAI CHARACTER LU", "Lo", 0),
("THAI CHARACTER WO WAEN", "Lo", 0),
("THAI CHARACTER SO SALA", "Lo", 0),
("THAI CHARACTER SO RUSI", "Lo", 0),
("THAI CHARACTER SO SUA", "Lo", 0),
("THAI CHARACTER HO HIP", "Lo", 0),
("THAI CHARACTER LO CHULA", "Lo", 0),
("THAI CHARACTER O ANG", "Lo", 0),
("THAI CHARACTER HO NOKHUK", "Lo", 0),
("THAI CHARACTER PAIYANNOI", "Lo", 0),
("THAI CHARACTER SARA A", "Lo", 0),
("THAI CHARACTER MAI HAN-AKAT", "Mn", 0),
("THAI CHARACTER SARA AA", "Lo", 0),
("THAI CHARACTER SARA AM", "Lo", 0),
("THAI CHARACTER SARA I", "Mn", 0),
("THAI CHARACTER SARA II", "Mn", 0),
("THAI CHARACTER SARA UE", "Mn", 0),
("THAI CHARACTER SARA UEE", "Mn", 0),
("THAI CHARACTER SARA U", "Mn", 103),
("THAI CHARACTER SARA UU", "Mn", 103),
("THAI CHARACTER PHINTHU", "Mn", 9),
(),
(),
(),
(),
("THAI CURRENCY SYMBOL BAHT", "Sc", 0),
("THAI CHARACTER SARA E", "Lo", 0),
("THAI CHARACTER SARA AE", "Lo", 0),
("THAI CHARACTER SARA O", "Lo", 0),
("THAI CHARACTER SARA AI MAIMUAN", "Lo", 0),
("THAI CHARACTER SARA AI MAIMALAI", "Lo", 0),
("THAI CHARACTER LAKKHANGYAO", "Lo", 0),
("THAI CHARACTER MAIYAMOK", "Lm", 0),
("THAI CHARACTER MAITAIKHU", "Mn", 0),
("THAI CHARACTER MAI EK", "Mn", 107),
("THAI CHARACTER MAI THO", "Mn", 107),
("THAI CHARACTER MAI TRI", "Mn", 107),
("THAI CHARACTER MAI CHATTAWA", "Mn", 107),
("THAI CHARACTER THANTHAKHAT", "Mn", 0),
("THAI CHARACTER NIKHAHIT", "Mn", 0),
("THAI CHARACTER YAMAKKAN", "Mn", 0),
("THAI CHARACTER FONGMAN", "Po", 0),
("THAI DIGIT ZERO", "Nd", 0),
("THAI DIGIT ONE", "Nd", 0),
("THAI DIGIT TWO", "Nd", 0),
("THAI DIGIT THREE", "Nd", 0),
("THAI DIGIT FOUR", "Nd", 0),
("THAI DIGIT FIVE", "Nd", 0),
("THAI DIGIT SIX", "Nd", 0),
("THAI DIGIT SEVEN", "Nd", 0),
("THAI DIGIT EIGHT", "Nd", 0),
("THAI DIGIT NINE", "Nd", 0),
("THAI CHARACTER ANGKHANKHU", "Po", 0),
("THAI CHARACTER KHOMUT", "Po", 0),
)
|
https://github.com/Slyde-R/not-jku-thesis-template | https://raw.githubusercontent.com/Slyde-R/not-jku-thesis-template/main/template.typ | typst | MIT No Attribution | #import "titlepage.typ": *
#import "disclaimer.typ": *
#import "acknowledgement.typ": *
#import "abstract.typ": *
#import "@preview/wordometer:0.1.2": word-count, total-words
#let jku-thesis(
thesis-type: "Bachelor/Master/etc",
degree: "The degree",
program: "The Program",
supervisor: "Your Supervisor",
advisors: ("The first advisor", "The second advisor"),
department: "The Department",
author: "<NAME>",
date: "The Submission Date",
place-of-submission: "Place of Submission", // for declaration
title: "Title",
abstract-en: [English Abstract],
abstract-de: none,
acknowledgements: none,
show-title-in-header: true,
draft: true,
body,
) = {
let draft_string = ""
if draft{
draft_string = "DRAFT - "
}
set document(author: author, title: draft_string + title)
set page(
numbering: "1", // this is necessary for the glossary
//number-align: center,
margin: (left: 2.5cm+1cm, // binding correction of 1cm for single sided printing
right: 2.5cm,
y: 2.9cm),
header: context{[
#if counter(page).get().first() > 2 [
#place(top+right ,float: false, dx: 0cm, dy:1cm)[
#box(width: 25%)[
#align(right)[#image("JKU.png", height: 35pt)]
]
]
]
#set text(8pt)
#if show-title-in-header [
#author - #title
]
#if draft [
DRAFT
]
]},
footer: context [//overwrite numbering
#text(size:9pt)[
#table(
stroke: none,
columns: (1fr, auto, 1fr),
align: (left, center, right),
inset: 5pt,
[],[],[],
)
]
]
)
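  // Note on the page setup above (comment added for clarity): the header
  // draws the JKU logo only from page 3 onward (counter(page).get().first()
  // > 2), keeping the title page clean, and the footer deliberately replaces
  // the default page numbering with an empty three-column table.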
titlepage(
thesis-type: thesis-type,
degree: degree,
program: program,
supervisor: supervisor,
advisors: advisors,
department: department,
author: author,
date: date,
title: title
)
pagebreak()
disclaimer(
date: date,
place-of-submission: place-of-submission,
thesis-type: thesis-type,
author: author,
)
if acknowledgements != none [ // optional
#acknowledgement(acknowledgements)
]
abstract(lang: "en")[#abstract-en]
if abstract-de != none [ // optional
#abstract(lang: "de")[#abstract-de]
]
counter(page).update(1)
body
} |
https://github.com/cwreed/cv | https://raw.githubusercontent.com/cwreed/cv/main/src/modules/skills.typ | typst | #import "../template.typ": cvSkill
#let programming-skill = cvSkill(type: [Programming], info: [Python, R, SQL, Bash])
#let ml-tools-skill = cvSkill(
type: [ML Tools],
info: [PyTorch (Lightning), scikit-learn, experiment tracking (e.g., Hydra, Guild AI), TensorRT],
)
#let devops-skill = cvSkill(type: [DevOps], info: [Git, Docker, Terraform, dbt, AWS])
#let statistics-skill = cvSkill(
type: [Statistics],
info: [Machine learning, deep learning, computer vision, time series, geospatial, probabilistic models, Bayesian inference, hypothesis testing, A/B testing, structural causal models, generative models],
)
#let other-skill = cvSkill(
type: [Other],
info: [Research, writing, public speaking, food systems, ecology, food security, climate change, music (bass)],
) |